dpdk/drivers/net/txgbe/txgbe_ethdev.c
   1/* SPDX-License-Identifier: BSD-3-Clause
   2 * Copyright(c) 2015-2020
   3 */
   4
   5#include <stdio.h>
   6#include <errno.h>
   7#include <stdint.h>
   8#include <string.h>
   9#include <rte_common.h>
  10#include <ethdev_pci.h>
  11
  12#include <rte_interrupts.h>
  13#include <rte_log.h>
  14#include <rte_debug.h>
  15#include <rte_pci.h>
  16#include <rte_memory.h>
  17#include <rte_eal.h>
  18#include <rte_alarm.h>
  19
  20#include "txgbe_logs.h"
  21#include "base/txgbe.h"
  22#include "txgbe_ethdev.h"
  23#include "txgbe_rxtx.h"
  24#include "txgbe_regs_group.h"
  25
  26static const struct reg_info txgbe_regs_general[] = {
  27        {TXGBE_RST, 1, 1, "TXGBE_RST"},
  28        {TXGBE_STAT, 1, 1, "TXGBE_STAT"},
  29        {TXGBE_PORTCTL, 1, 1, "TXGBE_PORTCTL"},
  30        {TXGBE_SDP, 1, 1, "TXGBE_SDP"},
  31        {TXGBE_SDPCTL, 1, 1, "TXGBE_SDPCTL"},
  32        {TXGBE_LEDCTL, 1, 1, "TXGBE_LEDCTL"},
  33        {0, 0, 0, ""}
  34};
  35
  36static const struct reg_info txgbe_regs_nvm[] = {
  37        {0, 0, 0, ""}
  38};
  39
  40static const struct reg_info txgbe_regs_interrupt[] = {
  41        {0, 0, 0, ""}
  42};
  43
  44static const struct reg_info txgbe_regs_fctl_others[] = {
  45        {0, 0, 0, ""}
  46};
  47
  48static const struct reg_info txgbe_regs_rxdma[] = {
  49        {0, 0, 0, ""}
  50};
  51
  52static const struct reg_info txgbe_regs_rx[] = {
  53        {0, 0, 0, ""}
  54};
  55
   56static const struct reg_info txgbe_regs_tx[] = {
  57        {0, 0, 0, ""}
  58};
  59
  60static const struct reg_info txgbe_regs_wakeup[] = {
  61        {0, 0, 0, ""}
  62};
  63
  64static const struct reg_info txgbe_regs_dcb[] = {
  65        {0, 0, 0, ""}
  66};
  67
  68static const struct reg_info txgbe_regs_mac[] = {
  69        {0, 0, 0, ""}
  70};
  71
  72static const struct reg_info txgbe_regs_diagnostic[] = {
  73        {0, 0, 0, ""},
  74};
  75
  76/* PF registers */
  77static const struct reg_info *txgbe_regs_others[] = {
  78                                txgbe_regs_general,
  79                                txgbe_regs_nvm,
  80                                txgbe_regs_interrupt,
  81                                txgbe_regs_fctl_others,
  82                                txgbe_regs_rxdma,
  83                                txgbe_regs_rx,
  84                                txgbe_regs_tx,
  85                                txgbe_regs_wakeup,
  86                                txgbe_regs_dcb,
  87                                txgbe_regs_mac,
  88                                txgbe_regs_diagnostic,
  89                                NULL};
  90
  91static int txgbe_fdir_filter_init(struct rte_eth_dev *eth_dev);
  92static int txgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev);
  93static int txgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev);
  94static int txgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev);
  95static int  txgbe_dev_set_link_up(struct rte_eth_dev *dev);
  96static int  txgbe_dev_set_link_down(struct rte_eth_dev *dev);
  97static int txgbe_dev_close(struct rte_eth_dev *dev);
  98static int txgbe_dev_link_update(struct rte_eth_dev *dev,
  99                                int wait_to_complete);
 100static int txgbe_dev_stats_reset(struct rte_eth_dev *dev);
 101static void txgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
 102static void txgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev,
 103                                        uint16_t queue);
 104
 105static void txgbe_dev_link_status_print(struct rte_eth_dev *dev);
 106static int txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
 107static int txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
 108static int txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
 109static int txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
 110static int txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
 111                                      struct rte_intr_handle *handle);
 112static void txgbe_dev_interrupt_handler(void *param);
 113static void txgbe_dev_interrupt_delayed_handler(void *param);
 114static void txgbe_configure_msix(struct rte_eth_dev *dev);
 115
 116static int txgbe_filter_restore(struct rte_eth_dev *dev);
 117static void txgbe_l2_tunnel_conf(struct rte_eth_dev *dev);
 118
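/*
 * Helpers for the per-queue VLAN strip bitmap: each queue maps to one
 * bit, indexed by queue number across the uint32_t words of the
 * hwstrip bitmap.
 */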
 119#define TXGBE_SET_HWSTRIP(h, q) do {\
 120                uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
 121                uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
 122                (h)->bitmap[idx] |= 1 << bit;\
 123        } while (0)
 124
 125#define TXGBE_CLEAR_HWSTRIP(h, q) do {\
 126                uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
 127                uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
 128                (h)->bitmap[idx] &= ~(1 << bit);\
 129        } while (0)
 130
 131#define TXGBE_GET_HWSTRIP(h, q, r) do {\
 132                uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
 133                uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
 134                (r) = (h)->bitmap[idx] >> bit & 1;\
 135        } while (0)
 136
 137/*
 138 * The set of PCI devices this driver supports
 139 */
 140static const struct rte_pci_id pci_id_txgbe_map[] = {
 141        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_RAPTOR_SFP) },
 142        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_WX1820_SFP) },
 143        { .vendor_id = 0, /* sentinel */ },
 144};
 145
 146static const struct rte_eth_desc_lim rx_desc_lim = {
 147        .nb_max = TXGBE_RING_DESC_MAX,
 148        .nb_min = TXGBE_RING_DESC_MIN,
 149        .nb_align = TXGBE_RXD_ALIGN,
 150};
 151
 152static const struct rte_eth_desc_lim tx_desc_lim = {
 153        .nb_max = TXGBE_RING_DESC_MAX,
 154        .nb_min = TXGBE_RING_DESC_MIN,
 155        .nb_align = TXGBE_TXD_ALIGN,
 156        .nb_seg_max = TXGBE_TX_MAX_SEG,
 157        .nb_mtu_seg_max = TXGBE_TX_MAX_SEG,
 158};
 159
 160static const struct eth_dev_ops txgbe_eth_dev_ops;
 161
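/*
 * Extended statistics tables: each entry maps an xstat name to the byte
 * offset of the corresponding counter inside struct txgbe_hw_stats.
 */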
 162#define HW_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, m)}
 163#define HW_XSTAT_NAME(m, n) {n, offsetof(struct txgbe_hw_stats, m)}
 164static const struct rte_txgbe_xstats_name_off rte_txgbe_stats_strings[] = {
 165        /* MNG RxTx */
 166        HW_XSTAT(mng_bmc2host_packets),
 167        HW_XSTAT(mng_host2bmc_packets),
 168        /* Basic RxTx */
 169        HW_XSTAT(rx_packets),
 170        HW_XSTAT(tx_packets),
 171        HW_XSTAT(rx_bytes),
 172        HW_XSTAT(tx_bytes),
 173        HW_XSTAT(rx_total_bytes),
 174        HW_XSTAT(rx_total_packets),
 175        HW_XSTAT(tx_total_packets),
 176        HW_XSTAT(rx_total_missed_packets),
 177        HW_XSTAT(rx_broadcast_packets),
 178        HW_XSTAT(rx_multicast_packets),
 179        HW_XSTAT(rx_management_packets),
 180        HW_XSTAT(tx_management_packets),
 181        HW_XSTAT(rx_management_dropped),
 182
 183        /* Basic Error */
 184        HW_XSTAT(rx_crc_errors),
 185        HW_XSTAT(rx_illegal_byte_errors),
 186        HW_XSTAT(rx_error_bytes),
 187        HW_XSTAT(rx_mac_short_packet_dropped),
 188        HW_XSTAT(rx_length_errors),
 189        HW_XSTAT(rx_undersize_errors),
 190        HW_XSTAT(rx_fragment_errors),
 191        HW_XSTAT(rx_oversize_errors),
 192        HW_XSTAT(rx_jabber_errors),
 193        HW_XSTAT(rx_l3_l4_xsum_error),
 194        HW_XSTAT(mac_local_errors),
 195        HW_XSTAT(mac_remote_errors),
 196
 197        /* Flow Director */
 198        HW_XSTAT(flow_director_added_filters),
 199        HW_XSTAT(flow_director_removed_filters),
 200        HW_XSTAT(flow_director_filter_add_errors),
 201        HW_XSTAT(flow_director_filter_remove_errors),
 202        HW_XSTAT(flow_director_matched_filters),
 203        HW_XSTAT(flow_director_missed_filters),
 204
 205        /* FCoE */
 206        HW_XSTAT(rx_fcoe_crc_errors),
 207        HW_XSTAT(rx_fcoe_mbuf_allocation_errors),
 208        HW_XSTAT(rx_fcoe_dropped),
 209        HW_XSTAT(rx_fcoe_packets),
 210        HW_XSTAT(tx_fcoe_packets),
 211        HW_XSTAT(rx_fcoe_bytes),
 212        HW_XSTAT(tx_fcoe_bytes),
 213        HW_XSTAT(rx_fcoe_no_ddp),
 214        HW_XSTAT(rx_fcoe_no_ddp_ext_buff),
 215
 216        /* MACSEC */
 217        HW_XSTAT(tx_macsec_pkts_untagged),
 218        HW_XSTAT(tx_macsec_pkts_encrypted),
 219        HW_XSTAT(tx_macsec_pkts_protected),
 220        HW_XSTAT(tx_macsec_octets_encrypted),
 221        HW_XSTAT(tx_macsec_octets_protected),
 222        HW_XSTAT(rx_macsec_pkts_untagged),
 223        HW_XSTAT(rx_macsec_pkts_badtag),
 224        HW_XSTAT(rx_macsec_pkts_nosci),
 225        HW_XSTAT(rx_macsec_pkts_unknownsci),
 226        HW_XSTAT(rx_macsec_octets_decrypted),
 227        HW_XSTAT(rx_macsec_octets_validated),
 228        HW_XSTAT(rx_macsec_sc_pkts_unchecked),
 229        HW_XSTAT(rx_macsec_sc_pkts_delayed),
 230        HW_XSTAT(rx_macsec_sc_pkts_late),
 231        HW_XSTAT(rx_macsec_sa_pkts_ok),
 232        HW_XSTAT(rx_macsec_sa_pkts_invalid),
 233        HW_XSTAT(rx_macsec_sa_pkts_notvalid),
 234        HW_XSTAT(rx_macsec_sa_pkts_unusedsa),
 235        HW_XSTAT(rx_macsec_sa_pkts_notusingsa),
 236
 237        /* MAC RxTx */
 238        HW_XSTAT(rx_size_64_packets),
 239        HW_XSTAT(rx_size_65_to_127_packets),
 240        HW_XSTAT(rx_size_128_to_255_packets),
 241        HW_XSTAT(rx_size_256_to_511_packets),
 242        HW_XSTAT(rx_size_512_to_1023_packets),
 243        HW_XSTAT(rx_size_1024_to_max_packets),
 244        HW_XSTAT(tx_size_64_packets),
 245        HW_XSTAT(tx_size_65_to_127_packets),
 246        HW_XSTAT(tx_size_128_to_255_packets),
 247        HW_XSTAT(tx_size_256_to_511_packets),
 248        HW_XSTAT(tx_size_512_to_1023_packets),
 249        HW_XSTAT(tx_size_1024_to_max_packets),
 250
 251        /* Flow Control */
 252        HW_XSTAT(tx_xon_packets),
 253        HW_XSTAT(rx_xon_packets),
 254        HW_XSTAT(tx_xoff_packets),
 255        HW_XSTAT(rx_xoff_packets),
 256
 257        HW_XSTAT_NAME(tx_xon_packets, "tx_flow_control_xon_packets"),
 258        HW_XSTAT_NAME(rx_xon_packets, "rx_flow_control_xon_packets"),
 259        HW_XSTAT_NAME(tx_xoff_packets, "tx_flow_control_xoff_packets"),
 260        HW_XSTAT_NAME(rx_xoff_packets, "rx_flow_control_xoff_packets"),
 261};
 262
 263#define TXGBE_NB_HW_STATS (sizeof(rte_txgbe_stats_strings) / \
 264                           sizeof(rte_txgbe_stats_strings[0]))
 265
 266/* Per-priority statistics */
 267#define UP_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, up[0].m)}
 268static const struct rte_txgbe_xstats_name_off rte_txgbe_up_strings[] = {
 269        UP_XSTAT(rx_up_packets),
 270        UP_XSTAT(tx_up_packets),
 271        UP_XSTAT(rx_up_bytes),
 272        UP_XSTAT(tx_up_bytes),
 273        UP_XSTAT(rx_up_drop_packets),
 274
 275        UP_XSTAT(tx_up_xon_packets),
 276        UP_XSTAT(rx_up_xon_packets),
 277        UP_XSTAT(tx_up_xoff_packets),
 278        UP_XSTAT(rx_up_xoff_packets),
 279        UP_XSTAT(rx_up_dropped),
 280        UP_XSTAT(rx_up_mbuf_alloc_errors),
 281        UP_XSTAT(tx_up_xon2off_packets),
 282};
 283
 284#define TXGBE_NB_UP_STATS (sizeof(rte_txgbe_up_strings) / \
 285                           sizeof(rte_txgbe_up_strings[0]))
 286
 287/* Per-queue statistics */
 288#define QP_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, qp[0].m)}
 289static const struct rte_txgbe_xstats_name_off rte_txgbe_qp_strings[] = {
 290        QP_XSTAT(rx_qp_packets),
 291        QP_XSTAT(tx_qp_packets),
 292        QP_XSTAT(rx_qp_bytes),
 293        QP_XSTAT(tx_qp_bytes),
 294        QP_XSTAT(rx_qp_mc_packets),
 295};
 296
 297#define TXGBE_NB_QP_STATS (sizeof(rte_txgbe_qp_strings) / \
 298                           sizeof(rte_txgbe_qp_strings[0]))
 299
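/* Return 1 if the PHY type indicates an SFP module, 0 otherwise. */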
 300static inline int
 301txgbe_is_sfp(struct txgbe_hw *hw)
 302{
 303        switch (hw->phy.type) {
 304        case txgbe_phy_sfp_avago:
 305        case txgbe_phy_sfp_ftl:
 306        case txgbe_phy_sfp_intel:
 307        case txgbe_phy_sfp_unknown:
 308        case txgbe_phy_sfp_tyco_passive:
 309        case txgbe_phy_sfp_unknown_passive:
 310                return 1;
 311        default:
 312                return 0;
 313        }
 314}
 315
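/*
 * Reset the PF MAC and set the PF Reset Done bit in PORTCTL so that
 * PF/VF mailbox operations can work; a missing SFP module is not
 * treated as an error.
 */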
 316static inline int32_t
 317txgbe_pf_reset_hw(struct txgbe_hw *hw)
 318{
 319        uint32_t ctrl_ext;
 320        int32_t status;
 321
 322        status = hw->mac.reset_hw(hw);
 323
 324        ctrl_ext = rd32(hw, TXGBE_PORTCTL);
 325        /* Set PF Reset Done bit so PF/VF Mail Ops can work */
 326        ctrl_ext |= TXGBE_PORTCTL_RSTDONE;
 327        wr32(hw, TXGBE_PORTCTL, ctrl_ext);
 328        txgbe_flush(hw);
 329
 330        if (status == TXGBE_ERR_SFP_NOT_PRESENT)
 331                status = 0;
 332        return status;
 333}
 334
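/*
 * Enable device interrupts: write the tracked misc mask to IENMISC and
 * clear both queue interrupt mask registers via IMC.
 */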
 335static inline void
 336txgbe_enable_intr(struct rte_eth_dev *dev)
 337{
 338        struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
 339        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
 340
 341        wr32(hw, TXGBE_IENMISC, intr->mask_misc);
 342        wr32(hw, TXGBE_IMC(0), TXGBE_IMC_MASK);
 343        wr32(hw, TXGBE_IMC(1), TXGBE_IMC_MASK);
 344        txgbe_flush(hw);
 345}
 346
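/*
 * Disable device interrupts: clear all misc interrupt enables and set
 * all bits in both queue interrupt mask registers via IMS.
 */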
 347static void
 348txgbe_disable_intr(struct txgbe_hw *hw)
 349{
 350        PMD_INIT_FUNC_TRACE();
 351
 352        wr32(hw, TXGBE_IENMISC, ~BIT_MASK32);
 353        wr32(hw, TXGBE_IMS(0), TXGBE_IMC_MASK);
 354        wr32(hw, TXGBE_IMS(1), TXGBE_IMC_MASK);
 355        txgbe_flush(hw);
 356}
 357
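/*
 * Map a RX/TX queue to a statistics counter index by updating the
 * driver's shadow copy of the RQSMR/TQSM mapping registers; only
 * supported on raptor MACs.
 */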
 358static int
 359txgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
 360                                  uint16_t queue_id,
 361                                  uint8_t stat_idx,
 362                                  uint8_t is_rx)
 363{
 364        struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
 365        struct txgbe_stat_mappings *stat_mappings =
 366                TXGBE_DEV_STAT_MAPPINGS(eth_dev);
 367        uint32_t qsmr_mask = 0;
 368        uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK;
 369        uint32_t q_map;
 370        uint8_t n, offset;
 371
 372        if (hw->mac.type != txgbe_mac_raptor)
 373                return -ENOSYS;
 374
  375        if (stat_idx & ~QMAP_FIELD_RESERVED_BITS_MASK)
 376                return -EIO;
 377
 378        PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d",
 379                     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
 380                     queue_id, stat_idx);
 381
 382        n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
 383        if (n >= TXGBE_NB_STAT_MAPPING) {
 384                PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
 385                return -EIO;
 386        }
 387        offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);
 388
 389        /* Now clear any previous stat_idx set */
 390        clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
 391        if (!is_rx)
 392                stat_mappings->tqsm[n] &= ~clearing_mask;
 393        else
 394                stat_mappings->rqsm[n] &= ~clearing_mask;
 395
 396        q_map = (uint32_t)stat_idx;
 397        q_map &= QMAP_FIELD_RESERVED_BITS_MASK;
 398        qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
 399        if (!is_rx)
 400                stat_mappings->tqsm[n] |= qsmr_mask;
 401        else
 402                stat_mappings->rqsm[n] |= qsmr_mask;
 403
 404        PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d",
 405                     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
 406                     queue_id, stat_idx);
 407        PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
 408                     is_rx ? stat_mappings->rqsm[n] : stat_mappings->tqsm[n]);
 409        return 0;
 410}
 411
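/*
 * Fill in the default DCB configuration: the maximum number of TCs with
 * a roughly equal bandwidth share each, PFC disabled, and all user
 * priorities mapped to TC0.
 */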
 412static void
 413txgbe_dcb_init(struct txgbe_hw *hw, struct txgbe_dcb_config *dcb_config)
 414{
 415        int i;
 416        u8 bwgp;
 417        struct txgbe_dcb_tc_config *tc;
 418
 419        UNREFERENCED_PARAMETER(hw);
 420
 421        dcb_config->num_tcs.pg_tcs = TXGBE_DCB_TC_MAX;
 422        dcb_config->num_tcs.pfc_tcs = TXGBE_DCB_TC_MAX;
 423        bwgp = (u8)(100 / TXGBE_DCB_TC_MAX);
 424        for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
 425                tc = &dcb_config->tc_config[i];
 426                tc->path[TXGBE_DCB_TX_CONFIG].bwg_id = i;
 427                tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent = bwgp + (i & 1);
 428                tc->path[TXGBE_DCB_RX_CONFIG].bwg_id = i;
 429                tc->path[TXGBE_DCB_RX_CONFIG].bwg_percent = bwgp + (i & 1);
 430                tc->pfc = txgbe_dcb_pfc_disabled;
 431        }
 432
 433        /* Initialize default user to priority mapping, UPx->TC0 */
 434        tc = &dcb_config->tc_config[0];
 435        tc->path[TXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
 436        tc->path[TXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
 437        for (i = 0; i < TXGBE_DCB_BWG_MAX; i++) {
 438                dcb_config->bw_percentage[i][TXGBE_DCB_TX_CONFIG] = 100;
 439                dcb_config->bw_percentage[i][TXGBE_DCB_RX_CONFIG] = 100;
 440        }
 441        dcb_config->rx_pba_cfg = txgbe_dcb_pba_equal;
 442        dcb_config->pfc_mode_enable = false;
 443        dcb_config->vt_mode = true;
 444        dcb_config->round_robin_enable = false;
 445        /* support all DCB capabilities */
 446        dcb_config->support.capabilities = 0xFF;
 447}
 448
 449/*
 450 * Ensure that all locks are released before first NVM or PHY access
 451 */
 452static void
 453txgbe_swfw_lock_reset(struct txgbe_hw *hw)
 454{
 455        uint16_t mask;
 456
 457        /*
 458         * These ones are more tricky since they are common to all ports; but
 459         * swfw_sync retries last long enough (1s) to be almost sure that if
  460         * the lock cannot be taken it is due to an improper lock of the
 461         * semaphore.
 462         */
 463        mask = TXGBE_MNGSEM_SWPHY |
 464               TXGBE_MNGSEM_SWMBX |
 465               TXGBE_MNGSEM_SWFLASH;
 466        if (hw->mac.acquire_swfw_sync(hw, mask) < 0)
 467                PMD_DRV_LOG(DEBUG, "SWFW common locks released");
 468
 469        hw->mac.release_swfw_sync(hw, mask);
 470}
 471
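/*
 * PF device initialisation: set up dev_ops and the Rx/Tx burst functions,
 * reserve the interrupt status block, initialise the base driver and
 * reset the hardware, allocate the MAC address and filter tables, and
 * register and enable the device interrupt handler.
 */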
 472static int
 473eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
 474{
 475        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
 476        struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
 477        struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(eth_dev);
 478        struct txgbe_hwstrip *hwstrip = TXGBE_DEV_HWSTRIP(eth_dev);
 479        struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(eth_dev);
 480        struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(eth_dev);
 481        struct txgbe_bw_conf *bw_conf = TXGBE_DEV_BW_CONF(eth_dev);
 482        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 483        const struct rte_memzone *mz;
 484        uint32_t ctrl_ext;
 485        uint16_t csum;
 486        int err, i, ret;
 487
 488        PMD_INIT_FUNC_TRACE();
 489
 490        eth_dev->dev_ops = &txgbe_eth_dev_ops;
 491        eth_dev->rx_queue_count       = txgbe_dev_rx_queue_count;
 492        eth_dev->rx_descriptor_status = txgbe_dev_rx_descriptor_status;
 493        eth_dev->tx_descriptor_status = txgbe_dev_tx_descriptor_status;
 494        eth_dev->rx_pkt_burst = &txgbe_recv_pkts;
 495        eth_dev->tx_pkt_burst = &txgbe_xmit_pkts;
 496        eth_dev->tx_pkt_prepare = &txgbe_prep_pkts;
 497
 498        /*
 499         * For secondary processes, we don't initialise any further as primary
 500         * has already done this work. Only check we don't need a different
 501         * RX and TX function.
 502         */
 503        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
 504                struct txgbe_tx_queue *txq;
  505                /* TX queue function in primary, set by last queue initialized;
  506                 * the Tx queue may not have been initialized by the primary process
 507                 */
 508                if (eth_dev->data->tx_queues) {
 509                        uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
 510                        txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
 511                        txgbe_set_tx_function(eth_dev, txq);
 512                } else {
 513                        /* Use default TX function if we get here */
 514                        PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
 515                                     "Using default TX function.");
 516                }
 517
 518                txgbe_set_rx_function(eth_dev);
 519
 520                return 0;
 521        }
 522
 523        rte_eth_copy_pci_info(eth_dev, pci_dev);
 524
 525        /* Vendor and Device ID need to be set before init of shared code */
 526        hw->device_id = pci_dev->id.device_id;
 527        hw->vendor_id = pci_dev->id.vendor_id;
 528        hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
 529        hw->allow_unsupported_sfp = 1;
 530
 531        /* Reserve memory for interrupt status block */
 532        mz = rte_eth_dma_zone_reserve(eth_dev, "txgbe_driver", -1,
 533                16, TXGBE_ALIGN, SOCKET_ID_ANY);
 534        if (mz == NULL)
 535                return -ENOMEM;
 536
 537        hw->isb_dma = TMZ_PADDR(mz);
 538        hw->isb_mem = TMZ_VADDR(mz);
 539
 540        /* Initialize the shared code (base driver) */
 541        err = txgbe_init_shared_code(hw);
 542        if (err != 0) {
 543                PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
 544                return -EIO;
 545        }
 546
 547        /* Unlock any pending hardware semaphore */
 548        txgbe_swfw_lock_reset(hw);
 549
 550#ifdef RTE_LIB_SECURITY
  551        /* Initialize security_ctx only for primary process */
 552        if (txgbe_ipsec_ctx_create(eth_dev))
 553                return -ENOMEM;
 554#endif
 555
  556        /* Initialize DCB configuration */
 557        memset(dcb_config, 0, sizeof(struct txgbe_dcb_config));
 558        txgbe_dcb_init(hw, dcb_config);
 559
 560        /* Get Hardware Flow Control setting */
 561        hw->fc.requested_mode = txgbe_fc_full;
 562        hw->fc.current_mode = txgbe_fc_full;
 563        hw->fc.pause_time = TXGBE_FC_PAUSE_TIME;
 564        for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
 565                hw->fc.low_water[i] = TXGBE_FC_XON_LOTH;
 566                hw->fc.high_water[i] = TXGBE_FC_XOFF_HITH;
 567        }
 568        hw->fc.send_xon = 1;
 569
 570        err = hw->rom.init_params(hw);
 571        if (err != 0) {
 572                PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
 573                return -EIO;
 574        }
 575
 576        /* Make sure we have a good EEPROM before we read from it */
 577        err = hw->rom.validate_checksum(hw, &csum);
 578        if (err != 0) {
 579                PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
 580                return -EIO;
 581        }
 582
 583        err = hw->mac.init_hw(hw);
 584
 585        /*
 586         * Devices with copper phys will fail to initialise if txgbe_init_hw()
 587         * is called too soon after the kernel driver unbinding/binding occurs.
 588         * The failure occurs in txgbe_identify_phy() for all devices,
  589         * but for non-copper devices, txgbe_identify_sfp_module() is
 590         * also called. See txgbe_identify_phy(). The reason for the
  591         * failure is not known, and only occurs when virtualisation features
  592         * are disabled in the BIOS. A delay of 200ms was found to be enough by
 593         * trial-and-error, and is doubled to be safe.
 594         */
 595        if (err && hw->phy.media_type == txgbe_media_type_copper) {
 596                rte_delay_ms(200);
 597                err = hw->mac.init_hw(hw);
 598        }
 599
 600        if (err == TXGBE_ERR_SFP_NOT_PRESENT)
 601                err = 0;
 602
 603        if (err == TXGBE_ERR_EEPROM_VERSION) {
 604                PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
 605                             "LOM.  Please be aware there may be issues associated "
 606                             "with your hardware.");
 607                PMD_INIT_LOG(ERR, "If you are experiencing problems "
 608                             "please contact your hardware representative "
 609                             "who provided you with this hardware.");
 610        } else if (err == TXGBE_ERR_SFP_NOT_SUPPORTED) {
 611                PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
 612        }
 613        if (err) {
 614                PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
 615                return -EIO;
 616        }
 617
 618        /* Reset the hw statistics */
 619        txgbe_dev_stats_reset(eth_dev);
 620
 621        /* disable interrupt */
 622        txgbe_disable_intr(hw);
 623
 624        /* Allocate memory for storing MAC addresses */
 625        eth_dev->data->mac_addrs = rte_zmalloc("txgbe", RTE_ETHER_ADDR_LEN *
 626                                               hw->mac.num_rar_entries, 0);
 627        if (eth_dev->data->mac_addrs == NULL) {
 628                PMD_INIT_LOG(ERR,
 629                             "Failed to allocate %u bytes needed to store "
 630                             "MAC addresses",
 631                             RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
 632                return -ENOMEM;
 633        }
 634
 635        /* Copy the permanent MAC address */
 636        rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
 637                        &eth_dev->data->mac_addrs[0]);
 638
 639        /* Allocate memory for storing hash filter MAC addresses */
 640        eth_dev->data->hash_mac_addrs = rte_zmalloc("txgbe",
 641                        RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC, 0);
 642        if (eth_dev->data->hash_mac_addrs == NULL) {
 643                PMD_INIT_LOG(ERR,
 644                             "Failed to allocate %d bytes needed to store MAC addresses",
 645                             RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC);
 646                return -ENOMEM;
 647        }
 648
 649        /* initialize the vfta */
 650        memset(shadow_vfta, 0, sizeof(*shadow_vfta));
 651
  652        /* initialize the hw strip bitmap */
 653        memset(hwstrip, 0, sizeof(*hwstrip));
 654
 655        /* initialize PF if max_vfs not zero */
 656        ret = txgbe_pf_host_init(eth_dev);
 657        if (ret) {
 658                rte_free(eth_dev->data->mac_addrs);
 659                eth_dev->data->mac_addrs = NULL;
 660                rte_free(eth_dev->data->hash_mac_addrs);
 661                eth_dev->data->hash_mac_addrs = NULL;
 662                return ret;
 663        }
 664
 665        ctrl_ext = rd32(hw, TXGBE_PORTCTL);
 666        /* let hardware know driver is loaded */
 667        ctrl_ext |= TXGBE_PORTCTL_DRVLOAD;
 668        /* Set PF Reset Done bit so PF/VF Mail Ops can work */
 669        ctrl_ext |= TXGBE_PORTCTL_RSTDONE;
 670        wr32(hw, TXGBE_PORTCTL, ctrl_ext);
 671        txgbe_flush(hw);
 672
 673        if (txgbe_is_sfp(hw) && hw->phy.sfp_type != txgbe_sfp_type_not_present)
 674                PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
 675                             (int)hw->mac.type, (int)hw->phy.type,
 676                             (int)hw->phy.sfp_type);
 677        else
 678                PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
 679                             (int)hw->mac.type, (int)hw->phy.type);
 680
 681        PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
 682                     eth_dev->data->port_id, pci_dev->id.vendor_id,
 683                     pci_dev->id.device_id);
 684
 685        rte_intr_callback_register(intr_handle,
 686                                   txgbe_dev_interrupt_handler, eth_dev);
 687
 688        /* enable uio/vfio intr/eventfd mapping */
 689        rte_intr_enable(intr_handle);
 690
 691        /* enable support intr */
 692        txgbe_enable_intr(eth_dev);
 693
 694        /* initialize filter info */
 695        memset(filter_info, 0,
 696               sizeof(struct txgbe_filter_info));
 697
 698        /* initialize 5tuple filter list */
 699        TAILQ_INIT(&filter_info->fivetuple_list);
 700
 701        /* initialize flow director filter list & hash */
 702        txgbe_fdir_filter_init(eth_dev);
 703
 704        /* initialize l2 tunnel filter list & hash */
 705        txgbe_l2_tn_filter_init(eth_dev);
 706
 707        /* initialize flow filter lists */
 708        txgbe_filterlist_init();
 709
 710        /* initialize bandwidth configuration info */
 711        memset(bw_conf, 0, sizeof(struct txgbe_bw_conf));
 712
 713        /* initialize Traffic Manager configuration */
 714        txgbe_tm_conf_init(eth_dev);
 715
 716        return 0;
 717}
 718
 719static int
 720eth_txgbe_dev_uninit(struct rte_eth_dev *eth_dev)
 721{
 722        PMD_INIT_FUNC_TRACE();
 723
 724        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
 725                return 0;
 726
 727        txgbe_dev_close(eth_dev);
 728
 729        return 0;
 730}
 731
 732static int txgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev)
 733{
 734        struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(eth_dev);
 735        struct txgbe_5tuple_filter *p_5tuple;
 736
 737        while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) {
 738                TAILQ_REMOVE(&filter_info->fivetuple_list,
 739                             p_5tuple,
 740                             entries);
 741                rte_free(p_5tuple);
 742        }
 743        memset(filter_info->fivetuple_mask, 0,
 744               sizeof(uint32_t) * TXGBE_5TUPLE_ARRAY_SIZE);
 745
 746        return 0;
 747}
 748
 749static int txgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev)
 750{
 751        struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(eth_dev);
 752        struct txgbe_fdir_filter *fdir_filter;
 753
 754        if (fdir_info->hash_map)
 755                rte_free(fdir_info->hash_map);
 756        if (fdir_info->hash_handle)
 757                rte_hash_free(fdir_info->hash_handle);
 758
 759        while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
 760                TAILQ_REMOVE(&fdir_info->fdir_list,
 761                             fdir_filter,
 762                             entries);
 763                rte_free(fdir_filter);
 764        }
 765
 766        return 0;
 767}
 768
 769static int txgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev)
 770{
 771        struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(eth_dev);
 772        struct txgbe_l2_tn_filter *l2_tn_filter;
 773
 774        if (l2_tn_info->hash_map)
 775                rte_free(l2_tn_info->hash_map);
 776        if (l2_tn_info->hash_handle)
 777                rte_hash_free(l2_tn_info->hash_handle);
 778
 779        while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
 780                TAILQ_REMOVE(&l2_tn_info->l2_tn_list,
 781                             l2_tn_filter,
 782                             entries);
 783                rte_free(l2_tn_filter);
 784        }
 785
 786        return 0;
 787}
 788
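/*
 * Create the flow director filter list together with the hash table and
 * lookup map used to track entries, keyed by struct txgbe_atr_input.
 */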
 789static int txgbe_fdir_filter_init(struct rte_eth_dev *eth_dev)
 790{
 791        struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(eth_dev);
 792        char fdir_hash_name[RTE_HASH_NAMESIZE];
 793        struct rte_hash_parameters fdir_hash_params = {
 794                .name = fdir_hash_name,
 795                .entries = TXGBE_MAX_FDIR_FILTER_NUM,
 796                .key_len = sizeof(struct txgbe_atr_input),
 797                .hash_func = rte_hash_crc,
 798                .hash_func_init_val = 0,
 799                .socket_id = rte_socket_id(),
 800        };
 801
 802        TAILQ_INIT(&fdir_info->fdir_list);
 803        snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
 804                 "fdir_%s", TDEV_NAME(eth_dev));
 805        fdir_info->hash_handle = rte_hash_create(&fdir_hash_params);
 806        if (!fdir_info->hash_handle) {
 807                PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
 808                return -EINVAL;
 809        }
 810        fdir_info->hash_map = rte_zmalloc("txgbe",
 811                                          sizeof(struct txgbe_fdir_filter *) *
 812                                          TXGBE_MAX_FDIR_FILTER_NUM,
 813                                          0);
 814        if (!fdir_info->hash_map) {
 815                PMD_INIT_LOG(ERR,
 816                             "Failed to allocate memory for fdir hash map!");
 817                return -ENOMEM;
 818        }
 819        fdir_info->mask_added = FALSE;
 820
 821        return 0;
 822}
 823
 824static int txgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)
 825{
 826        struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(eth_dev);
 827        char l2_tn_hash_name[RTE_HASH_NAMESIZE];
 828        struct rte_hash_parameters l2_tn_hash_params = {
 829                .name = l2_tn_hash_name,
 830                .entries = TXGBE_MAX_L2_TN_FILTER_NUM,
 831                .key_len = sizeof(struct txgbe_l2_tn_key),
 832                .hash_func = rte_hash_crc,
 833                .hash_func_init_val = 0,
 834                .socket_id = rte_socket_id(),
 835        };
 836
 837        TAILQ_INIT(&l2_tn_info->l2_tn_list);
 838        snprintf(l2_tn_hash_name, RTE_HASH_NAMESIZE,
 839                 "l2_tn_%s", TDEV_NAME(eth_dev));
 840        l2_tn_info->hash_handle = rte_hash_create(&l2_tn_hash_params);
 841        if (!l2_tn_info->hash_handle) {
 842                PMD_INIT_LOG(ERR, "Failed to create L2 TN hash table!");
 843                return -EINVAL;
 844        }
 845        l2_tn_info->hash_map = rte_zmalloc("txgbe",
 846                                   sizeof(struct txgbe_l2_tn_filter *) *
 847                                   TXGBE_MAX_L2_TN_FILTER_NUM,
 848                                   0);
 849        if (!l2_tn_info->hash_map) {
 850                PMD_INIT_LOG(ERR,
 851                        "Failed to allocate memory for L2 TN hash map!");
 852                return -ENOMEM;
 853        }
 854        l2_tn_info->e_tag_en = FALSE;
 855        l2_tn_info->e_tag_fwd_en = FALSE;
 856        l2_tn_info->e_tag_ether_type = RTE_ETHER_TYPE_ETAG;
 857
 858        return 0;
 859}
 860
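/*
 * PCI probe: parse optional device arguments and create the PF ethdev
 * through eth_txgbe_dev_init().
 */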
 861static int
 862eth_txgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 863                struct rte_pci_device *pci_dev)
 864{
 865        struct rte_eth_dev *pf_ethdev;
 866        struct rte_eth_devargs eth_da;
 867        int retval;
 868
 869        if (pci_dev->device.devargs) {
 870                retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
 871                                &eth_da);
 872                if (retval)
 873                        return retval;
 874        } else {
 875                memset(&eth_da, 0, sizeof(eth_da));
 876        }
 877
 878        retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
 879                        sizeof(struct txgbe_adapter),
 880                        eth_dev_pci_specific_init, pci_dev,
 881                        eth_txgbe_dev_init, NULL);
 882
 883        if (retval || eth_da.nb_representor_ports < 1)
 884                return retval;
 885
 886        pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
 887        if (pf_ethdev == NULL)
 888                return -ENODEV;
 889
 890        return 0;
 891}
 892
 893static int eth_txgbe_pci_remove(struct rte_pci_device *pci_dev)
 894{
 895        struct rte_eth_dev *ethdev;
 896
 897        ethdev = rte_eth_dev_allocated(pci_dev->device.name);
 898        if (!ethdev)
 899                return -ENODEV;
 900
 901        return rte_eth_dev_destroy(ethdev, eth_txgbe_dev_uninit);
 902}
 903
 904static struct rte_pci_driver rte_txgbe_pmd = {
 905        .id_table = pci_id_txgbe_map,
 906        .drv_flags = RTE_PCI_DRV_NEED_MAPPING |
 907                     RTE_PCI_DRV_INTR_LSC,
 908        .probe = eth_txgbe_pci_probe,
 909        .remove = eth_txgbe_pci_remove,
 910};
 911
 912static int
 913txgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 914{
 915        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
 916        struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(dev);
 917        uint32_t vfta;
 918        uint32_t vid_idx;
 919        uint32_t vid_bit;
 920
 921        vid_idx = (uint32_t)((vlan_id >> 5) & 0x7F);
 922        vid_bit = (uint32_t)(1 << (vlan_id & 0x1F));
 923        vfta = rd32(hw, TXGBE_VLANTBL(vid_idx));
 924        if (on)
 925                vfta |= vid_bit;
 926        else
 927                vfta &= ~vid_bit;
 928        wr32(hw, TXGBE_VLANTBL(vid_idx), vfta);
 929
 930        /* update local VFTA copy */
 931        shadow_vfta->vfta[vid_idx] = vfta;
 932
 933        return 0;
 934}
 935
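/*
 * Enable or disable VLAN stripping on a single Rx queue. If the ring is
 * currently enabled and the strip setting changes, the queue is stopped,
 * its base address and RXCFG are reprogrammed, and it is restarted.
 */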
 936static void
 937txgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
 938{
 939        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
 940        struct txgbe_rx_queue *rxq;
 941        bool restart;
 942        uint32_t rxcfg, rxbal, rxbah;
 943
 944        if (on)
 945                txgbe_vlan_hw_strip_enable(dev, queue);
 946        else
 947                txgbe_vlan_hw_strip_disable(dev, queue);
 948
 949        rxq = dev->data->rx_queues[queue];
 950        rxbal = rd32(hw, TXGBE_RXBAL(rxq->reg_idx));
 951        rxbah = rd32(hw, TXGBE_RXBAH(rxq->reg_idx));
 952        rxcfg = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
 953        if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
 954                restart = (rxcfg & TXGBE_RXCFG_ENA) &&
 955                        !(rxcfg & TXGBE_RXCFG_VLAN);
 956                rxcfg |= TXGBE_RXCFG_VLAN;
 957        } else {
 958                restart = (rxcfg & TXGBE_RXCFG_ENA) &&
 959                        (rxcfg & TXGBE_RXCFG_VLAN);
 960                rxcfg &= ~TXGBE_RXCFG_VLAN;
 961        }
 962        rxcfg &= ~TXGBE_RXCFG_ENA;
 963
 964        if (restart) {
 965                /* set vlan strip for ring */
 966                txgbe_dev_rx_queue_stop(dev, queue);
 967                wr32(hw, TXGBE_RXBAL(rxq->reg_idx), rxbal);
 968                wr32(hw, TXGBE_RXBAH(rxq->reg_idx), rxbah);
 969                wr32(hw, TXGBE_RXCFG(rxq->reg_idx), rxcfg);
 970                txgbe_dev_rx_queue_start(dev, queue);
 971        }
 972}
 973
 974static int
 975txgbe_vlan_tpid_set(struct rte_eth_dev *dev,
 976                    enum rte_vlan_type vlan_type,
 977                    uint16_t tpid)
 978{
 979        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
 980        int ret = 0;
 981        uint32_t portctrl, vlan_ext, qinq;
 982
 983        portctrl = rd32(hw, TXGBE_PORTCTL);
 984
 985        vlan_ext = (portctrl & TXGBE_PORTCTL_VLANEXT);
 986        qinq = vlan_ext && (portctrl & TXGBE_PORTCTL_QINQ);
 987        switch (vlan_type) {
 988        case ETH_VLAN_TYPE_INNER:
 989                if (vlan_ext) {
 990                        wr32m(hw, TXGBE_VLANCTL,
 991                                TXGBE_VLANCTL_TPID_MASK,
 992                                TXGBE_VLANCTL_TPID(tpid));
 993                        wr32m(hw, TXGBE_DMATXCTRL,
 994                                TXGBE_DMATXCTRL_TPID_MASK,
 995                                TXGBE_DMATXCTRL_TPID(tpid));
 996                } else {
 997                        ret = -ENOTSUP;
 998                        PMD_DRV_LOG(ERR, "Inner type is not supported"
 999                                    " by single VLAN");
1000                }
1001
1002                if (qinq) {
1003                        wr32m(hw, TXGBE_TAGTPID(0),
1004                                TXGBE_TAGTPID_LSB_MASK,
1005                                TXGBE_TAGTPID_LSB(tpid));
1006                }
1007                break;
1008        case ETH_VLAN_TYPE_OUTER:
1009                if (vlan_ext) {
 1010                        /* Only the high 16 bits are valid */
1011                        wr32m(hw, TXGBE_EXTAG,
1012                                TXGBE_EXTAG_VLAN_MASK,
1013                                TXGBE_EXTAG_VLAN(tpid));
1014                } else {
1015                        wr32m(hw, TXGBE_VLANCTL,
1016                                TXGBE_VLANCTL_TPID_MASK,
1017                                TXGBE_VLANCTL_TPID(tpid));
1018                        wr32m(hw, TXGBE_DMATXCTRL,
1019                                TXGBE_DMATXCTRL_TPID_MASK,
1020                                TXGBE_DMATXCTRL_TPID(tpid));
1021                }
1022
1023                if (qinq) {
1024                        wr32m(hw, TXGBE_TAGTPID(0),
1025                                TXGBE_TAGTPID_MSB_MASK,
1026                                TXGBE_TAGTPID_MSB(tpid));
1027                }
1028                break;
1029        default:
1030                PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
1031                return -EINVAL;
1032        }
1033
1034        return ret;
1035}
1036
1037void
1038txgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
1039{
1040        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1041        uint32_t vlnctrl;
1042
1043        PMD_INIT_FUNC_TRACE();
1044
1045        /* Filter Table Disable */
1046        vlnctrl = rd32(hw, TXGBE_VLANCTL);
1047        vlnctrl &= ~TXGBE_VLANCTL_VFE;
1048        wr32(hw, TXGBE_VLANCTL, vlnctrl);
1049}
1050
1051void
1052txgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
1053{
1054        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1055        struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(dev);
1056        uint32_t vlnctrl;
1057        uint16_t i;
1058
1059        PMD_INIT_FUNC_TRACE();
1060
1061        /* Filter Table Enable */
1062        vlnctrl = rd32(hw, TXGBE_VLANCTL);
1063        vlnctrl &= ~TXGBE_VLANCTL_CFIENA;
1064        vlnctrl |= TXGBE_VLANCTL_VFE;
1065        wr32(hw, TXGBE_VLANCTL, vlnctrl);
1066
1067        /* write whatever is in local vfta copy */
1068        for (i = 0; i < TXGBE_VFTA_SIZE; i++)
1069                wr32(hw, TXGBE_VLANTBL(i), shadow_vfta->vfta[i]);
1070}
1071
1072void
1073txgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
1074{
1075        struct txgbe_hwstrip *hwstrip = TXGBE_DEV_HWSTRIP(dev);
1076        struct txgbe_rx_queue *rxq;
1077
1078        if (queue >= TXGBE_MAX_RX_QUEUE_NUM)
1079                return;
1080
1081        if (on)
1082                TXGBE_SET_HWSTRIP(hwstrip, queue);
1083        else
1084                TXGBE_CLEAR_HWSTRIP(hwstrip, queue);
1085
1086        if (queue >= dev->data->nb_rx_queues)
1087                return;
1088
1089        rxq = dev->data->rx_queues[queue];
1090
1091        if (on) {
1092                rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
1093                rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
1094        } else {
1095                rxq->vlan_flags = PKT_RX_VLAN;
1096                rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
1097        }
1098}
1099
1100static void
1101txgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
1102{
1103        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1104        uint32_t ctrl;
1105
1106        PMD_INIT_FUNC_TRACE();
1107
1108        ctrl = rd32(hw, TXGBE_RXCFG(queue));
1109        ctrl &= ~TXGBE_RXCFG_VLAN;
1110        wr32(hw, TXGBE_RXCFG(queue), ctrl);
1111
 1112        /* record this setting in the per-queue HW strip bitmap */
1113        txgbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
1114}
1115
1116static void
1117txgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
1118{
1119        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1120        uint32_t ctrl;
1121
1122        PMD_INIT_FUNC_TRACE();
1123
1124        ctrl = rd32(hw, TXGBE_RXCFG(queue));
1125        ctrl |= TXGBE_RXCFG_VLAN;
1126        wr32(hw, TXGBE_RXCFG(queue), ctrl);
1127
 1128        /* record this setting in the per-queue HW strip bitmap */
1129        txgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
1130}
1131
1132static void
1133txgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
1134{
1135        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1136        uint32_t ctrl;
1137
1138        PMD_INIT_FUNC_TRACE();
1139
1140        ctrl = rd32(hw, TXGBE_PORTCTL);
1141        ctrl &= ~TXGBE_PORTCTL_VLANEXT;
1142        ctrl &= ~TXGBE_PORTCTL_QINQ;
1143        wr32(hw, TXGBE_PORTCTL, ctrl);
1144}
1145
1146static void
1147txgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
1148{
1149        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1150        struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
1151        struct rte_eth_txmode *txmode = &dev->data->dev_conf.txmode;
1152        uint32_t ctrl;
1153
1154        PMD_INIT_FUNC_TRACE();
1155
1156        ctrl  = rd32(hw, TXGBE_PORTCTL);
1157        ctrl |= TXGBE_PORTCTL_VLANEXT;
1158        if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP ||
1159            txmode->offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
1160                ctrl |= TXGBE_PORTCTL_QINQ;
1161        wr32(hw, TXGBE_PORTCTL, ctrl);
1162}
1163
1164void
1165txgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
1166{
1167        struct txgbe_rx_queue *rxq;
1168        uint16_t i;
1169
1170        PMD_INIT_FUNC_TRACE();
1171
1172        for (i = 0; i < dev->data->nb_rx_queues; i++) {
1173                rxq = dev->data->rx_queues[i];
1174
1175                if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
1176                        txgbe_vlan_strip_queue_set(dev, i, 1);
1177                else
1178                        txgbe_vlan_strip_queue_set(dev, i, 0);
1179        }
1180}
1181
1182void
1183txgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
1184{
1185        uint16_t i;
1186        struct rte_eth_rxmode *rxmode;
1187        struct txgbe_rx_queue *rxq;
1188
1189        if (mask & ETH_VLAN_STRIP_MASK) {
1190                rxmode = &dev->data->dev_conf.rxmode;
1191                if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
1192                        for (i = 0; i < dev->data->nb_rx_queues; i++) {
1193                                rxq = dev->data->rx_queues[i];
1194                                rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
1195                        }
1196                else
1197                        for (i = 0; i < dev->data->nb_rx_queues; i++) {
1198                                rxq = dev->data->rx_queues[i];
1199                                rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
1200                        }
1201        }
1202}
1203
1204static int
1205txgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
1206{
1207        struct rte_eth_rxmode *rxmode;
1208        rxmode = &dev->data->dev_conf.rxmode;
1209
1210        if (mask & ETH_VLAN_STRIP_MASK)
1211                txgbe_vlan_hw_strip_config(dev);
1212
1213        if (mask & ETH_VLAN_FILTER_MASK) {
1214                if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
1215                        txgbe_vlan_hw_filter_enable(dev);
1216                else
1217                        txgbe_vlan_hw_filter_disable(dev);
1218        }
1219
1220        if (mask & ETH_VLAN_EXTEND_MASK) {
1221                if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
1222                        txgbe_vlan_hw_extend_enable(dev);
1223                else
1224                        txgbe_vlan_hw_extend_disable(dev);
1225        }
1226
1227        return 0;
1228}
1229
1230static int
1231txgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1232{
1233        txgbe_config_vlan_strip_on_all_queues(dev, mask);
1234
1235        txgbe_vlan_offload_config(dev, mask);
1236
1237        return 0;
1238}
1239
1240static void
1241txgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
1242{
1243        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
 1244        /* VLANCTL: enable vlan filtering and allow all vlan tags through */
1245        uint32_t vlanctrl = rd32(hw, TXGBE_VLANCTL);
1246
1247        vlanctrl |= TXGBE_VLANCTL_VFE; /* enable vlan filters */
1248        wr32(hw, TXGBE_VLANCTL, vlanctrl);
1249}
1250
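/*
 * Validate the number of RSS queues requested per VF pool and derive the
 * SR-IOV pool layout: 1 or 2 queues select 64 pools, 4 queues select
 * 32 pools; any other value is rejected.
 */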
1251static int
1252txgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
1253{
1254        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1255
1256        switch (nb_rx_q) {
1257        case 1:
1258        case 2:
1259                RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
1260                break;
1261        case 4:
1262                RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
1263                break;
1264        default:
1265                return -EINVAL;
1266        }
1267
1268        RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool =
1269                TXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
1270        RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
1271                pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
1272        return 0;
1273}
1274
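/*
 * Sanity-check the requested Rx/Tx multi-queue modes and queue counts,
 * covering both the SR-IOV case (VMDq variants only) and the
 * non-SR-IOV VMDq/DCB configurations.
 */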
1275static int
1276txgbe_check_mq_mode(struct rte_eth_dev *dev)
1277{
1278        struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
1279        uint16_t nb_rx_q = dev->data->nb_rx_queues;
1280        uint16_t nb_tx_q = dev->data->nb_tx_queues;
1281
1282        if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
1283                /* check multi-queue mode */
1284                switch (dev_conf->rxmode.mq_mode) {
1285                case ETH_MQ_RX_VMDQ_DCB:
1286                        PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
1287                        break;
1288                case ETH_MQ_RX_VMDQ_DCB_RSS:
 1289                        /* DCB/RSS VMDQ in SRIOV mode, not implemented yet */
1290                        PMD_INIT_LOG(ERR, "SRIOV active,"
1291                                        " unsupported mq_mode rx %d.",
1292                                        dev_conf->rxmode.mq_mode);
1293                        return -EINVAL;
1294                case ETH_MQ_RX_RSS:
1295                case ETH_MQ_RX_VMDQ_RSS:
1296                        dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
1297                        if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
1298                                if (txgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
1299                                        PMD_INIT_LOG(ERR, "SRIOV is active,"
1300                                                " invalid queue number"
1301                                                " for VMDQ RSS, allowed"
 1302                                                " values are 1, 2 or 4.");
1303                                        return -EINVAL;
1304                                }
1305                        break;
1306                case ETH_MQ_RX_VMDQ_ONLY:
1307                case ETH_MQ_RX_NONE:
 1308                        /* if no mq mode is configured, use the default scheme */
1309                        dev->data->dev_conf.rxmode.mq_mode =
1310                                ETH_MQ_RX_VMDQ_ONLY;
1311                        break;
1312                default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
1313                        /* SRIOV only works in VMDq enable mode */
1314                        PMD_INIT_LOG(ERR, "SRIOV is active,"
1315                                        " wrong mq_mode rx %d.",
1316                                        dev_conf->rxmode.mq_mode);
1317                        return -EINVAL;
1318                }
1319
1320                switch (dev_conf->txmode.mq_mode) {
1321                case ETH_MQ_TX_VMDQ_DCB:
1322                        PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
1323                        dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1324                        break;
1325                default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
1326                        dev->data->dev_conf.txmode.mq_mode =
1327                                ETH_MQ_TX_VMDQ_ONLY;
1328                        break;
1329                }
1330
1331                /* check valid queue number */
1332                if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
1333                    (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
1334                        PMD_INIT_LOG(ERR, "SRIOV is active,"
1335                                        " nb_rx_q=%d nb_tx_q=%d queue number"
1336                                        " must be less than or equal to %d.",
1337                                        nb_rx_q, nb_tx_q,
1338                                        RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
1339                        return -EINVAL;
1340                }
1341        } else {
1342                if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
1343                        PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
1344                                          " not supported.");
1345                        return -EINVAL;
1346                }
 1347                /* check configuration for VMDQ+DCB mode */
1348                if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
1349                        const struct rte_eth_vmdq_dcb_conf *conf;
1350
1351                        if (nb_rx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
1352                                PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.",
1353                                                TXGBE_VMDQ_DCB_NB_QUEUES);
1354                                return -EINVAL;
1355                        }
1356                        conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
1357                        if (!(conf->nb_queue_pools == ETH_16_POOLS ||
1358                               conf->nb_queue_pools == ETH_32_POOLS)) {
1359                                PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
1360                                                " nb_queue_pools must be %d or %d.",
1361                                                ETH_16_POOLS, ETH_32_POOLS);
1362                                return -EINVAL;
1363                        }
1364                }
1365                if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
1366                        const struct rte_eth_vmdq_dcb_tx_conf *conf;
1367
1368                        if (nb_tx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
1369                                PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d",
1370                                                 TXGBE_VMDQ_DCB_NB_QUEUES);
1371                                return -EINVAL;
1372                        }
1373                        conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
1374                        if (!(conf->nb_queue_pools == ETH_16_POOLS ||
1375                               conf->nb_queue_pools == ETH_32_POOLS)) {
1376                                PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
1377                                                " nb_queue_pools != %d and"
1378                                                " nb_queue_pools != %d.",
1379                                                ETH_16_POOLS, ETH_32_POOLS);
1380                                return -EINVAL;
1381                        }
1382                }
1383
1384                /* For DCB mode check our configuration before we go further */
1385                if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
1386                        const struct rte_eth_dcb_rx_conf *conf;
1387
1388                        conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
1389                        if (!(conf->nb_tcs == ETH_4_TCS ||
1390                               conf->nb_tcs == ETH_8_TCS)) {
1391                                PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
1392                                                " and nb_tcs != %d.",
1393                                                ETH_4_TCS, ETH_8_TCS);
1394                                return -EINVAL;
1395                        }
1396                }
1397
1398                if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
1399                        const struct rte_eth_dcb_tx_conf *conf;
1400
1401                        conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
1402                        if (!(conf->nb_tcs == ETH_4_TCS ||
1403                               conf->nb_tcs == ETH_8_TCS)) {
1404                                PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
1405                                                " and nb_tcs != %d.",
1406                                                ETH_4_TCS, ETH_8_TCS);
1407                                return -EINVAL;
1408                        }
1409                }
1410        }
1411        return 0;
1412}
1413
1414static int
1415txgbe_dev_configure(struct rte_eth_dev *dev)
1416{
1417        struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
1418        struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
1419        int ret;
1420
1421        PMD_INIT_FUNC_TRACE();
1422
1423        if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
1424                dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
1425
1426        /* multiple queue mode checking */
1427        ret  = txgbe_check_mq_mode(dev);
1428        if (ret != 0) {
1429                PMD_DRV_LOG(ERR, "txgbe_check_mq_mode fails with %d.",
1430                            ret);
1431                return ret;
1432        }
1433
1434        /* set flag to update link status after init */
1435        intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;
1436
1437        /*
1438         * Initialize to TRUE. If any Rx queue fails to meet the bulk
1439         * allocation preconditions, this flag will be reset.
1440         */
1441        adapter->rx_bulk_alloc_allowed = true;
1442
1443        return 0;
1444}
1445
1446static void
1447txgbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
1448{
1449        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1450        struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
1451        uint32_t gpie;
1452
1453        gpie = rd32(hw, TXGBE_GPIOINTEN);
1454        gpie |= TXGBE_GPIOBIT_6;
1455        wr32(hw, TXGBE_GPIOINTEN, gpie);
1456        intr->mask_misc |= TXGBE_ICRMISC_GPIO;
1457}
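/*
 * The GPIO interrupt (bit 6) enabled here is reported through the misc
 * interrupt vector: txgbe_dev_interrupt_get_status() maps TXGBE_ICRMISC_GPIO
 * to TXGBE_FLAG_PHY_INTERRUPT, which txgbe_dev_interrupt_action() then
 * services via hw->phy.handle_lasi().
 */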
1458
1459int
1460txgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
1461                        uint16_t tx_rate, uint64_t q_msk)
1462{
1463        struct txgbe_hw *hw;
1464        struct txgbe_vf_info *vfinfo;
1465        struct rte_eth_link link;
1466        uint8_t  nb_q_per_pool;
1467        uint32_t queue_stride;
1468        uint32_t queue_idx, idx = 0, vf_idx;
1469        uint32_t queue_end;
1470        uint16_t total_rate = 0;
1471        struct rte_pci_device *pci_dev;
1472        int ret;
1473
1474        pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1475        ret = rte_eth_link_get_nowait(dev->data->port_id, &link);
1476        if (ret < 0)
1477                return ret;
1478
1479        if (vf >= pci_dev->max_vfs)
1480                return -EINVAL;
1481
1482        if (tx_rate > link.link_speed)
1483                return -EINVAL;
1484
1485        if (q_msk == 0)
1486                return 0;
1487
1488        hw = TXGBE_DEV_HW(dev);
1489        vfinfo = *(TXGBE_DEV_VFDATA(dev));
1490        nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
1491        queue_stride = TXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
1492        queue_idx = vf * queue_stride;
1493        queue_end = queue_idx + nb_q_per_pool - 1;
1494        if (queue_end >= hw->mac.max_tx_queues)
1495                return -EINVAL;
1496
1497        if (vfinfo) {
1498                for (vf_idx = 0; vf_idx < pci_dev->max_vfs; vf_idx++) {
1499                        if (vf_idx == vf)
1500                                continue;
1501                        for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
1502                                idx++)
1503                                total_rate += vfinfo[vf_idx].tx_rate[idx];
1504                }
1505        } else {
1506                return -EINVAL;
1507        }
1508
1509        /* Store tx_rate for this vf. */
1510        for (idx = 0; idx < nb_q_per_pool; idx++) {
1511                if (((uint64_t)0x1 << idx) & q_msk) {
1512                        if (vfinfo[vf].tx_rate[idx] != tx_rate)
1513                                vfinfo[vf].tx_rate[idx] = tx_rate;
1514                        total_rate += tx_rate;
1515                }
1516        }
1517
1518        if (total_rate > dev->data->dev_link.link_speed) {
1519                /* Reset the stored Tx rate of the VF if it would exceed
1520                 * the link speed.
1521                 */
1522                memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
1523                return -EINVAL;
1524        }
1525
1526        /* Set ARBTXRATE of each queue/pool for vf X  */
1527        for (; queue_idx <= queue_end; queue_idx++) {
1528                if (0x1 & q_msk)
1529                        txgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
1530                q_msk = q_msk >> 1;
1531        }
1532
1533        return 0;
1534}
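/*
 * Note on q_msk (illustrative, values are hypothetical): bit i of q_msk
 * selects the i-th queue of the VF's pool, counted from queue_idx
 * (vf * queue_stride). For example, with 4 queues per pool, q_msk = 0x5
 * would apply tx_rate only to the pool's first and third queues, while the
 * per-queue rates of all VFs are still summed and checked against the
 * current link speed.
 */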
1535
1536/*
1537 * Configure device link speed and setup link.
1538 * It returns 0 on success.
1539 */
1540static int
1541txgbe_dev_start(struct rte_eth_dev *dev)
1542{
1543        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1544        struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
1545        struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(dev);
1546        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1547        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1548        uint32_t intr_vector = 0;
1549        int err;
1550        bool link_up = false, negotiate = 0;
1551        uint32_t speed = 0;
1552        uint32_t allowed_speeds = 0;
1553        int mask = 0;
1554        int status;
1555        uint16_t vf, idx;
1556        uint32_t *link_speeds;
1557        struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
1558
1559        PMD_INIT_FUNC_TRACE();
1560
1561        /* TXGBE devices don't support:
1562         *    - half duplex (checked afterwards for valid speeds)
1563         *    - fixed speed: TODO implement
1564         */
1565        if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
1566                PMD_INIT_LOG(ERR,
1567                "Invalid link_speeds for port %u, fixed speed not supported",
1568                                dev->data->port_id);
1569                return -EINVAL;
1570        }
1571
1572        /* Stop the link setup handler before resetting the HW. */
1573        rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);
1574
1575        /* disable uio/vfio intr/eventfd mapping */
1576        rte_intr_disable(intr_handle);
1577
1578        /* stop adapter */
1579        hw->adapter_stopped = 0;
1580        txgbe_stop_hw(hw);
1581
1582        /* reinitialize the adapter;
1583         * this resets and then starts the hardware
1584         */
1585        hw->nb_rx_queues = dev->data->nb_rx_queues;
1586        hw->nb_tx_queues = dev->data->nb_tx_queues;
1587        status = txgbe_pf_reset_hw(hw);
1588        if (status != 0)
1589                return -1;
1590        hw->mac.start_hw(hw);
1591        hw->mac.get_link_status = true;
1592
1593        /* configure PF module if SRIOV enabled */
1594        txgbe_pf_host_configure(dev);
1595
1596        txgbe_dev_phy_intr_setup(dev);
1597
1598        /* check and configure queue intr-vector mapping */
1599        if ((rte_intr_cap_multiple(intr_handle) ||
1600             !RTE_ETH_DEV_SRIOV(dev).active) &&
1601            dev->data->dev_conf.intr_conf.rxq != 0) {
1602                intr_vector = dev->data->nb_rx_queues;
1603                if (rte_intr_efd_enable(intr_handle, intr_vector))
1604                        return -1;
1605        }
1606
1607        if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
1608                intr_handle->intr_vec =
1609                        rte_zmalloc("intr_vec",
1610                                    dev->data->nb_rx_queues * sizeof(int), 0);
1611                if (intr_handle->intr_vec == NULL) {
1612                        PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
1613                                     " intr_vec", dev->data->nb_rx_queues);
1614                        return -ENOMEM;
1615                }
1616        }
1617
1618        /* configure MSI-X for sleep until Rx interrupt */
1619        txgbe_configure_msix(dev);
1620
1621        /* initialize transmission unit */
1622        txgbe_dev_tx_init(dev);
1623
1624        /* This can fail when allocating mbufs for descriptor rings */
1625        err = txgbe_dev_rx_init(dev);
1626        if (err) {
1627                PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
1628                goto error;
1629        }
1630
1631        mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
1632                ETH_VLAN_EXTEND_MASK;
1633        err = txgbe_vlan_offload_config(dev, mask);
1634        if (err) {
1635                PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
1636                goto error;
1637        }
1638
1639        if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
1640                /* Enable vlan filtering for VMDq */
1641                txgbe_vmdq_vlan_hw_filter_enable(dev);
1642        }
1643
1644        /* Configure DCB hw */
1645        txgbe_configure_pb(dev);
1646        txgbe_configure_port(dev);
1647        txgbe_configure_dcb(dev);
1648
1649        if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
1650                err = txgbe_fdir_configure(dev);
1651                if (err)
1652                        goto error;
1653        }
1654
1655        /* Restore vf rate limit */
1656        if (vfinfo != NULL) {
1657                for (vf = 0; vf < pci_dev->max_vfs; vf++)
1658                        for (idx = 0; idx < TXGBE_MAX_QUEUE_NUM_PER_VF; idx++)
1659                                if (vfinfo[vf].tx_rate[idx] != 0)
1660                                        txgbe_set_vf_rate_limit(dev, vf,
1661                                                vfinfo[vf].tx_rate[idx],
1662                                                1 << idx);
1663        }
1664
1665        err = txgbe_dev_rxtx_start(dev);
1666        if (err < 0) {
1667                PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
1668                goto error;
1669        }
1670
1671        /* Skip link setup if loopback mode is enabled. */
1672        if (hw->mac.type == txgbe_mac_raptor &&
1673            dev->data->dev_conf.lpbk_mode)
1674                goto skip_link_setup;
1675
1676        if (txgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
1677                err = hw->mac.setup_sfp(hw);
1678                if (err)
1679                        goto error;
1680        }
1681
1682        if (hw->phy.media_type == txgbe_media_type_copper) {
1683                /* Turn on the copper */
1684                hw->phy.set_phy_power(hw, true);
1685        } else {
1686                /* Turn on the laser */
1687                hw->mac.enable_tx_laser(hw);
1688        }
1689
1690        err = hw->mac.check_link(hw, &speed, &link_up, 0);
1691        if (err)
1692                goto error;
1693        dev->data->dev_link.link_status = link_up;
1694
1695        err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
1696        if (err)
1697                goto error;
1698
1699        allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
1700                        ETH_LINK_SPEED_10G;
1701
1702        link_speeds = &dev->data->dev_conf.link_speeds;
1703        if (*link_speeds & ~allowed_speeds) {
1704                PMD_INIT_LOG(ERR, "Invalid link setting");
1705                goto error;
1706        }
1707
1708        speed = 0x0;
1709        if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
1710                speed = (TXGBE_LINK_SPEED_100M_FULL |
1711                         TXGBE_LINK_SPEED_1GB_FULL |
1712                         TXGBE_LINK_SPEED_10GB_FULL);
1713        } else {
1714                if (*link_speeds & ETH_LINK_SPEED_10G)
1715                        speed |= TXGBE_LINK_SPEED_10GB_FULL;
1716                if (*link_speeds & ETH_LINK_SPEED_5G)
1717                        speed |= TXGBE_LINK_SPEED_5GB_FULL;
1718                if (*link_speeds & ETH_LINK_SPEED_2_5G)
1719                        speed |= TXGBE_LINK_SPEED_2_5GB_FULL;
1720                if (*link_speeds & ETH_LINK_SPEED_1G)
1721                        speed |= TXGBE_LINK_SPEED_1GB_FULL;
1722                if (*link_speeds & ETH_LINK_SPEED_100M)
1723                        speed |= TXGBE_LINK_SPEED_100M_FULL;
1724        }
1725
1726        err = hw->mac.setup_link(hw, speed, link_up);
1727        if (err)
1728                goto error;
1729
1730skip_link_setup:
1731
1732        if (rte_intr_allow_others(intr_handle)) {
1733                /* check if lsc interrupt is enabled */
1734                if (dev->data->dev_conf.intr_conf.lsc != 0)
1735                        txgbe_dev_lsc_interrupt_setup(dev, TRUE);
1736                else
1737                        txgbe_dev_lsc_interrupt_setup(dev, FALSE);
1738                txgbe_dev_macsec_interrupt_setup(dev);
1739                txgbe_set_ivar_map(hw, -1, 1, TXGBE_MISC_VEC_ID);
1740        } else {
1741                rte_intr_callback_unregister(intr_handle,
1742                                             txgbe_dev_interrupt_handler, dev);
1743                if (dev->data->dev_conf.intr_conf.lsc != 0)
1744                        PMD_INIT_LOG(INFO, "LSC won't be enabled because"
1745                                     " interrupt multiplexing is unavailable");
1746        }
1747
1748        /* check if rxq interrupt is enabled */
1749        if (dev->data->dev_conf.intr_conf.rxq != 0 &&
1750            rte_intr_dp_is_en(intr_handle))
1751                txgbe_dev_rxq_interrupt_setup(dev);
1752
1753        /* enable uio/vfio intr/eventfd mapping */
1754        rte_intr_enable(intr_handle);
1755
1756        /* resume interrupts that were enabled before the hw reset */
1757        txgbe_enable_intr(dev);
1758        txgbe_l2_tunnel_conf(dev);
1759        txgbe_filter_restore(dev);
1760
1761        if (tm_conf->root && !tm_conf->committed)
1762                PMD_DRV_LOG(WARNING,
1763                            "please call hierarchy_commit() "
1764                            "before starting the port");
1765
1766        /*
1767         * Update link status right before return, because it may
1768         * start link configuration process in a separate thread.
1769         */
1770        txgbe_dev_link_update(dev, 0);
1771
1772        wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_ORD_MASK);
1773
1774        txgbe_read_stats_registers(hw, hw_stats);
1775        hw->offset_loaded = 1;
1776
1777        return 0;
1778
1779error:
1780        PMD_INIT_LOG(ERR, "failure in dev start: %d", err);
1781        txgbe_dev_clear_queues(dev);
1782        return -EIO;
1783}
1784
1785/*
1786 * Stop device: disable rx and tx functions to allow for reconfiguring.
1787 */
1788static int
1789txgbe_dev_stop(struct rte_eth_dev *dev)
1790{
1791        struct rte_eth_link link;
1792        struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
1793        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1794        struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(dev);
1795        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1796        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1797        int vf;
1798        struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
1799
1800        if (hw->adapter_stopped)
1801                return 0;
1802
1803        PMD_INIT_FUNC_TRACE();
1804
1805        rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);
1806
1807        /* disable interrupts */
1808        txgbe_disable_intr(hw);
1809
1810        /* reset the NIC */
1811        txgbe_pf_reset_hw(hw);
1812        hw->adapter_stopped = 0;
1813
1814        /* stop adapter */
1815        txgbe_stop_hw(hw);
1816
1817        for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++)
1818                vfinfo[vf].clear_to_send = false;
1819
1820        if (hw->phy.media_type == txgbe_media_type_copper) {
1821                /* Turn off the copper */
1822                hw->phy.set_phy_power(hw, false);
1823        } else {
1824                /* Turn off the laser */
1825                hw->mac.disable_tx_laser(hw);
1826        }
1827
1828        txgbe_dev_clear_queues(dev);
1829
1830        /* Clear stored conf */
1831        dev->data->scattered_rx = 0;
1832        dev->data->lro = 0;
1833
1834        /* Clear recorded link status */
1835        memset(&link, 0, sizeof(link));
1836        rte_eth_linkstatus_set(dev, &link);
1837
1838        if (!rte_intr_allow_others(intr_handle))
1839                /* restore the default interrupt handler */
1840                rte_intr_callback_register(intr_handle,
1841                                           txgbe_dev_interrupt_handler,
1842                                           (void *)dev);
1843
1844        /* Clean datapath event and queue/vec mapping */
1845        rte_intr_efd_disable(intr_handle);
1846        if (intr_handle->intr_vec != NULL) {
1847                rte_free(intr_handle->intr_vec);
1848                intr_handle->intr_vec = NULL;
1849        }
1850
1851        /* reset hierarchy commit */
1852        tm_conf->committed = false;
1853
1854        adapter->rss_reta_updated = 0;
1855        wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_SEL_MASK);
1856
1857        hw->adapter_stopped = true;
1858        dev->data->dev_started = 0;
1859
1860        return 0;
1861}
1862
1863/*
1864 * Set device link up: enable tx.
1865 */
1866static int
1867txgbe_dev_set_link_up(struct rte_eth_dev *dev)
1868{
1869        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1870
1871        if (hw->phy.media_type == txgbe_media_type_copper) {
1872                /* Turn on the copper */
1873                hw->phy.set_phy_power(hw, true);
1874        } else {
1875                /* Turn on the laser */
1876                hw->mac.enable_tx_laser(hw);
1877                txgbe_dev_link_update(dev, 0);
1878        }
1879
1880        return 0;
1881}
1882
1883/*
1884 * Set device link down: disable tx.
1885 */
1886static int
1887txgbe_dev_set_link_down(struct rte_eth_dev *dev)
1888{
1889        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1890
1891        if (hw->phy.media_type == txgbe_media_type_copper) {
1892                /* Turn off the copper */
1893                hw->phy.set_phy_power(hw, false);
1894        } else {
1895                /* Turn off the laser */
1896                hw->mac.disable_tx_laser(hw);
1897                txgbe_dev_link_update(dev, 0);
1898        }
1899
1900        return 0;
1901}
1902
1903/*
1904 * Reset and stop device.
1905 */
1906static int
1907txgbe_dev_close(struct rte_eth_dev *dev)
1908{
1909        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1910        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1911        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1912        int retries = 0;
1913        int ret;
1914
1915        PMD_INIT_FUNC_TRACE();
1916
1917        txgbe_pf_reset_hw(hw);
1918
1919        ret = txgbe_dev_stop(dev);
1920
1921        txgbe_dev_free_queues(dev);
1922
1923        /* reprogram the RAR[0] in case user changed it. */
1924        txgbe_set_rar(hw, 0, hw->mac.addr, 0, true);
1925
1926        /* Unlock any pending hardware semaphore */
1927        txgbe_swfw_lock_reset(hw);
1928
1929        /* disable uio intr before callback unregister */
1930        rte_intr_disable(intr_handle);
1931
1932        do {
1933                ret = rte_intr_callback_unregister(intr_handle,
1934                                txgbe_dev_interrupt_handler, dev);
1935                if (ret >= 0 || ret == -ENOENT) {
1936                        break;
1937                } else if (ret != -EAGAIN) {
1938                        PMD_INIT_LOG(ERR,
1939                                "intr callback unregister failed: %d",
1940                                ret);
1941                }
1942                rte_delay_ms(100);
1943        } while (retries++ < (10 + TXGBE_LINK_UP_TIME));
1944
1945        /* cancel the delayed handler before removing the device */
1946        rte_eal_alarm_cancel(txgbe_dev_interrupt_delayed_handler, dev);
1947
1948        /* uninitialize the PF if max_vfs is not zero */
1949        txgbe_pf_host_uninit(dev);
1950
1951        rte_free(dev->data->mac_addrs);
1952        dev->data->mac_addrs = NULL;
1953
1954        rte_free(dev->data->hash_mac_addrs);
1955        dev->data->hash_mac_addrs = NULL;
1956
1957        /* remove all the fdir filters & hash */
1958        txgbe_fdir_filter_uninit(dev);
1959
1960        /* remove all the L2 tunnel filters & hash */
1961        txgbe_l2_tn_filter_uninit(dev);
1962
1963        /* Remove all ntuple filters of the device */
1964        txgbe_ntuple_filter_uninit(dev);
1965
1966        /* clear all the filters list */
1967        txgbe_filterlist_flush();
1968
1969        /* Remove all Traffic Manager configuration */
1970        txgbe_tm_conf_uninit(dev);
1971
1972#ifdef RTE_LIB_SECURITY
1973        rte_free(dev->security_ctx);
1974#endif
1975
1976        return ret;
1977}
1978
1979/*
1980 * Reset PF device.
1981 */
1982static int
1983txgbe_dev_reset(struct rte_eth_dev *dev)
1984{
1985        int ret;
1986
1987        /* When a DPDK PMD PF begin to reset PF port, it should notify all
1988         * its VF to make them align with it. The detailed notification
1989         * mechanism is PMD specific. As to txgbe PF, it is rather complex.
1990         * To avoid unexpected behavior in VF, currently reset of PF with
1991         * SR-IOV activation is not supported. It might be supported later.
1992         */
1993        if (dev->data->sriov.active)
1994                return -ENOTSUP;
1995
1996        ret = eth_txgbe_dev_uninit(dev);
1997        if (ret)
1998                return ret;
1999
2000        ret = eth_txgbe_dev_init(dev, NULL);
2001
2002        return ret;
2003}
2004
2005#define UPDATE_QP_COUNTER_32bit(reg, last_counter, counter)     \
2006        {                                                       \
2007                uint32_t current_counter = rd32(hw, reg);       \
2008                if (current_counter < last_counter)             \
2009                        current_counter += 0x100000000LL;       \
2010                if (!hw->offset_loaded)                         \
2011                        last_counter = current_counter;         \
2012                counter = current_counter - last_counter;       \
2013                counter &= 0xFFFFFFFFLL;                        \
2014        }
2015
2016#define UPDATE_QP_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
2017        {                                                                \
2018                uint64_t current_counter_lsb = rd32(hw, reg_lsb);        \
2019                uint64_t current_counter_msb = rd32(hw, reg_msb);        \
2020                uint64_t current_counter = (current_counter_msb << 32) | \
2021                        current_counter_lsb;                             \
2022                if (current_counter < last_counter)                      \
2023                        current_counter += 0x1000000000LL;               \
2024                if (!hw->offset_loaded)                                  \
2025                        last_counter = current_counter;                  \
2026                counter = current_counter - last_counter;                \
2027                counter &= 0xFFFFFFFFFLL;                                \
2028        }
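/*
 * The two macros above report a queue counter relative to a baseline: when
 * hw->offset_loaded is 0 (e.g. right after a stats reset) the current
 * hardware reading is captured as the baseline, and subsequent reads report
 * current - baseline. If the current reading is below the baseline, the
 * counter is assumed to have wrapped once past 2^32 (or 2^36 for the split
 * LSB/MSB byte counters) and the modulus is added back before subtracting.
 */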
2029
2030void
2031txgbe_read_stats_registers(struct txgbe_hw *hw,
2032                           struct txgbe_hw_stats *hw_stats)
2033{
2034        unsigned int i;
2035
2036        /* QP Stats */
2037        for (i = 0; i < hw->nb_rx_queues; i++) {
2038                UPDATE_QP_COUNTER_32bit(TXGBE_QPRXPKT(i),
2039                        hw->qp_last[i].rx_qp_packets,
2040                        hw_stats->qp[i].rx_qp_packets);
2041                UPDATE_QP_COUNTER_36bit(TXGBE_QPRXOCTL(i), TXGBE_QPRXOCTH(i),
2042                        hw->qp_last[i].rx_qp_bytes,
2043                        hw_stats->qp[i].rx_qp_bytes);
2044                UPDATE_QP_COUNTER_32bit(TXGBE_QPRXMPKT(i),
2045                        hw->qp_last[i].rx_qp_mc_packets,
2046                        hw_stats->qp[i].rx_qp_mc_packets);
2047        }
2048
2049        for (i = 0; i < hw->nb_tx_queues; i++) {
2050                UPDATE_QP_COUNTER_32bit(TXGBE_QPTXPKT(i),
2051                        hw->qp_last[i].tx_qp_packets,
2052                        hw_stats->qp[i].tx_qp_packets);
2053                UPDATE_QP_COUNTER_36bit(TXGBE_QPTXOCTL(i), TXGBE_QPTXOCTH(i),
2054                        hw->qp_last[i].tx_qp_bytes,
2055                        hw_stats->qp[i].tx_qp_bytes);
2056        }
2057        /* PB Stats */
2058        for (i = 0; i < TXGBE_MAX_UP; i++) {
2059                hw_stats->up[i].rx_up_xon_packets +=
2060                                rd32(hw, TXGBE_PBRXUPXON(i));
2061                hw_stats->up[i].rx_up_xoff_packets +=
2062                                rd32(hw, TXGBE_PBRXUPXOFF(i));
2063                hw_stats->up[i].tx_up_xon_packets +=
2064                                rd32(hw, TXGBE_PBTXUPXON(i));
2065                hw_stats->up[i].tx_up_xoff_packets +=
2066                                rd32(hw, TXGBE_PBTXUPXOFF(i));
2067                hw_stats->up[i].tx_up_xon2off_packets +=
2068                                rd32(hw, TXGBE_PBTXUPOFF(i));
2069                hw_stats->up[i].rx_up_dropped +=
2070                                rd32(hw, TXGBE_PBRXMISS(i));
2071        }
2072        hw_stats->rx_xon_packets += rd32(hw, TXGBE_PBRXLNKXON);
2073        hw_stats->rx_xoff_packets += rd32(hw, TXGBE_PBRXLNKXOFF);
2074        hw_stats->tx_xon_packets += rd32(hw, TXGBE_PBTXLNKXON);
2075        hw_stats->tx_xoff_packets += rd32(hw, TXGBE_PBTXLNKXOFF);
2076
2077        /* DMA Stats */
2078        hw_stats->rx_packets += rd32(hw, TXGBE_DMARXPKT);
2079        hw_stats->tx_packets += rd32(hw, TXGBE_DMATXPKT);
2080
2081        hw_stats->rx_bytes += rd64(hw, TXGBE_DMARXOCTL);
2082        hw_stats->tx_bytes += rd64(hw, TXGBE_DMATXOCTL);
2083        hw_stats->rx_drop_packets += rd32(hw, TXGBE_PBRXDROP);
2084
2085        /* MAC Stats */
2086        hw_stats->rx_crc_errors += rd64(hw, TXGBE_MACRXERRCRCL);
2087        hw_stats->rx_multicast_packets += rd64(hw, TXGBE_MACRXMPKTL);
2088        hw_stats->tx_multicast_packets += rd64(hw, TXGBE_MACTXMPKTL);
2089
2090        hw_stats->rx_total_packets += rd64(hw, TXGBE_MACRXPKTL);
2091        hw_stats->tx_total_packets += rd64(hw, TXGBE_MACTXPKTL);
2092        hw_stats->rx_total_bytes += rd64(hw, TXGBE_MACRXGBOCTL);
2093
2094        hw_stats->rx_broadcast_packets += rd64(hw, TXGBE_MACRXOCTL);
2095        hw_stats->tx_broadcast_packets += rd32(hw, TXGBE_MACTXOCTL);
2096
2097        hw_stats->rx_size_64_packets += rd64(hw, TXGBE_MACRX1TO64L);
2098        hw_stats->rx_size_65_to_127_packets += rd64(hw, TXGBE_MACRX65TO127L);
2099        hw_stats->rx_size_128_to_255_packets += rd64(hw, TXGBE_MACRX128TO255L);
2100        hw_stats->rx_size_256_to_511_packets += rd64(hw, TXGBE_MACRX256TO511L);
2101        hw_stats->rx_size_512_to_1023_packets +=
2102                        rd64(hw, TXGBE_MACRX512TO1023L);
2103        hw_stats->rx_size_1024_to_max_packets +=
2104                        rd64(hw, TXGBE_MACRX1024TOMAXL);
2105        hw_stats->tx_size_64_packets += rd64(hw, TXGBE_MACTX1TO64L);
2106        hw_stats->tx_size_65_to_127_packets += rd64(hw, TXGBE_MACTX65TO127L);
2107        hw_stats->tx_size_128_to_255_packets += rd64(hw, TXGBE_MACTX128TO255L);
2108        hw_stats->tx_size_256_to_511_packets += rd64(hw, TXGBE_MACTX256TO511L);
2109        hw_stats->tx_size_512_to_1023_packets +=
2110                        rd64(hw, TXGBE_MACTX512TO1023L);
2111        hw_stats->tx_size_1024_to_max_packets +=
2112                        rd64(hw, TXGBE_MACTX1024TOMAXL);
2113
2114        hw_stats->rx_undersize_errors += rd64(hw, TXGBE_MACRXERRLENL);
2115        hw_stats->rx_oversize_errors += rd32(hw, TXGBE_MACRXOVERSIZE);
2116        hw_stats->rx_jabber_errors += rd32(hw, TXGBE_MACRXJABBER);
2117
2118        /* MNG Stats */
2119        hw_stats->mng_bmc2host_packets = rd32(hw, TXGBE_MNGBMC2OS);
2120        hw_stats->mng_host2bmc_packets = rd32(hw, TXGBE_MNGOS2BMC);
2121        hw_stats->rx_management_packets = rd32(hw, TXGBE_DMARXMNG);
2122        hw_stats->tx_management_packets = rd32(hw, TXGBE_DMATXMNG);
2123
2124        /* FCoE Stats */
2125        hw_stats->rx_fcoe_crc_errors += rd32(hw, TXGBE_FCOECRC);
2126        hw_stats->rx_fcoe_mbuf_allocation_errors += rd32(hw, TXGBE_FCOELAST);
2127        hw_stats->rx_fcoe_dropped += rd32(hw, TXGBE_FCOERPDC);
2128        hw_stats->rx_fcoe_packets += rd32(hw, TXGBE_FCOEPRC);
2129        hw_stats->tx_fcoe_packets += rd32(hw, TXGBE_FCOEPTC);
2130        hw_stats->rx_fcoe_bytes += rd32(hw, TXGBE_FCOEDWRC);
2131        hw_stats->tx_fcoe_bytes += rd32(hw, TXGBE_FCOEDWTC);
2132
2133        /* Flow Director Stats */
2134        hw_stats->flow_director_matched_filters += rd32(hw, TXGBE_FDIRMATCH);
2135        hw_stats->flow_director_missed_filters += rd32(hw, TXGBE_FDIRMISS);
2136        hw_stats->flow_director_added_filters +=
2137                TXGBE_FDIRUSED_ADD(rd32(hw, TXGBE_FDIRUSED));
2138        hw_stats->flow_director_removed_filters +=
2139                TXGBE_FDIRUSED_REM(rd32(hw, TXGBE_FDIRUSED));
2140        hw_stats->flow_director_filter_add_errors +=
2141                TXGBE_FDIRFAIL_ADD(rd32(hw, TXGBE_FDIRFAIL));
2142        hw_stats->flow_director_filter_remove_errors +=
2143                TXGBE_FDIRFAIL_REM(rd32(hw, TXGBE_FDIRFAIL));
2144
2145        /* MACsec Stats */
2146        hw_stats->tx_macsec_pkts_untagged += rd32(hw, TXGBE_LSECTX_UTPKT);
2147        hw_stats->tx_macsec_pkts_encrypted +=
2148                        rd32(hw, TXGBE_LSECTX_ENCPKT);
2149        hw_stats->tx_macsec_pkts_protected +=
2150                        rd32(hw, TXGBE_LSECTX_PROTPKT);
2151        hw_stats->tx_macsec_octets_encrypted +=
2152                        rd32(hw, TXGBE_LSECTX_ENCOCT);
2153        hw_stats->tx_macsec_octets_protected +=
2154                        rd32(hw, TXGBE_LSECTX_PROTOCT);
2155        hw_stats->rx_macsec_pkts_untagged += rd32(hw, TXGBE_LSECRX_UTPKT);
2156        hw_stats->rx_macsec_pkts_badtag += rd32(hw, TXGBE_LSECRX_BTPKT);
2157        hw_stats->rx_macsec_pkts_nosci += rd32(hw, TXGBE_LSECRX_NOSCIPKT);
2158        hw_stats->rx_macsec_pkts_unknownsci += rd32(hw, TXGBE_LSECRX_UNSCIPKT);
2159        hw_stats->rx_macsec_octets_decrypted += rd32(hw, TXGBE_LSECRX_DECOCT);
2160        hw_stats->rx_macsec_octets_validated += rd32(hw, TXGBE_LSECRX_VLDOCT);
2161        hw_stats->rx_macsec_sc_pkts_unchecked +=
2162                        rd32(hw, TXGBE_LSECRX_UNCHKPKT);
2163        hw_stats->rx_macsec_sc_pkts_delayed += rd32(hw, TXGBE_LSECRX_DLYPKT);
2164        hw_stats->rx_macsec_sc_pkts_late += rd32(hw, TXGBE_LSECRX_LATEPKT);
2165        for (i = 0; i < 2; i++) {
2166                hw_stats->rx_macsec_sa_pkts_ok +=
2167                        rd32(hw, TXGBE_LSECRX_OKPKT(i));
2168                hw_stats->rx_macsec_sa_pkts_invalid +=
2169                        rd32(hw, TXGBE_LSECRX_INVPKT(i));
2170                hw_stats->rx_macsec_sa_pkts_notvalid +=
2171                        rd32(hw, TXGBE_LSECRX_BADPKT(i));
2172        }
2173        hw_stats->rx_macsec_sa_pkts_unusedsa +=
2174                        rd32(hw, TXGBE_LSECRX_INVSAPKT);
2175        hw_stats->rx_macsec_sa_pkts_notusingsa +=
2176                        rd32(hw, TXGBE_LSECRX_BADSAPKT);
2177
2178        hw_stats->rx_total_missed_packets = 0;
2179        for (i = 0; i < TXGBE_MAX_UP; i++) {
2180                hw_stats->rx_total_missed_packets +=
2181                        hw_stats->up[i].rx_up_dropped;
2182        }
2183}
2184
2185static int
2186txgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
2187{
2188        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2189        struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2190        struct txgbe_stat_mappings *stat_mappings =
2191                        TXGBE_DEV_STAT_MAPPINGS(dev);
2192        uint32_t i, j;
2193
2194        txgbe_read_stats_registers(hw, hw_stats);
2195
2196        if (stats == NULL)
2197                return -EINVAL;
2198
2199        /* Fill out the rte_eth_stats statistics structure */
2200        stats->ipackets = hw_stats->rx_packets;
2201        stats->ibytes = hw_stats->rx_bytes;
2202        stats->opackets = hw_stats->tx_packets;
2203        stats->obytes = hw_stats->tx_bytes;
2204
2205        memset(&stats->q_ipackets, 0, sizeof(stats->q_ipackets));
2206        memset(&stats->q_opackets, 0, sizeof(stats->q_opackets));
2207        memset(&stats->q_ibytes, 0, sizeof(stats->q_ibytes));
2208        memset(&stats->q_obytes, 0, sizeof(stats->q_obytes));
2209        memset(&stats->q_errors, 0, sizeof(stats->q_errors));
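        /*
         * Fold per-queue counters into the per-port queue stats: the
         * queue-stat mapping registers (stat_mappings->rqsm/tqsm) give, for
         * each hardware queue, the stats counter index it was mapped to;
         * indices beyond RTE_ETHDEV_QUEUE_STAT_CNTRS wrap around.
         */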
2210        for (i = 0; i < TXGBE_MAX_QP; i++) {
2211                uint32_t n = i / NB_QMAP_FIELDS_PER_QSM_REG;
2212                uint32_t offset = (i % NB_QMAP_FIELDS_PER_QSM_REG) * 8;
2213                uint32_t q_map;
2214
2215                q_map = (stat_mappings->rqsm[n] >> offset)
2216                                & QMAP_FIELD_RESERVED_BITS_MASK;
2217                j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
2218                     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
2219                stats->q_ipackets[j] += hw_stats->qp[i].rx_qp_packets;
2220                stats->q_ibytes[j] += hw_stats->qp[i].rx_qp_bytes;
2221
2222                q_map = (stat_mappings->tqsm[n] >> offset)
2223                                & QMAP_FIELD_RESERVED_BITS_MASK;
2224                j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
2225                     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
2226                stats->q_opackets[j] += hw_stats->qp[i].tx_qp_packets;
2227                stats->q_obytes[j] += hw_stats->qp[i].tx_qp_bytes;
2228        }
2229
2230        /* Rx Errors */
2231        stats->imissed  = hw_stats->rx_total_missed_packets;
2232        stats->ierrors  = hw_stats->rx_crc_errors +
2233                          hw_stats->rx_mac_short_packet_dropped +
2234                          hw_stats->rx_length_errors +
2235                          hw_stats->rx_undersize_errors +
2236                          hw_stats->rx_oversize_errors +
2237                          hw_stats->rx_drop_packets +
2238                          hw_stats->rx_illegal_byte_errors +
2239                          hw_stats->rx_error_bytes +
2240                          hw_stats->rx_fragment_errors +
2241                          hw_stats->rx_fcoe_crc_errors +
2242                          hw_stats->rx_fcoe_mbuf_allocation_errors;
2243
2244        /* Tx Errors */
2245        stats->oerrors  = 0;
2246        return 0;
2247}
2248
2249static int
2250txgbe_dev_stats_reset(struct rte_eth_dev *dev)
2251{
2252        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2253        struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2254
2255        /* HW registers are cleared on read */
2256        hw->offset_loaded = 0;
2257        txgbe_dev_stats_get(dev, NULL);
2258        hw->offset_loaded = 1;
2259
2260        /* Reset software totals */
2261        memset(hw_stats, 0, sizeof(*hw_stats));
2262
2263        return 0;
2264}
2265
2266/* This function calculates the number of xstats based on the current config */
2267static unsigned
2268txgbe_xstats_calc_num(struct rte_eth_dev *dev)
2269{
2270        int nb_queues = max(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
2271        return TXGBE_NB_HW_STATS +
2272               TXGBE_NB_UP_STATS * TXGBE_MAX_UP +
2273               TXGBE_NB_QP_STATS * nb_queues;
2274}
2275
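/*
 * The xstats id space is flat: ids [0, TXGBE_NB_HW_STATS) index the
 * device-wide counters in txgbe_hw_stats, the next
 * TXGBE_NB_UP_STATS * TXGBE_MAX_UP ids index the per-priority counters,
 * and the remaining ids index the per-queue counters. The two helpers
 * below decode an id into a display name and into a byte offset within
 * struct txgbe_hw_stats, respectively.
 */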
2276static inline int
2277txgbe_get_name_by_id(uint32_t id, char *name, uint32_t size)
2278{
2279        int nb, st;
2280
2281        /* Extended stats from txgbe_hw_stats */
2282        if (id < TXGBE_NB_HW_STATS) {
2283                snprintf(name, size, "[hw]%s",
2284                        rte_txgbe_stats_strings[id].name);
2285                return 0;
2286        }
2287        id -= TXGBE_NB_HW_STATS;
2288
2289        /* Priority Stats */
2290        if (id < TXGBE_NB_UP_STATS * TXGBE_MAX_UP) {
2291                nb = id / TXGBE_NB_UP_STATS;
2292                st = id % TXGBE_NB_UP_STATS;
2293                snprintf(name, size, "[p%u]%s", nb,
2294                        rte_txgbe_up_strings[st].name);
2295                return 0;
2296        }
2297        id -= TXGBE_NB_UP_STATS * TXGBE_MAX_UP;
2298
2299        /* Queue Stats */
2300        if (id < TXGBE_NB_QP_STATS * TXGBE_MAX_QP) {
2301                nb = id / TXGBE_NB_QP_STATS;
2302                st = id % TXGBE_NB_QP_STATS;
2303                snprintf(name, size, "[q%u]%s", nb,
2304                        rte_txgbe_qp_strings[st].name);
2305                return 0;
2306        }
2307        id -= TXGBE_NB_QP_STATS * TXGBE_MAX_QP;
2308
2309        return -(int)(id + 1);
2310}
2311
2312static inline int
2313txgbe_get_offset_by_id(uint32_t id, uint32_t *offset)
2314{
2315        int nb, st;
2316
2317        /* Extended stats from txgbe_hw_stats */
2318        if (id < TXGBE_NB_HW_STATS) {
2319                *offset = rte_txgbe_stats_strings[id].offset;
2320                return 0;
2321        }
2322        id -= TXGBE_NB_HW_STATS;
2323
2324        /* Priority Stats */
2325        if (id < TXGBE_NB_UP_STATS * TXGBE_MAX_UP) {
2326                nb = id / TXGBE_NB_UP_STATS;
2327                st = id % TXGBE_NB_UP_STATS;
2328                *offset = rte_txgbe_up_strings[st].offset +
2329                        nb * (TXGBE_NB_UP_STATS * sizeof(uint64_t));
2330                return 0;
2331        }
2332        id -= TXGBE_NB_UP_STATS * TXGBE_MAX_UP;
2333
2334        /* Queue Stats */
2335        if (id < TXGBE_NB_QP_STATS * TXGBE_MAX_QP) {
2336                nb = id / TXGBE_NB_QP_STATS;
2337                st = id % TXGBE_NB_QP_STATS;
2338                *offset = rte_txgbe_qp_strings[st].offset +
2339                        nb * (TXGBE_NB_QP_STATS * sizeof(uint64_t));
2340                return 0;
2341        }
2342
2343        return -1;
2344}
2345
2346static int txgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
2347        struct rte_eth_xstat_name *xstats_names, unsigned int limit)
2348{
2349        unsigned int i, count;
2350
2351        count = txgbe_xstats_calc_num(dev);
2352        if (xstats_names == NULL)
2353                return count;
2354
2355        /* Note: limit >= cnt_stats checked upstream
2356         * in rte_eth_xstats_names()
2357         */
2358        limit = min(limit, count);
2359
2360        /* Extended stats from txgbe_hw_stats */
2361        for (i = 0; i < limit; i++) {
2362                if (txgbe_get_name_by_id(i, xstats_names[i].name,
2363                        sizeof(xstats_names[i].name))) {
2364                        PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2365                        break;
2366                }
2367        }
2368
2369        return i;
2370}
2371
2372static int txgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
2373        struct rte_eth_xstat_name *xstats_names,
2374        const uint64_t *ids,
2375        unsigned int limit)
2376{
2377        unsigned int i;
2378
2379        if (ids == NULL)
2380                return txgbe_dev_xstats_get_names(dev, xstats_names, limit);
2381
2382        for (i = 0; i < limit; i++) {
2383                if (txgbe_get_name_by_id(ids[i], xstats_names[i].name,
2384                                sizeof(xstats_names[i].name))) {
2385                        PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2386                        return -1;
2387                }
2388        }
2389
2390        return i;
2391}
2392
2393static int
2394txgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
2395                                         unsigned int limit)
2396{
2397        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2398        struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2399        unsigned int i, count;
2400
2401        txgbe_read_stats_registers(hw, hw_stats);
2402
2403        /* If this is a reset, xstats is NULL and we have cleared the
2404         * registers by reading them.
2405         */
2406        count = txgbe_xstats_calc_num(dev);
2407        if (xstats == NULL)
2408                return count;
2409
2410        limit = min(limit, txgbe_xstats_calc_num(dev));
2411
2412        /* Extended stats from txgbe_hw_stats */
2413        for (i = 0; i < limit; i++) {
2414                uint32_t offset = 0;
2415
2416                if (txgbe_get_offset_by_id(i, &offset)) {
2417                        PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2418                        break;
2419                }
2420                xstats[i].value = *(uint64_t *)(((char *)hw_stats) + offset);
2421                xstats[i].id = i;
2422        }
2423
2424        return i;
2425}
2426
2427static int
2428txgbe_dev_xstats_get_(struct rte_eth_dev *dev, uint64_t *values,
2429                                         unsigned int limit)
2430{
2431        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2432        struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2433        unsigned int i, count;
2434
2435        txgbe_read_stats_registers(hw, hw_stats);
2436
2437        /* If this is a reset, values is NULL and we have cleared the
2438         * registers by reading them.
2439         */
2440        count = txgbe_xstats_calc_num(dev);
2441        if (values == NULL)
2442                return count;
2443
2444        limit = min(limit, txgbe_xstats_calc_num(dev));
2445
2446        /* Extended stats from txgbe_hw_stats */
2447        for (i = 0; i < limit; i++) {
2448                uint32_t offset;
2449
2450                if (txgbe_get_offset_by_id(i, &offset)) {
2451                        PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2452                        break;
2453                }
2454                values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
2455        }
2456
2457        return i;
2458}
2459
2460static int
2461txgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
2462                uint64_t *values, unsigned int limit)
2463{
2464        struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2465        unsigned int i;
2466
2467        if (ids == NULL)
2468                return txgbe_dev_xstats_get_(dev, values, limit);
2469
2470        for (i = 0; i < limit; i++) {
2471                uint32_t offset;
2472
2473                if (txgbe_get_offset_by_id(ids[i], &offset)) {
2474                        PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2475                        break;
2476                }
2477                values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
2478        }
2479
2480        return i;
2481}
2482
2483static int
2484txgbe_dev_xstats_reset(struct rte_eth_dev *dev)
2485{
2486        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2487        struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2488
2489        /* HW registers are cleared on read */
2490        hw->offset_loaded = 0;
2491        txgbe_read_stats_registers(hw, hw_stats);
2492        hw->offset_loaded = 1;
2493
2494        /* Reset software totals */
2495        memset(hw_stats, 0, sizeof(*hw_stats));
2496
2497        return 0;
2498}
2499
2500static int
2501txgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
2502{
2503        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2504        u16 eeprom_verh, eeprom_verl;
2505        u32 etrack_id;
2506        int ret;
2507
2508        hw->rom.readw_sw(hw, TXGBE_EEPROM_VERSION_H, &eeprom_verh);
2509        hw->rom.readw_sw(hw, TXGBE_EEPROM_VERSION_L, &eeprom_verl);
2510
2511        etrack_id = (eeprom_verh << 16) | eeprom_verl;
2512        ret = snprintf(fw_version, fw_size, "0x%08x", etrack_id);
2513
2514        ret += 1; /* add the size of '\0' */
2515        if (fw_size < (u32)ret)
2516                return ret;
2517        else
2518                return 0;
2519}
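/*
 * The "firmware version" reported above is the EEPROM etrack id (the two
 * 16-bit EEPROM version words combined), formatted as "0x%08x". The
 * function returns 0 on success; if the caller's buffer is too small it
 * returns the number of bytes, including the terminating '\0', that would
 * have been needed.
 */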
2520
2521static int
2522txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
2523{
2524        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2525        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2526
2527        dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
2528        dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
2529        dev_info->min_rx_bufsize = 1024;
2530        dev_info->max_rx_pktlen = 15872;
2531        dev_info->max_mac_addrs = hw->mac.num_rar_entries;
2532        dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
2533        dev_info->max_vfs = pci_dev->max_vfs;
2534        dev_info->max_vmdq_pools = ETH_64_POOLS;
2535        dev_info->vmdq_queue_num = dev_info->max_rx_queues;
2536        dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
2537        dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
2538                                     dev_info->rx_queue_offload_capa);
2539        dev_info->tx_queue_offload_capa = txgbe_get_tx_queue_offloads(dev);
2540        dev_info->tx_offload_capa = txgbe_get_tx_port_offloads(dev);
2541
2542        dev_info->default_rxconf = (struct rte_eth_rxconf) {
2543                .rx_thresh = {
2544                        .pthresh = TXGBE_DEFAULT_RX_PTHRESH,
2545                        .hthresh = TXGBE_DEFAULT_RX_HTHRESH,
2546                        .wthresh = TXGBE_DEFAULT_RX_WTHRESH,
2547                },
2548                .rx_free_thresh = TXGBE_DEFAULT_RX_FREE_THRESH,
2549                .rx_drop_en = 0,
2550                .offloads = 0,
2551        };
2552
2553        dev_info->default_txconf = (struct rte_eth_txconf) {
2554                .tx_thresh = {
2555                        .pthresh = TXGBE_DEFAULT_TX_PTHRESH,
2556                        .hthresh = TXGBE_DEFAULT_TX_HTHRESH,
2557                        .wthresh = TXGBE_DEFAULT_TX_WTHRESH,
2558                },
2559                .tx_free_thresh = TXGBE_DEFAULT_TX_FREE_THRESH,
2560                .offloads = 0,
2561        };
2562
2563        dev_info->rx_desc_lim = rx_desc_lim;
2564        dev_info->tx_desc_lim = tx_desc_lim;
2565
2566        dev_info->hash_key_size = TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
2567        dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
2568        dev_info->flow_type_rss_offloads = TXGBE_RSS_OFFLOAD_ALL;
2569
2570        dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
2571        dev_info->speed_capa |= ETH_LINK_SPEED_100M;
2572
2573        /* Driver-preferred Rx/Tx parameters */
2574        dev_info->default_rxportconf.burst_size = 32;
2575        dev_info->default_txportconf.burst_size = 32;
2576        dev_info->default_rxportconf.nb_queues = 1;
2577        dev_info->default_txportconf.nb_queues = 1;
2578        dev_info->default_rxportconf.ring_size = 256;
2579        dev_info->default_txportconf.ring_size = 256;
2580
2581        return 0;
2582}
2583
2584const uint32_t *
2585txgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
2586{
2587        if (dev->rx_pkt_burst == txgbe_recv_pkts ||
2588            dev->rx_pkt_burst == txgbe_recv_pkts_lro_single_alloc ||
2589            dev->rx_pkt_burst == txgbe_recv_pkts_lro_bulk_alloc ||
2590            dev->rx_pkt_burst == txgbe_recv_pkts_bulk_alloc)
2591                return txgbe_get_supported_ptypes();
2592
2593        return NULL;
2594}
2595
2596void
2597txgbe_dev_setup_link_alarm_handler(void *param)
2598{
2599        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2600        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2601        struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2602        u32 speed;
2603        bool autoneg = false;
2604
2605        speed = hw->phy.autoneg_advertised;
2606        if (!speed)
2607                hw->mac.get_link_capabilities(hw, &speed, &autoneg);
2608
2609        hw->mac.setup_link(hw, speed, true);
2610
2611        intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
2612}
2613
2614/* return 0 if the link status changed, -1 if it did not change */
2615int
2616txgbe_dev_link_update_share(struct rte_eth_dev *dev,
2617                            int wait_to_complete)
2618{
2619        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2620        struct rte_eth_link link;
2621        u32 link_speed = TXGBE_LINK_SPEED_UNKNOWN;
2622        struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2623        bool link_up;
2624        int err;
2625        int wait = 1;
2626
2627        memset(&link, 0, sizeof(link));
2628        link.link_status = ETH_LINK_DOWN;
2629        link.link_speed = ETH_SPEED_NUM_NONE;
2630        link.link_duplex = ETH_LINK_HALF_DUPLEX;
2631        link.link_autoneg = ETH_LINK_AUTONEG;
2632
2633        hw->mac.get_link_status = true;
2634
2635        if (intr->flags & TXGBE_FLAG_NEED_LINK_CONFIG)
2636                return rte_eth_linkstatus_set(dev, &link);
2637
2638        /* do not wait for completion when not requested or when the lsc interrupt is enabled */
2639        if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
2640                wait = 0;
2641
2642        err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
2643
2644        if (err != 0) {
2645                link.link_speed = ETH_SPEED_NUM_100M;
2646                link.link_duplex = ETH_LINK_FULL_DUPLEX;
2647                return rte_eth_linkstatus_set(dev, &link);
2648        }
2649
2650        if (link_up == 0) {
2651                if (hw->phy.media_type == txgbe_media_type_fiber) {
2652                        intr->flags |= TXGBE_FLAG_NEED_LINK_CONFIG;
2653                        rte_eal_alarm_set(10,
2654                                txgbe_dev_setup_link_alarm_handler, dev);
2655                }
2656                return rte_eth_linkstatus_set(dev, &link);
2657        }
2658
2659        intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
2660        link.link_status = ETH_LINK_UP;
2661        link.link_duplex = ETH_LINK_FULL_DUPLEX;
2662
2663        switch (link_speed) {
2664        default:
2665        case TXGBE_LINK_SPEED_UNKNOWN:
2666                link.link_duplex = ETH_LINK_FULL_DUPLEX;
2667                link.link_speed = ETH_SPEED_NUM_100M;
2668                break;
2669
2670        case TXGBE_LINK_SPEED_100M_FULL:
2671                link.link_speed = ETH_SPEED_NUM_100M;
2672                break;
2673
2674        case TXGBE_LINK_SPEED_1GB_FULL:
2675                link.link_speed = ETH_SPEED_NUM_1G;
2676                break;
2677
2678        case TXGBE_LINK_SPEED_2_5GB_FULL:
2679                link.link_speed = ETH_SPEED_NUM_2_5G;
2680                break;
2681
2682        case TXGBE_LINK_SPEED_5GB_FULL:
2683                link.link_speed = ETH_SPEED_NUM_5G;
2684                break;
2685
2686        case TXGBE_LINK_SPEED_10GB_FULL:
2687                link.link_speed = ETH_SPEED_NUM_10G;
2688                break;
2689        }
2690
2691        return rte_eth_linkstatus_set(dev, &link);
2692}
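/*
 * For fiber ports the link configuration is deferred: when the link is
 * reported down, TXGBE_FLAG_NEED_LINK_CONFIG is set and
 * txgbe_dev_setup_link_alarm_handler() is scheduled through an EAL alarm,
 * so setup_link() runs outside the caller's context. While that flag is
 * set, this function reports the link as down without touching the MAC.
 */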
2693
2694static int
2695txgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
2696{
2697        return txgbe_dev_link_update_share(dev, wait_to_complete);
2698}
2699
2700static int
2701txgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
2702{
2703        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2704        uint32_t fctrl;
2705
2706        fctrl = rd32(hw, TXGBE_PSRCTL);
2707        fctrl |= (TXGBE_PSRCTL_UCP | TXGBE_PSRCTL_MCP);
2708        wr32(hw, TXGBE_PSRCTL, fctrl);
2709
2710        return 0;
2711}
2712
2713static int
2714txgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
2715{
2716        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2717        uint32_t fctrl;
2718
2719        fctrl = rd32(hw, TXGBE_PSRCTL);
2720        fctrl &= (~TXGBE_PSRCTL_UCP);
2721        if (dev->data->all_multicast == 1)
2722                fctrl |= TXGBE_PSRCTL_MCP;
2723        else
2724                fctrl &= (~TXGBE_PSRCTL_MCP);
2725        wr32(hw, TXGBE_PSRCTL, fctrl);
2726
2727        return 0;
2728}
2729
2730static int
2731txgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
2732{
2733        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2734        uint32_t fctrl;
2735
2736        fctrl = rd32(hw, TXGBE_PSRCTL);
2737        fctrl |= TXGBE_PSRCTL_MCP;
2738        wr32(hw, TXGBE_PSRCTL, fctrl);
2739
2740        return 0;
2741}
2742
2743static int
2744txgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
2745{
2746        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2747        uint32_t fctrl;
2748
2749        if (dev->data->promiscuous == 1)
2750                return 0; /* must remain in all_multicast mode */
2751
2752        fctrl = rd32(hw, TXGBE_PSRCTL);
2753        fctrl &= (~TXGBE_PSRCTL_MCP);
2754        wr32(hw, TXGBE_PSRCTL, fctrl);
2755
2756        return 0;
2757}
2758
2759/**
2760 * It clears the interrupt causes and enables the interrupt.
2761 * It will be called only once during NIC initialization.
2762 *
2763 * @param dev
2764 *  Pointer to struct rte_eth_dev.
2765 * @param on
2766 *  Enable or Disable.
2767 *
2768 * @return
2769 *  - On success, zero.
2770 *  - On failure, a negative value.
2771 */
2772static int
2773txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
2774{
2775        struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2776
2777        txgbe_dev_link_status_print(dev);
2778        if (on)
2779                intr->mask_misc |= TXGBE_ICRMISC_LSC;
2780        else
2781                intr->mask_misc &= ~TXGBE_ICRMISC_LSC;
2782
2783        return 0;
2784}
2785
2786/**
2787 * It clears the interrupt causes and enables the interrupt.
2788 * It will be called only once during NIC initialization.
2789 *
2790 * @param dev
2791 *  Pointer to struct rte_eth_dev.
2792 *
2793 * @return
2794 *  - On success, zero.
2795 *  - On failure, a negative value.
2796 */
2797static int
2798txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
2799{
2800        struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2801
2802        intr->mask[0] |= TXGBE_ICR_MASK;
2803        intr->mask[1] |= TXGBE_ICR_MASK;
2804
2805        return 0;
2806}
2807
2808/**
2809 * It clears the interrupt causes and enables the interrupt.
2810 * It will be called only once during NIC initialization.
2811 *
2812 * @param dev
2813 *  Pointer to struct rte_eth_dev.
2814 *
2815 * @return
2816 *  - On success, zero.
2817 *  - On failure, a negative value.
2818 */
2819static int
2820txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
2821{
2822        struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2823
2824        intr->mask_misc |= TXGBE_ICRMISC_LNKSEC;
2825
2826        return 0;
2827}
2828
2829/*
2830 * It reads the ICR and, on an LSC interrupt (TXGBE_ICRMISC_LSC), sets the flag for link_update.
2831 *
2832 * @param dev
2833 *  Pointer to struct rte_eth_dev.
2834 *
2835 * @return
2836 *  - On success, zero.
2837 *  - On failure, a negative value.
2838 */
2839static int
2840txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
2841{
2842        uint32_t eicr;
2843        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2844        struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2845
2846        /* clear all cause mask */
2847        txgbe_disable_intr(hw);
2848
2849        /* read-on-clear nic registers here */
2850        eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];
2851        PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
2852
2853        intr->flags = 0;
2854
2855        /* set flag for async link update */
2856        if (eicr & TXGBE_ICRMISC_LSC)
2857                intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;
2858
2859        if (eicr & TXGBE_ICRMISC_VFMBX)
2860                intr->flags |= TXGBE_FLAG_MAILBOX;
2861
2862        if (eicr & TXGBE_ICRMISC_LNKSEC)
2863                intr->flags |= TXGBE_FLAG_MACSEC;
2864
2865        if (eicr & TXGBE_ICRMISC_GPIO)
2866                intr->flags |= TXGBE_FLAG_PHY_INTERRUPT;
2867
2868        return 0;
2869}
2870
2871/**
2872 * It gets and then prints the link status.
2873 *
2874 * @param dev
2875 *  Pointer to struct rte_eth_dev.
2876 *
2877 * @return
2878 *  - On success, zero.
2879 *  - On failure, a negative value.
2880 */
2881static void
2882txgbe_dev_link_status_print(struct rte_eth_dev *dev)
2883{
2884        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2885        struct rte_eth_link link;
2886
2887        rte_eth_linkstatus_get(dev, &link);
2888
2889        if (link.link_status) {
2890                PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
2891                                        (int)(dev->data->port_id),
2892                                        (unsigned int)link.link_speed,
2893                        link.link_duplex == ETH_LINK_FULL_DUPLEX ?
2894                                        "full-duplex" : "half-duplex");
2895        } else {
2896                PMD_INIT_LOG(INFO, " Port %d: Link Down",
2897                                (int)(dev->data->port_id));
2898        }
2899        PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
2900                                pci_dev->addr.domain,
2901                                pci_dev->addr.bus,
2902                                pci_dev->addr.devid,
2903                                pci_dev->addr.function);
2904}
2905
2906/*
2907 * It executes link_update after it has determined that an interrupt occurred.
2908 *
2909 * @param dev
2910 *  Pointer to struct rte_eth_dev.
2911 *
2912 * @return
2913 *  - On success, zero.
2914 *  - On failure, a negative value.
2915 */
2916static int
2917txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
2918                           struct rte_intr_handle *intr_handle)
2919{
2920        struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2921        int64_t timeout;
2922        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2923
2924        PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
2925
2926        if (intr->flags & TXGBE_FLAG_MAILBOX) {
2927                txgbe_pf_mbx_process(dev);
2928                intr->flags &= ~TXGBE_FLAG_MAILBOX;
2929        }
2930
2931        if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
2932                hw->phy.handle_lasi(hw);
2933                intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
2934        }
2935
2936        if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
2937                struct rte_eth_link link;
2938
2939                /* get the link status before the link update, for use in the prediction below */
2940                rte_eth_linkstatus_get(dev, &link);
2941
2942                txgbe_dev_link_update(dev, 0);
2943
2944                /* link was down, so it is likely coming up */
2945                if (!link.link_status)
2946                        /* handle it 1 sec later, waiting for it to become stable */
2947                        timeout = TXGBE_LINK_UP_CHECK_TIMEOUT;
2948                /* link was up, so it is likely going down */
2949                else
2950                        /* handle it 4 sec later, waiting for it to become stable */
2951                        timeout = TXGBE_LINK_DOWN_CHECK_TIMEOUT;
2952
2953                txgbe_dev_link_status_print(dev);
2954                if (rte_eal_alarm_set(timeout * 1000,
2955                                      txgbe_dev_interrupt_delayed_handler,
2956                                      (void *)dev) < 0) {
2957                        PMD_DRV_LOG(ERR, "Error setting alarm");
2958                } else {
2959                        /* remember original mask */
2960                        intr->mask_misc_orig = intr->mask_misc;
2961                        /* only disable lsc interrupt */
2962                        intr->mask_misc &= ~TXGBE_ICRMISC_LSC;
2963                }
2964        }
2965
2966        PMD_DRV_LOG(DEBUG, "enable intr immediately");
2967        txgbe_enable_intr(dev);
2968        rte_intr_enable(intr_handle);
2969
2970        return 0;
2971}
2972
2973/**
2974 * Interrupt handler which shall be registered as the alarm callback for delayed
2975 * handling of a specific interrupt, to wait for the NIC state to stabilize. The
2976 * NIC interrupt state is not stable for txgbe right after the link goes down,
2977 * so it needs to wait 4 seconds to get a stable status.
2978 *
2979 * @param handle
2980 *  Pointer to interrupt handle.
2981 * @param param
2982 *  The address of parameter (struct rte_eth_dev *) registered before.
2983 *
2984 * @return
2985 *  void
2986 */
2987static void
2988txgbe_dev_interrupt_delayed_handler(void *param)
2989{
2990        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2991        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2992        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2993        struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2994        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2995        uint32_t eicr;
2996
2997        txgbe_disable_intr(hw);
2998
2999        eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];
3000        if (eicr & TXGBE_ICRMISC_VFMBX)
3001                txgbe_pf_mbx_process(dev);
3002
3003        if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
3004                hw->phy.handle_lasi(hw);
3005                intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
3006        }
3007
3008        if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
3009                txgbe_dev_link_update(dev, 0);
3010                intr->flags &= ~TXGBE_FLAG_NEED_LINK_UPDATE;
3011                txgbe_dev_link_status_print(dev);
3012                rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
3013                                              NULL);
3014        }
3015
3016        if (intr->flags & TXGBE_FLAG_MACSEC) {
3017                rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
3018                                              NULL);
3019                intr->flags &= ~TXGBE_FLAG_MACSEC;
3020        }
3021
3022        /* restore original mask */
3023        intr->mask_misc = intr->mask_misc_orig;
3024        intr->mask_misc_orig = 0;
3025
3026        PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
3027        txgbe_enable_intr(dev);
3028        rte_intr_enable(intr_handle);
3029}
3030
3031/**
3032 * Interrupt handler triggered by the NIC for handling
3033 * a specific interrupt.
3034 *
3035 * @param handle
3036 *  Pointer to interrupt handle.
3037 * @param param
3038 *  The address of parameter (struct rte_eth_dev *) registered before.
3039 *
3040 * @return
3041 *  void
3042 */
3043static void
3044txgbe_dev_interrupt_handler(void *param)
3045{
3046        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
3047
3048        txgbe_dev_interrupt_get_status(dev);
3049        txgbe_dev_interrupt_action(dev, dev->intr_handle);
3050}
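
/*
 * A minimal, hypothetical application-side sketch of how the link status
 * change events raised above are consumed through the ethdev API. The
 * callback name and port_id are placeholders; dev_conf.intr_conf.lsc must
 * be set to 1 before rte_eth_dev_configure() for LSC interrupts to be used.
 *
 *   static int
 *   lsc_event_cb(uint16_t port_id, enum rte_eth_event_type type,
 *                void *cb_arg, void *ret_param)
 *   {
 *           struct rte_eth_link link;
 *
 *           RTE_SET_USED(type);
 *           RTE_SET_USED(cb_arg);
 *           RTE_SET_USED(ret_param);
 *           rte_eth_link_get_nowait(port_id, &link);
 *           printf("port %u link %s\n", port_id,
 *                  link.link_status ? "up" : "down");
 *           return 0;
 *   }
 *
 *   rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *                                 lsc_event_cb, NULL);
 */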
3051
3052static int
3053txgbe_dev_led_on(struct rte_eth_dev *dev)
3054{
3055        struct txgbe_hw *hw;
3056
3057        hw = TXGBE_DEV_HW(dev);
3058        return txgbe_led_on(hw, 4) == 0 ? 0 : -ENOTSUP;
3059}
3060
3061static int
3062txgbe_dev_led_off(struct rte_eth_dev *dev)
3063{
3064        struct txgbe_hw *hw;
3065
3066        hw = TXGBE_DEV_HW(dev);
3067        return txgbe_led_off(hw, 4) == 0 ? 0 : -ENOTSUP;
3068}
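
/*
 * These ops back the generic rte_eth_led_on()/rte_eth_led_off() helpers.
 * A hypothetical snippet to blink the port LED for identification
 * (port_id is a placeholder):
 *
 *   rte_eth_led_on(port_id);
 *   rte_delay_ms(2000);
 *   rte_eth_led_off(port_id);
 */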
3069
3070static int
3071txgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
3072{
3073        struct txgbe_hw *hw;
3074        uint32_t mflcn_reg;
3075        uint32_t fccfg_reg;
3076        int rx_pause;
3077        int tx_pause;
3078
3079        hw = TXGBE_DEV_HW(dev);
3080
3081        fc_conf->pause_time = hw->fc.pause_time;
3082        fc_conf->high_water = hw->fc.high_water[0];
3083        fc_conf->low_water = hw->fc.low_water[0];
3084        fc_conf->send_xon = hw->fc.send_xon;
3085        fc_conf->autoneg = !hw->fc.disable_fc_autoneg;
3086
3087        /*
3088         * Return rx_pause status according to actual setting of
3089         * RXFCCFG register.
3090         */
3091        mflcn_reg = rd32(hw, TXGBE_RXFCCFG);
3092        if (mflcn_reg & (TXGBE_RXFCCFG_FC | TXGBE_RXFCCFG_PFC))
3093                rx_pause = 1;
3094        else
3095                rx_pause = 0;
3096
3097        /*
3098         * Return tx_pause status according to actual setting of
3099         * TXFCCFG register.
3100         */
3101        fccfg_reg = rd32(hw, TXGBE_TXFCCFG);
3102        if (fccfg_reg & (TXGBE_TXFCCFG_FC | TXGBE_TXFCCFG_PFC))
3103                tx_pause = 1;
3104        else
3105                tx_pause = 0;
3106
3107        if (rx_pause && tx_pause)
3108                fc_conf->mode = RTE_FC_FULL;
3109        else if (rx_pause)
3110                fc_conf->mode = RTE_FC_RX_PAUSE;
3111        else if (tx_pause)
3112                fc_conf->mode = RTE_FC_TX_PAUSE;
3113        else
3114                fc_conf->mode = RTE_FC_NONE;
3115
3116        return 0;
3117}
3118
3119static int
3120txgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
3121{
3122        struct txgbe_hw *hw;
3123        int err;
3124        uint32_t rx_buf_size;
3125        uint32_t max_high_water;
3126        enum txgbe_fc_mode rte_fcmode_2_txgbe_fcmode[] = {
3127                txgbe_fc_none,
3128                txgbe_fc_rx_pause,
3129                txgbe_fc_tx_pause,
3130                txgbe_fc_full
3131        };
3132
3133        PMD_INIT_FUNC_TRACE();
3134
3135        hw = TXGBE_DEV_HW(dev);
3136        rx_buf_size = rd32(hw, TXGBE_PBRXSIZE(0));
3137        PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
3138
3139        /*
3140         * Reserve at least one Ethernet frame for the watermark:
3141         * high_water/low_water are in kilobytes for txgbe
3142         */
3143        max_high_water = (rx_buf_size - RTE_ETHER_MAX_LEN) >> 10;
3144        if (fc_conf->high_water > max_high_water ||
3145            fc_conf->high_water < fc_conf->low_water) {
3146                PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
3147                PMD_INIT_LOG(ERR, "High_water must be <= 0x%x", max_high_water);
3148                return -EINVAL;
3149        }
3150
3151        hw->fc.requested_mode = rte_fcmode_2_txgbe_fcmode[fc_conf->mode];
3152        hw->fc.pause_time     = fc_conf->pause_time;
3153        hw->fc.high_water[0]  = fc_conf->high_water;
3154        hw->fc.low_water[0]   = fc_conf->low_water;
3155        hw->fc.send_xon       = fc_conf->send_xon;
3156        hw->fc.disable_fc_autoneg = !fc_conf->autoneg;
3157
3158        err = txgbe_fc_enable(hw);
3159
3160        /* Not negotiated is not an error case */
3161        if (err == 0 || err == TXGBE_ERR_FC_NOT_NEGOTIATED) {
3162                wr32m(hw, TXGBE_MACRXFLT, TXGBE_MACRXFLT_CTL_MASK,
3163                      (fc_conf->mac_ctrl_frame_fwd
3164                       ? TXGBE_MACRXFLT_CTL_NOPS : TXGBE_MACRXFLT_CTL_DROP));
3165                txgbe_flush(hw);
3166
3167                return 0;
3168        }
3169
3170        PMD_INIT_LOG(ERR, "txgbe_fc_enable = 0x%x", err);
3171        return -EIO;
3172}
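
/*
 * A hedged, application-level sketch of configuring link flow control
 * through rte_eth_dev_flow_ctrl_set(), which ends up in the op above.
 * The watermark values are placeholders; as checked above, they are in KB
 * and must fit the Rx packet buffer.
 *
 *   struct rte_eth_fc_conf fc = {
 *           .mode = RTE_FC_FULL,
 *           .high_water = 0x80,
 *           .low_water = 0x40,
 *           .pause_time = 0x680,
 *           .send_xon = 1,
 *           .autoneg = 1,
 *   };
 *   if (rte_eth_dev_flow_ctrl_set(port_id, &fc) != 0)
 *           printf("flow control setup failed\n");
 */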
3173
3174static int
3175txgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
3176                struct rte_eth_pfc_conf *pfc_conf)
3177{
3178        int err;
3179        uint32_t rx_buf_size;
3180        uint32_t max_high_water;
3181        uint8_t tc_num;
3182        uint8_t  map[TXGBE_DCB_UP_MAX] = { 0 };
3183        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3184        struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(dev);
3185
3186        enum txgbe_fc_mode rte_fcmode_2_txgbe_fcmode[] = {
3187                txgbe_fc_none,
3188                txgbe_fc_rx_pause,
3189                txgbe_fc_tx_pause,
3190                txgbe_fc_full
3191        };
3192
3193        PMD_INIT_FUNC_TRACE();
3194
3195        txgbe_dcb_unpack_map_cee(dcb_config, TXGBE_DCB_RX_CONFIG, map);
3196        tc_num = map[pfc_conf->priority];
3197        rx_buf_size = rd32(hw, TXGBE_PBRXSIZE(tc_num));
3198        PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
3199        /*
3200         * Reserve at least one Ethernet frame for the watermark:
3201         * high_water/low_water are in kilobytes for txgbe
3202         */
3203        max_high_water = (rx_buf_size - RTE_ETHER_MAX_LEN) >> 10;
3204        if (pfc_conf->fc.high_water > max_high_water ||
3205            pfc_conf->fc.high_water <= pfc_conf->fc.low_water) {
3206                PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
3207                PMD_INIT_LOG(ERR, "High_water must be <= 0x%x", max_high_water);
3208                return -EINVAL;
3209        }
3210
3211        hw->fc.requested_mode = rte_fcmode_2_txgbe_fcmode[pfc_conf->fc.mode];
3212        hw->fc.pause_time = pfc_conf->fc.pause_time;
3213        hw->fc.send_xon = pfc_conf->fc.send_xon;
3214        hw->fc.low_water[tc_num] =  pfc_conf->fc.low_water;
3215        hw->fc.high_water[tc_num] = pfc_conf->fc.high_water;
3216
3217        err = txgbe_dcb_pfc_enable(hw, tc_num);
3218
3219        /* Not negotiated is not an error case */
3220        if (err == 0 || err == TXGBE_ERR_FC_NOT_NEGOTIATED)
3221                return 0;
3222
3223        PMD_INIT_LOG(ERR, "txgbe_dcb_pfc_enable = 0x%x", err);
3224        return -EIO;
3225}
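
/*
 * A similar, hypothetical sketch for priority flow control via
 * rte_eth_dev_priority_flow_ctrl_set(); the priority and watermark values
 * are placeholders.
 *
 *   struct rte_eth_pfc_conf pfc = {
 *           .fc = {
 *                   .mode = RTE_FC_FULL,
 *                   .high_water = 0x80,
 *                   .low_water = 0x40,
 *                   .pause_time = 0x680,
 *                   .send_xon = 1,
 *           },
 *           .priority = 3,
 *   };
 *   rte_eth_dev_priority_flow_ctrl_set(port_id, &pfc);
 */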
3226
3227int
3228txgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
3229                          struct rte_eth_rss_reta_entry64 *reta_conf,
3230                          uint16_t reta_size)
3231{
3232        uint8_t i, j, mask;
3233        uint32_t reta;
3234        uint16_t idx, shift;
3235        struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
3236        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3237
3238        PMD_INIT_FUNC_TRACE();
3239
3240        if (!txgbe_rss_update_sp(hw->mac.type)) {
3241                PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
3242                        "NIC.");
3243                return -ENOTSUP;
3244        }
3245
3246        if (reta_size != ETH_RSS_RETA_SIZE_128) {
3247                PMD_DRV_LOG(ERR, "The size of the configured hash lookup table "
3248                        "(%d) doesn't match the number the hardware can support "
3249                        "(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
3250                return -EINVAL;
3251        }
3252
3253        for (i = 0; i < reta_size; i += 4) {
3254                idx = i / RTE_RETA_GROUP_SIZE;
3255                shift = i % RTE_RETA_GROUP_SIZE;
3256                mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
3257                if (!mask)
3258                        continue;
3259
3260                reta = rd32a(hw, TXGBE_REG_RSSTBL, i >> 2);
3261                for (j = 0; j < 4; j++) {
3262                        if (RS8(mask, j, 0x1)) {
3263                                reta  &= ~(MS32(8 * j, 0xFF));
3264                                reta |= LS32(reta_conf[idx].reta[shift + j],
3265                                                8 * j, 0xFF);
3266                        }
3267                }
3268                wr32a(hw, TXGBE_REG_RSSTBL, i >> 2, reta);
3269        }
3270        adapter->rss_reta_updated = 1;
3271
3272        return 0;
3273}
3274
3275int
3276txgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
3277                         struct rte_eth_rss_reta_entry64 *reta_conf,
3278                         uint16_t reta_size)
3279{
3280        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3281        uint8_t i, j, mask;
3282        uint32_t reta;
3283        uint16_t idx, shift;
3284
3285        PMD_INIT_FUNC_TRACE();
3286
3287        if (reta_size != ETH_RSS_RETA_SIZE_128) {
3288                PMD_DRV_LOG(ERR, "The size of the configured hash lookup table "
3289                        "(%d) doesn't match the number the hardware can support "
3290                        "(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
3291                return -EINVAL;
3292        }
3293
3294        for (i = 0; i < reta_size; i += 4) {
3295                idx = i / RTE_RETA_GROUP_SIZE;
3296                shift = i % RTE_RETA_GROUP_SIZE;
3297                mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
3298                if (!mask)
3299                        continue;
3300
3301                reta = rd32a(hw, TXGBE_REG_RSSTBL, i >> 2);
3302                for (j = 0; j < 4; j++) {
3303                        if (RS8(mask, j, 0x1))
3304                                reta_conf[idx].reta[shift + j] =
3305                                        (uint16_t)RS32(reta, 8 * j, 0xFF);
3306                }
3307        }
3308
3309        return 0;
3310}
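
/*
 * A minimal application sketch of programming the 128-entry RETA through
 * rte_eth_dev_rss_reta_update(); spreading entries over the first 4 Rx
 * queues is only an example, and port_id is a placeholder.
 *
 *   struct rte_eth_rss_reta_entry64 reta_conf[2];
 *   uint16_t i;
 *
 *   memset(reta_conf, 0, sizeof(reta_conf));
 *   for (i = 0; i < ETH_RSS_RETA_SIZE_128; i++) {
 *           reta_conf[i / RTE_RETA_GROUP_SIZE].mask |=
 *                   (1ULL << (i % RTE_RETA_GROUP_SIZE));
 *           reta_conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *                   i % 4;
 *   }
 *   rte_eth_dev_rss_reta_update(port_id, reta_conf, ETH_RSS_RETA_SIZE_128);
 */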
3311
3312static int
3313txgbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
3314                                uint32_t index, uint32_t pool)
3315{
3316        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3317        uint32_t enable_addr = 1;
3318
3319        return txgbe_set_rar(hw, index, mac_addr->addr_bytes,
3320                             pool, enable_addr);
3321}
3322
3323static void
3324txgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
3325{
3326        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3327
3328        txgbe_clear_rar(hw, index);
3329}
3330
3331static int
3332txgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
3333{
3334        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3335
3336        txgbe_remove_rar(dev, 0);
3337        txgbe_add_rar(dev, addr, 0, pci_dev->max_vfs);
3338
3339        return 0;
3340}
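
/*
 * A hypothetical application call that reaches the op above; the MAC
 * address is an arbitrary locally administered example.
 *
 *   struct rte_ether_addr new_mac = {
 *           .addr_bytes = { 0x02, 0x09, 0xc0, 0x12, 0x34, 0x56 } };
 *   rte_eth_dev_default_mac_addr_set(port_id, &new_mac);
 */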
3341
3342static int
3343txgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
3344{
3345        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3346        struct rte_eth_dev_info dev_info;
3347        uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
3348        struct rte_eth_dev_data *dev_data = dev->data;
3349        int ret;
3350
3351        ret = txgbe_dev_info_get(dev, &dev_info);
3352        if (ret != 0)
3353                return ret;
3354
3355        /* check that mtu is within the allowed range */
3356        if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
3357                return -EINVAL;
3358
3359        /* If device is started, refuse mtu that requires the support of
3360         * scattered packets when this feature has not been enabled before.
3361         */
3362        if (dev_data->dev_started && !dev_data->scattered_rx &&
3363            (frame_size + 2 * TXGBE_VLAN_TAG_SIZE >
3364             dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
3365                PMD_INIT_LOG(ERR, "Stop port first.");
3366                return -EINVAL;
3367        }
3368
3369        /* update max frame size */
3370        dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
3371
3372        if (hw->mode)
3373                wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
3374                        TXGBE_FRAME_SIZE_MAX);
3375        else
3376                wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
3377                        TXGBE_FRMSZ_MAX(frame_size));
3378
3379        return 0;
3380}
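
/*
 * A minimal usage sketch: 9000 is only an example MTU, and the call can
 * fail if the frame size exceeds max_rx_pktlen or scattered Rx would be
 * needed while the port is started, as checked above.
 *
 *   if (rte_eth_dev_set_mtu(port_id, 9000) != 0)
 *           printf("MTU not accepted\n");
 */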
3381
3382static uint32_t
3383txgbe_uta_vector(struct txgbe_hw *hw, struct rte_ether_addr *uc_addr)
3384{
3385        uint32_t vector = 0;
3386
3387        switch (hw->mac.mc_filter_type) {
3388        case 0:   /* use bits [47:36] of the address */
3389                vector = ((uc_addr->addr_bytes[4] >> 4) |
3390                        (((uint16_t)uc_addr->addr_bytes[5]) << 4));
3391                break;
3392        case 1:   /* use bits [46:35] of the address */
3393                vector = ((uc_addr->addr_bytes[4] >> 3) |
3394                        (((uint16_t)uc_addr->addr_bytes[5]) << 5));
3395                break;
3396        case 2:   /* use bits [45:34] of the address */
3397                vector = ((uc_addr->addr_bytes[4] >> 2) |
3398                        (((uint16_t)uc_addr->addr_bytes[5]) << 6));
3399                break;
3400        case 3:   /* use bits [43:32] of the address */
3401                vector = ((uc_addr->addr_bytes[4]) |
3402                        (((uint16_t)uc_addr->addr_bytes[5]) << 8));
3403                break;
3404        default:  /* Invalid mc_filter_type */
3405                break;
3406        }
3407
3408        /* the vector can only be 12 bits wide, or the table boundary will be exceeded */
3409        vector &= 0xFFF;
3410        return vector;
3411}
3412
3413static int
3414txgbe_uc_hash_table_set(struct rte_eth_dev *dev,
3415                        struct rte_ether_addr *mac_addr, uint8_t on)
3416{
3417        uint32_t vector;
3418        uint32_t uta_idx;
3419        uint32_t reg_val;
3420        uint32_t uta_mask;
3421        uint32_t psrctl;
3422
3423        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3424        struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev);
3425
3426        /* The UTA table only exists on pf hardware */
3427        if (hw->mac.type < txgbe_mac_raptor)
3428                return -ENOTSUP;
3429
3430        vector = txgbe_uta_vector(hw, mac_addr);
3431        uta_idx = (vector >> 5) & 0x7F;
3432        uta_mask = 0x1UL << (vector & 0x1F);
3433
3434        if (!!on == !!(uta_info->uta_shadow[uta_idx] & uta_mask))
3435                return 0;
3436
3437        reg_val = rd32(hw, TXGBE_UCADDRTBL(uta_idx));
3438        if (on) {
3439                uta_info->uta_in_use++;
3440                reg_val |= uta_mask;
3441                uta_info->uta_shadow[uta_idx] |= uta_mask;
3442        } else {
3443                uta_info->uta_in_use--;
3444                reg_val &= ~uta_mask;
3445                uta_info->uta_shadow[uta_idx] &= ~uta_mask;
3446        }
3447
3448        wr32(hw, TXGBE_UCADDRTBL(uta_idx), reg_val);
3449
3450        psrctl = rd32(hw, TXGBE_PSRCTL);
3451        if (uta_info->uta_in_use > 0)
3452                psrctl |= TXGBE_PSRCTL_UCHFENA;
3453        else
3454                psrctl &= ~TXGBE_PSRCTL_UCHFENA;
3455
3456        psrctl &= ~TXGBE_PSRCTL_ADHF12_MASK;
3457        psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
3458        wr32(hw, TXGBE_PSRCTL, psrctl);
3459
3460        return 0;
3461}
3462
3463static int
3464txgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
3465{
3466        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3467        struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev);
3468        uint32_t psrctl;
3469        int i;
3470
3471        /* The UTA table only exists on pf hardware */
3472        if (hw->mac.type < txgbe_mac_raptor)
3473                return -ENOTSUP;
3474
3475        if (on) {
3476                for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
3477                        uta_info->uta_shadow[i] = ~0;
3478                        wr32(hw, TXGBE_UCADDRTBL(i), ~0);
3479                }
3480        } else {
3481                for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
3482                        uta_info->uta_shadow[i] = 0;
3483                        wr32(hw, TXGBE_UCADDRTBL(i), 0);
3484                }
3485        }
3486
3487        psrctl = rd32(hw, TXGBE_PSRCTL);
3488        if (on)
3489                psrctl |= TXGBE_PSRCTL_UCHFENA;
3490        else
3491                psrctl &= ~TXGBE_PSRCTL_UCHFENA;
3492
3493        psrctl &= ~TXGBE_PSRCTL_ADHF12_MASK;
3494        psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
3495        wr32(hw, TXGBE_PSRCTL, psrctl);
3496
3497        return 0;
3498}
3499
3500uint32_t
3501txgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
3502{
3503        uint32_t new_val = orig_val;
3504
3505        if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
3506                new_val |= TXGBE_POOLETHCTL_UTA;
3507        if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
3508                new_val |= TXGBE_POOLETHCTL_MCHA;
3509        if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
3510                new_val |= TXGBE_POOLETHCTL_UCHA;
3511        if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
3512                new_val |= TXGBE_POOLETHCTL_BCA;
3513        if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
3514                new_val |= TXGBE_POOLETHCTL_MCP;
3515
3516        return new_val;
3517}
3518
3519static int
3520txgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
3521{
3522        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3523        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
3524        uint32_t mask;
3525        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3526
3527        if (queue_id < 32) {
3528                mask = rd32(hw, TXGBE_IMS(0));
3529                mask &= (1 << queue_id);
3530                wr32(hw, TXGBE_IMS(0), mask);
3531        } else if (queue_id < 64) {
3532                mask = rd32(hw, TXGBE_IMS(1));
3533                mask &= (1 << (queue_id - 32));
3534                wr32(hw, TXGBE_IMS(1), mask);
3535        }
3536        rte_intr_enable(intr_handle);
3537
3538        return 0;
3539}
3540
3541static int
3542txgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
3543{
3544        uint32_t mask;
3545        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3546
3547        if (queue_id < 32) {
3548                mask = rd32(hw, TXGBE_IMS(0));
3549                mask &= ~(1 << queue_id);
3550                wr32(hw, TXGBE_IMS(0), mask);
3551        } else if (queue_id < 64) {
3552                mask = rd32(hw, TXGBE_IMS(1));
3553                mask &= ~(1 << (queue_id - 32));
3554                wr32(hw, TXGBE_IMS(1), mask);
3555        }
3556
3557        return 0;
3558}
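
/*
 * Application-side sketch of the Rx interrupt flow these two ops serve.
 * It assumes dev_conf.intr_conf.rxq was set to 1 at configure time and
 * that the queue's event fd has been added to an epoll set, e.g. with
 * rte_eth_dev_rx_intr_ctl_q(); port_id and queue_id are placeholders.
 *
 *   rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *   ... block in rte_epoll_wait() until the queue raises an interrupt ...
 *   rte_eth_dev_rx_intr_disable(port_id, queue_id);
 *   ... poll the queue with rte_eth_rx_burst() until it is empty ...
 */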
3559
3560/**
3561 * set the IVAR registers, mapping interrupt causes to vectors
3562 * @param hw
3563 *  pointer to txgbe_hw struct
3564 * @param direction
3565 *  0 for Rx, 1 for Tx, -1 for other causes
3566 * @param queue
3567 *  queue to map the corresponding interrupt to
3568 * @param msix_vector
3569 *  the vector to map to the corresponding queue
3570 */
3571void
3572txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
3573                   uint8_t queue, uint8_t msix_vector)
3574{
3575        uint32_t tmp, idx;
3576
3577        if (direction == -1) {
3578                /* other causes */
3579                msix_vector |= TXGBE_IVARMISC_VLD;
3580                idx = 0;
3581                tmp = rd32(hw, TXGBE_IVARMISC);
3582                tmp &= ~(0xFF << idx);
3583                tmp |= (msix_vector << idx);
3584                wr32(hw, TXGBE_IVARMISC, tmp);
3585        } else {
3586                /* rx or tx causes */
3587                /* Workaround for lost ICR */
3588                idx = ((16 * (queue & 1)) + (8 * direction));
3589                tmp = rd32(hw, TXGBE_IVAR(queue >> 1));
3590                tmp &= ~(0xFF << idx);
3591                tmp |= (msix_vector << idx);
3592                wr32(hw, TXGBE_IVAR(queue >> 1), tmp);
3593        }
3594}
3595
3596/**
3597 * Sets up the hardware to properly generate MSI-X interrupts
3598 * @param dev
3599 *  pointer to struct rte_eth_dev (board private structure)
3600 */
3601static void
3602txgbe_configure_msix(struct rte_eth_dev *dev)
3603{
3604        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3605        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
3606        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3607        uint32_t queue_id, base = TXGBE_MISC_VEC_ID;
3608        uint32_t vec = TXGBE_MISC_VEC_ID;
3609        uint32_t gpie;
3610
3611        /* won't configure the MSI-X register if no mapping is done
3612         * between the intr vector and the event fd,
3613         * but if MSI-X has been enabled already, we need to configure
3614         * auto clean, auto mask and throttling.
3615         */
3616        gpie = rd32(hw, TXGBE_GPIE);
3617        if (!rte_intr_dp_is_en(intr_handle) &&
3618            !(gpie & TXGBE_GPIE_MSIX))
3619                return;
3620
3621        if (rte_intr_allow_others(intr_handle)) {
3622                base = TXGBE_RX_VEC_START;
3623                vec = base;
3624        }
3625
3626        /* setup GPIE for MSI-X mode */
3627        gpie = rd32(hw, TXGBE_GPIE);
3628        gpie |= TXGBE_GPIE_MSIX;
3629        wr32(hw, TXGBE_GPIE, gpie);
3630
3631        /* Populate the IVAR table and set the ITR values to the
3632         * corresponding register.
3633         */
3634        if (rte_intr_dp_is_en(intr_handle)) {
3635                for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
3636                        queue_id++) {
3637                        /* by default, 1:1 mapping */
3638                        txgbe_set_ivar_map(hw, 0, queue_id, vec);
3639                        intr_handle->intr_vec[queue_id] = vec;
3640                        if (vec < base + intr_handle->nb_efd - 1)
3641                                vec++;
3642                }
3643
3644                txgbe_set_ivar_map(hw, -1, 1, TXGBE_MISC_VEC_ID);
3645        }
3646        wr32(hw, TXGBE_ITR(TXGBE_MISC_VEC_ID),
3647                        TXGBE_ITR_IVAL_10G(TXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
3648                        | TXGBE_ITR_WRDSA);
3649}
3650
3651int
3652txgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
3653                           uint16_t queue_idx, uint16_t tx_rate)
3654{
3655        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3656        uint32_t bcnrc_val;
3657
3658        if (queue_idx >= hw->mac.max_tx_queues)
3659                return -EINVAL;
3660
3661        if (tx_rate != 0) {
3662                bcnrc_val = TXGBE_ARBTXRATE_MAX(tx_rate);
3663                bcnrc_val |= TXGBE_ARBTXRATE_MIN(tx_rate / 2);
3664        } else {
3665                bcnrc_val = 0;
3666        }
3667
3668        /*
3669         * Set global transmit compensation time to the MMW_SIZE in ARBTXMMW
3670         * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported.
3671         */
3672        wr32(hw, TXGBE_ARBTXMMW, 0x14);
3673
3674        /* Set ARBTXRATE of queue X */
3675        wr32(hw, TXGBE_ARBPOOLIDX, queue_idx);
3676        wr32(hw, TXGBE_ARBTXRATE, bcnrc_val);
3677        txgbe_flush(hw);
3678
3679        return 0;
3680}
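
/*
 * A one-line usage sketch: cap Tx queue 0 of a port to roughly 1000 Mbps.
 * The queue index and rate are placeholders.
 *
 *   rte_eth_set_queue_rate_limit(port_id, 0, 1000);
 */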
3681
3682int
3683txgbe_syn_filter_set(struct rte_eth_dev *dev,
3684                        struct rte_eth_syn_filter *filter,
3685                        bool add)
3686{
3687        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3688        struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
3689        uint32_t syn_info;
3690        uint32_t synqf;
3691
3692        if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM)
3693                return -EINVAL;
3694
3695        syn_info = filter_info->syn_info;
3696
3697        if (add) {
3698                if (syn_info & TXGBE_SYNCLS_ENA)
3699                        return -EINVAL;
3700                synqf = (uint32_t)TXGBE_SYNCLS_QPID(filter->queue);
3701                synqf |= TXGBE_SYNCLS_ENA;
3702
3703                if (filter->hig_pri)
3704                        synqf |= TXGBE_SYNCLS_HIPRIO;
3705                else
3706                        synqf &= ~TXGBE_SYNCLS_HIPRIO;
3707        } else {
3708                synqf = rd32(hw, TXGBE_SYNCLS);
3709                if (!(syn_info & TXGBE_SYNCLS_ENA))
3710                        return -ENOENT;
3711                synqf &= ~(TXGBE_SYNCLS_QPID_MASK | TXGBE_SYNCLS_ENA);
3712        }
3713
3714        filter_info->syn_info = synqf;
3715        wr32(hw, TXGBE_SYNCLS, synqf);
3716        txgbe_flush(hw);
3717        return 0;
3718}
3719
3720static inline enum txgbe_5tuple_protocol
3721convert_protocol_type(uint8_t protocol_value)
3722{
3723        if (protocol_value == IPPROTO_TCP)
3724                return TXGBE_5TF_PROT_TCP;
3725        else if (protocol_value == IPPROTO_UDP)
3726                return TXGBE_5TF_PROT_UDP;
3727        else if (protocol_value == IPPROTO_SCTP)
3728                return TXGBE_5TF_PROT_SCTP;
3729        else
3730                return TXGBE_5TF_PROT_NONE;
3731}
3732
3733/* inject a 5-tuple filter to HW */
3734static inline void
3735txgbe_inject_5tuple_filter(struct rte_eth_dev *dev,
3736                           struct txgbe_5tuple_filter *filter)
3737{
3738        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3739        int i;
3740        uint32_t ftqf, sdpqf;
3741        uint32_t l34timir = 0;
3742        uint32_t mask = TXGBE_5TFCTL0_MASK;
3743
3744        i = filter->index;
3745        sdpqf = TXGBE_5TFPORT_DST(be_to_le16(filter->filter_info.dst_port));
3746        sdpqf |= TXGBE_5TFPORT_SRC(be_to_le16(filter->filter_info.src_port));
3747
3748        ftqf = TXGBE_5TFCTL0_PROTO(filter->filter_info.proto);
3749        ftqf |= TXGBE_5TFCTL0_PRI(filter->filter_info.priority);
3750        if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. */
3751                mask &= ~TXGBE_5TFCTL0_MSADDR;
3752        if (filter->filter_info.dst_ip_mask == 0)
3753                mask &= ~TXGBE_5TFCTL0_MDADDR;
3754        if (filter->filter_info.src_port_mask == 0)
3755                mask &= ~TXGBE_5TFCTL0_MSPORT;
3756        if (filter->filter_info.dst_port_mask == 0)
3757                mask &= ~TXGBE_5TFCTL0_MDPORT;
3758        if (filter->filter_info.proto_mask == 0)
3759                mask &= ~TXGBE_5TFCTL0_MPROTO;
3760        ftqf |= mask;
3761        ftqf |= TXGBE_5TFCTL0_MPOOL;
3762        ftqf |= TXGBE_5TFCTL0_ENA;
3763
3764        wr32(hw, TXGBE_5TFDADDR(i), be_to_le32(filter->filter_info.dst_ip));
3765        wr32(hw, TXGBE_5TFSADDR(i), be_to_le32(filter->filter_info.src_ip));
3766        wr32(hw, TXGBE_5TFPORT(i), sdpqf);
3767        wr32(hw, TXGBE_5TFCTL0(i), ftqf);
3768
3769        l34timir |= TXGBE_5TFCTL1_QP(filter->queue);
3770        wr32(hw, TXGBE_5TFCTL1(i), l34timir);
3771}
3772
3773/*
3774 * add a 5tuple filter
3775 *
3776 * @param
3777 * dev: Pointer to struct rte_eth_dev.
3778 * index: the index allocated for the filter.
3779 * filter: pointer to the filter that will be added.
3780 * rx_queue: the queue id the filter is assigned to.
3781 *
3782 * @return
3783 *    - On success, zero.
3784 *    - On failure, a negative value.
3785 */
3786static int
3787txgbe_add_5tuple_filter(struct rte_eth_dev *dev,
3788                        struct txgbe_5tuple_filter *filter)
3789{
3790        struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
3791        int i, idx, shift;
3792
3793        /*
3794         * look for an unused 5tuple filter index,
3795         * and insert the filter into the list.
3796         */
3797        for (i = 0; i < TXGBE_MAX_FTQF_FILTERS; i++) {
3798                idx = i / (sizeof(uint32_t) * NBBY);
3799                shift = i % (sizeof(uint32_t) * NBBY);
3800                if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) {
3801                        filter_info->fivetuple_mask[idx] |= 1 << shift;
3802                        filter->index = i;
3803                        TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
3804                                          filter,
3805                                          entries);
3806                        break;
3807                }
3808        }
3809        if (i >= TXGBE_MAX_FTQF_FILTERS) {
3810                PMD_DRV_LOG(ERR, "5tuple filters are full.");
3811                return -ENOSYS;
3812        }
3813
3814        txgbe_inject_5tuple_filter(dev, filter);
3815
3816        return 0;
3817}
3818
3819/*
3820 * remove a 5tuple filter
3821 *
3822 * @param
3823 * dev: Pointer to struct rte_eth_dev.
3824 * filter: pointer to the filter that will be removed.
3825 */
3826static void
3827txgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
3828                        struct txgbe_5tuple_filter *filter)
3829{
3830        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3831        struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
3832        uint16_t index = filter->index;
3833
3834        filter_info->fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &=
3835                                ~(1 << (index % (sizeof(uint32_t) * NBBY)));
3836        TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
3837        rte_free(filter);
3838
3839        wr32(hw, TXGBE_5TFDADDR(index), 0);
3840        wr32(hw, TXGBE_5TFSADDR(index), 0);
3841        wr32(hw, TXGBE_5TFPORT(index), 0);
3842        wr32(hw, TXGBE_5TFCTL0(index), 0);
3843        wr32(hw, TXGBE_5TFCTL1(index), 0);
3844}
3845
3846static inline struct txgbe_5tuple_filter *
3847txgbe_5tuple_filter_lookup(struct txgbe_5tuple_filter_list *filter_list,
3848                        struct txgbe_5tuple_filter_info *key)
3849{
3850        struct txgbe_5tuple_filter *it;
3851
3852        TAILQ_FOREACH(it, filter_list, entries) {
3853                if (memcmp(key, &it->filter_info,
3854                        sizeof(struct txgbe_5tuple_filter_info)) == 0) {
3855                        return it;
3856                }
3857        }
3858        return NULL;
3859}
3860
3861/* translate elements in struct rte_eth_ntuple_filter
3862 * to struct txgbe_5tuple_filter_info
3863 */
3864static inline int
3865ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
3866                        struct txgbe_5tuple_filter_info *filter_info)
3867{
3868        if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM ||
3869                filter->priority > TXGBE_5TUPLE_MAX_PRI ||
3870                filter->priority < TXGBE_5TUPLE_MIN_PRI)
3871                return -EINVAL;
3872
3873        switch (filter->dst_ip_mask) {
3874        case UINT32_MAX:
3875                filter_info->dst_ip_mask = 0;
3876                filter_info->dst_ip = filter->dst_ip;
3877                break;
3878        case 0:
3879                filter_info->dst_ip_mask = 1;
3880                break;
3881        default:
3882                PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
3883                return -EINVAL;
3884        }
3885
3886        switch (filter->src_ip_mask) {
3887        case UINT32_MAX:
3888                filter_info->src_ip_mask = 0;
3889                filter_info->src_ip = filter->src_ip;
3890                break;
3891        case 0:
3892                filter_info->src_ip_mask = 1;
3893                break;
3894        default:
3895                PMD_DRV_LOG(ERR, "invalid src_ip mask.");
3896                return -EINVAL;
3897        }
3898
3899        switch (filter->dst_port_mask) {
3900        case UINT16_MAX:
3901                filter_info->dst_port_mask = 0;
3902                filter_info->dst_port = filter->dst_port;
3903                break;
3904        case 0:
3905                filter_info->dst_port_mask = 1;
3906                break;
3907        default:
3908                PMD_DRV_LOG(ERR, "invalid dst_port mask.");
3909                return -EINVAL;
3910        }
3911
3912        switch (filter->src_port_mask) {
3913        case UINT16_MAX:
3914                filter_info->src_port_mask = 0;
3915                filter_info->src_port = filter->src_port;
3916                break;
3917        case 0:
3918                filter_info->src_port_mask = 1;
3919                break;
3920        default:
3921                PMD_DRV_LOG(ERR, "invalid src_port mask.");
3922                return -EINVAL;
3923        }
3924
3925        switch (filter->proto_mask) {
3926        case UINT8_MAX:
3927                filter_info->proto_mask = 0;
3928                filter_info->proto =
3929                        convert_protocol_type(filter->proto);
3930                break;
3931        case 0:
3932                filter_info->proto_mask = 1;
3933                break;
3934        default:
3935                PMD_DRV_LOG(ERR, "invalid protocol mask.");
3936                return -EINVAL;
3937        }
3938
3939        filter_info->priority = (uint8_t)filter->priority;
3940        return 0;
3941}
3942
3943/*
3944 * add or delete an ntuple filter
3945 *
3946 * @param
3947 * dev: Pointer to struct rte_eth_dev.
3948 * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
3949 * add: if true, add filter, if false, remove filter
3950 *
3951 * @return
3952 *    - On success, zero.
3953 *    - On failure, a negative value.
3954 */
3955int
3956txgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
3957                        struct rte_eth_ntuple_filter *ntuple_filter,
3958                        bool add)
3959{
3960        struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
3961        struct txgbe_5tuple_filter_info filter_5tuple;
3962        struct txgbe_5tuple_filter *filter;
3963        int ret;
3964
3965        if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
3966                PMD_DRV_LOG(ERR, "only 5tuple is supported.");
3967                return -EINVAL;
3968        }
3969
3970        memset(&filter_5tuple, 0, sizeof(struct txgbe_5tuple_filter_info));
3971        ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
3972        if (ret < 0)
3973                return ret;
3974
3975        filter = txgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
3976                                         &filter_5tuple);
3977        if (filter != NULL && add) {
3978                PMD_DRV_LOG(ERR, "filter exists.");
3979                return -EEXIST;
3980        }
3981        if (filter == NULL && !add) {
3982                PMD_DRV_LOG(ERR, "filter doesn't exist.");
3983                return -ENOENT;
3984        }
3985
3986        if (add) {
3987                filter = rte_zmalloc("txgbe_5tuple_filter",
3988                                sizeof(struct txgbe_5tuple_filter), 0);
3989                if (filter == NULL)
3990                        return -ENOMEM;
3991                rte_memcpy(&filter->filter_info,
3992                                 &filter_5tuple,
3993                                 sizeof(struct txgbe_5tuple_filter_info));
3994                filter->queue = ntuple_filter->queue;
3995                ret = txgbe_add_5tuple_filter(dev, filter);
3996                if (ret < 0) {
3997                        rte_free(filter);
3998                        return ret;
3999                }
4000        } else {
4001                txgbe_remove_5tuple_filter(dev, filter);
4002        }
4003
4004        return 0;
4005}
4006
4007int
4008txgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
4009                        struct rte_eth_ethertype_filter *filter,
4010                        bool add)
4011{
4012        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4013        struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
4014        uint32_t etqf = 0;
4015        uint32_t etqs = 0;
4016        int ret;
4017        struct txgbe_ethertype_filter ethertype_filter;
4018
4019        if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM)
4020                return -EINVAL;
4021
4022        if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
4023            filter->ether_type == RTE_ETHER_TYPE_IPV6) {
4024                PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
4025                        " ethertype filter.", filter->ether_type);
4026                return -EINVAL;
4027        }
4028
4029        if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
4030                PMD_DRV_LOG(ERR, "mac compare is unsupported.");
4031                return -EINVAL;
4032        }
4033        if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
4034                PMD_DRV_LOG(ERR, "drop option is unsupported.");
4035                return -EINVAL;
4036        }
4037
4038        ret = txgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
4039        if (ret >= 0 && add) {
4040                PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
4041                            filter->ether_type);
4042                return -EEXIST;
4043        }
4044        if (ret < 0 && !add) {
4045                PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
4046                            filter->ether_type);
4047                return -ENOENT;
4048        }
4049
4050        if (add) {
4051                etqf = TXGBE_ETFLT_ENA;
4052                etqf |= TXGBE_ETFLT_ETID(filter->ether_type);
4053                etqs |= TXGBE_ETCLS_QPID(filter->queue);
4054                etqs |= TXGBE_ETCLS_QENA;
4055
4056                ethertype_filter.ethertype = filter->ether_type;
4057                ethertype_filter.etqf = etqf;
4058                ethertype_filter.etqs = etqs;
4059                ethertype_filter.conf = FALSE;
4060                ret = txgbe_ethertype_filter_insert(filter_info,
4061                                                    &ethertype_filter);
4062                if (ret < 0) {
4063                        PMD_DRV_LOG(ERR, "ethertype filters are full.");
4064                        return -ENOSPC;
4065                }
4066        } else {
4067                ret = txgbe_ethertype_filter_remove(filter_info, (uint8_t)ret);
4068                if (ret < 0)
4069                        return -ENOSYS;
4070        }
4071        wr32(hw, TXGBE_ETFLT(ret), etqf);
4072        wr32(hw, TXGBE_ETCLS(ret), etqs);
4073        txgbe_flush(hw);
4074
4075        return 0;
4076}
4077
4078static int
4079txgbe_dev_filter_ctrl(__rte_unused struct rte_eth_dev *dev,
4080                     enum rte_filter_type filter_type,
4081                     enum rte_filter_op filter_op,
4082                     void *arg)
4083{
4084        int ret = 0;
4085
4086        switch (filter_type) {
4087        case RTE_ETH_FILTER_GENERIC:
4088                if (filter_op != RTE_ETH_FILTER_GET)
4089                        return -EINVAL;
4090                *(const void **)arg = &txgbe_flow_ops;
4091                break;
4092        default:
4093                PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
4094                                                        filter_type);
4095                ret = -EINVAL;
4096                break;
4097        }
4098
4099        return ret;
4100}
4101
4102static u8 *
4103txgbe_dev_addr_list_itr(__rte_unused struct txgbe_hw *hw,
4104                        u8 **mc_addr_ptr, u32 *vmdq)
4105{
4106        u8 *mc_addr;
4107
4108        *vmdq = 0;
4109        mc_addr = *mc_addr_ptr;
4110        *mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr));
4111        return mc_addr;
4112}
4113
4114int
4115txgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
4116                          struct rte_ether_addr *mc_addr_set,
4117                          uint32_t nb_mc_addr)
4118{
4119        struct txgbe_hw *hw;
4120        u8 *mc_addr_list;
4121
4122        hw = TXGBE_DEV_HW(dev);
4123        mc_addr_list = (u8 *)mc_addr_set;
4124        return txgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
4125                                         txgbe_dev_addr_list_itr, TRUE);
4126}
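
/*
 * A small, hypothetical sketch of installing a multicast filter list from
 * the application; the two group addresses are arbitrary examples.
 *
 *   struct rte_ether_addr mc[2] = {
 *           { .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 } },
 *           { .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb } },
 *   };
 *   rte_eth_dev_set_mc_addr_list(port_id, mc, 2);
 */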
4127
4128static uint64_t
4129txgbe_read_systime_cyclecounter(struct rte_eth_dev *dev)
4130{
4131        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4132        uint64_t systime_cycles;
4133
4134        systime_cycles = (uint64_t)rd32(hw, TXGBE_TSTIMEL);
4135        systime_cycles |= (uint64_t)rd32(hw, TXGBE_TSTIMEH) << 32;
4136
4137        return systime_cycles;
4138}
4139
4140static uint64_t
4141txgbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
4142{
4143        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4144        uint64_t rx_tstamp_cycles;
4145
4146        /* TSRXSTMPL stores ns and TSRXSTMPH stores seconds. */
4147        rx_tstamp_cycles = (uint64_t)rd32(hw, TXGBE_TSRXSTMPL);
4148        rx_tstamp_cycles |= (uint64_t)rd32(hw, TXGBE_TSRXSTMPH) << 32;
4149
4150        return rx_tstamp_cycles;
4151}
4152
4153static uint64_t
4154txgbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
4155{
4156        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4157        uint64_t tx_tstamp_cycles;
4158
4159        /* TSTXSTMPL stores ns and TSTXSTMPH stores seconds. */
4160        tx_tstamp_cycles = (uint64_t)rd32(hw, TXGBE_TSTXSTMPL);
4161        tx_tstamp_cycles |= (uint64_t)rd32(hw, TXGBE_TSTXSTMPH) << 32;
4162
4163        return tx_tstamp_cycles;
4164}
4165
4166static void
4167txgbe_start_timecounters(struct rte_eth_dev *dev)
4168{
4169        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4170        struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
4171        struct rte_eth_link link;
4172        uint32_t incval = 0;
4173        uint32_t shift = 0;
4174
4175        /* Get current link speed. */
4176        txgbe_dev_link_update(dev, 1);
4177        rte_eth_linkstatus_get(dev, &link);
4178
4179        switch (link.link_speed) {
4180        case ETH_SPEED_NUM_100M:
4181                incval = TXGBE_INCVAL_100;
4182                shift = TXGBE_INCVAL_SHIFT_100;
4183                break;
4184        case ETH_SPEED_NUM_1G:
4185                incval = TXGBE_INCVAL_1GB;
4186                shift = TXGBE_INCVAL_SHIFT_1GB;
4187                break;
4188        case ETH_SPEED_NUM_10G:
4189        default:
4190                incval = TXGBE_INCVAL_10GB;
4191                shift = TXGBE_INCVAL_SHIFT_10GB;
4192                break;
4193        }
4194
4195        wr32(hw, TXGBE_TSTIMEINC, TXGBE_TSTIMEINC_VP(incval, 2));
4196
4197        memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
4198        memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
4199        memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
4200
4201        adapter->systime_tc.cc_mask = TXGBE_CYCLECOUNTER_MASK;
4202        adapter->systime_tc.cc_shift = shift;
4203        adapter->systime_tc.nsec_mask = (1ULL << shift) - 1;
4204
4205        adapter->rx_tstamp_tc.cc_mask = TXGBE_CYCLECOUNTER_MASK;
4206        adapter->rx_tstamp_tc.cc_shift = shift;
4207        adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
4208
4209        adapter->tx_tstamp_tc.cc_mask = TXGBE_CYCLECOUNTER_MASK;
4210        adapter->tx_tstamp_tc.cc_shift = shift;
4211        adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
4212}
4213
4214static int
4215txgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
4216{
4217        struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
4218
4219        adapter->systime_tc.nsec += delta;
4220        adapter->rx_tstamp_tc.nsec += delta;
4221        adapter->tx_tstamp_tc.nsec += delta;
4222
4223        return 0;
4224}
4225
4226static int
4227txgbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
4228{
4229        uint64_t ns;
4230        struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
4231
4232        ns = rte_timespec_to_ns(ts);
4233        /* Set the timecounters to a new value. */
4234        adapter->systime_tc.nsec = ns;
4235        adapter->rx_tstamp_tc.nsec = ns;
4236        adapter->tx_tstamp_tc.nsec = ns;
4237
4238        return 0;
4239}
4240
4241static int
4242txgbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
4243{
4244        uint64_t ns, systime_cycles;
4245        struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
4246
4247        systime_cycles = txgbe_read_systime_cyclecounter(dev);
4248        ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
4249        *ts = rte_ns_to_timespec(ns);
4250
4251        return 0;
4252}
4253
4254static int
4255txgbe_timesync_enable(struct rte_eth_dev *dev)
4256{
4257        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4258        uint32_t tsync_ctl;
4259
4260        /* Stop the timesync system time. */
4261        wr32(hw, TXGBE_TSTIMEINC, 0x0);
4262        /* Reset the timesync system time value. */
4263        wr32(hw, TXGBE_TSTIMEL, 0x0);
4264        wr32(hw, TXGBE_TSTIMEH, 0x0);
4265
4266        txgbe_start_timecounters(dev);
4267
4268        /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
4269        wr32(hw, TXGBE_ETFLT(TXGBE_ETF_ID_1588),
4270                RTE_ETHER_TYPE_1588 | TXGBE_ETFLT_ENA | TXGBE_ETFLT_1588);
4271
4272        /* Enable timestamping of received PTP packets. */
4273        tsync_ctl = rd32(hw, TXGBE_TSRXCTL);
4274        tsync_ctl |= TXGBE_TSRXCTL_ENA;
4275        wr32(hw, TXGBE_TSRXCTL, tsync_ctl);
4276
4277        /* Enable timestamping of transmitted PTP packets. */
4278        tsync_ctl = rd32(hw, TXGBE_TSTXCTL);
4279        tsync_ctl |= TXGBE_TSTXCTL_ENA;
4280        wr32(hw, TXGBE_TSTXCTL, tsync_ctl);
4281
4282        txgbe_flush(hw);
4283
4284        return 0;
4285}
4286
4287static int
4288txgbe_timesync_disable(struct rte_eth_dev *dev)
4289{
4290        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4291        uint32_t tsync_ctl;
4292
4293        /* Disable timestamping of transmitted PTP packets. */
4294        tsync_ctl = rd32(hw, TXGBE_TSTXCTL);
4295        tsync_ctl &= ~TXGBE_TSTXCTL_ENA;
4296        wr32(hw, TXGBE_TSTXCTL, tsync_ctl);
4297
4298        /* Disable timestamping of received PTP packets. */
4299        tsync_ctl = rd32(hw, TXGBE_TSRXCTL);
4300        tsync_ctl &= ~TXGBE_TSRXCTL_ENA;
4301        wr32(hw, TXGBE_TSRXCTL, tsync_ctl);
4302
4303        /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
4304        wr32(hw, TXGBE_ETFLT(TXGBE_ETF_ID_1588), 0);
4305
4306        /* Stop incrementing the System Time registers. */
4307        wr32(hw, TXGBE_TSTIMEINC, 0);
4308
4309        return 0;
4310}
4311
4312static int
4313txgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
4314                                 struct timespec *timestamp,
4315                                 uint32_t flags __rte_unused)
4316{
4317        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4318        struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
4319        uint32_t tsync_rxctl;
4320        uint64_t rx_tstamp_cycles;
4321        uint64_t ns;
4322
4323        tsync_rxctl = rd32(hw, TXGBE_TSRXCTL);
4324        if ((tsync_rxctl & TXGBE_TSRXCTL_VLD) == 0)
4325                return -EINVAL;
4326
4327        rx_tstamp_cycles = txgbe_read_rx_tstamp_cyclecounter(dev);
4328        ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
4329        *timestamp = rte_ns_to_timespec(ns);
4330
4331        return  0;
4332}
4333
4334static int
4335txgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
4336                                 struct timespec *timestamp)
4337{
4338        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4339        struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
4340        uint32_t tsync_txctl;
4341        uint64_t tx_tstamp_cycles;
4342        uint64_t ns;
4343
4344        tsync_txctl = rd32(hw, TXGBE_TSTXCTL);
4345        if ((tsync_txctl & TXGBE_TSTXCTL_VLD) == 0)
4346                return -EINVAL;
4347
4348        tx_tstamp_cycles = txgbe_read_tx_tstamp_cyclecounter(dev);
4349        ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
4350        *timestamp = rte_ns_to_timespec(ns);
4351
4352        return 0;
4353}
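
/*
 * A condensed, hypothetical IEEE 1588 usage sketch covering the timesync
 * ops above; port_id and the 1 us adjustment are placeholders, and the Rx
 * timestamp is only valid for PTP frames timestamped by the hardware.
 *
 *   struct timespec ts;
 *
 *   rte_eth_timesync_enable(port_id);
 *   if (rte_eth_timesync_read_rx_timestamp(port_id, &ts, 0) == 0)
 *           printf("rx tstamp %ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
 *   rte_eth_timesync_adjust_time(port_id, 1000);
 */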
4354
4355static int
4356txgbe_get_reg_length(struct rte_eth_dev *dev __rte_unused)
4357{
4358        int count = 0;
4359        int g_ind = 0;
4360        const struct reg_info *reg_group;
4361        const struct reg_info **reg_set = txgbe_regs_others;
4362
4363        while ((reg_group = reg_set[g_ind++]))
4364                count += txgbe_regs_group_count(reg_group);
4365
4366        return count;
4367}
4368
4369static int
4370txgbe_get_regs(struct rte_eth_dev *dev,
4371              struct rte_dev_reg_info *regs)
4372{
4373        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4374        uint32_t *data = regs->data;
4375        int g_ind = 0;
4376        int count = 0;
4377        const struct reg_info *reg_group;
4378        const struct reg_info **reg_set = txgbe_regs_others;
4379
4380        if (data == NULL) {
4381                regs->length = txgbe_get_reg_length(dev);
4382                regs->width = sizeof(uint32_t);
4383                return 0;
4384        }
4385
4386        /* Support only full register dump */
4387        if (regs->length == 0 ||
4388            regs->length == (uint32_t)txgbe_get_reg_length(dev)) {
4389                regs->version = hw->mac.type << 24 |
4390                                hw->revision_id << 16 |
4391                                hw->device_id;
4392                while ((reg_group = reg_set[g_ind++]))
4393                        count += txgbe_read_regs_group(dev, &data[count],
4394                                                      reg_group);
4395                return 0;
4396        }
4397
4398        return -ENOTSUP;
4399}
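
/*
 * A usage sketch for the register dump: rte_eth_dev_get_reg_info() is
 * called once with data == NULL to learn the length, then again with a
 * buffer; port_id is a placeholder.
 *
 *   struct rte_dev_reg_info regs;
 *
 *   memset(&regs, 0, sizeof(regs));
 *   rte_eth_dev_get_reg_info(port_id, &regs);
 *   regs.data = malloc(regs.length * regs.width);
 *   if (regs.data != NULL &&
 *       rte_eth_dev_get_reg_info(port_id, &regs) == 0)
 *           printf("dumped %u registers, version 0x%x\n",
 *                  regs.length, regs.version);
 *   free(regs.data);
 */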
4400
4401static int
4402txgbe_get_eeprom_length(struct rte_eth_dev *dev)
4403{
4404        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4405
4406        /* Return unit is byte count */
4407        return hw->rom.word_size * 2;
4408}
4409
4410static int
4411txgbe_get_eeprom(struct rte_eth_dev *dev,
4412                struct rte_dev_eeprom_info *in_eeprom)
4413{
4414        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4415        struct txgbe_rom_info *eeprom = &hw->rom;
4416        uint16_t *data = in_eeprom->data;
4417        int first, length;
4418
4419        first = in_eeprom->offset >> 1;
4420        length = in_eeprom->length >> 1;
4421        if (first > hw->rom.word_size ||
4422            ((first + length) > hw->rom.word_size))
4423                return -EINVAL;
4424
4425        in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
4426
4427        return eeprom->readw_buffer(hw, first, length, data);
4428}
4429
4430static int
4431txgbe_set_eeprom(struct rte_eth_dev *dev,
4432                struct rte_dev_eeprom_info *in_eeprom)
4433{
4434        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4435        struct txgbe_rom_info *eeprom = &hw->rom;
4436        uint16_t *data = in_eeprom->data;
4437        int first, length;
4438
4439        first = in_eeprom->offset >> 1;
4440        length = in_eeprom->length >> 1;
4441        if (first > hw->rom.word_size ||
4442            ((first + length) > hw->rom.word_size))
4443                return -EINVAL;
4444
4445        in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
4446
4447        return eeprom->writew_buffer(hw,  first, length, data);
4448}
4449
4450static int
4451txgbe_get_module_info(struct rte_eth_dev *dev,
4452                      struct rte_eth_dev_module_info *modinfo)
4453{
4454        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4455        uint32_t status;
4456        uint8_t sff8472_rev, addr_mode;
4457        bool page_swap = false;
4458
4459        /* Check whether we support SFF-8472 or not */
4460        status = hw->phy.read_i2c_eeprom(hw,
4461                                             TXGBE_SFF_SFF_8472_COMP,
4462                                             &sff8472_rev);
4463        if (status != 0)
4464                return -EIO;
4465
4466        /* Check the addressing mode; address-change modules are not supported */
4467        status = hw->phy.read_i2c_eeprom(hw,
4468                                             TXGBE_SFF_SFF_8472_SWAP,
4469                                             &addr_mode);
4470        if (status != 0)
4471                return -EIO;
4472
4473        if (addr_mode & TXGBE_SFF_ADDRESSING_MODE) {
4474                PMD_DRV_LOG(ERR,
4475                            "Address change required to access page 0xA2, "
4476                            "but not supported. Please report the module "
4477                            "type to the driver maintainers.");
4478                page_swap = true;
4479        }
4480
4481        if (sff8472_rev == TXGBE_SFF_SFF_8472_UNSUP || page_swap) {
4482                /* We have an SFP, but it does not support SFF-8472 */
4483                modinfo->type = RTE_ETH_MODULE_SFF_8079;
4484                modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
4485        } else {
4486                /* We have an SFP that supports a revision of SFF-8472. */
4487                modinfo->type = RTE_ETH_MODULE_SFF_8472;
4488                modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
4489        }
4490
4491        return 0;
4492}
4493
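/* Read the SFP module EEPROM over I2C, one byte at a time */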
4494static int
4495txgbe_get_module_eeprom(struct rte_eth_dev *dev,
4496                        struct rte_dev_eeprom_info *info)
4497{
4498        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4499        uint32_t status = TXGBE_ERR_PHY_ADDR_INVALID;
4500        uint8_t databyte = 0xFF;
4501        uint8_t *data = info->data;
4502        uint32_t i = 0;
4503
4504        if (info->length == 0)
4505                return -EINVAL;
4506
4507        for (i = info->offset; i < info->offset + info->length; i++) {
4508                if (i < RTE_ETH_MODULE_SFF_8079_LEN)
4509                        status = hw->phy.read_i2c_eeprom(hw, i, &databyte);
4510                else
4511                        status = hw->phy.read_i2c_sff8472(hw, i, &databyte);
4512
4513                if (status != 0)
4514                        return -EIO;
4515
4516                data[i - info->offset] = databyte;
4517        }
4518
4519        return 0;
4520}
4521
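/* Check whether RSS update is supported for the given MAC type */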
4522bool
4523txgbe_rss_update_sp(enum txgbe_mac_type mac_type)
4524{
4525        switch (mac_type) {
4526        case txgbe_mac_raptor:
4527                return true;
4528        default:
4529                return false;
4530        }
4531}
4532
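/* Retrieve DCB configuration: TC count, priority-to-TC and TC-to-queue mappings */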
4533static int
4534txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
4535                        struct rte_eth_dcb_info *dcb_info)
4536{
4537        struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(dev);
4538        struct txgbe_dcb_tc_config *tc;
4539        struct rte_eth_dcb_tc_queue_mapping *tc_queue;
4540        uint8_t nb_tcs;
4541        uint8_t i, j;
4542
4543        if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
4544                dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
4545        else
4546                dcb_info->nb_tcs = 1;
4547
4548        tc_queue = &dcb_info->tc_queue;
4549        nb_tcs = dcb_info->nb_tcs;
4550
4551        if (dcb_config->vt_mode) { /* vt is enabled */
4552                struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
4553                                &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
4554                for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
4555                        dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
4556                if (RTE_ETH_DEV_SRIOV(dev).active > 0) {
4557                        for (j = 0; j < nb_tcs; j++) {
4558                                tc_queue->tc_rxq[0][j].base = j;
4559                                tc_queue->tc_rxq[0][j].nb_queue = 1;
4560                                tc_queue->tc_txq[0][j].base = j;
4561                                tc_queue->tc_txq[0][j].nb_queue = 1;
4562                        }
4563                } else {
4564                        for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
4565                                for (j = 0; j < nb_tcs; j++) {
4566                                        tc_queue->tc_rxq[i][j].base =
4567                                                i * nb_tcs + j;
4568                                        tc_queue->tc_rxq[i][j].nb_queue = 1;
4569                                        tc_queue->tc_txq[i][j].base =
4570                                                i * nb_tcs + j;
4571                                        tc_queue->tc_txq[i][j].nb_queue = 1;
4572                                }
4573                        }
4574                }
4575        } else { /* vt is disabled */
4576                struct rte_eth_dcb_rx_conf *rx_conf =
4577                                &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
4578                for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
4579                        dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
4580                if (dcb_info->nb_tcs == ETH_4_TCS) {
4581                        for (i = 0; i < dcb_info->nb_tcs; i++) {
4582                                dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
4583                                dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
4584                        }
4585                        dcb_info->tc_queue.tc_txq[0][0].base = 0;
4586                        dcb_info->tc_queue.tc_txq[0][1].base = 64;
4587                        dcb_info->tc_queue.tc_txq[0][2].base = 96;
4588                        dcb_info->tc_queue.tc_txq[0][3].base = 112;
4589                        dcb_info->tc_queue.tc_txq[0][0].nb_queue = 64;
4590                        dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
4591                        dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
4592                        dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
4593                } else if (dcb_info->nb_tcs == ETH_8_TCS) {
4594                        for (i = 0; i < dcb_info->nb_tcs; i++) {
4595                                dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
4596                                dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
4597                        }
4598                        dcb_info->tc_queue.tc_txq[0][0].base = 0;
4599                        dcb_info->tc_queue.tc_txq[0][1].base = 32;
4600                        dcb_info->tc_queue.tc_txq[0][2].base = 64;
4601                        dcb_info->tc_queue.tc_txq[0][3].base = 80;
4602                        dcb_info->tc_queue.tc_txq[0][4].base = 96;
4603                        dcb_info->tc_queue.tc_txq[0][5].base = 104;
4604                        dcb_info->tc_queue.tc_txq[0][6].base = 112;
4605                        dcb_info->tc_queue.tc_txq[0][7].base = 120;
4606                        dcb_info->tc_queue.tc_txq[0][0].nb_queue = 32;
4607                        dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
4608                        dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
4609                        dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
4610                        dcb_info->tc_queue.tc_txq[0][4].nb_queue = 8;
4611                        dcb_info->tc_queue.tc_txq[0][5].nb_queue = 8;
4612                        dcb_info->tc_queue.tc_txq[0][6].nb_queue = 8;
4613                        dcb_info->tc_queue.tc_txq[0][7].nb_queue = 8;
4614                }
4615        }
4616        for (i = 0; i < dcb_info->nb_tcs; i++) {
4617                tc = &dcb_config->tc_config[i];
4618                dcb_info->tc_bws[i] = tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent;
4619        }
4620        return 0;
4621}
4622
4623/* Update e-tag ether type */
4624static int
4625txgbe_update_e_tag_eth_type(struct txgbe_hw *hw,
4626                            uint16_t ether_type)
4627{
4628        uint32_t etag_etype;
4629
4630        etag_etype = rd32(hw, TXGBE_EXTAG);
4631        etag_etype &= ~TXGBE_EXTAG_ETAG_MASK;
4632        etag_etype |= ether_type;
4633        wr32(hw, TXGBE_EXTAG, etag_etype);
4634        txgbe_flush(hw);
4635
4636        return 0;
4637}
4638
4639/* Enable e-tag tunnel */
4640static int
4641txgbe_e_tag_enable(struct txgbe_hw *hw)
4642{
4643        uint32_t etag_etype;
4644
4645        etag_etype = rd32(hw, TXGBE_PORTCTL);
4646        etag_etype |= TXGBE_PORTCTL_ETAG;
4647        wr32(hw, TXGBE_PORTCTL, etag_etype);
4648        txgbe_flush(hw);
4649
4650        return 0;
4651}
4652
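/* Delete the E-tag forwarding entry matching the tunnel id from the RAR table */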
4653static int
4654txgbe_e_tag_filter_del(struct rte_eth_dev *dev,
4655                       struct txgbe_l2_tunnel_conf  *l2_tunnel)
4656{
4657        int ret = 0;
4658        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4659        uint32_t i, rar_entries;
4660        uint32_t rar_low, rar_high;
4661
4662        rar_entries = hw->mac.num_rar_entries;
4663
4664        for (i = 1; i < rar_entries; i++) {
4665                wr32(hw, TXGBE_ETHADDRIDX, i);
4666                rar_high = rd32(hw, TXGBE_ETHADDRH);
4667                rar_low  = rd32(hw, TXGBE_ETHADDRL);
4668                if ((rar_high & TXGBE_ETHADDRH_VLD) &&
4669                    (rar_high & TXGBE_ETHADDRH_ETAG) &&
4670                    (TXGBE_ETHADDRL_ETAG(rar_low) ==
4671                     l2_tunnel->tunnel_id)) {
4672                        wr32(hw, TXGBE_ETHADDRL, 0);
4673                        wr32(hw, TXGBE_ETHADDRH, 0);
4674
4675                        txgbe_clear_vmdq(hw, i, BIT_MASK32);
4676
4677                        return ret;
4678                }
4679        }
4680
4681        return ret;
4682}
4683
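/* Add an E-tag forwarding entry to the first free RAR table slot */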
4684static int
4685txgbe_e_tag_filter_add(struct rte_eth_dev *dev,
4686                       struct txgbe_l2_tunnel_conf *l2_tunnel)
4687{
4688        int ret = 0;
4689        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4690        uint32_t i, rar_entries;
4691        uint32_t rar_low, rar_high;
4692
4693        /* One entry per tunnel: remove any existing entry first. */
4694        txgbe_e_tag_filter_del(dev, l2_tunnel);
4695
4696        rar_entries = hw->mac.num_rar_entries;
4697
4698        for (i = 1; i < rar_entries; i++) {
4699                wr32(hw, TXGBE_ETHADDRIDX, i);
4700                rar_high = rd32(hw, TXGBE_ETHADDRH);
4701                if (rar_high & TXGBE_ETHADDRH_VLD) {
4702                        continue;
4703                } else {
4704                        txgbe_set_vmdq(hw, i, l2_tunnel->pool);
4705                        rar_high = TXGBE_ETHADDRH_VLD | TXGBE_ETHADDRH_ETAG;
4706                        rar_low = l2_tunnel->tunnel_id;
4707
4708                        wr32(hw, TXGBE_ETHADDRL, rar_low);
4709                        wr32(hw, TXGBE_ETHADDRH, rar_high);
4710
4711                        return ret;
4712                }
4713        }
4714
4715        PMD_INIT_LOG(NOTICE, "The E-tag forwarding rule table is full."
4716                     " Please remove a rule before adding a new one.");
4717        return -EINVAL;
4718}
4719
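/* Look up an L2 tunnel filter node by key in the hash table */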
4720static inline struct txgbe_l2_tn_filter *
4721txgbe_l2_tn_filter_lookup(struct txgbe_l2_tn_info *l2_tn_info,
4722                          struct txgbe_l2_tn_key *key)
4723{
4724        int ret;
4725
4726        ret = rte_hash_lookup(l2_tn_info->hash_handle, (const void *)key);
4727        if (ret < 0)
4728                return NULL;
4729
4730        return l2_tn_info->hash_map[ret];
4731}
4732
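/* Add an L2 tunnel filter to the hash table and the filter list */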
4733static inline int
4734txgbe_insert_l2_tn_filter(struct txgbe_l2_tn_info *l2_tn_info,
4735                          struct txgbe_l2_tn_filter *l2_tn_filter)
4736{
4737        int ret;
4738
4739        ret = rte_hash_add_key(l2_tn_info->hash_handle,
4740                               &l2_tn_filter->key);
4741
4742        if (ret < 0) {
4743                PMD_DRV_LOG(ERR,
4744                            "Failed to insert L2 tunnel filter"
4745                            " into hash table, error %d!",
4746                            ret);
4747                return ret;
4748        }
4749
4750        l2_tn_info->hash_map[ret] = l2_tn_filter;
4751
4752        TAILQ_INSERT_TAIL(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);
4753
4754        return 0;
4755}
4756
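/* Remove an L2 tunnel filter from the hash table and the filter list */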
4757static inline int
4758txgbe_remove_l2_tn_filter(struct txgbe_l2_tn_info *l2_tn_info,
4759                          struct txgbe_l2_tn_key *key)
4760{
4761        int ret;
4762        struct txgbe_l2_tn_filter *l2_tn_filter;
4763
4764        ret = rte_hash_del_key(l2_tn_info->hash_handle, key);
4765
4766        if (ret < 0) {
4767                PMD_DRV_LOG(ERR,
4768                            "No such L2 tunnel filter to delete, error %d!",
4769                            ret);
4770                return ret;
4771        }
4772
4773        l2_tn_filter = l2_tn_info->hash_map[ret];
4774        l2_tn_info->hash_map[ret] = NULL;
4775
4776        TAILQ_REMOVE(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);
4777        rte_free(l2_tn_filter);
4778
4779        return 0;
4780}
4781
4782/* Add l2 tunnel filter */
4783int
4784txgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
4785                               struct txgbe_l2_tunnel_conf *l2_tunnel,
4786                               bool restore)
4787{
4788        int ret;
4789        struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
4790        struct txgbe_l2_tn_key key;
4791        struct txgbe_l2_tn_filter *node;
4792
4793        if (!restore) {
4794                key.l2_tn_type = l2_tunnel->l2_tunnel_type;
4795                key.tn_id = l2_tunnel->tunnel_id;
4796
4797                node = txgbe_l2_tn_filter_lookup(l2_tn_info, &key);
4798
4799                if (node) {
4800                        PMD_DRV_LOG(ERR,
4801                                    "The L2 tunnel filter already exists!");
4802                        return -EINVAL;
4803                }
4804
4805                node = rte_zmalloc("txgbe_l2_tn",
4806                                   sizeof(struct txgbe_l2_tn_filter),
4807                                   0);
4808                if (!node)
4809                        return -ENOMEM;
4810
4811                rte_memcpy(&node->key,
4812                                 &key,
4813                                 sizeof(struct txgbe_l2_tn_key));
4814                node->pool = l2_tunnel->pool;
4815                ret = txgbe_insert_l2_tn_filter(l2_tn_info, node);
4816                if (ret < 0) {
4817                        rte_free(node);
4818                        return ret;
4819                }
4820        }
4821
4822        switch (l2_tunnel->l2_tunnel_type) {
4823        case RTE_L2_TUNNEL_TYPE_E_TAG:
4824                ret = txgbe_e_tag_filter_add(dev, l2_tunnel);
4825                break;
4826        default:
4827                PMD_DRV_LOG(ERR, "Invalid tunnel type");
4828                ret = -EINVAL;
4829                break;
4830        }
4831
4832        if (!restore && ret < 0)
4833                (void)txgbe_remove_l2_tn_filter(l2_tn_info, &key);
4834
4835        return ret;
4836}
4837
4838/* Delete l2 tunnel filter */
4839int
4840txgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
4841                               struct txgbe_l2_tunnel_conf *l2_tunnel)
4842{
4843        int ret;
4844        struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
4845        struct txgbe_l2_tn_key key;
4846
4847        key.l2_tn_type = l2_tunnel->l2_tunnel_type;
4848        key.tn_id = l2_tunnel->tunnel_id;
4849        ret = txgbe_remove_l2_tn_filter(l2_tn_info, &key);
4850        if (ret < 0)
4851                return ret;
4852
4853        switch (l2_tunnel->l2_tunnel_type) {
4854        case RTE_L2_TUNNEL_TYPE_E_TAG:
4855                ret = txgbe_e_tag_filter_del(dev, l2_tunnel);
4856                break;
4857        default:
4858                PMD_DRV_LOG(ERR, "Invalid tunnel type");
4859                ret = -EINVAL;
4860                break;
4861        }
4862
4863        return ret;
4864}
4865
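/* Enable or disable E-tag based pool forwarding */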
4866static int
4867txgbe_e_tag_forwarding_en_dis(struct rte_eth_dev *dev, bool en)
4868{
4869        int ret = 0;
4870        uint32_t ctrl;
4871        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4872
4873        ctrl = rd32(hw, TXGBE_POOLCTL);
4874        ctrl &= ~TXGBE_POOLCTL_MODE_MASK;
4875        if (en)
4876                ctrl |= TXGBE_PSRPOOL_MODE_ETAG;
4877        wr32(hw, TXGBE_POOLCTL, ctrl);
4878
4879        return ret;
4880}
4881
4882/* Add UDP tunneling port */
4883static int
4884txgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
4885                              struct rte_eth_udp_tunnel *udp_tunnel)
4886{
4887        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4888        int ret = 0;
4889
4890        if (udp_tunnel == NULL)
4891                return -EINVAL;
4892
4893        switch (udp_tunnel->prot_type) {
4894        case RTE_TUNNEL_TYPE_VXLAN:
4895                if (udp_tunnel->udp_port == 0) {
4896                        PMD_DRV_LOG(ERR, "Adding VxLAN port 0 is not allowed.");
4897                        ret = -EINVAL;
4898                        break;
4899                }
4900                wr32(hw, TXGBE_VXLANPORT, udp_tunnel->udp_port);
4901                wr32(hw, TXGBE_VXLANPORTGPE, udp_tunnel->udp_port);
4902                break;
4903        case RTE_TUNNEL_TYPE_GENEVE:
4904                if (udp_tunnel->udp_port == 0) {
4905                        PMD_DRV_LOG(ERR, "Adding Geneve port 0 is not allowed.");
4906                        ret = -EINVAL;
4907                        break;
4908                }
4909                wr32(hw, TXGBE_GENEVEPORT, udp_tunnel->udp_port);
4910                break;
4911        case RTE_TUNNEL_TYPE_TEREDO:
4912                if (udp_tunnel->udp_port == 0) {
4913                        PMD_DRV_LOG(ERR, "Adding Teredo port 0 is not allowed.");
4914                        ret = -EINVAL;
4915                        break;
4916                }
4917                wr32(hw, TXGBE_TEREDOPORT, udp_tunnel->udp_port);
4918                break;
4919        default:
4920                PMD_DRV_LOG(ERR, "Invalid tunnel type");
4921                ret = -EINVAL;
4922                break;
4923        }
4924
4925        txgbe_flush(hw);
4926
4927        return ret;
4928}
4929
4930/* Remove UDP tunneling port */
4931static int
4932txgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
4933                              struct rte_eth_udp_tunnel *udp_tunnel)
4934{
4935        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4936        int ret = 0;
4937        uint16_t cur_port;
4938
4939        if (udp_tunnel == NULL)
4940                return -EINVAL;
4941
4942        switch (udp_tunnel->prot_type) {
4943        case RTE_TUNNEL_TYPE_VXLAN:
4944                cur_port = (uint16_t)rd32(hw, TXGBE_VXLANPORT);
4945                if (cur_port != udp_tunnel->udp_port) {
4946                        PMD_DRV_LOG(ERR, "Port %u does not exist.",
4947                                        udp_tunnel->udp_port);
4948                        ret = -EINVAL;
4949                        break;
4950                }
4951                wr32(hw, TXGBE_VXLANPORT, 0);
4952                wr32(hw, TXGBE_VXLANPORTGPE, 0);
4953                break;
4954        case RTE_TUNNEL_TYPE_GENEVE:
4955                cur_port = (uint16_t)rd32(hw, TXGBE_GENEVEPORT);
4956                if (cur_port != udp_tunnel->udp_port) {
4957                        PMD_DRV_LOG(ERR, "Port %u does not exist.",
4958                                        udp_tunnel->udp_port);
4959                        ret = -EINVAL;
4960                        break;
4961                }
4962                wr32(hw, TXGBE_GENEVEPORT, 0);
4963                break;
4964        case RTE_TUNNEL_TYPE_TEREDO:
4965                cur_port = (uint16_t)rd32(hw, TXGBE_TEREDOPORT);
4966                if (cur_port != udp_tunnel->udp_port) {
4967                        PMD_DRV_LOG(ERR, "Port %u does not exist.",
4968                                        udp_tunnel->udp_port);
4969                        ret = -EINVAL;
4970                        break;
4971                }
4972                wr32(hw, TXGBE_TEREDOPORT, 0);
4973                break;
4974        default:
4975                PMD_DRV_LOG(ERR, "Invalid tunnel type");
4976                ret = -EINVAL;
4977                break;
4978        }
4979
4980        txgbe_flush(hw);
4981
4982        return ret;
4983}
4984
4985/* restore n-tuple filters */
4986static inline void
4987txgbe_ntuple_filter_restore(struct rte_eth_dev *dev)
4988{
4989        struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
4990        struct txgbe_5tuple_filter *node;
4991
4992        TAILQ_FOREACH(node, &filter_info->fivetuple_list, entries) {
4993                txgbe_inject_5tuple_filter(dev, node);
4994        }
4995}
4996
4997/* restore ethernet type filters */
4998static inline void
4999txgbe_ethertype_filter_restore(struct rte_eth_dev *dev)
5000{
5001        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
5002        struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
5003        int i;
5004
5005        for (i = 0; i < TXGBE_ETF_ID_MAX; i++) {
5006                if (filter_info->ethertype_mask & (1 << i)) {
5007                        wr32(hw, TXGBE_ETFLT(i),
5008                                        filter_info->ethertype_filters[i].etqf);
5009                        wr32(hw, TXGBE_ETCLS(i),
5010                                        filter_info->ethertype_filters[i].etqs);
5011                        txgbe_flush(hw);
5012                }
5013        }
5014}
5015
5016/* restore SYN filter */
5017static inline void
5018txgbe_syn_filter_restore(struct rte_eth_dev *dev)
5019{
5020        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
5021        struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
5022        uint32_t synqf;
5023
5024        synqf = filter_info->syn_info;
5025
5026        if (synqf & TXGBE_SYNCLS_ENA) {
5027                wr32(hw, TXGBE_SYNCLS, synqf);
5028                txgbe_flush(hw);
5029        }
5030}
5031
5032/* restore L2 tunnel filter */
5033static inline void
5034txgbe_l2_tn_filter_restore(struct rte_eth_dev *dev)
5035{
5036        struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
5037        struct txgbe_l2_tn_filter *node;
5038        struct txgbe_l2_tunnel_conf l2_tn_conf;
5039
5040        TAILQ_FOREACH(node, &l2_tn_info->l2_tn_list, entries) {
5041                l2_tn_conf.l2_tunnel_type = node->key.l2_tn_type;
5042                l2_tn_conf.tunnel_id      = node->key.tn_id;
5043                l2_tn_conf.pool           = node->pool;
5044                (void)txgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_conf, TRUE);
5045        }
5046}
5047
5048/* restore rss filter */
5049static inline void
5050txgbe_rss_filter_restore(struct rte_eth_dev *dev)
5051{
5052        struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
5053
5054        if (filter_info->rss_info.conf.queue_num)
5055                txgbe_config_rss_filter(dev,
5056                        &filter_info->rss_info, TRUE);
5057}
5058
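/* Restore all flow filters to the hardware */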
5059static int
5060txgbe_filter_restore(struct rte_eth_dev *dev)
5061{
5062        txgbe_ntuple_filter_restore(dev);
5063        txgbe_ethertype_filter_restore(dev);
5064        txgbe_syn_filter_restore(dev);
5065        txgbe_fdir_filter_restore(dev);
5066        txgbe_l2_tn_filter_restore(dev);
5067        txgbe_rss_filter_restore(dev);
5068
5069        return 0;
5070}
5071
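/* Apply the stored L2 tunnel (E-tag) configuration to the hardware */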
5072static void
5073txgbe_l2_tunnel_conf(struct rte_eth_dev *dev)
5074{
5075        struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
5076        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
5077
5078        if (l2_tn_info->e_tag_en)
5079                (void)txgbe_e_tag_enable(hw);
5080
5081        if (l2_tn_info->e_tag_fwd_en)
5082                (void)txgbe_e_tag_forwarding_en_dis(dev, 1);
5083
5084        (void)txgbe_update_e_tag_eth_type(hw, l2_tn_info->e_tag_ether_type);
5085}
5086
5087/* remove all the n-tuple filters */
5088void
5089txgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev)
5090{
5091        struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
5092        struct txgbe_5tuple_filter *p_5tuple;
5093
5094        while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list)))
5095                txgbe_remove_5tuple_filter(dev, p_5tuple);
5096}
5097
5098/* remove all the ether type filters */
5099void
5100txgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev)
5101{
5102        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
5103        struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
5104        int i;
5105
5106        for (i = 0; i < TXGBE_ETF_ID_MAX; i++) {
5107                if (filter_info->ethertype_mask & (1 << i) &&
5108                    !filter_info->ethertype_filters[i].conf) {
5109                        (void)txgbe_ethertype_filter_remove(filter_info,
5110                                                            (uint8_t)i);
5111                        wr32(hw, TXGBE_ETFLT(i), 0);
5112                        wr32(hw, TXGBE_ETCLS(i), 0);
5113                        txgbe_flush(hw);
5114                }
5115        }
5116}
5117
5118/* remove the SYN filter */
5119void
5120txgbe_clear_syn_filter(struct rte_eth_dev *dev)
5121{
5122        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
5123        struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
5124
5125        if (filter_info->syn_info & TXGBE_SYNCLS_ENA) {
5126                filter_info->syn_info = 0;
5127
5128                wr32(hw, TXGBE_SYNCLS, 0);
5129                txgbe_flush(hw);
5130        }
5131}
5132
5133/* remove all the L2 tunnel filters */
5134int
5135txgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev)
5136{
5137        struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
5138        struct txgbe_l2_tn_filter *l2_tn_filter;
5139        struct txgbe_l2_tunnel_conf l2_tn_conf;
5140        int ret = 0;
5141
5142        while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
5143                l2_tn_conf.l2_tunnel_type = l2_tn_filter->key.l2_tn_type;
5144                l2_tn_conf.tunnel_id      = l2_tn_filter->key.tn_id;
5145                l2_tn_conf.pool           = l2_tn_filter->pool;
5146                ret = txgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_conf);
5147                if (ret < 0)
5148                        return ret;
5149        }
5150
5151        return 0;
5152}
5153
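/* Operations supported by the txgbe physical function driver */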
5154static const struct eth_dev_ops txgbe_eth_dev_ops = {
5155        .dev_configure              = txgbe_dev_configure,
5156        .dev_infos_get              = txgbe_dev_info_get,
5157        .dev_start                  = txgbe_dev_start,
5158        .dev_stop                   = txgbe_dev_stop,
5159        .dev_set_link_up            = txgbe_dev_set_link_up,
5160        .dev_set_link_down          = txgbe_dev_set_link_down,
5161        .dev_close                  = txgbe_dev_close,
5162        .dev_reset                  = txgbe_dev_reset,
5163        .promiscuous_enable         = txgbe_dev_promiscuous_enable,
5164        .promiscuous_disable        = txgbe_dev_promiscuous_disable,
5165        .allmulticast_enable        = txgbe_dev_allmulticast_enable,
5166        .allmulticast_disable       = txgbe_dev_allmulticast_disable,
5167        .link_update                = txgbe_dev_link_update,
5168        .stats_get                  = txgbe_dev_stats_get,
5169        .xstats_get                 = txgbe_dev_xstats_get,
5170        .xstats_get_by_id           = txgbe_dev_xstats_get_by_id,
5171        .stats_reset                = txgbe_dev_stats_reset,
5172        .xstats_reset               = txgbe_dev_xstats_reset,
5173        .xstats_get_names           = txgbe_dev_xstats_get_names,
5174        .xstats_get_names_by_id     = txgbe_dev_xstats_get_names_by_id,
5175        .queue_stats_mapping_set    = txgbe_dev_queue_stats_mapping_set,
5176        .fw_version_get             = txgbe_fw_version_get,
5177        .dev_supported_ptypes_get   = txgbe_dev_supported_ptypes_get,
5178        .mtu_set                    = txgbe_dev_mtu_set,
5179        .vlan_filter_set            = txgbe_vlan_filter_set,
5180        .vlan_tpid_set              = txgbe_vlan_tpid_set,
5181        .vlan_offload_set           = txgbe_vlan_offload_set,
5182        .vlan_strip_queue_set       = txgbe_vlan_strip_queue_set,
5183        .rx_queue_start             = txgbe_dev_rx_queue_start,
5184        .rx_queue_stop              = txgbe_dev_rx_queue_stop,
5185        .tx_queue_start             = txgbe_dev_tx_queue_start,
5186        .tx_queue_stop              = txgbe_dev_tx_queue_stop,
5187        .rx_queue_setup             = txgbe_dev_rx_queue_setup,
5188        .rx_queue_intr_enable       = txgbe_dev_rx_queue_intr_enable,
5189        .rx_queue_intr_disable      = txgbe_dev_rx_queue_intr_disable,
5190        .rx_queue_release           = txgbe_dev_rx_queue_release,
5191        .tx_queue_setup             = txgbe_dev_tx_queue_setup,
5192        .tx_queue_release           = txgbe_dev_tx_queue_release,
5193        .dev_led_on                 = txgbe_dev_led_on,
5194        .dev_led_off                = txgbe_dev_led_off,
5195        .flow_ctrl_get              = txgbe_flow_ctrl_get,
5196        .flow_ctrl_set              = txgbe_flow_ctrl_set,
5197        .priority_flow_ctrl_set     = txgbe_priority_flow_ctrl_set,
5198        .mac_addr_add               = txgbe_add_rar,
5199        .mac_addr_remove            = txgbe_remove_rar,
5200        .mac_addr_set               = txgbe_set_default_mac_addr,
5201        .uc_hash_table_set          = txgbe_uc_hash_table_set,
5202        .uc_all_hash_table_set      = txgbe_uc_all_hash_table_set,
5203        .set_queue_rate_limit       = txgbe_set_queue_rate_limit,
5204        .reta_update                = txgbe_dev_rss_reta_update,
5205        .reta_query                 = txgbe_dev_rss_reta_query,
5206        .rss_hash_update            = txgbe_dev_rss_hash_update,
5207        .rss_hash_conf_get          = txgbe_dev_rss_hash_conf_get,
5208        .filter_ctrl                = txgbe_dev_filter_ctrl,
5209        .set_mc_addr_list           = txgbe_dev_set_mc_addr_list,
5210        .rxq_info_get               = txgbe_rxq_info_get,
5211        .txq_info_get               = txgbe_txq_info_get,
5212        .timesync_enable            = txgbe_timesync_enable,
5213        .timesync_disable           = txgbe_timesync_disable,
5214        .timesync_read_rx_timestamp = txgbe_timesync_read_rx_timestamp,
5215        .timesync_read_tx_timestamp = txgbe_timesync_read_tx_timestamp,
5216        .get_reg                    = txgbe_get_regs,
5217        .get_eeprom_length          = txgbe_get_eeprom_length,
5218        .get_eeprom                 = txgbe_get_eeprom,
5219        .set_eeprom                 = txgbe_set_eeprom,
5220        .get_module_info            = txgbe_get_module_info,
5221        .get_module_eeprom          = txgbe_get_module_eeprom,
5222        .get_dcb_info               = txgbe_dev_get_dcb_info,
5223        .timesync_adjust_time       = txgbe_timesync_adjust_time,
5224        .timesync_read_time         = txgbe_timesync_read_time,
5225        .timesync_write_time        = txgbe_timesync_write_time,
5226        .udp_tunnel_port_add        = txgbe_dev_udp_tunnel_port_add,
5227        .udp_tunnel_port_del        = txgbe_dev_udp_tunnel_port_del,
5228        .tm_ops_get                 = txgbe_tm_ops_get,
5229        .tx_done_cleanup            = txgbe_dev_tx_done_cleanup,
5230};
5231
5232RTE_PMD_REGISTER_PCI(net_txgbe, rte_txgbe_pmd);
5233RTE_PMD_REGISTER_PCI_TABLE(net_txgbe, pci_id_txgbe_map);
5234RTE_PMD_REGISTER_KMOD_DEP(net_txgbe, "* igb_uio | uio_pci_generic | vfio-pci");
5235
5236RTE_LOG_REGISTER(txgbe_logtype_init, pmd.net.txgbe.init, NOTICE);
5237RTE_LOG_REGISTER(txgbe_logtype_driver, pmd.net.txgbe.driver, NOTICE);
5238
5239#ifdef RTE_LIBRTE_TXGBE_DEBUG_RX
5240        RTE_LOG_REGISTER(txgbe_logtype_rx, pmd.net.txgbe.rx, DEBUG);
5241#endif
5242#ifdef RTE_LIBRTE_TXGBE_DEBUG_TX
5243        RTE_LOG_REGISTER(txgbe_logtype_tx, pmd.net.txgbe.tx, DEBUG);
5244#endif
5245
5246#ifdef RTE_LIBRTE_TXGBE_DEBUG_TX_FREE
5247        RTE_LOG_REGISTER(txgbe_logtype_tx_free, pmd.net.txgbe.tx_free, DEBUG);
5248#endif
5249