dpdk/drivers/net/fm10k/fm10k_ethdev.c
   1/* SPDX-License-Identifier: BSD-3-Clause
   2 * Copyright(c) 2013-2016 Intel Corporation
   3 */
   4
   5#include <rte_ethdev_driver.h>
   6#include <rte_ethdev_pci.h>
   7#include <rte_malloc.h>
   8#include <rte_memzone.h>
   9#include <rte_string_fns.h>
  10#include <rte_dev.h>
  11#include <rte_spinlock.h>
  12#include <rte_kvargs.h>
  13
  14#include "fm10k.h"
  15#include "base/fm10k_api.h"
  16
  17/* Default delay to acquire mailbox lock */
  18#define FM10K_MBXLOCK_DELAY_US 20
  19#define UINT64_LOWER_32BITS_MASK 0x00000000ffffffffULL
  20
  21#define MAIN_VSI_POOL_NUMBER 0
  22
  23/* Max number of attempts to acquire switch status */
  24#define MAX_QUERY_SWITCH_STATE_TIMES 10
  25/* Wait interval to get switch status */
  26#define WAIT_SWITCH_MSG_US    100000
  27/* A period of quiescence for switch */
  28#define FM10K_SWITCH_QUIESCE_US 100000
  29/* Number of chars per uint32 type */
  30#define CHARS_PER_UINT32 (sizeof(uint32_t))
  31#define BIT_MASK_PER_UINT32 ((1 << CHARS_PER_UINT32) - 1)
  32
  33/* default 1:1 map from queue ID to interrupt vector ID */
  34#define Q2V(pci_dev, queue_id) ((pci_dev)->intr_handle.intr_vec[queue_id])
  35
  36/* First 64 Logical ports for PF/VMDQ, second 64 for Flow director */
  37#define MAX_LPORT_NUM    128
  38#define GLORT_FD_Q_BASE  0x40
  39#define GLORT_PF_MASK    0xFFC0
  40#define GLORT_FD_MASK    GLORT_PF_MASK
  41#define GLORT_FD_INDEX   GLORT_FD_Q_BASE
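
/*
 * Layout implied by the definitions above: GLORT_PF_MASK (0xFFC0) leaves the
 * low 6 bits unmasked, i.e. 64 logical ports per DGLORT map entry. The PF/VMDQ
 * entry covers glorts dglort_map + 0x00..0x3F and the Flow Director entry
 * covers dglort_map + 0x40..0x7F (starting at GLORT_FD_Q_BASE), which is where
 * MAX_LPORT_NUM == 128 comes from.
 */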
  42
  43static void fm10k_close_mbx_service(struct fm10k_hw *hw);
  44static int fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev);
  45static int fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev);
  46static int fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev);
  47static int fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev);
  48static inline int fm10k_glort_valid(struct fm10k_hw *hw);
  49static int
  50fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
  51static void fm10k_MAC_filter_set(struct rte_eth_dev *dev,
  52        const u8 *mac, bool add, uint32_t pool);
  53static void fm10k_tx_queue_release(void *queue);
  54static void fm10k_rx_queue_release(void *queue);
  55static void fm10k_set_rx_function(struct rte_eth_dev *dev);
  56static void fm10k_set_tx_function(struct rte_eth_dev *dev);
  57static int fm10k_check_ftag(struct rte_devargs *devargs);
  58static int fm10k_link_update(struct rte_eth_dev *dev, int wait_to_complete);
  59
  60static int fm10k_dev_infos_get(struct rte_eth_dev *dev,
  61                               struct rte_eth_dev_info *dev_info);
  62static uint64_t fm10k_get_rx_queue_offloads_capa(struct rte_eth_dev *dev);
  63static uint64_t fm10k_get_rx_port_offloads_capa(struct rte_eth_dev *dev);
  64static uint64_t fm10k_get_tx_queue_offloads_capa(struct rte_eth_dev *dev);
  65static uint64_t fm10k_get_tx_port_offloads_capa(struct rte_eth_dev *dev);
  66
  67struct fm10k_xstats_name_off {
  68        char name[RTE_ETH_XSTATS_NAME_SIZE];
  69        unsigned offset;
  70};
  71
  72static const struct fm10k_xstats_name_off fm10k_hw_stats_strings[] = {
  73        {"completion_timeout_count", offsetof(struct fm10k_hw_stats, timeout)},
  74        {"unsupported_requests_count", offsetof(struct fm10k_hw_stats, ur)},
  75        {"completer_abort_count", offsetof(struct fm10k_hw_stats, ca)},
  76        {"unsupported_message_count", offsetof(struct fm10k_hw_stats, um)},
  77        {"checksum_error_count", offsetof(struct fm10k_hw_stats, xec)},
  78        {"vlan_dropped", offsetof(struct fm10k_hw_stats, vlan_drop)},
  79        {"loopback_dropped", offsetof(struct fm10k_hw_stats, loopback_drop)},
  80        {"rx_mbuf_allocation_errors", offsetof(struct fm10k_hw_stats,
  81                nodesc_drop)},
  82};
  83
  84#define FM10K_NB_HW_XSTATS (sizeof(fm10k_hw_stats_strings) / \
  85                sizeof(fm10k_hw_stats_strings[0]))
  86
  87static const struct fm10k_xstats_name_off fm10k_hw_stats_rx_q_strings[] = {
  88        {"packets", offsetof(struct fm10k_hw_stats_q, rx_packets)},
  89        {"bytes", offsetof(struct fm10k_hw_stats_q, rx_bytes)},
  90        {"dropped", offsetof(struct fm10k_hw_stats_q, rx_drops)},
  91};
  92
  93#define FM10K_NB_RX_Q_XSTATS (sizeof(fm10k_hw_stats_rx_q_strings) / \
  94                sizeof(fm10k_hw_stats_rx_q_strings[0]))
  95
  96static const struct fm10k_xstats_name_off fm10k_hw_stats_tx_q_strings[] = {
  97        {"packets", offsetof(struct fm10k_hw_stats_q, tx_packets)},
  98        {"bytes", offsetof(struct fm10k_hw_stats_q, tx_bytes)},
  99};
 100
 101#define FM10K_NB_TX_Q_XSTATS (sizeof(fm10k_hw_stats_tx_q_strings) / \
 102                sizeof(fm10k_hw_stats_tx_q_strings[0]))
 103
 104#define FM10K_NB_XSTATS (FM10K_NB_HW_XSTATS + FM10K_MAX_QUEUES_PF * \
 105                (FM10K_NB_RX_Q_XSTATS + FM10K_NB_TX_Q_XSTATS))
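
/*
 * Worked count for the definition above: 8 global stats plus, for each PF
 * queue, 3 Rx and 2 Tx per-queue stats. Assuming FM10K_MAX_QUEUES_PF is 128,
 * this gives 8 + 128 * (3 + 2) = 648 extended statistics.
 */
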
 106static int
 107fm10k_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
 108
 109static void
 110fm10k_mbx_initlock(struct fm10k_hw *hw)
 111{
 112        rte_spinlock_init(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
 113}
 114
 115static void
 116fm10k_mbx_lock(struct fm10k_hw *hw)
 117{
 118        while (!rte_spinlock_trylock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back)))
 119                rte_delay_us(FM10K_MBXLOCK_DELAY_US);
 120}
 121
 122static void
 123fm10k_mbx_unlock(struct fm10k_hw *hw)
 124{
 125        rte_spinlock_unlock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
 126}
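
/*
 * Illustrative sketch only (hypothetical helper, never called by the driver):
 * every mailbox-backed request in this file is bracketed by the lock/unlock
 * helpers above, as shown here for an xcast-mode update.
 */
static __rte_unused void
fm10k_mbx_usage_sketch(struct fm10k_hw *hw)
{
        fm10k_mbx_lock(hw);
        /* any mailbox operation goes here, e.g. switching to unicast mode */
        hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
                        FM10K_XCAST_MODE_NONE);
        fm10k_mbx_unlock(hw);
}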
 127
 128/* Stubs needed for linkage when vPMD is disabled */
 129__rte_weak int
 130fm10k_rx_vec_condition_check(__rte_unused struct rte_eth_dev *dev)
 131{
 132        return -1;
 133}
 134
 135__rte_weak uint16_t
 136fm10k_recv_pkts_vec(
 137        __rte_unused void *rx_queue,
 138        __rte_unused struct rte_mbuf **rx_pkts,
 139        __rte_unused uint16_t nb_pkts)
 140{
 141        return 0;
 142}
 143
 144__rte_weak uint16_t
 145fm10k_recv_scattered_pkts_vec(
 146                __rte_unused void *rx_queue,
 147                __rte_unused struct rte_mbuf **rx_pkts,
 148                __rte_unused uint16_t nb_pkts)
 149{
 150        return 0;
 151}
 152
 153__rte_weak int
 154fm10k_rxq_vec_setup(__rte_unused struct fm10k_rx_queue *rxq)
 155
 156{
 157        return -1;
 158}
 159
 160__rte_weak void
 161fm10k_rx_queue_release_mbufs_vec(
 162                __rte_unused struct fm10k_rx_queue *rxq)
 163{
 164        return;
 165}
 166
 167__rte_weak void
 168fm10k_txq_vec_setup(__rte_unused struct fm10k_tx_queue *txq)
 169{
 170        return;
 171}
 172
 173__rte_weak int
 174fm10k_tx_vec_condition_check(__rte_unused struct fm10k_tx_queue *txq)
 175{
 176        return -1;
 177}
 178
 179__rte_weak uint16_t
 180fm10k_xmit_fixed_burst_vec(__rte_unused void *tx_queue,
 181                           __rte_unused struct rte_mbuf **tx_pkts,
 182                           __rte_unused uint16_t nb_pkts)
 183{
 184        return 0;
 185}
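
/*
 * How the stubs above get replaced (a sketch of standard weak-symbol linkage,
 * shown with one of the symbols from this file): when the vector Rx/Tx object
 * is compiled in, its strong definitions override the weak ones; otherwise the
 * stubs keep the link working and the condition checks return -1, which steers
 * the driver to the scalar Rx/Tx paths.
 *
 *      __rte_weak int fm10k_rx_vec_condition_check(...) { return -1; }
 *      int fm10k_rx_vec_condition_check(...) { ... }   <- wins when linked in
 */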
 186
 187/*
 188 * reset queue to initial state, allocate software buffers used when starting
 189 * device.
 190 * return 0 on success
 191 * return -ENOMEM if buffers cannot be allocated
 192 * return -EINVAL if buffers do not satisfy alignment condition
 193 */
 194static inline int
 195rx_queue_reset(struct fm10k_rx_queue *q)
 196{
 197        static const union fm10k_rx_desc zero = {{0} };
 198        uint64_t dma_addr;
 199        int i, diag;
 200        PMD_INIT_FUNC_TRACE();
 201
 202        diag = rte_mempool_get_bulk(q->mp, (void **)q->sw_ring, q->nb_desc);
 203        if (diag != 0)
 204                return -ENOMEM;
 205
 206        for (i = 0; i < q->nb_desc; ++i) {
 207                fm10k_pktmbuf_reset(q->sw_ring[i], q->port_id);
 208                if (!fm10k_addr_alignment_valid(q->sw_ring[i])) {
 209                        rte_mempool_put_bulk(q->mp, (void **)q->sw_ring,
 210                                                q->nb_desc);
 211                        return -EINVAL;
 212                }
 213                dma_addr = MBUF_DMA_ADDR_DEFAULT(q->sw_ring[i]);
 214                q->hw_ring[i].q.pkt_addr = dma_addr;
 215                q->hw_ring[i].q.hdr_addr = dma_addr;
 216        }
 217
 218        /* initialize extra software ring entries. Space for these extra
 219         * entries is always allocated.
 220         */
 221        memset(&q->fake_mbuf, 0x0, sizeof(q->fake_mbuf));
 222        for (i = 0; i < q->nb_fake_desc; ++i) {
 223                q->sw_ring[q->nb_desc + i] = &q->fake_mbuf;
 224                q->hw_ring[q->nb_desc + i] = zero;
 225        }
 226
 227        q->next_dd = 0;
 228        q->next_alloc = 0;
 229        q->next_trigger = q->alloc_thresh - 1;
 230        FM10K_PCI_REG_WRITE(q->tail_ptr, q->nb_desc - 1);
 231        q->rxrearm_start = 0;
 232        q->rxrearm_nb = 0;
 233
 234        return 0;
 235}
 236
 237/*
 238 * clean queue, descriptor rings, free software buffers used when stopping
 239 * device.
 240 */
 241static inline void
 242rx_queue_clean(struct fm10k_rx_queue *q)
 243{
 244        union fm10k_rx_desc zero = {.q = {0, 0, 0, 0} };
 245        uint32_t i;
 246        PMD_INIT_FUNC_TRACE();
 247
 248        /* zero descriptor rings */
 249        for (i = 0; i < q->nb_desc; ++i)
 250                q->hw_ring[i] = zero;
 251
 252        /* zero faked descriptors */
 253        for (i = 0; i < q->nb_fake_desc; ++i)
 254                q->hw_ring[q->nb_desc + i] = zero;
 255
 256        /* vPMD driver has a different way of releasing mbufs. */
 257        if (q->rx_using_sse) {
 258                fm10k_rx_queue_release_mbufs_vec(q);
 259                return;
 260        }
 261
 262        /* free software buffers */
 263        for (i = 0; i < q->nb_desc; ++i) {
 264                if (q->sw_ring[i]) {
 265                        rte_pktmbuf_free_seg(q->sw_ring[i]);
 266                        q->sw_ring[i] = NULL;
 267                }
 268        }
 269}
 270
 271/*
 272 * free all queue memory; used when releasing the queue (e.g. on reconfiguration)
 273 */
 274static inline void
 275rx_queue_free(struct fm10k_rx_queue *q)
 276{
 277        PMD_INIT_FUNC_TRACE();
 278        if (q) {
 279                PMD_INIT_LOG(DEBUG, "Freeing rx queue %p", q);
 280                rx_queue_clean(q);
 281                if (q->sw_ring) {
 282                        rte_free(q->sw_ring);
 283                        q->sw_ring = NULL;
 284                }
 285                rte_free(q);
 286                q = NULL;
 287        }
 288}
 289
 290/*
 291 * disable RX queue, wait until HW finishes the necessary flush operation
 292 */
 293static inline int
 294rx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
 295{
 296        uint32_t reg, i;
 297
 298        reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
 299        FM10K_WRITE_REG(hw, FM10K_RXQCTL(qnum),
 300                        reg & ~FM10K_RXQCTL_ENABLE);
 301
 302        /* Wait 100us at most */
 303        for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
 304                rte_delay_us(1);
 305                reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
 306                if (!(reg & FM10K_RXQCTL_ENABLE))
 307                        break;
 308        }
 309
 310        if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
 311                return -1;
 312
 313        return 0;
 314}
 315
 316/*
 317 * reset queue to initial state, allocate software buffers used when starting
 318 * device
 319 */
 320static inline void
 321tx_queue_reset(struct fm10k_tx_queue *q)
 322{
 323        PMD_INIT_FUNC_TRACE();
 324        q->last_free = 0;
 325        q->next_free = 0;
 326        q->nb_used = 0;
 327        q->nb_free = q->nb_desc - 1;
 328        fifo_reset(&q->rs_tracker, (q->nb_desc + 1) / q->rs_thresh);
 329        FM10K_PCI_REG_WRITE(q->tail_ptr, 0);
 330}
 331
 332/*
 333 * clean queue, descriptor rings, free software buffers used when stopping
 334 * device
 335 */
 336static inline void
 337tx_queue_clean(struct fm10k_tx_queue *q)
 338{
 339        struct fm10k_tx_desc zero = {0, 0, 0, 0, 0, 0};
 340        uint32_t i;
 341        PMD_INIT_FUNC_TRACE();
 342
 343        /* zero descriptor rings */
 344        for (i = 0; i < q->nb_desc; ++i)
 345                q->hw_ring[i] = zero;
 346
 347        /* free software buffers */
 348        for (i = 0; i < q->nb_desc; ++i) {
 349                if (q->sw_ring[i]) {
 350                        rte_pktmbuf_free_seg(q->sw_ring[i]);
 351                        q->sw_ring[i] = NULL;
 352                }
 353        }
 354}
 355
 356/*
 357 * free all queue memory; used when releasing the queue (e.g. on reconfiguration)
 358 */
 359static inline void
 360tx_queue_free(struct fm10k_tx_queue *q)
 361{
 362        PMD_INIT_FUNC_TRACE();
 363        if (q) {
 364                PMD_INIT_LOG(DEBUG, "Freeing tx queue %p", q);
 365                tx_queue_clean(q);
 366                if (q->rs_tracker.list) {
 367                        rte_free(q->rs_tracker.list);
 368                        q->rs_tracker.list = NULL;
 369                }
 370                if (q->sw_ring) {
 371                        rte_free(q->sw_ring);
 372                        q->sw_ring = NULL;
 373                }
 374                rte_free(q);
 375                q = NULL;
 376        }
 377}
 378
 379/*
 380 * disable TX queue, wait until HW finishes the necessary flush operation
 381 */
 382static inline int
 383tx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
 384{
 385        uint32_t reg, i;
 386
 387        reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
 388        FM10K_WRITE_REG(hw, FM10K_TXDCTL(qnum),
 389                        reg & ~FM10K_TXDCTL_ENABLE);
 390
 391        /* Wait 100us at most */
 392        for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
 393                rte_delay_us(1);
 394                reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
 395                if (!(reg & FM10K_TXDCTL_ENABLE))
 396                        break;
 397        }
 398
 399        if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
 400                return -1;
 401
 402        return 0;
 403}
 404
 405static int
 406fm10k_check_mq_mode(struct rte_eth_dev *dev)
 407{
 408        enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
 409        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 410        struct rte_eth_vmdq_rx_conf *vmdq_conf;
 411        uint16_t nb_rx_q = dev->data->nb_rx_queues;
 412
 413        vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
 414
 415        if (rx_mq_mode & ETH_MQ_RX_DCB_FLAG) {
 416                PMD_INIT_LOG(ERR, "DCB mode is not supported.");
 417                return -EINVAL;
 418        }
 419
 420        if (!(rx_mq_mode & ETH_MQ_RX_VMDQ_FLAG))
 421                return 0;
 422
 423        if (hw->mac.type == fm10k_mac_vf) {
 424                PMD_INIT_LOG(ERR, "VMDQ mode is not supported in VF.");
 425                return -EINVAL;
 426        }
 427
 428        /* Check VMDQ queue pool number */
 429        if (vmdq_conf->nb_queue_pools >
 430                        sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT ||
 431                        vmdq_conf->nb_queue_pools > nb_rx_q) {
 432                PMD_INIT_LOG(ERR, "Too many queue pools: %d",
 433                        vmdq_conf->nb_queue_pools);
 434                return -EINVAL;
 435        }
 436
 437        return 0;
 438}
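
/*
 * Example (application side, sketch only, values are illustrative) of a VMDQ
 * configuration that passes the check above on a PF: the pool count must fit
 * in the 64-bit pool_map bitmask (i.e. at most 64) and must not exceed the
 * number of Rx queues.
 *
 *      conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
 *      conf.rx_adv_conf.vmdq_rx_conf.nb_queue_pools = ETH_8_POOLS;
 *      conf.rx_adv_conf.vmdq_rx_conf.nb_pool_maps = 1;
 *      conf.rx_adv_conf.vmdq_rx_conf.pool_map[0].vlan_id = 100;
 *      conf.rx_adv_conf.vmdq_rx_conf.pool_map[0].pools = 0x1;
 *
 * rte_eth_dev_configure() must then be called with at least 8 Rx queues.
 */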
 439
 440static const struct fm10k_txq_ops def_txq_ops = {
 441        .reset = tx_queue_reset,
 442};
 443
 444static int
 445fm10k_dev_configure(struct rte_eth_dev *dev)
 446{
 447        int ret;
 448
 449        PMD_INIT_FUNC_TRACE();
 450
 451        if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
 452                dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
 453
 454        /* multiple queue mode checking */
 455        ret  = fm10k_check_mq_mode(dev);
 456        if (ret != 0) {
 457                PMD_DRV_LOG(ERR, "fm10k_check_mq_mode fails with %d.",
 458                            ret);
 459                return ret;
 460        }
 461
 462        dev->data->scattered_rx = 0;
 463
 464        return 0;
 465}
 466
 467static void
 468fm10k_dev_vmdq_rx_configure(struct rte_eth_dev *dev)
 469{
 470        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 471        struct rte_eth_vmdq_rx_conf *vmdq_conf;
 472        uint32_t i;
 473
 474        vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
 475
 476        for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
 477                if (!vmdq_conf->pool_map[i].pools)
 478                        continue;
 479                fm10k_mbx_lock(hw);
 480                fm10k_update_vlan(hw, vmdq_conf->pool_map[i].vlan_id, 0, true);
 481                fm10k_mbx_unlock(hw);
 482        }
 483}
 484
 485static void
 486fm10k_dev_pf_main_vsi_reset(struct rte_eth_dev *dev)
 487{
 488        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 489
 490        /* Add default mac address */
 491        fm10k_MAC_filter_set(dev, hw->mac.addr, true,
 492                MAIN_VSI_POOL_NUMBER);
 493}
 494
 495static void
 496fm10k_dev_rss_configure(struct rte_eth_dev *dev)
 497{
 498        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 499        struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
 500        uint32_t mrqc, *key, i, reta, j;
 501        uint64_t hf;
 502
 503#define RSS_KEY_SIZE 40
 504        static uint8_t rss_intel_key[RSS_KEY_SIZE] = {
 505                0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
 506                0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
 507                0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
 508                0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
 509                0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
 510        };
 511
 512        if (dev_conf->rxmode.mq_mode != ETH_MQ_RX_RSS ||
 513                dev_conf->rx_adv_conf.rss_conf.rss_hf == 0) {
 514                FM10K_WRITE_REG(hw, FM10K_MRQC(0), 0);
 515                return;
 516        }
 517
 518        /* The RSS key is rss_intel_key (default) or user-provided (rss_key) */
 519        if (dev_conf->rx_adv_conf.rss_conf.rss_key == NULL)
 520                key = (uint32_t *)rss_intel_key;
 521        else
 522                key = (uint32_t *)dev_conf->rx_adv_conf.rss_conf.rss_key;
 523
 524        /* Now fill our hash function seeds, 4 bytes at a time */
 525        for (i = 0; i < RSS_KEY_SIZE / sizeof(*key); ++i)
 526                FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);
 527
 528        /*
 529         * Fill in redirection table
 530         * The byte-swap is needed because NIC registers are in
 531         * little-endian order.
 532         */
 533        reta = 0;
 534        for (i = 0, j = 0; i < FM10K_MAX_RSS_INDICES; i++, j++) {
 535                if (j == dev->data->nb_rx_queues)
 536                        j = 0;
 537                reta = (reta << CHAR_BIT) | j;
 538                if ((i & 3) == 3)
 539                        FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2),
 540                                        rte_bswap32(reta));
 541        }
 542
 543        /*
 544         * Generate RSS hash based on packet types, TCP/UDP
 545         * port numbers and/or IPv4/v6 src and dst addresses
 546         */
 547        hf = dev_conf->rx_adv_conf.rss_conf.rss_hf;
 548        mrqc = 0;
 549        mrqc |= (hf & ETH_RSS_IPV4)              ? FM10K_MRQC_IPV4     : 0;
 550        mrqc |= (hf & ETH_RSS_IPV6)              ? FM10K_MRQC_IPV6     : 0;
 551        mrqc |= (hf & ETH_RSS_IPV6_EX)           ? FM10K_MRQC_IPV6     : 0;
 552        mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP)  ? FM10K_MRQC_TCP_IPV4 : 0;
 553        mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP)  ? FM10K_MRQC_TCP_IPV6 : 0;
 554        mrqc |= (hf & ETH_RSS_IPV6_TCP_EX)       ? FM10K_MRQC_TCP_IPV6 : 0;
 555        mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP)  ? FM10K_MRQC_UDP_IPV4 : 0;
 556        mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP)  ? FM10K_MRQC_UDP_IPV6 : 0;
 557        mrqc |= (hf & ETH_RSS_IPV6_UDP_EX)       ? FM10K_MRQC_UDP_IPV6 : 0;
 558
 559        if (mrqc == 0) {
 560                PMD_INIT_LOG(ERR, "Specified RSS mode 0x%"PRIx64" is not"
 561                        " supported", hf);
 562                return;
 563        }
 564
 565        FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
 566}
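
/*
 * Worked example for the redirection-table packing above, assuming
 * nb_rx_queues == 4: the first four RSS indices map to queues 0..3, so after
 * i == 3 the accumulator holds reta == 0x00010203. rte_bswap32() turns this
 * into 0x03020100 before the FM10K_RETA(0, 0) write, so the queue for RSS
 * index 0 ends up in the least significant byte of the little-endian
 * register. Each FM10K_RETA register packs four table entries.
 */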
 567
 568static void
 569fm10k_dev_logic_port_update(struct rte_eth_dev *dev, uint16_t nb_lport_new)
 570{
 571        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 572        uint32_t i;
 573
 574        for (i = 0; i < nb_lport_new; i++) {
 575                /* Set unicast mode by default. The application can
 576                 * switch to another mode via other API calls.
 577                 */
 578                fm10k_mbx_lock(hw);
 579                hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map + i,
 580                        FM10K_XCAST_MODE_NONE);
 581                fm10k_mbx_unlock(hw);
 582        }
 583}
 584
 585static void
 586fm10k_dev_mq_rx_configure(struct rte_eth_dev *dev)
 587{
 588        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 589        struct rte_eth_vmdq_rx_conf *vmdq_conf;
 590        struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
 591        struct fm10k_macvlan_filter_info *macvlan;
 592        uint16_t nb_queue_pools = 0; /* pool number in configuration */
 593        uint16_t nb_lport_new;
 594
 595        macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
 596        vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
 597
 598        fm10k_dev_rss_configure(dev);
 599
 600        /* only PF supports VMDQ */
 601        if (hw->mac.type != fm10k_mac_pf)
 602                return;
 603
 604        if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
 605                nb_queue_pools = vmdq_conf->nb_queue_pools;
 606
 607        /* no pool number change, no need to update logic port and VLAN/MAC */
 608        if (macvlan->nb_queue_pools == nb_queue_pools)
 609                return;
 610
 611        nb_lport_new = nb_queue_pools ? nb_queue_pools : 1;
 612        fm10k_dev_logic_port_update(dev, nb_lport_new);
 613
 614        /* reset MAC/VLAN as it's based on VMDQ or PF main VSI */
 615        memset(dev->data->mac_addrs, 0,
 616                RTE_ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM);
 617        rte_ether_addr_copy((const struct rte_ether_addr *)hw->mac.addr,
 618                &dev->data->mac_addrs[0]);
 619        memset(macvlan, 0, sizeof(*macvlan));
 620        macvlan->nb_queue_pools = nb_queue_pools;
 621
 622        if (nb_queue_pools)
 623                fm10k_dev_vmdq_rx_configure(dev);
 624        else
 625                fm10k_dev_pf_main_vsi_reset(dev);
 626}
 627
 628static int
 629fm10k_dev_tx_init(struct rte_eth_dev *dev)
 630{
 631        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 632        int i, ret;
 633        struct fm10k_tx_queue *txq;
 634        uint64_t base_addr;
 635        uint32_t size;
 636
 637        /* Disable TXINT to avoid possible interrupt */
 638        for (i = 0; i < hw->mac.max_queues; i++)
 639                FM10K_WRITE_REG(hw, FM10K_TXINT(i),
 640                                3 << FM10K_TXINT_TIMER_SHIFT);
 641
 642        /* Setup TX queue */
 643        for (i = 0; i < dev->data->nb_tx_queues; ++i) {
 644                txq = dev->data->tx_queues[i];
 645                base_addr = txq->hw_ring_phys_addr;
 646                size = txq->nb_desc * sizeof(struct fm10k_tx_desc);
 647
 648                /* disable queue to avoid issues while updating state */
 649                ret = tx_queue_disable(hw, i);
 650                if (ret) {
 651                        PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
 652                        return -1;
 653                }
 654                /* Enable use of FTAG bit in TX descriptor, PFVTCTL
 655                 * register is read-only for VF.
 656                 */
 657                if (fm10k_check_ftag(dev->device->devargs)) {
 658                        if (hw->mac.type == fm10k_mac_pf) {
 659                                FM10K_WRITE_REG(hw, FM10K_PFVTCTL(i),
 660                                                FM10K_PFVTCTL_FTAG_DESC_ENABLE);
 661                                PMD_INIT_LOG(DEBUG, "FTAG mode is enabled");
 662                        } else {
 663                                PMD_INIT_LOG(ERR, "VF FTAG is not supported.");
 664                                return -ENOTSUP;
 665                        }
 666                }
 667
 668                /* set location and size for descriptor ring */
 669                FM10K_WRITE_REG(hw, FM10K_TDBAL(i),
 670                                base_addr & UINT64_LOWER_32BITS_MASK);
 671                FM10K_WRITE_REG(hw, FM10K_TDBAH(i),
 672                                base_addr >> (CHAR_BIT * sizeof(uint32_t)));
 673                FM10K_WRITE_REG(hw, FM10K_TDLEN(i), size);
 674
 675                /* assign default SGLORT for each TX queue by PF */
 676                if (hw->mac.type == fm10k_mac_pf)
 677                        FM10K_WRITE_REG(hw, FM10K_TX_SGLORT(i), hw->mac.dglort_map);
 678        }
 679
 680        /* set up vector or scalar TX function as appropriate */
 681        fm10k_set_tx_function(dev);
 682
 683        return 0;
 684}
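
/*
 * Worked example of the descriptor-ring base address split used above (and
 * again for Rx below), assuming a ring physical address of 0x0000001fc0a45000:
 *
 *      TDBAL = base_addr & UINT64_LOWER_32BITS_MASK        = 0xc0a45000
 *      TDBAH = base_addr >> (CHAR_BIT * sizeof(uint32_t))  = 0x0000001f
 *
 * The shift is 8 * 4 = 32 bits, so the two 32-bit registers together hold the
 * full 64-bit DMA address of the ring.
 */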
 685
 686static int
 687fm10k_dev_rx_init(struct rte_eth_dev *dev)
 688{
 689        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 690        struct fm10k_macvlan_filter_info *macvlan;
 691        struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
 692        struct rte_intr_handle *intr_handle = &pdev->intr_handle;
 693        int i, ret;
 694        struct fm10k_rx_queue *rxq;
 695        uint64_t base_addr;
 696        uint32_t size;
 697        uint32_t rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
 698        uint32_t logic_port = hw->mac.dglort_map;
 699        uint16_t buf_size;
 700        uint16_t queue_stride = 0;
 701
 702        /* enable RXINT for interrupt mode */
 703        i = 0;
 704        if (rte_intr_dp_is_en(intr_handle)) {
 705                for (; i < dev->data->nb_rx_queues; i++) {
 706                        FM10K_WRITE_REG(hw, FM10K_RXINT(i), Q2V(pdev, i));
 707                        if (hw->mac.type == fm10k_mac_pf)
 708                                FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, i)),
 709                                        FM10K_ITR_AUTOMASK |
 710                                        FM10K_ITR_MASK_CLEAR);
 711                        else
 712                                FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, i)),
 713                                        FM10K_ITR_AUTOMASK |
 714                                        FM10K_ITR_MASK_CLEAR);
 715                }
 716        }
 717        /* Disable other RXINT to avoid possible interrupt */
 718        for (; i < hw->mac.max_queues; i++)
 719                FM10K_WRITE_REG(hw, FM10K_RXINT(i),
 720                        3 << FM10K_RXINT_TIMER_SHIFT);
 721
 722        /* Setup RX queues */
 723        for (i = 0; i < dev->data->nb_rx_queues; ++i) {
 724                rxq = dev->data->rx_queues[i];
 725                base_addr = rxq->hw_ring_phys_addr;
 726                size = rxq->nb_desc * sizeof(union fm10k_rx_desc);
 727
 728                /* disable queue to avoid issues while updating state */
 729                ret = rx_queue_disable(hw, i);
 730                if (ret) {
 731                        PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
 732                        return -1;
 733                }
 734
 735                /* Setup the Base and Length of the Rx Descriptor Ring */
 736                FM10K_WRITE_REG(hw, FM10K_RDBAL(i),
 737                                base_addr & UINT64_LOWER_32BITS_MASK);
 738                FM10K_WRITE_REG(hw, FM10K_RDBAH(i),
 739                                base_addr >> (CHAR_BIT * sizeof(uint32_t)));
 740                FM10K_WRITE_REG(hw, FM10K_RDLEN(i), size);
 741
 742                /* Configure the Rx buffer size for a single buffer (no split) */
 743                buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
 744                        RTE_PKTMBUF_HEADROOM);
 745                /* The RX buffer is aligned to 512B within the mbuf, so some
 746                 * bytes are reserved for that alignment; the worst case is
 747                 * 511B. But the SRR register assumes all buffers have the
 748                 * same size, so to close the gap we assume the worst case
 749                 * and treat 512B as reserved. Otherwise the HW could
 750                 * overwrite data into the next mbuf.
 751                 */
 752                buf_size -= FM10K_RX_DATABUF_ALIGN;
 753
 754                FM10K_WRITE_REG(hw, FM10K_SRRCTL(i),
 755                                (buf_size >> FM10K_SRRCTL_BSIZEPKT_SHIFT) |
 756                                FM10K_SRRCTL_LOOPBACK_SUPPRESS);
 757
 758                /* Add dual VLAN tag length to support dual VLAN */
 759                if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
 760                                2 * FM10K_VLAN_TAG_SIZE) > buf_size ||
 761                        rxq->offloads & DEV_RX_OFFLOAD_SCATTER) {
 762                        uint32_t reg;
 763                        dev->data->scattered_rx = 1;
 764                        reg = FM10K_READ_REG(hw, FM10K_SRRCTL(i));
 765                        reg |= FM10K_SRRCTL_BUFFER_CHAINING_EN;
 766                        FM10K_WRITE_REG(hw, FM10K_SRRCTL(i), reg);
 767                }
 768
 769                /* Enable drop on empty; this bit is read-only for VF */
 770                if (hw->mac.type == fm10k_mac_pf && rxq->drop_en)
 771                        rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;
 772
 773                FM10K_WRITE_REG(hw, FM10K_RXDCTL(i), rxdctl);
 774                FM10K_WRITE_FLUSH(hw);
 775        }
 776
 777        /* Configure VMDQ/RSS if applicable */
 778        fm10k_dev_mq_rx_configure(dev);
 779
 780        /* Decide the best RX function */
 781        fm10k_set_rx_function(dev);
 782
 783        /* update RX_SGLORT for loopback suppression */
 784        if (hw->mac.type != fm10k_mac_pf)
 785                return 0;
 786        macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
 787        if (macvlan->nb_queue_pools)
 788                queue_stride = dev->data->nb_rx_queues / macvlan->nb_queue_pools;
 789        for (i = 0; i < dev->data->nb_rx_queues; ++i) {
 790                if (i && queue_stride && !(i % queue_stride))
 791                        logic_port++;
 792                FM10K_WRITE_REG(hw, FM10K_RX_SGLORT(i), logic_port);
 793        }
 794
 795        return 0;
 796}
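
/*
 * Worked example for the SRRCTL buffer-size programming above, assuming a
 * mempool created with RTE_MBUF_DEFAULT_BUF_SIZE (2048 bytes of data room plus
 * the default 128-byte RTE_PKTMBUF_HEADROOM) and FM10K_RX_DATABUF_ALIGN == 512
 * as described in the comment above:
 *
 *      buf_size = 2176 - 128                 = 2048
 *      buf_size -= FM10K_RX_DATABUF_ALIGN    = 1536
 *
 * so the hardware may place at most 1536 bytes in each mbuf, and any frame
 * larger than that (after adding two VLAN tag lengths) forces scattered Rx.
 */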
 797
 798static int
 799fm10k_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 800{
 801        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 802        int err;
 803        uint32_t reg;
 804        struct fm10k_rx_queue *rxq;
 805
 806        PMD_INIT_FUNC_TRACE();
 807
 808        rxq = dev->data->rx_queues[rx_queue_id];
 809        err = rx_queue_reset(rxq);
 810        if (err == -ENOMEM) {
 811                PMD_INIT_LOG(ERR, "Failed to alloc memory : %d", err);
 812                return err;
 813        } else if (err == -EINVAL) {
 814                PMD_INIT_LOG(ERR, "Invalid buffer address alignment:"
 815                        " %d", err);
 816                return err;
 817        }
 818
 819        /* Setup the HW Rx Head and Tail Descriptor Pointers
 820         * Note: this must be done AFTER the queue is enabled on real
 821         * hardware, but BEFORE the queue is enabled when using the
 822         * emulation platform. Do it in both places for now and remove
 823         * this comment and the following two register writes when the
 824         * emulation platform is no longer being used.
 825         */
 826        FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
 827        FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
 828
 829        /* Set PF ownership flag for PF devices */
 830        reg = FM10K_READ_REG(hw, FM10K_RXQCTL(rx_queue_id));
 831        if (hw->mac.type == fm10k_mac_pf)
 832                reg |= FM10K_RXQCTL_PF;
 833        reg |= FM10K_RXQCTL_ENABLE;
 834        /* enable RX queue */
 835        FM10K_WRITE_REG(hw, FM10K_RXQCTL(rx_queue_id), reg);
 836        FM10K_WRITE_FLUSH(hw);
 837
 838        /* Setup the HW Rx Head and Tail Descriptor Pointers
 839         * Note: this must be done AFTER the queue is enabled
 840         */
 841        FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
 842        FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
 843        dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
 844
 845        return 0;
 846}
 847
 848static int
 849fm10k_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 850{
 851        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 852
 853        PMD_INIT_FUNC_TRACE();
 854
 855        /* Disable RX queue */
 856        rx_queue_disable(hw, rx_queue_id);
 857
 858        /* Free mbuf and clean HW ring */
 859        rx_queue_clean(dev->data->rx_queues[rx_queue_id]);
 860        dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
 861
 862        return 0;
 863}
 864
 865static int
 866fm10k_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 867{
 868        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 869        /** @todo - this should be defined in the shared code */
 870#define FM10K_TXDCTL_WRITE_BACK_MIN_DELAY       0x00010000
 871        uint32_t txdctl = FM10K_TXDCTL_WRITE_BACK_MIN_DELAY;
 872        struct fm10k_tx_queue *q = dev->data->tx_queues[tx_queue_id];
 873
 874        PMD_INIT_FUNC_TRACE();
 875
 876        q->ops->reset(q);
 877
 878        /* reset head and tail pointers */
 879        FM10K_WRITE_REG(hw, FM10K_TDH(tx_queue_id), 0);
 880        FM10K_WRITE_REG(hw, FM10K_TDT(tx_queue_id), 0);
 881
 882        /* enable TX queue */
 883        FM10K_WRITE_REG(hw, FM10K_TXDCTL(tx_queue_id),
 884                                FM10K_TXDCTL_ENABLE | txdctl);
 885        FM10K_WRITE_FLUSH(hw);
 886        dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
 887
 888        return 0;
 889}
 890
 891static int
 892fm10k_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 893{
 894        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 895
 896        PMD_INIT_FUNC_TRACE();
 897
 898        tx_queue_disable(hw, tx_queue_id);
 899        tx_queue_clean(dev->data->tx_queues[tx_queue_id]);
 900        dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
 901
 902        return 0;
 903}
 904
 905static inline int fm10k_glort_valid(struct fm10k_hw *hw)
 906{
 907        return ((hw->mac.dglort_map & FM10K_DGLORTMAP_NONE)
 908                != FM10K_DGLORTMAP_NONE);
 909}
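
/*
 * Reading the check above: hw->mac.dglort_map keeps the FM10K_DGLORTMAP_NONE
 * marker bits set until a glort range has been assigned, so the helper returns
 * non-zero only once those bits are no longer all set. The callers below use
 * it to skip xcast/MAC/VLAN updates on a PF that has not yet been given a
 * valid glort range (an assumption consistent with their comments).
 */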
 910
 911static int
 912fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev)
 913{
 914        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 915        int status;
 916
 917        PMD_INIT_FUNC_TRACE();
 918
 919        /* Return if a valid glort range has not been acquired */
 920        if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
 921                return 0;
 922
 923        fm10k_mbx_lock(hw);
 924        status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
 925                                FM10K_XCAST_MODE_PROMISC);
 926        fm10k_mbx_unlock(hw);
 927
 928        if (status != FM10K_SUCCESS) {
 929                PMD_INIT_LOG(ERR, "Failed to enable promiscuous mode");
 930                return -EAGAIN;
 931        }
 932
 933        return 0;
 934}
 935
 936static int
 937fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev)
 938{
 939        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 940        uint8_t mode;
 941        int status;
 942
 943        PMD_INIT_FUNC_TRACE();
 944
 945        /* Return if a valid glort range has not been acquired */
 946        if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
 947                return 0;
 948
 949        if (dev->data->all_multicast == 1)
 950                mode = FM10K_XCAST_MODE_ALLMULTI;
 951        else
 952                mode = FM10K_XCAST_MODE_NONE;
 953
 954        fm10k_mbx_lock(hw);
 955        status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
 956                                mode);
 957        fm10k_mbx_unlock(hw);
 958
 959        if (status != FM10K_SUCCESS) {
 960                PMD_INIT_LOG(ERR, "Failed to disable promiscuous mode");
 961                return -EAGAIN;
 962        }
 963
 964        return 0;
 965}
 966
 967static int
 968fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev)
 969{
 970        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 971        int status;
 972
 973        PMD_INIT_FUNC_TRACE();
 974
 975        /* Return if a valid glort range has not been acquired */
 976        if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
 977                return 0;
 978
 979        /* If promiscuous mode is enabled, it doesn't make sense to enable
 980         * allmulticast and disable promiscuous since fm10k can only select
 981         * one of the modes.
 982         */
 983        if (dev->data->promiscuous) {
 984                PMD_INIT_LOG(INFO, "Promiscuous mode is enabled, "\
 985                        "no need to enable allmulticast");
 986                return 0;
 987        }
 988
 989        fm10k_mbx_lock(hw);
 990        status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
 991                                FM10K_XCAST_MODE_ALLMULTI);
 992        fm10k_mbx_unlock(hw);
 993
 994        if (status != FM10K_SUCCESS) {
 995                PMD_INIT_LOG(ERR, "Failed to enable allmulticast mode");
 996                return -EAGAIN;
 997        }
 998
 999        return 0;
1000}
1001
1002static int
1003fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev)
1004{
1005        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1006        int status;
1007
1008        PMD_INIT_FUNC_TRACE();
1009
1010        /* Return if a valid glort range has not been acquired */
1011        if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
1012                return 0;
1013
1014        if (dev->data->promiscuous) {
1015                PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode "\
1016                        "since promisc mode is enabled");
1017                return -EINVAL;
1018        }
1019
1020        fm10k_mbx_lock(hw);
1021        /* Change mode to unicast mode */
1022        status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
1023                                FM10K_XCAST_MODE_NONE);
1024        fm10k_mbx_unlock(hw);
1025
1026        if (status != FM10K_SUCCESS) {
1027                PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode");
1028                return -EAGAIN;
1029        }
1030
1031        return 0;
1032}
1033
1034static void
1035fm10k_dev_dglort_map_configure(struct rte_eth_dev *dev)
1036{
1037        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1038        uint32_t dglortdec, pool_len, rss_len, i, dglortmask;
1039        uint16_t nb_queue_pools;
1040        struct fm10k_macvlan_filter_info *macvlan;
1041
1042        macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1043        nb_queue_pools = macvlan->nb_queue_pools;
1044        pool_len = nb_queue_pools ? rte_fls_u32(nb_queue_pools - 1) : 0;
1045        rss_len = rte_fls_u32(dev->data->nb_rx_queues - 1) - pool_len;
1046
1047        /* GLORTs 0x0-0x3F are used by PF and VMDQ, 0x40-0x7F are used by FD */
1048        dglortdec = (rss_len << FM10K_DGLORTDEC_RSSLENGTH_SHIFT) | pool_len;
1049        dglortmask = (GLORT_PF_MASK << FM10K_DGLORTMAP_MASK_SHIFT) |
1050                        hw->mac.dglort_map;
1051        FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(0), dglortmask);
1052        /* Configure VMDQ/RSS DGlort Decoder */
1053        FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(0), dglortdec);
1054
1055        /* Flow Director configuration; only the queue number is valid. */
1056        dglortdec = rte_fls_u32(dev->data->nb_rx_queues - 1);
1057        dglortmask = (GLORT_FD_MASK << FM10K_DGLORTMAP_MASK_SHIFT) |
1058                        (hw->mac.dglort_map + GLORT_FD_Q_BASE);
1059        FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(1), dglortmask);
1060        FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(1), dglortdec);
1061
1062        /* Invalidate all other GLORT entries */
1063        for (i = 2; i < FM10K_DGLORT_COUNT; i++)
1064                FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(i),
1065                                FM10K_DGLORTMAP_NONE);
1066}
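
/*
 * Worked example for the DGLORT decoder programming above, assuming 8 Rx
 * queues and 4 VMDQ pools:
 *
 *      pool_len  = rte_fls_u32(4 - 1)     = 2
 *      rss_len   = rte_fls_u32(8 - 1) - 2 = 1
 *      dglortdec = (1 << FM10K_DGLORTDEC_RSSLENGTH_SHIFT) | 2
 *
 * i.e. two glort index bits are decoded as the VMDQ pool and one bit as the
 * RSS queue within the pool. The second map entry reserves glorts at offset
 * GLORT_FD_Q_BASE (0x40) for the Flow Director queues.
 */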
1067
1068#define BSIZEPKT_ROUNDUP ((1 << FM10K_SRRCTL_BSIZEPKT_SHIFT) - 1)
1069static int
1070fm10k_dev_start(struct rte_eth_dev *dev)
1071{
1072        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1073        int i, diag;
1074
1075        PMD_INIT_FUNC_TRACE();
1076
1077        /* stop, init, then start the hw */
1078        diag = fm10k_stop_hw(hw);
1079        if (diag != FM10K_SUCCESS) {
1080                PMD_INIT_LOG(ERR, "Hardware stop failed: %d", diag);
1081                return -EIO;
1082        }
1083
1084        diag = fm10k_init_hw(hw);
1085        if (diag != FM10K_SUCCESS) {
1086                PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
1087                return -EIO;
1088        }
1089
1090        diag = fm10k_start_hw(hw);
1091        if (diag != FM10K_SUCCESS) {
1092                PMD_INIT_LOG(ERR, "Hardware start failed: %d", diag);
1093                return -EIO;
1094        }
1095
1096        diag = fm10k_dev_tx_init(dev);
1097        if (diag) {
1098                PMD_INIT_LOG(ERR, "TX init failed: %d", diag);
1099                return diag;
1100        }
1101
1102        if (fm10k_dev_rxq_interrupt_setup(dev))
1103                return -EIO;
1104
1105        diag = fm10k_dev_rx_init(dev);
1106        if (diag) {
1107                PMD_INIT_LOG(ERR, "RX init failed: %d", diag);
1108                return diag;
1109        }
1110
1111        if (hw->mac.type == fm10k_mac_pf)
1112                fm10k_dev_dglort_map_configure(dev);
1113
1114        for (i = 0; i < dev->data->nb_rx_queues; i++) {
1115                struct fm10k_rx_queue *rxq;
1116                rxq = dev->data->rx_queues[i];
1117
1118                if (rxq->rx_deferred_start)
1119                        continue;
1120                diag = fm10k_dev_rx_queue_start(dev, i);
1121                if (diag != 0) {
1122                        int j;
1123                        for (j = 0; j < i; ++j)
1124                                rx_queue_clean(dev->data->rx_queues[j]);
1125                        return diag;
1126                }
1127        }
1128
1129        for (i = 0; i < dev->data->nb_tx_queues; i++) {
1130                struct fm10k_tx_queue *txq;
1131                txq = dev->data->tx_queues[i];
1132
1133                if (txq->tx_deferred_start)
1134                        continue;
1135                diag = fm10k_dev_tx_queue_start(dev, i);
1136                if (diag != 0) {
1137                        int j;
1138                        for (j = 0; j < i; ++j)
1139                                tx_queue_clean(dev->data->tx_queues[j]);
1140                        for (j = 0; j < dev->data->nb_rx_queues; ++j)
1141                                rx_queue_clean(dev->data->rx_queues[j]);
1142                        return diag;
1143                }
1144        }
1145
1146        /* Update default vlan when not in VMDQ mode */
1147        if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG))
1148                fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);
1149
1150        fm10k_link_update(dev, 0);
1151
1152        return 0;
1153}
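
/*
 * Application-side path that reaches fm10k_dev_start() (a sketch using the
 * generic ethdev API; port_id and values are illustrative). Queues marked as
 * deferred-start are skipped above and must be started explicitly afterwards:
 *
 *      rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 *      rte_eth_rx_queue_setup(port_id, q, nb_desc, socket_id, &rxconf, mp);
 *      rte_eth_tx_queue_setup(port_id, q, nb_desc, socket_id, &txconf);
 *      rte_eth_dev_start(port_id);                 -> fm10k_dev_start()
 *      rte_eth_dev_rx_queue_start(port_id, q);     for deferred-start Rx queues
 *      rte_eth_dev_tx_queue_start(port_id, q);     for deferred-start Tx queues
 */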
1154
1155static void
1156fm10k_dev_stop(struct rte_eth_dev *dev)
1157{
1158        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1159        struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
1160        struct rte_intr_handle *intr_handle = &pdev->intr_handle;
1161        int i;
1162
1163        PMD_INIT_FUNC_TRACE();
1164
1165        if (dev->data->tx_queues)
1166                for (i = 0; i < dev->data->nb_tx_queues; i++)
1167                        fm10k_dev_tx_queue_stop(dev, i);
1168
1169        if (dev->data->rx_queues)
1170                for (i = 0; i < dev->data->nb_rx_queues; i++)
1171                        fm10k_dev_rx_queue_stop(dev, i);
1172
1173        /* Disable datapath event */
1174        if (rte_intr_dp_is_en(intr_handle)) {
1175                for (i = 0; i < dev->data->nb_rx_queues; i++) {
1176                        FM10K_WRITE_REG(hw, FM10K_RXINT(i),
1177                                3 << FM10K_RXINT_TIMER_SHIFT);
1178                        if (hw->mac.type == fm10k_mac_pf)
1179                                FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, i)),
1180                                        FM10K_ITR_MASK_SET);
1181                        else
1182                                FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, i)),
1183                                        FM10K_ITR_MASK_SET);
1184                }
1185        }
1186        /* Clean datapath event and queue/vec mapping */
1187        rte_intr_efd_disable(intr_handle);
1188        rte_free(intr_handle->intr_vec);
1189        intr_handle->intr_vec = NULL;
1190}
1191
1192static void
1193fm10k_dev_queue_release(struct rte_eth_dev *dev)
1194{
1195        int i;
1196
1197        PMD_INIT_FUNC_TRACE();
1198
1199        if (dev->data->tx_queues) {
1200                for (i = 0; i < dev->data->nb_tx_queues; i++) {
1201                        struct fm10k_tx_queue *txq = dev->data->tx_queues[i];
1202
1203                        tx_queue_free(txq);
1204                }
1205        }
1206
1207        if (dev->data->rx_queues) {
1208                for (i = 0; i < dev->data->nb_rx_queues; i++)
1209                        fm10k_rx_queue_release(dev->data->rx_queues[i]);
1210        }
1211}
1212
1213static int
1214fm10k_link_update(struct rte_eth_dev *dev,
1215        __rte_unused int wait_to_complete)
1216{
1217        struct fm10k_dev_info *dev_info =
1218                FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
1219        PMD_INIT_FUNC_TRACE();
1220
1221        dev->data->dev_link.link_speed  = ETH_SPEED_NUM_50G;
1222        dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
1223        dev->data->dev_link.link_status =
1224                dev_info->sm_down ? ETH_LINK_DOWN : ETH_LINK_UP;
1225        dev->data->dev_link.link_autoneg = ETH_LINK_FIXED;
1226
1227        return 0;
1228}
1229
1230static int fm10k_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
1231        struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit)
1232{
1233        unsigned i, q;
1234        unsigned count = 0;
1235
1236        if (xstats_names != NULL) {
1237                /* Note: limit checked in rte_eth_xstats_get_names() */
1238
1239                /* Global stats */
1240                for (i = 0; i < FM10K_NB_HW_XSTATS; i++) {
1241                        snprintf(xstats_names[count].name,
1242                                sizeof(xstats_names[count].name),
1243                                "%s", fm10k_hw_stats_strings[count].name);
1244                        count++;
1245                }
1246
1247                /* PF queue stats */
1248                for (q = 0; q < FM10K_MAX_QUEUES_PF; q++) {
1249                        for (i = 0; i < FM10K_NB_RX_Q_XSTATS; i++) {
1250                                snprintf(xstats_names[count].name,
1251                                        sizeof(xstats_names[count].name),
1252                                        "rx_q%u_%s", q,
1253                                        fm10k_hw_stats_rx_q_strings[i].name);
1254                                count++;
1255                        }
1256                        for (i = 0; i < FM10K_NB_TX_Q_XSTATS; i++) {
1257                                snprintf(xstats_names[count].name,
1258                                        sizeof(xstats_names[count].name),
1259                                        "tx_q%u_%s", q,
1260                                        fm10k_hw_stats_tx_q_strings[i].name);
1261                                count++;
1262                        }
1263                }
1264        }
1265        return FM10K_NB_XSTATS;
1266}
1267
1268static int
1269fm10k_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1270                 unsigned n)
1271{
1272        struct fm10k_hw_stats *hw_stats =
1273                FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1274        unsigned i, q, count = 0;
1275
1276        if (n < FM10K_NB_XSTATS)
1277                return FM10K_NB_XSTATS;
1278
1279        /* Global stats */
1280        for (i = 0; i < FM10K_NB_HW_XSTATS; i++) {
1281                xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
1282                        fm10k_hw_stats_strings[count].offset);
1283                xstats[count].id = count;
1284                count++;
1285        }
1286
1287        /* PF queue stats */
1288        for (q = 0; q < FM10K_MAX_QUEUES_PF; q++) {
1289                for (i = 0; i < FM10K_NB_RX_Q_XSTATS; i++) {
1290                        xstats[count].value =
1291                                *(uint64_t *)(((char *)&hw_stats->q[q]) +
1292                                fm10k_hw_stats_rx_q_strings[i].offset);
1293                        xstats[count].id = count;
1294                        count++;
1295                }
1296                for (i = 0; i < FM10K_NB_TX_Q_XSTATS; i++) {
1297                        xstats[count].value =
1298                                *(uint64_t *)(((char *)&hw_stats->q[q]) +
1299                                fm10k_hw_stats_tx_q_strings[i].offset);
1300                        xstats[count].id = count;
1301                        count++;
1302                }
1303        }
1304
1305        return FM10K_NB_XSTATS;
1306}
1307
1308static int
1309fm10k_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1310{
1311        uint64_t ipackets, opackets, ibytes, obytes, imissed;
1312        struct fm10k_hw *hw =
1313                FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1314        struct fm10k_hw_stats *hw_stats =
1315                FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1316        int i;
1317
1318        PMD_INIT_FUNC_TRACE();
1319
1320        fm10k_update_hw_stats(hw, hw_stats);
1321
1322        ipackets = opackets = ibytes = obytes = imissed = 0;
1323        for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) &&
1324                (i < hw->mac.max_queues); ++i) {
1325                stats->q_ipackets[i] = hw_stats->q[i].rx_packets.count;
1326                stats->q_opackets[i] = hw_stats->q[i].tx_packets.count;
1327                stats->q_ibytes[i]   = hw_stats->q[i].rx_bytes.count;
1328                stats->q_obytes[i]   = hw_stats->q[i].tx_bytes.count;
1329                stats->q_errors[i]   = hw_stats->q[i].rx_drops.count;
1330                ipackets += stats->q_ipackets[i];
1331                opackets += stats->q_opackets[i];
1332                ibytes   += stats->q_ibytes[i];
1333                obytes   += stats->q_obytes[i];
1334                imissed  += stats->q_errors[i];
1335        }
1336        stats->ipackets = ipackets;
1337        stats->opackets = opackets;
1338        stats->ibytes = ibytes;
1339        stats->obytes = obytes;
1340        stats->imissed = imissed;
1341        return 0;
1342}
1343
1344static int
1345fm10k_stats_reset(struct rte_eth_dev *dev)
1346{
1347        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1348        struct fm10k_hw_stats *hw_stats =
1349                FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1350
1351        PMD_INIT_FUNC_TRACE();
1352
1353        memset(hw_stats, 0, sizeof(*hw_stats));
1354        fm10k_rebind_hw_stats(hw, hw_stats);
1355
1356        return 0;
1357}
1358
1359static int
1360fm10k_dev_infos_get(struct rte_eth_dev *dev,
1361        struct rte_eth_dev_info *dev_info)
1362{
1363        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1364        struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
1365
1366        PMD_INIT_FUNC_TRACE();
1367
1368        dev_info->min_rx_bufsize     = FM10K_MIN_RX_BUF_SIZE;
1369        dev_info->max_rx_pktlen      = FM10K_MAX_PKT_SIZE;
1370        dev_info->max_rx_queues      = hw->mac.max_queues;
1371        dev_info->max_tx_queues      = hw->mac.max_queues;
1372        dev_info->max_mac_addrs      = FM10K_MAX_MACADDR_NUM;
1373        dev_info->max_hash_mac_addrs = 0;
1374        dev_info->max_vfs            = pdev->max_vfs;
1375        dev_info->vmdq_pool_base     = 0;
1376        dev_info->vmdq_queue_base    = 0;
1377        dev_info->max_vmdq_pools     = ETH_32_POOLS;
1378        dev_info->vmdq_queue_num     = FM10K_MAX_QUEUES_PF;
1379        dev_info->rx_queue_offload_capa = fm10k_get_rx_queue_offloads_capa(dev);
1380        dev_info->rx_offload_capa = fm10k_get_rx_port_offloads_capa(dev) |
1381                                    dev_info->rx_queue_offload_capa;
1382        dev_info->tx_queue_offload_capa = fm10k_get_tx_queue_offloads_capa(dev);
1383        dev_info->tx_offload_capa = fm10k_get_tx_port_offloads_capa(dev) |
1384                                    dev_info->tx_queue_offload_capa;
1385
1386        dev_info->hash_key_size = FM10K_RSSRK_SIZE * sizeof(uint32_t);
1387        dev_info->reta_size = FM10K_MAX_RSS_INDICES;
1388        dev_info->flow_type_rss_offloads = ETH_RSS_IPV4 |
1389                                        ETH_RSS_IPV6 |
1390                                        ETH_RSS_IPV6_EX |
1391                                        ETH_RSS_NONFRAG_IPV4_TCP |
1392                                        ETH_RSS_NONFRAG_IPV6_TCP |
1393                                        ETH_RSS_IPV6_TCP_EX |
1394                                        ETH_RSS_NONFRAG_IPV4_UDP |
1395                                        ETH_RSS_NONFRAG_IPV6_UDP |
1396                                        ETH_RSS_IPV6_UDP_EX;
1397
1398        dev_info->default_rxconf = (struct rte_eth_rxconf) {
1399                .rx_thresh = {
1400                        .pthresh = FM10K_DEFAULT_RX_PTHRESH,
1401                        .hthresh = FM10K_DEFAULT_RX_HTHRESH,
1402                        .wthresh = FM10K_DEFAULT_RX_WTHRESH,
1403                },
1404                .rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(0),
1405                .rx_drop_en = 0,
1406                .offloads = 0,
1407        };
1408
1409        dev_info->default_txconf = (struct rte_eth_txconf) {
1410                .tx_thresh = {
1411                        .pthresh = FM10K_DEFAULT_TX_PTHRESH,
1412                        .hthresh = FM10K_DEFAULT_TX_HTHRESH,
1413                        .wthresh = FM10K_DEFAULT_TX_WTHRESH,
1414                },
1415                .tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(0),
1416                .tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(0),
1417                .offloads = 0,
1418        };
1419
1420        dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
1421                .nb_max = FM10K_MAX_RX_DESC,
1422                .nb_min = FM10K_MIN_RX_DESC,
1423                .nb_align = FM10K_MULT_RX_DESC,
1424        };
1425
1426        dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
1427                .nb_max = FM10K_MAX_TX_DESC,
1428                .nb_min = FM10K_MIN_TX_DESC,
1429                .nb_align = FM10K_MULT_TX_DESC,
1430                .nb_seg_max = FM10K_TX_MAX_SEG,
1431                .nb_mtu_seg_max = FM10K_TX_MAX_MTU_SEG,
1432        };
1433
1434        dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G |
1435                        ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G |
1436                        ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G;
1437
1438        return 0;
1439}
1440
1441#ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE
1442static const uint32_t *
1443fm10k_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1444{
1445        if (dev->rx_pkt_burst == fm10k_recv_pkts ||
1446            dev->rx_pkt_burst == fm10k_recv_scattered_pkts) {
1447                static uint32_t ptypes[] = {
1448                        /* refers to rx_desc_to_ol_flags() */
1449                        RTE_PTYPE_L2_ETHER,
1450                        RTE_PTYPE_L3_IPV4,
1451                        RTE_PTYPE_L3_IPV4_EXT,
1452                        RTE_PTYPE_L3_IPV6,
1453                        RTE_PTYPE_L3_IPV6_EXT,
1454                        RTE_PTYPE_L4_TCP,
1455                        RTE_PTYPE_L4_UDP,
1456                        RTE_PTYPE_UNKNOWN
1457                };
1458
1459                return ptypes;
1460        } else if (dev->rx_pkt_burst == fm10k_recv_pkts_vec ||
1461                   dev->rx_pkt_burst == fm10k_recv_scattered_pkts_vec) {
1462                static uint32_t ptypes_vec[] = {
1463                        /* refers to fm10k_desc_to_pktype_v() */
1464                        RTE_PTYPE_L3_IPV4,
1465                        RTE_PTYPE_L3_IPV4_EXT,
1466                        RTE_PTYPE_L3_IPV6,
1467                        RTE_PTYPE_L3_IPV6_EXT,
1468                        RTE_PTYPE_L4_TCP,
1469                        RTE_PTYPE_L4_UDP,
1470                        RTE_PTYPE_TUNNEL_GENEVE,
1471                        RTE_PTYPE_TUNNEL_NVGRE,
1472                        RTE_PTYPE_TUNNEL_VXLAN,
1473                        RTE_PTYPE_TUNNEL_GRE,
1474                        RTE_PTYPE_UNKNOWN
1475                };
1476
1477                return ptypes_vec;
1478        }
1479
1480        return NULL;
1481}
1482#else
1483static const uint32_t *
1484fm10k_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
1485{
1486        return NULL;
1487}
1488#endif
1489
1490static int
1491fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1492{
1493        s32 result;
1494        uint16_t mac_num = 0;
1495        uint32_t vid_idx, vid_bit, mac_index;
1496        struct fm10k_hw *hw;
1497        struct fm10k_macvlan_filter_info *macvlan;
1498        struct rte_eth_dev_data *data = dev->data;
1499
1500        hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1501        macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1502
1503        if (macvlan->nb_queue_pools > 0) { /* VMDQ mode */
1504                PMD_INIT_LOG(ERR, "Cannot change VLAN filter in VMDQ mode");
1505                return -EINVAL;
1506        }
1507
1508        if (vlan_id > ETH_VLAN_ID_MAX) {
1509                PMD_INIT_LOG(ERR, "Invalid vlan_id: must be < 4096");
1510                return -EINVAL;
1511        }
1512
1513        vid_idx = FM10K_VFTA_IDX(vlan_id);
1514        vid_bit = FM10K_VFTA_BIT(vlan_id);
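        /*
         * Worked example (a sketch, assuming the usual 32-bit packing behind
         * FM10K_VFTA_IDX/FM10K_VFTA_BIT, where each vfta[] word covers 32
         * consecutive VLAN IDs): vlan_id 100 -> vid_idx = 100 / 32 = 3,
         * vid_bit = 1 << (100 % 32) = 1 << 4.
         */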
1515        /* this VLAN ID is already in the VLAN filter table, return SUCCESS */
1516        if (on && (macvlan->vfta[vid_idx] & vid_bit))
1517                return 0;
1518        /* this VLAN ID is NOT in the VLAN filter table, cannot remove */
1519        if (!on && !(macvlan->vfta[vid_idx] & vid_bit)) {
1520                PMD_INIT_LOG(ERR, "Invalid vlan_id: not present "
1521                        "in the VLAN filter table");
1522                return -EINVAL;
1523        }
1524
1525        fm10k_mbx_lock(hw);
1526        result = fm10k_update_vlan(hw, vlan_id, 0, on);
1527        fm10k_mbx_unlock(hw);
1528        if (result != FM10K_SUCCESS) {
1529                PMD_INIT_LOG(ERR, "VLAN update failed: %d", result);
1530                return -EIO;
1531        }
1532
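        /*
         * Propagate the VLAN change to every unicast MAC address currently
         * programmed on the port, so the existing MAC filters are added to
         * (or removed from) this VLAN as well.
         */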
1533        for (mac_index = 0; (mac_index < FM10K_MAX_MACADDR_NUM) &&
1534                        (result == FM10K_SUCCESS); mac_index++) {
1535                if (rte_is_zero_ether_addr(&data->mac_addrs[mac_index]))
1536                        continue;
1537                if (mac_num > macvlan->mac_num - 1) {
1538                        PMD_INIT_LOG(ERR, "MAC address count "
1539                                        "does not match");
1540                        break;
1541                }
1542                fm10k_mbx_lock(hw);
1543                result = fm10k_update_uc_addr(hw, hw->mac.dglort_map,
1544                        data->mac_addrs[mac_index].addr_bytes,
1545                        vlan_id, on, 0);
1546                fm10k_mbx_unlock(hw);
1547                mac_num++;
1548        }
1549        if (result != FM10K_SUCCESS) {
1550                PMD_INIT_LOG(ERR, "MAC address update failed: %d", result);
1551                return -EIO;
1552        }
1553
1554        if (on) {
1555                macvlan->vlan_num++;
1556                macvlan->vfta[vid_idx] |= vid_bit;
1557        } else {
1558                macvlan->vlan_num--;
1559                macvlan->vfta[vid_idx] &= ~vid_bit;
1560        }
1561        return 0;
1562}
1563
1564static int
1565fm10k_vlan_offload_set(struct rte_eth_dev *dev __rte_unused,
1566                       int mask __rte_unused)
1567{
1568        return 0;
1569}
1570
1571/* Add/Remove a MAC address, and update filters to main VSI */
1572static void fm10k_MAC_filter_set_main_vsi(struct rte_eth_dev *dev,
1573                const u8 *mac, bool add, uint32_t pool)
1574{
1575        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1576        struct fm10k_macvlan_filter_info *macvlan;
1577        uint32_t i, j, k;
1578
1579        macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1580
1581        if (pool != MAIN_VSI_POOL_NUMBER) {
1582                PMD_DRV_LOG(ERR, "VMDQ not enabled, can't set "
1583                        "mac to pool %u", pool);
1584                return;
1585        }
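        /*
         * Walk the whole VFTA and program (or remove) this MAC address for
         * every VLAN currently present in the filter table; 'i' counts the
         * VLANs visited so the loop can bail out if the table and vlan_num
         * ever disagree.
         */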
1586        for (i = 0, j = 0; j < FM10K_VFTA_SIZE; j++) {
1587                if (!macvlan->vfta[j])
1588                        continue;
1589                for (k = 0; k < FM10K_UINT32_BIT_SIZE; k++) {
1590                        if (!(macvlan->vfta[j] & (1 << k)))
1591                                continue;
1592                        if (i + 1 > macvlan->vlan_num) {
1593                                PMD_INIT_LOG(ERR, "VLAN count does not match");
1594                                return;
1595                        }
1596                        fm10k_mbx_lock(hw);
1597                        fm10k_update_uc_addr(hw, hw->mac.dglort_map, mac,
1598                                j * FM10K_UINT32_BIT_SIZE + k, add, 0);
1599                        fm10k_mbx_unlock(hw);
1600                        i++;
1601                }
1602        }
1603}
1604
1605/* Add/Remove a MAC address, and update filters to VMDQ */
1606static void fm10k_MAC_filter_set_vmdq(struct rte_eth_dev *dev,
1607                const u8 *mac, bool add, uint32_t pool)
1608{
1609        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1610        struct fm10k_macvlan_filter_info *macvlan;
1611        struct rte_eth_vmdq_rx_conf *vmdq_conf;
1612        uint32_t i;
1613
1614        macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1615        vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
1616
1617        if (pool > macvlan->nb_queue_pools) {
1618                PMD_DRV_LOG(ERR, "Pool number %u invalid."
1619                        " Max pool is %u",
1620                        pool, macvlan->nb_queue_pools);
1621                return;
1622        }
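        /*
         * For VMDQ, the MAC is added/removed once per pool_map entry whose
         * pool bitmask includes this pool, using that entry's VLAN ID and a
         * glort offset by the pool number.
         */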
1623        for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
1624                if (!(vmdq_conf->pool_map[i].pools & (1UL << pool)))
1625                        continue;
1626                fm10k_mbx_lock(hw);
1627                fm10k_update_uc_addr(hw, hw->mac.dglort_map + pool, mac,
1628                        vmdq_conf->pool_map[i].vlan_id, add, 0);
1629                fm10k_mbx_unlock(hw);
1630        }
1631}
1632
1633/* Add/Remove a MAC address, and update filters */
1634static void fm10k_MAC_filter_set(struct rte_eth_dev *dev,
1635                const u8 *mac, bool add, uint32_t pool)
1636{
1637        struct fm10k_macvlan_filter_info *macvlan;
1638
1639        macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1640
1641        if (macvlan->nb_queue_pools > 0) /* VMDQ mode */
1642                fm10k_MAC_filter_set_vmdq(dev, mac, add, pool);
1643        else
1644                fm10k_MAC_filter_set_main_vsi(dev, mac, add, pool);
1645
1646        if (add)
1647                macvlan->mac_num++;
1648        else
1649                macvlan->mac_num--;
1650}
1651
1652/* Add a MAC address, and update filters */
1653static int
1654fm10k_macaddr_add(struct rte_eth_dev *dev,
1655                struct rte_ether_addr *mac_addr,
1656                uint32_t index,
1657                uint32_t pool)
1658{
1659        struct fm10k_macvlan_filter_info *macvlan;
1660
1661        macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1662        fm10k_MAC_filter_set(dev, mac_addr->addr_bytes, TRUE, pool);
1663        macvlan->mac_vmdq_id[index] = pool;
1664        return 0;
1665}
1666
1667/* Remove a MAC address, and update filters */
1668static void
1669fm10k_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
1670{
1671        struct rte_eth_dev_data *data = dev->data;
1672        struct fm10k_macvlan_filter_info *macvlan;
1673
1674        macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1675        fm10k_MAC_filter_set(dev, data->mac_addrs[index].addr_bytes,
1676                        FALSE, macvlan->mac_vmdq_id[index]);
1677        macvlan->mac_vmdq_id[index] = 0;
1678}
1679
1680static inline int
1681check_nb_desc(uint16_t min, uint16_t max, uint16_t mult, uint16_t request)
1682{
1683        if ((request < min) || (request > max) || ((request % mult) != 0))
1684                return -1;
1685        else
1686                return 0;
1687}
1688
1689
1690static inline int
1691check_thresh(uint16_t min, uint16_t max, uint16_t div, uint16_t request)
1692{
1693        if ((request < min) || (request > max) || ((div % request) != 0))
1694                return -1;
1695        else
1696                return 0;
1697}
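/*
 * Example (a sketch, assuming 'div' is the ring size as the
 * FM10K_*_THRESH_DIV macros suggest): for a 512-entry ring, thresholds of
 * 32 or 64 pass because they divide 512 evenly, while 48 is rejected.
 */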
1698
1699static inline int
1700handle_rxconf(struct fm10k_rx_queue *q, const struct rte_eth_rxconf *conf)
1701{
1702        uint16_t rx_free_thresh;
1703
1704        if (conf->rx_free_thresh == 0)
1705                rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(q);
1706        else
1707                rx_free_thresh = conf->rx_free_thresh;
1708
1709        /* make sure the requested threshold satisfies the constraints */
1710        if (check_thresh(FM10K_RX_FREE_THRESH_MIN(q),
1711                        FM10K_RX_FREE_THRESH_MAX(q),
1712                        FM10K_RX_FREE_THRESH_DIV(q),
1713                        rx_free_thresh)) {
1714                PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be "
1715                        "less than or equal to %u, "
1716                        "greater than or equal to %u, "
1717                        "and a divisor of %u",
1718                        rx_free_thresh, FM10K_RX_FREE_THRESH_MAX(q),
1719                        FM10K_RX_FREE_THRESH_MIN(q),
1720                        FM10K_RX_FREE_THRESH_DIV(q));
1721                return -EINVAL;
1722        }
1723
1724        q->alloc_thresh = rx_free_thresh;
1725        q->drop_en = conf->rx_drop_en;
1726        q->rx_deferred_start = conf->rx_deferred_start;
1727
1728        return 0;
1729}
1730
1731/*
1732 * Hardware requires specific alignment for Rx packet buffers. At
1733 * least one of the following two conditions must be satisfied.
1734 *  1. Address is 512B aligned
1735 *  2. Address is 8B aligned and buffer does not cross 4K boundary.
1736 *
1737 * As such, the driver may need to adjust the DMA address within the
1738 * buffer by up to 512B.
1739 *
1740 * return 1 if the element size is valid, otherwise return 0.
1741 */
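/*
 * Rough arithmetic behind the check (a sketch, assuming the typical 128 B
 * RTE_PKTMBUF_HEADROOM and a 2048 B data room): subtracting the mbuf header
 * and the headroom from elt_size leaves the 2048 B data room, and reserving
 * FM10K_RX_DATABUF_ALIGN (512 B) for the worst-case shift still leaves about
 * 1536 B, so such a pool is accepted. A pool whose data room is smaller than
 * the alignment reserve makes min_size wrap around and fail the check.
 */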
1742static int
1743mempool_element_size_valid(struct rte_mempool *mp)
1744{
1745        uint32_t min_size;
1746
1747        /* elt_size includes mbuf header and headroom */
1748        min_size = mp->elt_size - sizeof(struct rte_mbuf) -
1749                        RTE_PKTMBUF_HEADROOM;
1750
1751        /* account for up to 512B of alignment */
1752        min_size -= FM10K_RX_DATABUF_ALIGN;
1753
1754        /* sanity check for overflow */
1755        if (min_size > mp->elt_size)
1756                return 0;
1757
1758        /* size is valid */
1759        return 1;
1760}
1761
1762static uint64_t fm10k_get_rx_queue_offloads_capa(struct rte_eth_dev *dev)
1763{
1764        RTE_SET_USED(dev);
1765
1766        return (uint64_t)(DEV_RX_OFFLOAD_SCATTER);
1767}
1768
1769static uint64_t fm10k_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
1770{
1771        RTE_SET_USED(dev);
1772
1773        return  (uint64_t)(DEV_RX_OFFLOAD_VLAN_STRIP  |
1774                           DEV_RX_OFFLOAD_VLAN_FILTER |
1775                           DEV_RX_OFFLOAD_IPV4_CKSUM  |
1776                           DEV_RX_OFFLOAD_UDP_CKSUM   |
1777                           DEV_RX_OFFLOAD_TCP_CKSUM   |
1778                           DEV_RX_OFFLOAD_JUMBO_FRAME |
1779                           DEV_RX_OFFLOAD_HEADER_SPLIT |
1780                           DEV_RX_OFFLOAD_RSS_HASH);
1781}
1782
1783static int
1784fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
1785        uint16_t nb_desc, unsigned int socket_id,
1786        const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
1787{
1788        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1789        struct fm10k_dev_info *dev_info =
1790                FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
1791        struct fm10k_rx_queue *q;
1792        const struct rte_memzone *mz;
1793        uint64_t offloads;
1794
1795        PMD_INIT_FUNC_TRACE();
1796
1797        offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads;
1798
1799        /* make sure the mempool element size can account for alignment. */
1800        if (!mempool_element_size_valid(mp)) {
1801                PMD_INIT_LOG(ERR, "Error: Mempool element size is too small");
1802                return -EINVAL;
1803        }
1804
1805        /* make sure a valid number of descriptors has been requested */
1806        if (check_nb_desc(FM10K_MIN_RX_DESC, FM10K_MAX_RX_DESC,
1807                                FM10K_MULT_RX_DESC, nb_desc)) {
1808                PMD_INIT_LOG(ERR, "Number of Rx descriptors (%u) must be "
1809                        "less than or equal to %"PRIu32", "
1810                        "greater than or equal to %u, "
1811                        "and a multiple of %u",
1812                        nb_desc, (uint32_t)FM10K_MAX_RX_DESC, FM10K_MIN_RX_DESC,
1813                        FM10K_MULT_RX_DESC);
1814                return -EINVAL;
1815        }
1816
1817        /*
1818         * If this queue already exists, free the associated memory. The
1819         * queue cannot be reused in case we need to allocate memory on a
1820         * different socket than was previously used.
1821         */
1822        if (dev->data->rx_queues[queue_id] != NULL) {
1823                rx_queue_free(dev->data->rx_queues[queue_id]);
1824                dev->data->rx_queues[queue_id] = NULL;
1825        }
1826
1827        /* allocate memory for the queue structure */
1828        q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
1829                                socket_id);
1830        if (q == NULL) {
1831                PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
1832                return -ENOMEM;
1833        }
1834
1835        /* setup queue */
1836        q->mp = mp;
1837        q->nb_desc = nb_desc;
1838        q->nb_fake_desc = FM10K_MULT_RX_DESC;
1839        q->port_id = dev->data->port_id;
1840        q->queue_id = queue_id;
1841        q->tail_ptr = (volatile uint32_t *)
1842                &((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)];
1843        q->offloads = offloads;
1844        if (handle_rxconf(q, conf))
1845                return -EINVAL;
1846
1847        /* allocate memory for the software ring */
1848        q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
1849                        (nb_desc + q->nb_fake_desc) * sizeof(struct rte_mbuf *),
1850                        RTE_CACHE_LINE_SIZE, socket_id);
1851        if (q->sw_ring == NULL) {
1852                PMD_INIT_LOG(ERR, "Cannot allocate software ring");
1853                rte_free(q);
1854                return -ENOMEM;
1855        }
1856
1857        /*
1858         * allocate memory for the hardware descriptor ring. A memzone large
1859         * enough to hold the maximum ring size is requested to allow for
1860         * resizing in later calls to the queue setup function.
1861         */
1862        mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_id,
1863                                      FM10K_MAX_RX_RING_SZ, FM10K_ALIGN_RX_DESC,
1864                                      socket_id);
1865        if (mz == NULL) {
1866                PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
1867                rte_free(q->sw_ring);
1868                rte_free(q);
1869                return -ENOMEM;
1870        }
1871        q->hw_ring = mz->addr;
1872        q->hw_ring_phys_addr = mz->iova;
1873
1874        /* Check if the number of descriptors satisfies the vector Rx requirement */
1875        if (!rte_is_power_of_2(nb_desc)) {
1876                PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Vector Rx "
1877                                    "preconditions - canceling the feature for "
1878                                    "the whole port[%d]",
1879                             q->queue_id, q->port_id);
1880                dev_info->rx_vec_allowed = false;
1881        } else
1882                fm10k_rxq_vec_setup(q);
1883
1884        dev->data->rx_queues[queue_id] = q;
1885        return 0;
1886}
1887
1888static void
1889fm10k_rx_queue_release(void *queue)
1890{
1891        PMD_INIT_FUNC_TRACE();
1892
1893        rx_queue_free(queue);
1894}
1895
1896static inline int
1897handle_txconf(struct fm10k_tx_queue *q, const struct rte_eth_txconf *conf)
1898{
1899        uint16_t tx_free_thresh;
1900        uint16_t tx_rs_thresh;
1901
1902        /* the constraint macros require that tx_free_thresh is configured
1903         * before tx_rs_thresh */
1904        if (conf->tx_free_thresh == 0)
1905                tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(q);
1906        else
1907                tx_free_thresh = conf->tx_free_thresh;
1908
1909        /* make sure the requested threshold satisfies the constraints */
1910        if (check_thresh(FM10K_TX_FREE_THRESH_MIN(q),
1911                        FM10K_TX_FREE_THRESH_MAX(q),
1912                        FM10K_TX_FREE_THRESH_DIV(q),
1913                        tx_free_thresh)) {
1914                PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be "
1915                        "less than or equal to %u, "
1916                        "greater than or equal to %u, "
1917                        "and a divisor of %u",
1918                        tx_free_thresh, FM10K_TX_FREE_THRESH_MAX(q),
1919                        FM10K_TX_FREE_THRESH_MIN(q),
1920                        FM10K_TX_FREE_THRESH_DIV(q));
1921                return -EINVAL;
1922        }
1923
1924        q->free_thresh = tx_free_thresh;
1925
1926        if (conf->tx_rs_thresh == 0)
1927                tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(q);
1928        else
1929                tx_rs_thresh = conf->tx_rs_thresh;
1930
1931        q->tx_deferred_start = conf->tx_deferred_start;
1932
1933        /* make sure the requested threshold satisfies the constraints */
1934        if (check_thresh(FM10K_TX_RS_THRESH_MIN(q),
1935                        FM10K_TX_RS_THRESH_MAX(q),
1936                        FM10K_TX_RS_THRESH_DIV(q),
1937                        tx_rs_thresh)) {
1938                PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be "
1939                        "less than or equal to %u, "
1940                        "greater than or equal to %u, "
1941                        "and a divisor of %u",
1942                        tx_rs_thresh, FM10K_TX_RS_THRESH_MAX(q),
1943                        FM10K_TX_RS_THRESH_MIN(q),
1944                        FM10K_TX_RS_THRESH_DIV(q));
1945                return -EINVAL;
1946        }
1947
1948        q->rs_thresh = tx_rs_thresh;
1949
1950        return 0;
1951}
1952
1953static uint64_t fm10k_get_tx_queue_offloads_capa(struct rte_eth_dev *dev)
1954{
1955        RTE_SET_USED(dev);
1956
1957        return 0;
1958}
1959
1960static uint64_t fm10k_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
1961{
1962        RTE_SET_USED(dev);
1963
1964        return (uint64_t)(DEV_TX_OFFLOAD_VLAN_INSERT |
1965                          DEV_TX_OFFLOAD_MULTI_SEGS  |
1966                          DEV_TX_OFFLOAD_IPV4_CKSUM  |
1967                          DEV_TX_OFFLOAD_UDP_CKSUM   |
1968                          DEV_TX_OFFLOAD_TCP_CKSUM   |
1969                          DEV_TX_OFFLOAD_TCP_TSO);
1970}
1971
1972static int
1973fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
1974        uint16_t nb_desc, unsigned int socket_id,
1975        const struct rte_eth_txconf *conf)
1976{
1977        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1978        struct fm10k_tx_queue *q;
1979        const struct rte_memzone *mz;
1980        uint64_t offloads;
1981
1982        PMD_INIT_FUNC_TRACE();
1983
1984        offloads = conf->offloads | dev->data->dev_conf.txmode.offloads;
1985
1986        /* make sure a valid number of descriptors has been requested */
1987        if (check_nb_desc(FM10K_MIN_TX_DESC, FM10K_MAX_TX_DESC,
1988                                FM10K_MULT_TX_DESC, nb_desc)) {
1989                PMD_INIT_LOG(ERR, "Number of Tx descriptors (%u) must be "
1990                        "less than or equal to %"PRIu32", "
1991                        "greater than or equal to %u, "
1992                        "and a multiple of %u",
1993                        nb_desc, (uint32_t)FM10K_MAX_TX_DESC, FM10K_MIN_TX_DESC,
1994                        FM10K_MULT_TX_DESC);
1995                return -EINVAL;
1996        }
1997
1998        /*
1999         * If this queue already exists, free the associated memory. The
2000         * queue cannot be reused in case we need to allocate memory on a
2001         * different socket than was previously used.
2002         */
2003        if (dev->data->tx_queues[queue_id] != NULL) {
2004                struct fm10k_tx_queue *txq = dev->data->tx_queues[queue_id];
2005
2006                tx_queue_free(txq);
2007                dev->data->tx_queues[queue_id] = NULL;
2008        }
2009
2010        /* allocate memory for the queue structure */
2011        q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
2012                                socket_id);
2013        if (q == NULL) {
2014                PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
2015                return -ENOMEM;
2016        }
2017
2018        /* setup queue */
2019        q->nb_desc = nb_desc;
2020        q->port_id = dev->data->port_id;
2021        q->queue_id = queue_id;
2022        q->offloads = offloads;
2023        q->ops = &def_txq_ops;
2024        q->tail_ptr = (volatile uint32_t *)
2025                &((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)];
2026        if (handle_txconf(q, conf))
2027                return -EINVAL;
2028
2029        /* allocate memory for the software ring */
2030        q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
2031                                        nb_desc * sizeof(struct rte_mbuf *),
2032                                        RTE_CACHE_LINE_SIZE, socket_id);
2033        if (q->sw_ring == NULL) {
2034                PMD_INIT_LOG(ERR, "Cannot allocate software ring");
2035                rte_free(q);
2036                return -ENOMEM;
2037        }
2038
2039        /*
2040         * allocate memory for the hardware descriptor ring. A memzone large
2041         * enough to hold the maximum ring size is requested to allow for
2042         * resizing in later calls to the queue setup function.
2043         */
2044        mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_id,
2045                                      FM10K_MAX_TX_RING_SZ, FM10K_ALIGN_TX_DESC,
2046                                      socket_id);
2047        if (mz == NULL) {
2048                PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
2049                rte_free(q->sw_ring);
2050                rte_free(q);
2051                return -ENOMEM;
2052        }
2053        q->hw_ring = mz->addr;
2054        q->hw_ring_phys_addr = mz->iova;
2055
2056        /*
2057         * allocate memory for the RS bit tracker. Enough slots to hold the
2058         * descriptor index for each RS bit needing to be set are required.
2059         */
2060        q->rs_tracker.list = rte_zmalloc_socket("fm10k rs tracker",
2061                                ((nb_desc + 1) / q->rs_thresh) *
2062                                sizeof(uint16_t),
2063                                RTE_CACHE_LINE_SIZE, socket_id);
2064        if (q->rs_tracker.list == NULL) {
2065                PMD_INIT_LOG(ERR, "Cannot allocate RS bit tracker");
2066                rte_free(q->sw_ring);
2067                rte_free(q);
2068                return -ENOMEM;
2069        }
2070
2071        dev->data->tx_queues[queue_id] = q;
2072        return 0;
2073}
2074
2075static void
2076fm10k_tx_queue_release(void *queue)
2077{
2078        struct fm10k_tx_queue *q = queue;
2079        PMD_INIT_FUNC_TRACE();
2080
2081        tx_queue_free(q);
2082}
2083
2084static int
2085fm10k_reta_update(struct rte_eth_dev *dev,
2086                        struct rte_eth_rss_reta_entry64 *reta_conf,
2087                        uint16_t reta_size)
2088{
2089        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2090        uint16_t i, j, idx, shift;
2091        uint8_t mask;
2092        uint32_t reta;
2093
2094        PMD_INIT_FUNC_TRACE();
2095
2096        if (reta_size > FM10K_MAX_RSS_INDICES) {
2097                PMD_INIT_LOG(ERR, "The size of the configured hash lookup table "
2098                        "(%d) doesn't match the number the hardware can support "
2099                        "(%d)", reta_size, FM10K_MAX_RSS_INDICES);
2100                return -EINVAL;
2101        }
2102
2103        /*
2104         * Update Redirection Table RETA[n], n=0..31. The redirection table has
2105         * 128 entries in 32 registers.
2106         */
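        /*
         * Each 32-bit RETA register packs four 8-bit queue indices, entry 0
         * in the least-significant byte. E.g. writing the four table entries
         * {5, 9, 2, 7} for one register assembles the value 0x07020905 below.
         */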
2107        for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
2108                idx = i / RTE_RETA_GROUP_SIZE;
2109                shift = i % RTE_RETA_GROUP_SIZE;
2110                mask = (uint8_t)((reta_conf[idx].mask >> shift) &
2111                                BIT_MASK_PER_UINT32);
2112                if (mask == 0)
2113                        continue;
2114
2115                reta = 0;
2116                if (mask != BIT_MASK_PER_UINT32)
2117                        reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
2118
2119                for (j = 0; j < CHARS_PER_UINT32; j++) {
2120                        if (mask & (0x1 << j)) {
2121                                if (mask != 0xF)
2122                                        reta &= ~(UINT8_MAX << CHAR_BIT * j);
2123                                reta |= reta_conf[idx].reta[shift + j] <<
2124                                                (CHAR_BIT * j);
2125                        }
2126                }
2127                FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2), reta);
2128        }
2129
2130        return 0;
2131}
2132
2133static int
2134fm10k_reta_query(struct rte_eth_dev *dev,
2135                        struct rte_eth_rss_reta_entry64 *reta_conf,
2136                        uint16_t reta_size)
2137{
2138        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2139        uint16_t i, j, idx, shift;
2140        uint8_t mask;
2141        uint32_t reta;
2142
2143        PMD_INIT_FUNC_TRACE();
2144
2145        if (reta_size < FM10K_MAX_RSS_INDICES) {
2146                PMD_INIT_LOG(ERR, "The size of the configured hash lookup table "
2147                        "(%d) doesn't match the number the hardware can support "
2148                        "(%d)", reta_size, FM10K_MAX_RSS_INDICES);
2149                return -EINVAL;
2150        }
2151
2152        /*
2153         * Read Redirection Table RETA[n], n=0..31. The redirection table has
2154         * 128 entries in 32 registers.
2155         */
2156        for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
2157                idx = i / RTE_RETA_GROUP_SIZE;
2158                shift = i % RTE_RETA_GROUP_SIZE;
2159                mask = (uint8_t)((reta_conf[idx].mask >> shift) &
2160                                BIT_MASK_PER_UINT32);
2161                if (mask == 0)
2162                        continue;
2163
2164                reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
2165                for (j = 0; j < CHARS_PER_UINT32; j++) {
2166                        if (mask & (0x1 << j))
2167                                reta_conf[idx].reta[shift + j] = ((reta >>
2168                                        CHAR_BIT * j) & UINT8_MAX);
2169                }
2170        }
2171
2172        return 0;
2173}
2174
2175static int
2176fm10k_rss_hash_update(struct rte_eth_dev *dev,
2177        struct rte_eth_rss_conf *rss_conf)
2178{
2179        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2180        uint32_t *key = (uint32_t *)rss_conf->rss_key;
2181        uint32_t mrqc;
2182        uint64_t hf = rss_conf->rss_hf;
2183        int i;
2184
2185        PMD_INIT_FUNC_TRACE();
2186
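        /*
         * The RSS key is programmed as FM10K_RSSRK_SIZE 32-bit registers of
         * FM10K_RSSRK_ENTRIES_PER_REG bytes each (40 bytes in total with the
         * usual 10 x 4 layout), so a shorter user key is rejected up front.
         */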
2187        if (key && (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
2188                                FM10K_RSSRK_ENTRIES_PER_REG))
2189                return -EINVAL;
2190
2191        if (hf == 0)
2192                return -EINVAL;
2193
2194        mrqc = 0;
2195        mrqc |= (hf & ETH_RSS_IPV4)              ? FM10K_MRQC_IPV4     : 0;
2196        mrqc |= (hf & ETH_RSS_IPV6)              ? FM10K_MRQC_IPV6     : 0;
2197        mrqc |= (hf & ETH_RSS_IPV6_EX)           ? FM10K_MRQC_IPV6     : 0;
2198        mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP)  ? FM10K_MRQC_TCP_IPV4 : 0;
2199        mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP)  ? FM10K_MRQC_TCP_IPV6 : 0;
2200        mrqc |= (hf & ETH_RSS_IPV6_TCP_EX)       ? FM10K_MRQC_TCP_IPV6 : 0;
2201        mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP)  ? FM10K_MRQC_UDP_IPV4 : 0;
2202        mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP)  ? FM10K_MRQC_UDP_IPV6 : 0;
2203        mrqc |= (hf & ETH_RSS_IPV6_UDP_EX)       ? FM10K_MRQC_UDP_IPV6 : 0;
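        /*
         * E.g. requesting ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP ends up as
         * FM10K_MRQC_IPV4 | FM10K_MRQC_TCP_IPV4 in the MRQC(0) register.
         */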
2204
2205        /* If the mapping doesn't match any supported hash type, return an error */
2206        if (mrqc == 0)
2207                return -EINVAL;
2208
2209        if (key != NULL)
2210                for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
2211                        FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);
2212
2213        FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
2214
2215        return 0;
2216}
2217
2218static int
2219fm10k_rss_hash_conf_get(struct rte_eth_dev *dev,
2220        struct rte_eth_rss_conf *rss_conf)
2221{
2222        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2223        uint32_t *key = (uint32_t *)rss_conf->rss_key;
2224        uint32_t mrqc;
2225        uint64_t hf;
2226        int i;
2227
2228        PMD_INIT_FUNC_TRACE();
2229
2230        if (key && (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
2231                                FM10K_RSSRK_ENTRIES_PER_REG))
2232                return -EINVAL;
2233
2234        if (key != NULL)
2235                for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
2236                        key[i] = FM10K_READ_REG(hw, FM10K_RSSRK(0, i));
2237
2238        mrqc = FM10K_READ_REG(hw, FM10K_MRQC(0));
2239        hf = 0;
2240        hf |= (mrqc & FM10K_MRQC_IPV4)     ? ETH_RSS_IPV4              : 0;
2241        hf |= (mrqc & FM10K_MRQC_IPV6)     ? ETH_RSS_IPV6              : 0;
2242        hf |= (mrqc & FM10K_MRQC_IPV6)     ? ETH_RSS_IPV6_EX           : 0;
2243        hf |= (mrqc & FM10K_MRQC_TCP_IPV4) ? ETH_RSS_NONFRAG_IPV4_TCP  : 0;
2244        hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_NONFRAG_IPV6_TCP  : 0;
2245        hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_IPV6_TCP_EX       : 0;
2246        hf |= (mrqc & FM10K_MRQC_UDP_IPV4) ? ETH_RSS_NONFRAG_IPV4_UDP  : 0;
2247        hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_NONFRAG_IPV6_UDP  : 0;
2248        hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_IPV6_UDP_EX       : 0;
2249
2250        rss_conf->rss_hf = hf;
2251
2252        return 0;
2253}
2254
2255static void
2256fm10k_dev_enable_intr_pf(struct rte_eth_dev *dev)
2257{
2258        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2259        uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;
2260
2261        /* Bind all local non-queue interrupt to vector 0 */
2262        int_map |= FM10K_MISC_VEC_ID;
2263
2264        FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_mailbox), int_map);
2265        FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_pcie_fault), int_map);
2266        FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_up_down), int_map);
2267        FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_event), int_map);
2268        FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_sram), int_map);
2269        FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_vflr), int_map);
2270
2271        /* Enable misc causes */
2272        FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_ENABLE(PCA_FAULT) |
2273                                FM10K_EIMR_ENABLE(THI_FAULT) |
2274                                FM10K_EIMR_ENABLE(FUM_FAULT) |
2275                                FM10K_EIMR_ENABLE(MAILBOX) |
2276                                FM10K_EIMR_ENABLE(SWITCHREADY) |
2277                                FM10K_EIMR_ENABLE(SWITCHNOTREADY) |
2278                                FM10K_EIMR_ENABLE(SRAMERROR) |
2279                                FM10K_EIMR_ENABLE(VFLR));
2280
2281        /* Enable ITR 0 */
2282        FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
2283                                        FM10K_ITR_MASK_CLEAR);
2284        FM10K_WRITE_FLUSH(hw);
2285}
2286
2287static void
2288fm10k_dev_disable_intr_pf(struct rte_eth_dev *dev)
2289{
2290        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2291        uint32_t int_map = FM10K_INT_MAP_DISABLE;
2292
2293        int_map |= FM10K_MISC_VEC_ID;
2294
2295        FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_mailbox), int_map);
2296        FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_pcie_fault), int_map);
2297        FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_up_down), int_map);
2298        FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_event), int_map);
2299        FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_sram), int_map);
2300        FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_vflr), int_map);
2301
2302        /* Disable misc causes */
2303        FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_DISABLE(PCA_FAULT) |
2304                                FM10K_EIMR_DISABLE(THI_FAULT) |
2305                                FM10K_EIMR_DISABLE(FUM_FAULT) |
2306                                FM10K_EIMR_DISABLE(MAILBOX) |
2307                                FM10K_EIMR_DISABLE(SWITCHREADY) |
2308                                FM10K_EIMR_DISABLE(SWITCHNOTREADY) |
2309                                FM10K_EIMR_DISABLE(SRAMERROR) |
2310                                FM10K_EIMR_DISABLE(VFLR));
2311
2312        /* Disable ITR 0 */
2313        FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_MASK_SET);
2314        FM10K_WRITE_FLUSH(hw);
2315}
2316
2317static void
2318fm10k_dev_enable_intr_vf(struct rte_eth_dev *dev)
2319{
2320        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2321        uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;
2322
2323        /* Bind all local non-queue interrupt to vector 0 */
2324        int_map |= FM10K_MISC_VEC_ID;
2325
2326        /* Only INT 0 is available; the other 15 are reserved. */
2327        FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);
2328
2329        /* Enable ITR 0 */
2330        FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
2331                                        FM10K_ITR_MASK_CLEAR);
2332        FM10K_WRITE_FLUSH(hw);
2333}
2334
2335static void
2336fm10k_dev_disable_intr_vf(struct rte_eth_dev *dev)
2337{
2338        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2339        uint32_t int_map = FM10K_INT_MAP_DISABLE;
2340
2341        int_map |= FM10K_MISC_VEC_ID;
2342
2343        /* Only INT 0 is available; the other 15 are reserved. */
2344        FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);
2345
2346        /* Disable ITR 0 */
2347        FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_MASK_SET);
2348        FM10K_WRITE_FLUSH(hw);
2349}
2350
2351static int
2352fm10k_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
2353{
2354        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2355        struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
2356
2357        /* Enable ITR */
2358        if (hw->mac.type == fm10k_mac_pf)
2359                FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, queue_id)),
2360                        FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR);
2361        else
2362                FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, queue_id)),
2363                        FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR);
2364        rte_intr_ack(&pdev->intr_handle);
2365        return 0;
2366}
2367
2368static int
2369fm10k_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
2370{
2371        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2372        struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
2373
2374        /* Disable ITR */
2375        if (hw->mac.type == fm10k_mac_pf)
2376                FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, queue_id)),
2377                        FM10K_ITR_MASK_SET);
2378        else
2379                FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, queue_id)),
2380                        FM10K_ITR_MASK_SET);
2381        return 0;
2382}
2383
2384static int
2385fm10k_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
2386{
2387        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2388        struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
2389        struct rte_intr_handle *intr_handle = &pdev->intr_handle;
2390        uint32_t intr_vector, vec;
2391        uint16_t queue_id;
2392        int result = 0;
2393
2394        /* fm10k needs one separate interrupt for the mailbox,
2395         * so only drivers which support multiple interrupt vectors,
2396         * e.g. vfio-pci, can work in fm10k interrupt mode
2397         */
2398        if (!rte_intr_cap_multiple(intr_handle) ||
2399                        dev->data->dev_conf.intr_conf.rxq == 0)
2400                return result;
2401
2402        intr_vector = dev->data->nb_rx_queues;
2403
2404        /* disable interrupt first */
2405        rte_intr_disable(intr_handle);
2406        if (hw->mac.type == fm10k_mac_pf)
2407                fm10k_dev_disable_intr_pf(dev);
2408        else
2409                fm10k_dev_disable_intr_vf(dev);
2410
2411        if (rte_intr_efd_enable(intr_handle, intr_vector)) {
2412                PMD_INIT_LOG(ERR, "Failed to init event fd");
2413                result = -EIO;
2414        }
2415
2416        if (rte_intr_dp_is_en(intr_handle) && !result) {
2417                intr_handle->intr_vec = rte_zmalloc("intr_vec",
2418                        dev->data->nb_rx_queues * sizeof(int), 0);
2419                if (intr_handle->intr_vec) {
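                        /*
                         * Give each Rx queue its own vector starting at
                         * FM10K_RX_VEC_START; once the allocated event fds
                         * run out, the remaining queues share the last
                         * vector.
                         */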
2420                        for (queue_id = 0, vec = FM10K_RX_VEC_START;
2421                                        queue_id < dev->data->nb_rx_queues;
2422                                        queue_id++) {
2423                                intr_handle->intr_vec[queue_id] = vec;
2424                                if (vec < intr_handle->nb_efd - 1
2425                                                + FM10K_RX_VEC_START)
2426                                        vec++;
2427                        }
2428                } else {
2429                        PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
2430                                " intr_vec", dev->data->nb_rx_queues);
2431                        rte_intr_efd_disable(intr_handle);
2432                        result = -ENOMEM;
2433                }
2434        }
2435
2436        if (hw->mac.type == fm10k_mac_pf)
2437                fm10k_dev_enable_intr_pf(dev);
2438        else
2439                fm10k_dev_enable_intr_vf(dev);
2440        rte_intr_enable(intr_handle);
2441        hw->mac.ops.update_int_moderator(hw);
2442        return result;
2443}
2444
2445static int
2446fm10k_dev_handle_fault(struct fm10k_hw *hw, uint32_t eicr)
2447{
2448        struct fm10k_fault fault;
2449        int err;
2450        const char *estr = "Unknown error";
2451
2452        /* Process PCA fault */
2453        if (eicr & FM10K_EICR_PCA_FAULT) {
2454                err = fm10k_get_fault(hw, FM10K_PCA_FAULT, &fault);
2455                if (err)
2456                        goto error;
2457                switch (fault.type) {
2458                case PCA_NO_FAULT:
2459                        estr = "PCA_NO_FAULT"; break;
2460                case PCA_UNMAPPED_ADDR:
2461                        estr = "PCA_UNMAPPED_ADDR"; break;
2462                case PCA_BAD_QACCESS_PF:
2463                        estr = "PCA_BAD_QACCESS_PF"; break;
2464                case PCA_BAD_QACCESS_VF:
2465                        estr = "PCA_BAD_QACCESS_VF"; break;
2466                case PCA_MALICIOUS_REQ:
2467                        estr = "PCA_MALICIOUS_REQ"; break;
2468                case PCA_POISONED_TLP:
2469                        estr = "PCA_POISONED_TLP"; break;
2470                case PCA_TLP_ABORT:
2471                        estr = "PCA_TLP_ABORT"; break;
2472                default:
2473                        goto error;
2474                }
2475                PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
2476                        estr, fault.func ? "VF" : "PF", fault.func,
2477                        fault.address, fault.specinfo);
2478        }
2479
2480        /* Process THI fault */
2481        if (eicr & FM10K_EICR_THI_FAULT) {
2482                err = fm10k_get_fault(hw, FM10K_THI_FAULT, &fault);
2483                if (err)
2484                        goto error;
2485                switch (fault.type) {
2486                case THI_NO_FAULT:
2487                        estr = "THI_NO_FAULT"; break;
2488                case THI_MAL_DIS_Q_FAULT:
2489                        estr = "THI_MAL_DIS_Q_FAULT"; break;
2490                default:
2491                        goto error;
2492                }
2493                PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
2494                        estr, fault.func ? "VF" : "PF", fault.func,
2495                        fault.address, fault.specinfo);
2496        }
2497
2498        /* Process FUM fault */
2499        if (eicr & FM10K_EICR_FUM_FAULT) {
2500                err = fm10k_get_fault(hw, FM10K_FUM_FAULT, &fault);
2501                if (err)
2502                        goto error;
2503                switch (fault.type) {
2504                case FUM_NO_FAULT:
2505                        estr = "FUM_NO_FAULT"; break;
2506                case FUM_UNMAPPED_ADDR:
2507                        estr = "FUM_UNMAPPED_ADDR"; break;
2508                case FUM_POISONED_TLP:
2509                        estr = "FUM_POISONED_TLP"; break;
2510                case FUM_BAD_VF_QACCESS:
2511                        estr = "FUM_BAD_VF_QACCESS"; break;
2512                case FUM_ADD_DECODE_ERR:
2513                        estr = "FUM_ADD_DECODE_ERR"; break;
2514                case FUM_RO_ERROR:
2515                        estr = "FUM_RO_ERROR"; break;
2516                case FUM_QPRC_CRC_ERROR:
2517                        estr = "FUM_QPRC_CRC_ERROR"; break;
2518                case FUM_CSR_TIMEOUT:
2519                        estr = "FUM_CSR_TIMEOUT"; break;
2520                case FUM_INVALID_TYPE:
2521                        estr = "FUM_INVALID_TYPE"; break;
2522                case FUM_INVALID_LENGTH:
2523                        estr = "FUM_INVALID_LENGTH"; break;
2524                case FUM_INVALID_BE:
2525                        estr = "FUM_INVALID_BE"; break;
2526                case FUM_INVALID_ALIGN:
2527                        estr = "FUM_INVALID_ALIGN"; break;
2528                default:
2529                        goto error;
2530                }
2531                PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
2532                        estr, fault.func ? "VF" : "PF", fault.func,
2533                        fault.address, fault.specinfo);
2534        }
2535
2536        return 0;
2537error:
2538        PMD_INIT_LOG(ERR, "Failed to handle fault event.");
2539        return err;
2540}
2541
2542/**
2543 * PF interrupt handler triggered by NIC for handling specific interrupt.
2544 *
2545 * @param handle
2546 *  Pointer to interrupt handle.
2547 * @param param
2548 *  The address of parameter (struct rte_eth_dev *) registered before.
2549 *
2550 * @return
2551 *  void
2552 */
2553static void
2554fm10k_dev_interrupt_handler_pf(void *param)
2555{
2556        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2557        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2558        uint32_t cause, status;
2559        struct fm10k_dev_info *dev_info =
2560                FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
2561        int status_mbx;
2562        s32 err;
2563
2564        if (hw->mac.type != fm10k_mac_pf)
2565                return;
2566
2567        cause = FM10K_READ_REG(hw, FM10K_EICR);
2568
2569        /* Handle PCI fault cases */
2570        if (cause & FM10K_EICR_FAULT_MASK) {
2571                PMD_INIT_LOG(ERR, "INT: fault detected!");
2572                fm10k_dev_handle_fault(hw, cause);
2573        }
2574
2575        /* Handle switch up/down */
2576        if (cause & FM10K_EICR_SWITCHNOTREADY)
2577                PMD_INIT_LOG(ERR, "INT: Switch is not ready");
2578
2579        if (cause & FM10K_EICR_SWITCHREADY) {
2580                PMD_INIT_LOG(INFO, "INT: Switch is ready");
2581                if (dev_info->sm_down == 1) {
2582                        fm10k_mbx_lock(hw);
2583
2584                        /* For recreating logical ports */
2585                        status_mbx = hw->mac.ops.update_lport_state(hw,
2586                                        hw->mac.dglort_map, MAX_LPORT_NUM, 1);
2587                        if (status_mbx == FM10K_SUCCESS)
2588                                PMD_INIT_LOG(INFO,
2589                                        "INT: Recreated Logical port");
2590                        else
2591                                PMD_INIT_LOG(INFO,
2592                                        "INT: Logical ports weren't recreated");
2593
2594                        status_mbx = hw->mac.ops.update_xcast_mode(hw,
2595                                hw->mac.dglort_map, FM10K_XCAST_MODE_NONE);
2596                        if (status_mbx != FM10K_SUCCESS)
2597                                PMD_INIT_LOG(ERR, "Failed to set XCAST mode");
2598
2599                        fm10k_mbx_unlock(hw);
2600
2601                        /* first clear the internal SW recording structure */
2602                        if (!(dev->data->dev_conf.rxmode.mq_mode &
2603                                                ETH_MQ_RX_VMDQ_FLAG))
2604                                fm10k_vlan_filter_set(dev, hw->mac.default_vid,
2605                                        false);
2606
2607                        fm10k_MAC_filter_set(dev, hw->mac.addr, false,
2608                                        MAIN_VSI_POOL_NUMBER);
2609
2610                        /*
2611                         * Add default mac address and vlan for the logical
2612                         * ports that have been created, leave to the
2613                         * application to fully recover Rx filtering.
2614                         */
2615                        fm10k_MAC_filter_set(dev, hw->mac.addr, true,
2616                                        MAIN_VSI_POOL_NUMBER);
2617
2618                        if (!(dev->data->dev_conf.rxmode.mq_mode &
2619                                                ETH_MQ_RX_VMDQ_FLAG))
2620                                fm10k_vlan_filter_set(dev, hw->mac.default_vid,
2621                                        true);
2622
2623                        dev_info->sm_down = 0;
2624                        _rte_eth_dev_callback_process(dev,
2625                                        RTE_ETH_EVENT_INTR_LSC,
2626                                        NULL);
2627                }
2628        }
2629
2630        /* Handle mailbox message */
2631        fm10k_mbx_lock(hw);
2632        err = hw->mbx.ops.process(hw, &hw->mbx);
2633        fm10k_mbx_unlock(hw);
2634
2635        if (err == FM10K_ERR_RESET_REQUESTED) {
2636                PMD_INIT_LOG(INFO, "INT: Switch is down");
2637                dev_info->sm_down = 1;
2638                _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
2639                                NULL);
2640        }
2641
2642        /* Handle SRAM error */
2643        if (cause & FM10K_EICR_SRAMERROR) {
2644                PMD_INIT_LOG(ERR, "INT: SRAM error on PEP");
2645
2646                status = FM10K_READ_REG(hw, FM10K_SRAM_IP);
2647                /* Write to clear pending bits */
2648                FM10K_WRITE_REG(hw, FM10K_SRAM_IP, status);
2649
2650                /* TODO: print out error message after shared code updates */
2651        }
2652
2653        /* Clear these 3 events if any are pending */
2654        cause &= FM10K_EICR_SWITCHNOTREADY | FM10K_EICR_MAILBOX |
2655                 FM10K_EICR_SWITCHREADY;
2656        if (cause)
2657                FM10K_WRITE_REG(hw, FM10K_EICR, cause);
2658
2659        /* Re-enable interrupt from device side */
2660        FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
2661                                        FM10K_ITR_MASK_CLEAR);
2662        /* Re-enable interrupt from host side */
2663        rte_intr_ack(dev->intr_handle);
2664}
2665
2666/**
2667 * VF interrupt handler triggered by NIC for handling specific interrupt.
2668 *
2669 * @param handle
2670 *  Pointer to interrupt handle.
2671 * @param param
2672 *  The address of parameter (struct rte_eth_dev *) registered before.
2673 *
2674 * @return
2675 *  void
2676 */
2677static void
2678fm10k_dev_interrupt_handler_vf(void *param)
2679{
2680        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2681        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2682        struct fm10k_mbx_info *mbx = &hw->mbx;
2683        struct fm10k_dev_info *dev_info =
2684                FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
2685        const enum fm10k_mbx_state state = mbx->state;
2686        int status_mbx;
2687
2688        if (hw->mac.type != fm10k_mac_vf)
2689                return;
2690
2691        /* Handle mailbox message if lock is acquired */
2692        fm10k_mbx_lock(hw);
2693        hw->mbx.ops.process(hw, &hw->mbx);
2694        fm10k_mbx_unlock(hw);
2695
2696        if (state == FM10K_STATE_OPEN && mbx->state == FM10K_STATE_CONNECT) {
2697                PMD_INIT_LOG(INFO, "INT: Switch has gone down");
2698
2699                fm10k_mbx_lock(hw);
2700                hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
2701                                MAX_LPORT_NUM, 1);
2702                fm10k_mbx_unlock(hw);
2703
2704                /* Setting reset flag */
2705                dev_info->sm_down = 1;
2706                _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
2707                                NULL);
2708        }
2709
2710        if (dev_info->sm_down == 1 &&
2711                        hw->mac.dglort_map == FM10K_DGLORTMAP_ZERO) {
2712                PMD_INIT_LOG(INFO, "INT: Switch has gone up");
2713                fm10k_mbx_lock(hw);
2714                status_mbx = hw->mac.ops.update_xcast_mode(hw,
2715                                hw->mac.dglort_map, FM10K_XCAST_MODE_NONE);
2716                if (status_mbx != FM10K_SUCCESS)
2717                        PMD_INIT_LOG(ERR, "Failed to set XCAST mode");
2718                fm10k_mbx_unlock(hw);
2719
2720                /* first clear the internal SW recording structure */
2721                fm10k_vlan_filter_set(dev, hw->mac.default_vid, false);
2722                fm10k_MAC_filter_set(dev, hw->mac.addr, false,
2723                                MAIN_VSI_POOL_NUMBER);
2724
2725                /*
2726                 * Add default mac address and vlan for the logical ports that
2727                 * have been created, leave to the application to fully recover
2728                 * Rx filtering.
2729                 */
2730                fm10k_MAC_filter_set(dev, hw->mac.addr, true,
2731                                MAIN_VSI_POOL_NUMBER);
2732                fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);
2733
2734                dev_info->sm_down = 0;
2735                _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
2736                                NULL);
2737        }
2738
2739        /* Re-enable interrupt from device side */
2740        FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
2741                                        FM10K_ITR_MASK_CLEAR);
2742        /* Re-enable interrupt from host side */
2743        rte_intr_ack(dev->intr_handle);
2744}
2745
2746/* Mailbox message handler in VF */
2747static const struct fm10k_msg_data fm10k_msgdata_vf[] = {
2748        FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
2749        FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_msg_mac_vlan_vf),
2750        FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf),
2751        FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
2752};
2753
2754static int
2755fm10k_setup_mbx_service(struct fm10k_hw *hw)
2756{
2757        int err = 0;
2758
2759        /* Initialize mailbox lock */
2760        fm10k_mbx_initlock(hw);
2761
2762        /* Replace default message handler with new ones */
2763        if (hw->mac.type == fm10k_mac_vf)
2764                err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_vf);
2765
2766        if (err) {
2767                PMD_INIT_LOG(ERR, "Failed to register mailbox handler, err: %d",
2768                                err);
2769                return err;
2770        }
2771        /* Connect to SM for PF device or PF for VF device */
2772        return hw->mbx.ops.connect(hw, &hw->mbx);
2773}
2774
2775static void
2776fm10k_close_mbx_service(struct fm10k_hw *hw)
2777{
2778        /* Disconnect from SM for PF device or PF for VF device */
2779        hw->mbx.ops.disconnect(hw, &hw->mbx);
2780}
2781
2782static void
2783fm10k_dev_close(struct rte_eth_dev *dev)
2784{
2785        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2786        struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
2787        struct rte_intr_handle *intr_handle = &pdev->intr_handle;
2788
2789        PMD_INIT_FUNC_TRACE();
2790
2791        fm10k_mbx_lock(hw);
2792        hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
2793                MAX_LPORT_NUM, false);
2794        fm10k_mbx_unlock(hw);
2795
2796        /* allow 100ms for device to quiesce */
2797        rte_delay_us(FM10K_SWITCH_QUIESCE_US);
2798
2799        /* Stop mailbox service first */
2800        fm10k_close_mbx_service(hw);
2801        fm10k_dev_stop(dev);
2802        fm10k_dev_queue_release(dev);
2803        fm10k_stop_hw(hw);
2804
2805        dev->dev_ops = NULL;
2806        dev->rx_pkt_burst = NULL;
2807        dev->tx_pkt_burst = NULL;
2808
2809        /* disable uio/vfio intr */
2810        rte_intr_disable(intr_handle);
2811
2812        /* PF and VF have different interrupt handling mechanisms */
2813        if (hw->mac.type == fm10k_mac_pf) {
2814                /* disable interrupt */
2815                fm10k_dev_disable_intr_pf(dev);
2816
2817                /* unregister callback func to eal lib */
2818                rte_intr_callback_unregister(intr_handle,
2819                        fm10k_dev_interrupt_handler_pf, (void *)dev);
2820        } else {
2821                /* disable interrupt */
2822                fm10k_dev_disable_intr_vf(dev);
2823
2824                rte_intr_callback_unregister(intr_handle,
2825                        fm10k_dev_interrupt_handler_vf, (void *)dev);
2826        }
2827}
2828
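/* ethdev callbacks implemented by the fm10k PMD */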
2829static const struct eth_dev_ops fm10k_eth_dev_ops = {
2830        .dev_configure          = fm10k_dev_configure,
2831        .dev_start              = fm10k_dev_start,
2832        .dev_stop               = fm10k_dev_stop,
2833        .dev_close              = fm10k_dev_close,
2834        .promiscuous_enable     = fm10k_dev_promiscuous_enable,
2835        .promiscuous_disable    = fm10k_dev_promiscuous_disable,
2836        .allmulticast_enable    = fm10k_dev_allmulticast_enable,
2837        .allmulticast_disable   = fm10k_dev_allmulticast_disable,
2838        .stats_get              = fm10k_stats_get,
2839        .xstats_get             = fm10k_xstats_get,
2840        .xstats_get_names       = fm10k_xstats_get_names,
2841        .stats_reset            = fm10k_stats_reset,
2842        .xstats_reset           = fm10k_stats_reset,
2843        .link_update            = fm10k_link_update,
2844        .dev_infos_get          = fm10k_dev_infos_get,
2845        .dev_supported_ptypes_get = fm10k_dev_supported_ptypes_get,
2846        .vlan_filter_set        = fm10k_vlan_filter_set,
2847        .vlan_offload_set       = fm10k_vlan_offload_set,
2848        .mac_addr_add           = fm10k_macaddr_add,
2849        .mac_addr_remove        = fm10k_macaddr_remove,
2850        .rx_queue_start         = fm10k_dev_rx_queue_start,
2851        .rx_queue_stop          = fm10k_dev_rx_queue_stop,
2852        .tx_queue_start         = fm10k_dev_tx_queue_start,
2853        .tx_queue_stop          = fm10k_dev_tx_queue_stop,
2854        .rx_queue_setup         = fm10k_rx_queue_setup,
2855        .rx_queue_release       = fm10k_rx_queue_release,
2856        .tx_queue_setup         = fm10k_tx_queue_setup,
2857        .tx_queue_release       = fm10k_tx_queue_release,
2858        .rx_queue_count         = fm10k_dev_rx_queue_count,
2859        .rx_descriptor_done     = fm10k_dev_rx_descriptor_done,
2860        .rx_descriptor_status = fm10k_dev_rx_descriptor_status,
2861        .tx_descriptor_status = fm10k_dev_tx_descriptor_status,
2862        .rx_queue_intr_enable   = fm10k_dev_rx_queue_intr_enable,
2863        .rx_queue_intr_disable  = fm10k_dev_rx_queue_intr_disable,
2864        .reta_update            = fm10k_reta_update,
2865        .reta_query             = fm10k_reta_query,
2866        .rss_hash_update        = fm10k_rss_hash_update,
2867        .rss_hash_conf_get      = fm10k_rss_hash_conf_get,
2868};
2869
2870static int ftag_check_handler(__rte_unused const char *key,
2871                const char *value, __rte_unused void *opaque)
2872{
2873        if (strcmp(value, "1"))
2874                return -1;
2875
2876        return 0;
2877}
2878
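/*
 * Check whether FTAG is requested via devargs. Returns 1 only when the
 * device was started with the "enable_ftag=1" key-value pair, e.g.
 * (hypothetical PCI address, and the allow/whitelist EAL option name
 * depends on the DPDK version in use):
 *   dpdk-app -w 0000:84:00.0,enable_ftag=1 ...
 * Any other value, a parse error or a missing key yields 0.
 */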
2879static int
2880fm10k_check_ftag(struct rte_devargs *devargs)
2881{
2882        struct rte_kvargs *kvlist;
2883        const char *ftag_key = "enable_ftag";
2884
2885        if (devargs == NULL)
2886                return 0;
2887
2888        kvlist = rte_kvargs_parse(devargs->args, NULL);
2889        if (kvlist == NULL)
2890                return 0;
2891
2892        if (!rte_kvargs_count(kvlist, ftag_key)) {
2893                rte_kvargs_free(kvlist);
2894                return 0;
2895        }
2896        /* FTAG is enabled when there's key-value pair: enable_ftag=1 */
2897        if (rte_kvargs_process(kvlist, ftag_key,
2898                                ftag_check_handler, NULL) < 0) {
2899                rte_kvargs_free(kvlist);
2900                return 0;
2901        }
2902        rte_kvargs_free(kvlist);
2903
2904        return 1;
2905}
2906
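/*
 * Vector Tx burst wrapper: hand packets to fm10k_xmit_fixed_burst_vec()
 * in chunks of at most txq->rs_thresh packets and stop early as soon as
 * a chunk is not fully transmitted (e.g. when the ring is full).
 */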
2907static uint16_t
2908fm10k_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
2909                    uint16_t nb_pkts)
2910{
2911        uint16_t nb_tx = 0;
2912        struct fm10k_tx_queue *txq = (struct fm10k_tx_queue *)tx_queue;
2913
2914        while (nb_pkts) {
2915                uint16_t ret, num;
2916
2917                num = (uint16_t)RTE_MIN(nb_pkts, txq->rs_thresh);
2918                ret = fm10k_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx],
2919                                                 num);
2920                nb_tx += ret;
2921                nb_pkts -= ret;
2922                if (ret < num)
2923                        break;
2924        }
2925
2926        return nb_tx;
2927}
2928
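/*
 * Select the Tx burst function. Secondary processes only mirror the choice
 * already made by the primary (based on queue 0); the primary propagates
 * the FTAG setting to every queue, checks whether all queues qualify for
 * vector Tx and installs either the vector path or the scalar path (with
 * the prepare callback) accordingly.
 */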
2929static void __rte_cold
2930fm10k_set_tx_function(struct rte_eth_dev *dev)
2931{
2932        struct fm10k_tx_queue *txq;
2933        int i;
2934        int use_sse = 1;
2935        uint16_t tx_ftag_en = 0;
2936
2937        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2938                /* The primary process has already set the ftag flag and offloads */
2939                txq = dev->data->tx_queues[0];
2940                if (fm10k_tx_vec_condition_check(txq)) {
2941                        dev->tx_pkt_burst = fm10k_xmit_pkts;
2942                        dev->tx_pkt_prepare = fm10k_prep_pkts;
2943                        PMD_INIT_LOG(DEBUG, "Use regular Tx func");
2944                } else {
2945                        PMD_INIT_LOG(DEBUG, "Use vector Tx func");
2946                        dev->tx_pkt_burst = fm10k_xmit_pkts_vec;
2947                        dev->tx_pkt_prepare = NULL;
2948                }
2949                return;
2950        }
2951
2952        if (fm10k_check_ftag(dev->device->devargs))
2953                tx_ftag_en = 1;
2954
2955        for (i = 0; i < dev->data->nb_tx_queues; i++) {
2956                txq = dev->data->tx_queues[i];
2957                txq->tx_ftag_en = tx_ftag_en;
2958                /* Check if Vector Tx is satisfied */
2959                if (fm10k_tx_vec_condition_check(txq))
2960                        use_sse = 0;
2961        }
2962
2963        if (use_sse) {
2964                PMD_INIT_LOG(DEBUG, "Use vector Tx func");
2965                for (i = 0; i < dev->data->nb_tx_queues; i++) {
2966                        txq = dev->data->tx_queues[i];
2967                        fm10k_txq_vec_setup(txq);
2968                }
2969                dev->tx_pkt_burst = fm10k_xmit_pkts_vec;
2970                dev->tx_pkt_prepare = NULL;
2971        } else {
2972                dev->tx_pkt_burst = fm10k_xmit_pkts;
2973                dev->tx_pkt_prepare = fm10k_prep_pkts;
2974                PMD_INIT_LOG(DEBUG, "Use regular Tx func");
2975        }
2976}
2977
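/*
 * Select the Rx burst function (vector vs. scalar, scattered vs. not) and,
 * in the primary process, record the choice and the FTAG flag in every Rx
 * queue so the datapath can act on them.
 */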
2978static void __rte_cold
2979fm10k_set_rx_function(struct rte_eth_dev *dev)
2980{
2981        struct fm10k_dev_info *dev_info =
2982                FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
2983        uint16_t i, rx_using_sse;
2984        uint16_t rx_ftag_en = 0;
2985
2986        if (fm10k_check_ftag(dev->device->devargs))
2987                rx_ftag_en = 1;
2988
2989        /* Vector Rx is used only when fm10k_rx_vec_condition_check() passes,
2990         * vector Rx has not been disabled for this port and Rx FTAG is off.
2991         */
2992        if (!fm10k_rx_vec_condition_check(dev) &&
2993                        dev_info->rx_vec_allowed && !rx_ftag_en) {
2994                if (dev->data->scattered_rx)
2995                        dev->rx_pkt_burst = fm10k_recv_scattered_pkts_vec;
2996                else
2997                        dev->rx_pkt_burst = fm10k_recv_pkts_vec;
2998        } else if (dev->data->scattered_rx)
2999                dev->rx_pkt_burst = fm10k_recv_scattered_pkts;
3000        else
3001                dev->rx_pkt_burst = fm10k_recv_pkts;
3002
3003        rx_using_sse =
3004                (dev->rx_pkt_burst == fm10k_recv_scattered_pkts_vec ||
3005                dev->rx_pkt_burst == fm10k_recv_pkts_vec);
3006
3007        if (rx_using_sse)
3008                PMD_INIT_LOG(DEBUG, "Use vector Rx func");
3009        else
3010                PMD_INIT_LOG(DEBUG, "Use regular Rx func");
3011
3012        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
3013                return;
3014
3015        for (i = 0; i < dev->data->nb_rx_queues; i++) {
3016                struct fm10k_rx_queue *rxq = dev->data->rx_queues[i];
3017
3018                rxq->rx_using_sse = rx_using_sse;
3019                rxq->rx_ftag_en = rx_ftag_en;
3020        }
3021}
3022
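/*
 * Initialize device parameters that cannot yet be read from the hardware:
 * assume best-case PCIe bus capabilities (see the @todo below) and reset
 * the per-device info flags.
 */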
3023static void
3024fm10k_params_init(struct rte_eth_dev *dev)
3025{
3026        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3027        struct fm10k_dev_info *info =
3028                FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
3029
3030        /* Initialize bus info. Normally we would call fm10k_get_bus_info(), but
3031         * there is no way to get link status without reading BAR4. Until this
3032         * works, assume we have maximum bandwidth.
3033         * @todo - fix bus info
3034         */
3035        hw->bus_caps.speed = fm10k_bus_speed_8000;
3036        hw->bus_caps.width = fm10k_bus_width_pcie_x8;
3037        hw->bus_caps.payload = fm10k_bus_payload_512;
3038        hw->bus.speed = fm10k_bus_speed_8000;
3039        hw->bus.width = fm10k_bus_width_pcie_x8;
3040        hw->bus.payload = fm10k_bus_payload_256;
3041
3042        info->rx_vec_allowed = true;
3043        info->sm_down = false;
3044}
3045
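/*
 * Device init: hook up the ethdev ops and default burst functions, do the
 * full bring-up in the primary process (shared code, hardware, MAC
 * addresses, mailbox, interrupts), wait for the switch manager and the
 * default VID to become ready, enable the logical ports and install the
 * default MAC filter. Secondary processes only pick up the burst functions
 * selected by the primary.
 */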
3046static int
3047eth_fm10k_dev_init(struct rte_eth_dev *dev)
3048{
3049        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3050        struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
3051        struct rte_intr_handle *intr_handle = &pdev->intr_handle;
3052        int diag, i;
3053        struct fm10k_macvlan_filter_info *macvlan;
3054
3055        PMD_INIT_FUNC_TRACE();
3056
3057        dev->dev_ops = &fm10k_eth_dev_ops;
3058        dev->rx_pkt_burst = &fm10k_recv_pkts;
3059        dev->tx_pkt_burst = &fm10k_xmit_pkts;
3060        dev->tx_pkt_prepare = &fm10k_prep_pkts;
3061
3062        /*
3063         * The primary process does the whole initialization; for secondary
3064         * processes, we just select the same Rx and Tx functions as the primary.
3065         */
3066        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
3067                fm10k_set_rx_function(dev);
3068                fm10k_set_tx_function(dev);
3069                return 0;
3070        }
3071
3072        rte_eth_copy_pci_info(dev, pdev);
3073
3074        macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
3075        memset(macvlan, 0, sizeof(*macvlan));
3076        /* Vendor and Device ID need to be set before init of shared code */
3077        memset(hw, 0, sizeof(*hw));
3078        hw->device_id = pdev->id.device_id;
3079        hw->vendor_id = pdev->id.vendor_id;
3080        hw->subsystem_device_id = pdev->id.subsystem_device_id;
3081        hw->subsystem_vendor_id = pdev->id.subsystem_vendor_id;
3082        hw->revision_id = 0;
3083        hw->hw_addr = (void *)pdev->mem_resource[0].addr;
3084        if (hw->hw_addr == NULL) {
3085                PMD_INIT_LOG(ERR, "Bad mem resource."
3086                        " Try to blacklist unused devices.");
3087                return -EIO;
3088        }
3089
3090        /* Store fm10k_adapter pointer */
3091        hw->back = dev->data->dev_private;
3092
3093        /* Initialize the shared code */
3094        diag = fm10k_init_shared_code(hw);
3095        if (diag != FM10K_SUCCESS) {
3096                PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
3097                return -EIO;
3098        }
3099
3100        /* Initialize parameters */
3101        fm10k_params_init(dev);
3102
3103        /* Initialize the hw */
3104        diag = fm10k_init_hw(hw);
3105        if (diag != FM10K_SUCCESS) {
3106                PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
3107                return -EIO;
3108        }
3109
3110        /* Initialize MAC address(es) */
3111        dev->data->mac_addrs = rte_zmalloc("fm10k",
3112                        RTE_ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM, 0);
3113        if (dev->data->mac_addrs == NULL) {
3114                PMD_INIT_LOG(ERR, "Cannot allocate memory for MAC addresses");
3115                return -ENOMEM;
3116        }
3117
3118        diag = fm10k_read_mac_addr(hw);
3119
3120        rte_ether_addr_copy((const struct rte_ether_addr *)hw->mac.addr,
3121                        &dev->data->mac_addrs[0]);
3122
3123        if (diag != FM10K_SUCCESS ||
3124                !rte_is_valid_assigned_ether_addr(dev->data->mac_addrs)) {
3125
3126                /* Generate a random addr */
3127                rte_eth_random_addr(hw->mac.addr);
3128                memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN);
3129                rte_ether_addr_copy((const struct rte_ether_addr *)hw->mac.addr,
3130                &dev->data->mac_addrs[0]);
3131        }
3132
3133        /* Pass the information to rte_eth_dev_close() that it should also
3134         * release the private port resources.
3135         */
3136        dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
3137
3138        /* Reset the hw statistics */
3139        diag = fm10k_stats_reset(dev);
3140        if (diag != 0) {
3141                PMD_INIT_LOG(ERR, "Stats reset failed: %d", diag);
3142                return diag;
3143        }
3144
3145        /* Reset the hw */
3146        diag = fm10k_reset_hw(hw);
3147        if (diag != FM10K_SUCCESS) {
3148                PMD_INIT_LOG(ERR, "Hardware reset failed: %d", diag);
3149                return -EIO;
3150        }
3151
3152        /* Setup mailbox service */
3153        diag = fm10k_setup_mbx_service(hw);
3154        if (diag != FM10K_SUCCESS) {
3155                PMD_INIT_LOG(ERR, "Failed to setup mailbox: %d", diag);
3156                return -EIO;
3157        }
3158
3159        /* PF and VF have different interrupt handling mechanisms */
3160        if (hw->mac.type == fm10k_mac_pf) {
3161                /* register callback func to eal lib */
3162                rte_intr_callback_register(intr_handle,
3163                        fm10k_dev_interrupt_handler_pf, (void *)dev);
3164
3165                /* enable MISC interrupt */
3166                fm10k_dev_enable_intr_pf(dev);
3167        } else { /* VF */
3168                rte_intr_callback_register(intr_handle,
3169                        fm10k_dev_interrupt_handler_vf, (void *)dev);
3170
3171                fm10k_dev_enable_intr_vf(dev);
3172        }
3173
3174        /* Enable intr after callback registered */
3175        rte_intr_enable(intr_handle);
3176
3177        hw->mac.ops.update_int_moderator(hw);
3178
3179        /* Make sure Switch Manager is ready before going forward. */
3180        if (hw->mac.type == fm10k_mac_pf) {
3181                bool switch_ready = false;
3182
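                /* Poll the switch manager state up to
                 * MAX_QUERY_SWITCH_STATE_TIMES times, waiting
                 * WAIT_SWITCH_MSG_US between attempts (about one second in
                 * total with the default values).
                 */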
3183                for (i = 0; i < MAX_QUERY_SWITCH_STATE_TIMES; i++) {
3184                        fm10k_mbx_lock(hw);
3185                        hw->mac.ops.get_host_state(hw, &switch_ready);
3186                        fm10k_mbx_unlock(hw);
3187                        if (switch_ready == true)
3188                                break;
3189                        /* Delay some time to acquire async LPORT_MAP info. */
3190                        rte_delay_us(WAIT_SWITCH_MSG_US);
3191                }
3192
3193                if (switch_ready == false) {
3194                        PMD_INIT_LOG(ERR, "switch is not ready");
3195                        return -1;
3196                }
3197        }
3198
3199        /*
3200         * The calls below operate on the mailbox, so take the lock to avoid
3201         * racing with the interrupt handler. Operations on the mailbox FIFO
3202         * raise an interrupt to the PF/SM, whose interrupt handler responds
3203         * and generates an interrupt back to our side, which in turn touches
3204         * the mailbox FIFO.
3205         */
3206        fm10k_mbx_lock(hw);
3207        /* Enable port first */
3208        hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
3209                                        MAX_LPORT_NUM, true);
3210
3211        /* Set unicast mode by default. The application can switch to another
3212         * mode later through the corresponding API functions.
3213         */
3214        hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
3215                                        FM10K_XCAST_MODE_NONE);
3216
3217        fm10k_mbx_unlock(hw);
3218
3219        /* Make sure default VID is ready before going forward. */
3220        if (hw->mac.type == fm10k_mac_pf) {
3221                for (i = 0; i < MAX_QUERY_SWITCH_STATE_TIMES; i++) {
3222                        if (hw->mac.default_vid)
3223                                break;
3224                        /* Delay some time to acquire async port VLAN info. */
3225                        rte_delay_us(WAIT_SWITCH_MSG_US);
3226                }
3227
3228                if (!hw->mac.default_vid) {
3229                        PMD_INIT_LOG(ERR, "default VID is not ready");
3230                        return -1;
3231                }
3232        }
3233
3234        /* Add default mac address */
3235        fm10k_MAC_filter_set(dev, hw->mac.addr, true,
3236                MAIN_VSI_POOL_NUMBER);
3237
3238        return 0;
3239}
3240
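/*
 * Device uninit: a no-op in secondary processes; the primary process tears
 * the port down through fm10k_dev_close().
 */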
3241static int
3242eth_fm10k_dev_uninit(struct rte_eth_dev *dev)
3243{
3244        PMD_INIT_FUNC_TRACE();
3245
3246        /* only uninitialize in the primary process */
3247        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
3248                return 0;
3249
3250        /* safe to close dev here */
3251        fm10k_dev_close(dev);
3252
3253        return 0;
3254}
3255
3256static int eth_fm10k_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
3257        struct rte_pci_device *pci_dev)
3258{
3259        return rte_eth_dev_pci_generic_probe(pci_dev,
3260                sizeof(struct fm10k_adapter), eth_fm10k_dev_init);
3261}
3262
3263static int eth_fm10k_pci_remove(struct rte_pci_device *pci_dev)
3264{
3265        return rte_eth_dev_pci_generic_remove(pci_dev, eth_fm10k_dev_uninit);
3266}
3267
3268/*
3269 * The set of PCI devices this driver supports. This driver will enable both PF
3270 * and SRIOV-VF devices.
3271 */
3272static const struct rte_pci_id pci_id_fm10k_map[] = {
3273        { RTE_PCI_DEVICE(FM10K_INTEL_VENDOR_ID, FM10K_DEV_ID_PF) },
3274        { RTE_PCI_DEVICE(FM10K_INTEL_VENDOR_ID, FM10K_DEV_ID_SDI_FM10420_QDA2) },
3275        { RTE_PCI_DEVICE(FM10K_INTEL_VENDOR_ID, FM10K_DEV_ID_VF) },
3276        { .vendor_id = 0, /* sentinel */ },
3277};
3278
3279static struct rte_pci_driver rte_pmd_fm10k = {
3280        .id_table = pci_id_fm10k_map,
3281        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
3282        .probe = eth_fm10k_pci_probe,
3283        .remove = eth_fm10k_pci_remove,
3284};
3285
3286RTE_PMD_REGISTER_PCI(net_fm10k, rte_pmd_fm10k);
3287RTE_PMD_REGISTER_PCI_TABLE(net_fm10k, pci_id_fm10k_map);
3288RTE_PMD_REGISTER_KMOD_DEP(net_fm10k, "* igb_uio | uio_pci_generic | vfio-pci");
3289RTE_LOG_REGISTER(fm10k_logtype_init, pmd.net.fm10k.init, NOTICE);
3290RTE_LOG_REGISTER(fm10k_logtype_driver, pmd.net.fm10k.driver, NOTICE);
3291#ifdef RTE_LIBRTE_FM10K_DEBUG_RX
3292RTE_LOG_REGISTER(fm10k_logtype_rx, pmd.net.fm10k.rx, DEBUG);
3293#endif
3294#ifdef RTE_LIBRTE_FM10K_DEBUG_TX
3295RTE_LOG_REGISTER(fm10k_logtype_tx, pmd.net.fm10k.tx, DEBUG);
3296#endif
3297#ifdef RTE_LIBRTE_FM10K_DEBUG_TX_FREE
3298RTE_LOG_REGISTER(fm10k_logtype_tx_free, pmd.net.fm10k.tx_free, DEBUG);
3299#endif
3300