dpdk/drivers/net/iavf/iavf_ethdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <rte_byteorder.h>
#include <rte_common.h>

#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_eal.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_dev.h>

#include "iavf.h"
#include "iavf_rxtx.h"
#include "iavf_generic_flow.h"
#include "rte_pmd_iavf.h"

/* devargs */
#define IAVF_PROTO_XTR_ARG         "proto_xtr"

static const char * const iavf_valid_args[] = {
	IAVF_PROTO_XTR_ARG,
	NULL
};

static const struct rte_mbuf_dynfield iavf_proto_xtr_metadata_param = {
	.name = "intel_pmd_dynfield_proto_xtr_metadata",
	.size = sizeof(uint32_t),
	.align = __alignof__(uint32_t),
	.flags = 0,
};

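/* Per protocol extraction type: the mbuf dynflag registration
 * parameters and a pointer to the matching flag mask variable
 * (exported via rte_pmd_iavf.h).
 */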
struct iavf_proto_xtr_ol {
	const struct rte_mbuf_dynflag param;
	uint64_t *ol_flag;
	bool required;
};

static struct iavf_proto_xtr_ol iavf_proto_xtr_params[] = {
	[IAVF_PROTO_XTR_VLAN] = {
		.param = { .name = "intel_pmd_dynflag_proto_xtr_vlan" },
		.ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_vlan_mask },
	[IAVF_PROTO_XTR_IPV4] = {
		.param = { .name = "intel_pmd_dynflag_proto_xtr_ipv4" },
		.ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_ipv4_mask },
	[IAVF_PROTO_XTR_IPV6] = {
		.param = { .name = "intel_pmd_dynflag_proto_xtr_ipv6" },
		.ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask },
	[IAVF_PROTO_XTR_IPV6_FLOW] = {
		.param = { .name = "intel_pmd_dynflag_proto_xtr_ipv6_flow" },
		.ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask },
	[IAVF_PROTO_XTR_TCP] = {
		.param = { .name = "intel_pmd_dynflag_proto_xtr_tcp" },
		.ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_tcp_mask },
	[IAVF_PROTO_XTR_IP_OFFSET] = {
		.param = { .name = "intel_pmd_dynflag_proto_xtr_ip_offset" },
		.ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask },
};

static int iavf_dev_configure(struct rte_eth_dev *dev);
static int iavf_dev_start(struct rte_eth_dev *dev);
static int iavf_dev_stop(struct rte_eth_dev *dev);
static int iavf_dev_close(struct rte_eth_dev *dev);
static int iavf_dev_reset(struct rte_eth_dev *dev);
static int iavf_dev_info_get(struct rte_eth_dev *dev,
			     struct rte_eth_dev_info *dev_info);
static const uint32_t *iavf_dev_supported_ptypes_get(struct rte_eth_dev *dev);
static int iavf_dev_stats_get(struct rte_eth_dev *dev,
			     struct rte_eth_stats *stats);
static int iavf_dev_stats_reset(struct rte_eth_dev *dev);
static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
				 struct rte_eth_xstat *xstats, unsigned int n);
static int iavf_dev_xstats_get_names(struct rte_eth_dev *dev,
				       struct rte_eth_xstat_name *xstats_names,
				       unsigned int limit);
static int iavf_dev_promiscuous_enable(struct rte_eth_dev *dev);
static int iavf_dev_promiscuous_disable(struct rte_eth_dev *dev);
static int iavf_dev_allmulticast_enable(struct rte_eth_dev *dev);
static int iavf_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int iavf_dev_add_mac_addr(struct rte_eth_dev *dev,
				struct rte_ether_addr *addr,
				uint32_t index,
				uint32_t pool);
static void iavf_dev_del_mac_addr(struct rte_eth_dev *dev, uint32_t index);
static int iavf_dev_vlan_filter_set(struct rte_eth_dev *dev,
				   uint16_t vlan_id, int on);
static int iavf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static int iavf_dev_rss_reta_update(struct rte_eth_dev *dev,
				   struct rte_eth_rss_reta_entry64 *reta_conf,
				   uint16_t reta_size);
static int iavf_dev_rss_reta_query(struct rte_eth_dev *dev,
				  struct rte_eth_rss_reta_entry64 *reta_conf,
				  uint16_t reta_size);
static int iavf_dev_rss_hash_update(struct rte_eth_dev *dev,
				   struct rte_eth_rss_conf *rss_conf);
static int iavf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
				     struct rte_eth_rss_conf *rss_conf);
static int iavf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int iavf_dev_set_default_mac_addr(struct rte_eth_dev *dev,
					 struct rte_ether_addr *mac_addr);
static int iavf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
					uint16_t queue_id);
static int iavf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
					 uint16_t queue_id);
static int iavf_dev_flow_ops_get(struct rte_eth_dev *dev,
				 const struct rte_flow_ops **ops);
static int iavf_set_mc_addr_list(struct rte_eth_dev *dev,
			struct rte_ether_addr *mc_addrs,
			uint32_t mc_addrs_num);
static int iavf_tm_ops_get(struct rte_eth_dev *dev __rte_unused, void *arg);

static const struct rte_pci_id pci_id_iavf_map[] = {
	{ RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_ADAPTIVE_VF) },
	{ RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_VF) },
	{ RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_VF_HV) },
	{ RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_X722_VF) },
	{ RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_X722_A0_VF) },
	{ .vendor_id = 0, /* sentinel */ },
};

struct rte_iavf_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};

static const struct rte_iavf_xstats_name_off rte_iavf_stats_strings[] = {
	{"rx_bytes", offsetof(struct iavf_eth_stats, rx_bytes)},
	{"rx_unicast_packets", offsetof(struct iavf_eth_stats, rx_unicast)},
	{"rx_multicast_packets", offsetof(struct iavf_eth_stats, rx_multicast)},
	{"rx_broadcast_packets", offsetof(struct iavf_eth_stats, rx_broadcast)},
	{"rx_dropped_packets", offsetof(struct iavf_eth_stats, rx_discards)},
	{"rx_unknown_protocol_packets", offsetof(struct iavf_eth_stats,
		rx_unknown_protocol)},
	{"tx_bytes", offsetof(struct iavf_eth_stats, tx_bytes)},
	{"tx_unicast_packets", offsetof(struct iavf_eth_stats, tx_unicast)},
	{"tx_multicast_packets", offsetof(struct iavf_eth_stats, tx_multicast)},
	{"tx_broadcast_packets", offsetof(struct iavf_eth_stats, tx_broadcast)},
	{"tx_dropped_packets", offsetof(struct iavf_eth_stats, tx_discards)},
	{"tx_error_packets", offsetof(struct iavf_eth_stats, tx_errors)},
};

#define IAVF_NB_XSTATS (sizeof(rte_iavf_stats_strings) / \
		sizeof(rte_iavf_stats_strings[0]))

static const struct eth_dev_ops iavf_eth_dev_ops = {
	.dev_configure              = iavf_dev_configure,
	.dev_start                  = iavf_dev_start,
	.dev_stop                   = iavf_dev_stop,
	.dev_close                  = iavf_dev_close,
	.dev_reset                  = iavf_dev_reset,
	.dev_infos_get              = iavf_dev_info_get,
	.dev_supported_ptypes_get   = iavf_dev_supported_ptypes_get,
	.link_update                = iavf_dev_link_update,
	.stats_get                  = iavf_dev_stats_get,
	.stats_reset                = iavf_dev_stats_reset,
	.xstats_get                 = iavf_dev_xstats_get,
	.xstats_get_names           = iavf_dev_xstats_get_names,
	.xstats_reset               = iavf_dev_stats_reset,
	.promiscuous_enable         = iavf_dev_promiscuous_enable,
	.promiscuous_disable        = iavf_dev_promiscuous_disable,
	.allmulticast_enable        = iavf_dev_allmulticast_enable,
	.allmulticast_disable       = iavf_dev_allmulticast_disable,
	.mac_addr_add               = iavf_dev_add_mac_addr,
	.mac_addr_remove            = iavf_dev_del_mac_addr,
	.set_mc_addr_list           = iavf_set_mc_addr_list,
	.vlan_filter_set            = iavf_dev_vlan_filter_set,
	.vlan_offload_set           = iavf_dev_vlan_offload_set,
	.rx_queue_start             = iavf_dev_rx_queue_start,
	.rx_queue_stop              = iavf_dev_rx_queue_stop,
	.tx_queue_start             = iavf_dev_tx_queue_start,
	.tx_queue_stop              = iavf_dev_tx_queue_stop,
	.rx_queue_setup             = iavf_dev_rx_queue_setup,
	.rx_queue_release           = iavf_dev_rx_queue_release,
	.tx_queue_setup             = iavf_dev_tx_queue_setup,
	.tx_queue_release           = iavf_dev_tx_queue_release,
	.mac_addr_set               = iavf_dev_set_default_mac_addr,
	.reta_update                = iavf_dev_rss_reta_update,
	.reta_query                 = iavf_dev_rss_reta_query,
	.rss_hash_update            = iavf_dev_rss_hash_update,
	.rss_hash_conf_get          = iavf_dev_rss_hash_conf_get,
	.rxq_info_get               = iavf_dev_rxq_info_get,
	.txq_info_get               = iavf_dev_txq_info_get,
	.mtu_set                    = iavf_dev_mtu_set,
	.rx_queue_intr_enable       = iavf_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable      = iavf_dev_rx_queue_intr_disable,
	.flow_ops_get               = iavf_dev_flow_ops_get,
	.tx_done_cleanup            = iavf_dev_tx_done_cleanup,
	.get_monitor_addr           = iavf_get_monitor_addr,
	.tm_ops_get                 = iavf_tm_ops_get,
};

static int
iavf_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
			void *arg)
{
	if (!arg)
		return -EINVAL;

	*(const void **)arg = &iavf_tm_ops;

	return 0;
}

static int
iavf_set_mc_addr_list(struct rte_eth_dev *dev,
			struct rte_ether_addr *mc_addrs,
			uint32_t mc_addrs_num)
{
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	int err, ret;

	if (mc_addrs_num > IAVF_NUM_MACADDR_MAX) {
		PMD_DRV_LOG(ERR,
			    "can't add more than %u multicast addresses",
			    (uint32_t)IAVF_NUM_MACADDR_MAX);
		return -EINVAL;
	}

	/* flush previous addresses */
	err = iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num,
					false);
	if (err)
		return err;

	/* add new ones */
	err = iavf_add_del_mc_addr_list(adapter, mc_addrs, mc_addrs_num, true);

	if (err) {
		/* If adding the new address list fails, restore the previous
		 * addresses.
		 */
		ret = iavf_add_del_mc_addr_list(adapter, vf->mc_addrs,
						vf->mc_addrs_num, true);
		if (ret)
			return ret;
	} else {
		vf->mc_addrs_num = mc_addrs_num;
		memcpy(vf->mc_addrs,
		       mc_addrs, mc_addrs_num * sizeof(*mc_addrs));
	}

	return err;
}

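/* Translate the ethdev rss_hf bit-field into the AVF HENA (hash enable)
 * bit-field using the map_hena_rss table below, program it through
 * virtchnl, and record the accepted subset in vf->rss_hf. Failures are
 * only logged, since RSS offload type configuration is optional for a VF.
 */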
static void
iavf_config_rss_hf(struct iavf_adapter *adapter, uint64_t rss_hf)
{
	static const uint64_t map_hena_rss[] = {
		/* IPv4 */
		[IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
				ETH_RSS_NONFRAG_IPV4_UDP,
		[IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
				ETH_RSS_NONFRAG_IPV4_UDP,
		[IAVF_FILTER_PCTYPE_NONF_IPV4_UDP] =
				ETH_RSS_NONFRAG_IPV4_UDP,
		[IAVF_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
				ETH_RSS_NONFRAG_IPV4_TCP,
		[IAVF_FILTER_PCTYPE_NONF_IPV4_TCP] =
				ETH_RSS_NONFRAG_IPV4_TCP,
		[IAVF_FILTER_PCTYPE_NONF_IPV4_SCTP] =
				ETH_RSS_NONFRAG_IPV4_SCTP,
		[IAVF_FILTER_PCTYPE_NONF_IPV4_OTHER] =
				ETH_RSS_NONFRAG_IPV4_OTHER,
		[IAVF_FILTER_PCTYPE_FRAG_IPV4] = ETH_RSS_FRAG_IPV4,

		/* IPv6 */
		[IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
				ETH_RSS_NONFRAG_IPV6_UDP,
		[IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
				ETH_RSS_NONFRAG_IPV6_UDP,
		[IAVF_FILTER_PCTYPE_NONF_IPV6_UDP] =
				ETH_RSS_NONFRAG_IPV6_UDP,
		[IAVF_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
				ETH_RSS_NONFRAG_IPV6_TCP,
		[IAVF_FILTER_PCTYPE_NONF_IPV6_TCP] =
				ETH_RSS_NONFRAG_IPV6_TCP,
		[IAVF_FILTER_PCTYPE_NONF_IPV6_SCTP] =
				ETH_RSS_NONFRAG_IPV6_SCTP,
		[IAVF_FILTER_PCTYPE_NONF_IPV6_OTHER] =
				ETH_RSS_NONFRAG_IPV6_OTHER,
		[IAVF_FILTER_PCTYPE_FRAG_IPV6] = ETH_RSS_FRAG_IPV6,

		/* L2 Payload */
		[IAVF_FILTER_PCTYPE_L2_PAYLOAD] = ETH_RSS_L2_PAYLOAD
	};

	const uint64_t ipv4_rss = ETH_RSS_NONFRAG_IPV4_UDP |
				  ETH_RSS_NONFRAG_IPV4_TCP |
				  ETH_RSS_NONFRAG_IPV4_SCTP |
				  ETH_RSS_NONFRAG_IPV4_OTHER |
				  ETH_RSS_FRAG_IPV4;

	const uint64_t ipv6_rss = ETH_RSS_NONFRAG_IPV6_UDP |
				  ETH_RSS_NONFRAG_IPV6_TCP |
				  ETH_RSS_NONFRAG_IPV6_SCTP |
				  ETH_RSS_NONFRAG_IPV6_OTHER |
				  ETH_RSS_FRAG_IPV6;

	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
	uint64_t caps = 0, hena = 0, valid_rss_hf = 0;
	uint32_t i;
	int ret;

	ret = iavf_get_hena_caps(adapter, &caps);
	if (ret) {
		/**
		 * RSS offload type configuration is not a necessary feature
		 * for a VF, so just log a warning and return.
		 */
		PMD_DRV_LOG(WARNING,
			    "failed to get RSS offload type caps, ret: %d", ret);
		return;
	}

	/**
	 * ETH_RSS_IPV4 and ETH_RSS_IPV6 can be considered generalizations
	 * of all other IPv4 and IPv6 RSS types.
	 */
	if (rss_hf & ETH_RSS_IPV4)
		rss_hf |= ipv4_rss;

	if (rss_hf & ETH_RSS_IPV6)
		rss_hf |= ipv6_rss;

	RTE_BUILD_BUG_ON(RTE_DIM(map_hena_rss) > sizeof(uint64_t) * CHAR_BIT);

	for (i = 0; i < RTE_DIM(map_hena_rss); i++) {
		uint64_t bit = BIT_ULL(i);

		if ((caps & bit) && (map_hena_rss[i] & rss_hf)) {
			valid_rss_hf |= map_hena_rss[i];
			hena |= bit;
		}
	}

	ret = iavf_set_hena(adapter, hena);
	if (ret) {
		/**
		 * RSS offload type configuration is not a necessary feature
		 * for a VF, so just log a warning and return.
		 */
		PMD_DRV_LOG(WARNING,
			    "failed to set RSS offload types, ret: %d", ret);
		return;
	}

	if (valid_rss_hf & ipv4_rss)
		valid_rss_hf |= rss_hf & ETH_RSS_IPV4;

	if (valid_rss_hf & ipv6_rss)
		valid_rss_hf |= rss_hf & ETH_RSS_IPV6;

	if (rss_hf & ~valid_rss_hf)
		PMD_DRV_LOG(WARNING, "Unsupported rss_hf 0x%" PRIx64,
			    rss_hf & ~valid_rss_hf);

	vf->rss_hf = valid_rss_hf;
}

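/* Initialize RSS from the device configuration: derive the hash key
 * (user supplied or random), fill the lookup table round-robin over the
 * configured Rx queues, and push the key, LUT and hash types to the PF
 * over virtchnl.
 */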
static int
iavf_init_rss(struct iavf_adapter *adapter)
{
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
	struct rte_eth_rss_conf *rss_conf;
	uint16_t i, j, nb_q;
	int ret;

	rss_conf = &adapter->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
	nb_q = RTE_MIN(adapter->eth_dev->data->nb_rx_queues,
		       vf->max_rss_qregion);

	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)) {
		PMD_DRV_LOG(DEBUG, "RSS is not supported");
		return -ENOTSUP;
	}

	/* configure RSS key */
	if (!rss_conf->rss_key) {
		/* generate a random default hash key */
		for (i = 0; i < vf->vf_res->rss_key_size; i++)
			vf->rss_key[i] = (uint8_t)rte_rand();
	} else {
		rte_memcpy(vf->rss_key, rss_conf->rss_key,
			   RTE_MIN(rss_conf->rss_key_len,
				   vf->vf_res->rss_key_size));
	}

	/* init the RSS LUT round-robin over the Rx queues */
	for (i = 0, j = 0; i < vf->vf_res->rss_lut_size; i++, j++) {
		if (j >= nb_q)
			j = 0;
		vf->rss_lut[i] = j;
	}
	/* send virtchnl ops to configure RSS */
	ret = iavf_configure_rss_lut(adapter);
	if (ret)
		return ret;
	ret = iavf_configure_rss_key(adapter);
	if (ret)
		return ret;

	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF) {
		/* Set RSS hash configuration based on rss_conf->rss_hf. */
		ret = iavf_rss_hash_set(adapter, rss_conf->rss_hf, true);
		if (ret) {
			PMD_DRV_LOG(ERR, "failed to set default RSS");
			return ret;
		}
	} else {
		iavf_config_rss_hf(adapter, rss_conf->rss_hf);
	}

	return 0;
}

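/* Ask the PF for a new number of queue pairs, then reset the VF so that
 * the new queue configuration takes effect.
 */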
static int
iavf_queues_req_reset(struct rte_eth_dev *dev, uint16_t num)
{
	struct iavf_adapter *ad =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
	int ret;

	ret = iavf_request_queues(ad, num);
	if (ret) {
		PMD_DRV_LOG(ERR, "request queues from PF failed");
		return ret;
	}
	PMD_DRV_LOG(INFO, "change queue pairs from %u to %u",
			vf->vsi_res->num_queue_pairs, num);

	ret = iavf_dev_reset(dev);
	if (ret) {
		PMD_DRV_LOG(ERR, "VF reset failed");
		return ret;
	}

	return 0;
}

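/* Sync Tx VLAN insertion with the configured offload flags; only
 * meaningful when the PF supports the VLAN V2 virtchnl capability.
 */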
static int
iavf_dev_vlan_insert_set(struct rte_eth_dev *dev)
{
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
	bool enable;

	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2))
		return 0;

	enable = !!(dev->data->dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_VLAN_INSERT);
	iavf_config_vlan_insert_v2(adapter, enable);

	return 0;
}

static int
iavf_dev_init_vlan(struct rte_eth_dev *dev)
{
	int err;

	err = iavf_dev_vlan_offload_set(dev,
					ETH_VLAN_STRIP_MASK |
					ETH_QINQ_STRIP_MASK |
					ETH_VLAN_FILTER_MASK |
					ETH_VLAN_EXTEND_MASK);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to update VLAN offload");
		return err;
	}

	err = iavf_dev_vlan_insert_set(dev);
	if (err)
		PMD_DRV_LOG(ERR, "Failed to update VLAN insertion");

	return err;
}

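/* dev_configure callback: validates the queue counts (including the
 * large-VF path, which needs more than IAVF_MAX_NUM_QUEUES_DFLT queue
 * pairs), initializes VLAN offloads and, when the PF supports it, RSS.
 */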
static int
iavf_dev_configure(struct rte_eth_dev *dev)
{
	struct iavf_adapter *ad =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
	uint16_t num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
		dev->data->nb_tx_queues);
	int ret;

	ad->rx_bulk_alloc_allowed = true;
	/* Initialize to TRUE. If any Rx queue doesn't meet the vector
	 * Rx/Tx preconditions, it will be reset.
	 */
	ad->rx_vec_allowed = true;
	ad->tx_vec_allowed = true;

	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;

	/* Large VF setting */
	if (num_queue_pairs > IAVF_MAX_NUM_QUEUES_DFLT) {
		if (!(vf->vf_res->vf_cap_flags &
				VIRTCHNL_VF_LARGE_NUM_QPAIRS)) {
			PMD_DRV_LOG(ERR, "large VF is not supported");
			return -1;
		}

		if (num_queue_pairs > IAVF_MAX_NUM_QUEUES_LV) {
			PMD_DRV_LOG(ERR, "queue pairs number cannot be larger than %u",
				IAVF_MAX_NUM_QUEUES_LV);
			return -1;
		}

		ret = iavf_queues_req_reset(dev, num_queue_pairs);
		if (ret)
			return ret;

		ret = iavf_get_max_rss_queue_region(ad);
		if (ret) {
			PMD_INIT_LOG(ERR, "get max RSS queue region failed");
			return ret;
		}

		vf->lv_enabled = true;
	} else {
		/* If large VF is already enabled, disable it and release the
		 * redundant queue resources. Otherwise, if there are not
		 * enough queue pairs, request more from the PF.
		 */
		if (vf->lv_enabled ||
		    num_queue_pairs > vf->vsi_res->num_queue_pairs) {
			ret = iavf_queues_req_reset(dev, num_queue_pairs);
			if (ret)
				return ret;

			vf->lv_enabled = false;
		}
		/* if large VF is not required, use the default RSS queue region */
		vf->max_rss_qregion = IAVF_MAX_NUM_QUEUES_DFLT;
	}

	ret = iavf_dev_init_vlan(dev);
	if (ret)
		PMD_DRV_LOG(ERR, "configure VLAN failed: %d", ret);

	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		if (iavf_init_rss(ad) != 0) {
			PMD_DRV_LOG(ERR, "configure RSS failed");
			return -1;
		}
	}
	return 0;
}

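/* Program one Rx queue: validate the maximum packet length against the
 * jumbo frame setting, decide whether scattered Rx is needed, and write
 * the initial tail pointer.
 */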
static int
iavf_init_rxq(struct rte_eth_dev *dev, struct iavf_rx_queue *rxq)
{
	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_dev_data *dev_data = dev->data;
	uint16_t buf_size, max_pkt_len, len;

	buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;

	/* Calculate the maximum packet length allowed */
	len = rxq->rx_buf_len * IAVF_MAX_CHAINED_RX_BUFFERS;
	max_pkt_len = RTE_MIN(len, dev->data->dev_conf.rxmode.max_rx_pkt_len);

	/* Check if the jumbo frame and maximum packet length are set
	 * correctly.
	 */
	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		if (max_pkt_len <= IAVF_ETH_MAX_LEN ||
		    max_pkt_len > IAVF_FRAME_SIZE_MAX) {
			PMD_DRV_LOG(ERR, "maximum packet length must be "
				    "larger than %u and smaller than %u, "
				    "as jumbo frame is enabled",
				    (uint32_t)IAVF_ETH_MAX_LEN,
				    (uint32_t)IAVF_FRAME_SIZE_MAX);
			return -EINVAL;
		}
	} else {
		if (max_pkt_len < RTE_ETHER_MIN_LEN ||
		    max_pkt_len > IAVF_ETH_MAX_LEN) {
			PMD_DRV_LOG(ERR, "maximum packet length must be "
				    "larger than %u and smaller than %u, "
				    "as jumbo frame is disabled",
				    (uint32_t)RTE_ETHER_MIN_LEN,
				    (uint32_t)IAVF_ETH_MAX_LEN);
			return -EINVAL;
		}
	}

	rxq->max_pkt_len = max_pkt_len;
	if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
	    rxq->max_pkt_len > buf_size) {
		dev_data->scattered_rx = 1;
	}
	IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
	IAVF_WRITE_FLUSH(hw);

	return 0;
}

static int
iavf_init_queues(struct rte_eth_dev *dev)
{
	struct iavf_rx_queue **rxq =
		(struct iavf_rx_queue **)dev->data->rx_queues;
	int i, ret = IAVF_SUCCESS;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		if (!rxq[i] || !rxq[i]->q_set)
			continue;
		ret = iavf_init_rxq(dev, rxq[i]);
		if (ret != IAVF_SUCCESS)
			break;
	}
	/* Choose the Rx/Tx burst functions (vector/scattered/single-segment)
	 * according to the configured parameters.
	 */
	iavf_set_rx_function(dev);
	iavf_set_tx_function(dev);

	return ret;
}

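/* Build the queue/vector map and program the interrupt registers.
 * Three cases: no Rx interrupts (single vector, WB_ON_ITR when
 * available), a single shared vector, or multiple vectors starting at
 * IAVF_RX_VEC_START. Large VFs send the map to the PF in chunks of
 * IAVF_IRQ_MAP_NUM_PER_BUF entries.
 */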
static int iavf_config_rx_queues_irqs(struct rte_eth_dev *dev,
				     struct rte_intr_handle *intr_handle)
{
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
	struct iavf_qv_map *qv_map;
	uint16_t interval, i;
	int vec;

	if (rte_intr_cap_multiple(intr_handle) &&
	    dev->data->dev_conf.intr_conf.rxq) {
		if (rte_intr_efd_enable(intr_handle, dev->data->nb_rx_queues))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec =
			rte_zmalloc("intr_vec",
				    dev->data->nb_rx_queues * sizeof(int), 0);
		if (!intr_handle->intr_vec) {
			PMD_DRV_LOG(ERR, "Failed to allocate %d rx intr_vec",
				    dev->data->nb_rx_queues);
			return -1;
		}
	}

	qv_map = rte_zmalloc("qv_map",
		dev->data->nb_rx_queues * sizeof(struct iavf_qv_map), 0);
	if (!qv_map) {
		PMD_DRV_LOG(ERR, "Failed to allocate %d queue-vector map",
				dev->data->nb_rx_queues);
		return -1;
	}

	if (!dev->data->dev_conf.intr_conf.rxq ||
	    !rte_intr_dp_is_en(intr_handle)) {
		/* Rx interrupt is disabled; map an interrupt only for
		 * descriptor write-back.
		 */
		vf->nb_msix = 1;
		if (vf->vf_res->vf_cap_flags &
		    VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
			/* If WB_ON_ITR is supported, enable it */
			vf->msix_base = IAVF_RX_VEC_START;
			/* Set the ITR for index zero to 2us, to make sure
			 * that we leave time for aggregation to occur, but
			 * don't increase latency dramatically.
			 */
			IAVF_WRITE_REG(hw,
				       IAVF_VFINT_DYN_CTLN1(vf->msix_base - 1),
				       (0 << IAVF_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
				       IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK |
				       (2UL << IAVF_VFINT_DYN_CTLN1_INTERVAL_SHIFT));
			/* debug - check for success! the return value
			 * should be 2, offset is 0x2800
			 */
			/* IAVF_READ_REG(hw, IAVF_VFINT_ITRN1(0, 0)); */
		} else {
			/* Without the WB_ON_ITR offload flag, an interrupt
			 * must be set for descriptor write-back.
			 */
			vf->msix_base = IAVF_MISC_VEC_ID;

			/* set ITR to max */
			interval = iavf_calc_itr_interval(
					IAVF_QUEUE_ITR_INTERVAL_MAX);
			IAVF_WRITE_REG(hw, IAVF_VFINT_DYN_CTL01,
				       IAVF_VFINT_DYN_CTL01_INTENA_MASK |
				       (IAVF_ITR_INDEX_DEFAULT <<
					IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT) |
				       (interval <<
					IAVF_VFINT_DYN_CTL01_INTERVAL_SHIFT));
		}
		IAVF_WRITE_FLUSH(hw);
		/* map all queues to the same interrupt */
		for (i = 0; i < dev->data->nb_rx_queues; i++) {
			qv_map[i].queue_id = i;
			qv_map[i].vector_id = vf->msix_base;
		}
		vf->qv_map = qv_map;
	} else {
		if (!rte_intr_allow_others(intr_handle)) {
			vf->nb_msix = 1;
			vf->msix_base = IAVF_MISC_VEC_ID;
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				qv_map[i].queue_id = i;
				qv_map[i].vector_id = vf->msix_base;
				intr_handle->intr_vec[i] = IAVF_MISC_VEC_ID;
			}
			vf->qv_map = qv_map;
			PMD_DRV_LOG(DEBUG,
				    "vector %u is mapped to all Rx queues",
				    vf->msix_base);
		} else {
			/* If Rx interrupts are required and multiple
			 * interrupts are available, vectors start from 1.
			 */
			vf->nb_msix = RTE_MIN(intr_handle->nb_efd,
				 (uint16_t)(vf->vf_res->max_vectors - 1));
			vf->msix_base = IAVF_RX_VEC_START;
			vec = IAVF_RX_VEC_START;
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				qv_map[i].queue_id = i;
				qv_map[i].vector_id = vec;
				intr_handle->intr_vec[i] = vec++;
				if (vec >= vf->nb_msix + IAVF_RX_VEC_START)
					vec = IAVF_RX_VEC_START;
			}
			vf->qv_map = qv_map;
			PMD_DRV_LOG(DEBUG,
				    "%u vectors are mapped to %u Rx queues",
				    vf->nb_msix, dev->data->nb_rx_queues);
		}
	}

	if (!vf->lv_enabled) {
		if (iavf_config_irq_map(adapter)) {
			PMD_DRV_LOG(ERR, "config interrupt mapping failed");
			return -1;
		}
	} else {
		uint16_t num_qv_maps = dev->data->nb_rx_queues;
		uint16_t index = 0;

		while (num_qv_maps > IAVF_IRQ_MAP_NUM_PER_BUF) {
			if (iavf_config_irq_map_lv(adapter,
					IAVF_IRQ_MAP_NUM_PER_BUF, index)) {
				PMD_DRV_LOG(ERR, "config interrupt mapping for large VF failed");
				return -1;
			}
			num_qv_maps -= IAVF_IRQ_MAP_NUM_PER_BUF;
			index += IAVF_IRQ_MAP_NUM_PER_BUF;
		}

		if (iavf_config_irq_map_lv(adapter, num_qv_maps, index)) {
			PMD_DRV_LOG(ERR, "config interrupt mapping for large VF failed");
			return -1;
		}
	}
	return 0;
}

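/* Start all Tx queues, then all Rx queues, skipping those marked for
 * deferred start.
 */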
static int
iavf_start_queues(struct rte_eth_dev *dev)
{
	struct iavf_rx_queue *rxq;
	struct iavf_tx_queue *txq;
	int i;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		if (txq->tx_deferred_start)
			continue;
		if (iavf_dev_tx_queue_start(dev, i) != 0) {
			PMD_DRV_LOG(ERR, "Failed to start Tx queue %u", i);
			return -1;
		}
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		if (rxq->rx_deferred_start)
			continue;
		if (iavf_dev_rx_queue_start(dev, i) != 0) {
			PMD_DRV_LOG(ERR, "Failed to start Rx queue %u", i);
			return -1;
		}
	}

	return 0;
}

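/* dev_start callback: initializes queues, sends the (possibly chunked)
 * queue configuration and IRQ mapping to the PF, restores unicast and
 * multicast filters, then enables the queues.
 */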
static int
iavf_dev_start(struct rte_eth_dev *dev)
{
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct rte_intr_handle *intr_handle = dev->intr_handle;
	uint16_t num_queue_pairs;
	uint16_t index = 0;

	PMD_INIT_FUNC_TRACE();

	adapter->stopped = 0;

	vf->max_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
	vf->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
				      dev->data->nb_tx_queues);
	num_queue_pairs = vf->num_queue_pairs;

	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS)
		if (iavf_get_qos_cap(adapter)) {
			PMD_INIT_LOG(ERR, "Failed to get QoS capability");
			return -1;
		}

	if (iavf_init_queues(dev) != 0) {
		PMD_DRV_LOG(ERR, "failed to initialize queues");
		return -1;
	}

	/* If needed, send the configure-queues message multiple times to
	 * keep the adminq buffer length under the 4K limitation.
	 */
	while (num_queue_pairs > IAVF_CFG_Q_NUM_PER_BUF) {
		if (iavf_configure_queues(adapter,
				IAVF_CFG_Q_NUM_PER_BUF, index) != 0) {
			PMD_DRV_LOG(ERR, "configure queues failed");
			goto err_queue;
		}
		num_queue_pairs -= IAVF_CFG_Q_NUM_PER_BUF;
		index += IAVF_CFG_Q_NUM_PER_BUF;
	}

	if (iavf_configure_queues(adapter, num_queue_pairs, index) != 0) {
		PMD_DRV_LOG(ERR, "configure queues failed");
		goto err_queue;
	}

	if (iavf_config_rx_queues_irqs(dev, intr_handle) != 0) {
		PMD_DRV_LOG(ERR, "configure irq failed");
		goto err_queue;
	}
	/* Re-enable interrupts, since the efd assignment may have changed. */
	if (dev->data->dev_conf.intr_conf.rxq != 0) {
		rte_intr_disable(intr_handle);
		rte_intr_enable(intr_handle);
	}

	/* Add all MAC addresses */
	iavf_add_del_all_mac_addr(adapter, true);

	/* Add all multicast addresses */
	iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num,
				  true);

	if (iavf_start_queues(dev) != 0) {
		PMD_DRV_LOG(ERR, "failed to enable queues");
		goto err_mac;
	}

	return 0;

err_mac:
	iavf_add_del_all_mac_addr(adapter, false);
err_queue:
	return -1;
}

static int
iavf_dev_stop(struct rte_eth_dev *dev)
{
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct rte_intr_handle *intr_handle = dev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	if (adapter->stopped == 1)
		return 0;

	iavf_stop_queues(dev);

	/* Disable the interrupt for Rx */
	rte_intr_efd_disable(intr_handle);
	/* Free the Rx interrupt vector mapping */
	if (intr_handle->intr_vec) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}

	/* remove all MAC addresses */
	iavf_add_del_all_mac_addr(adapter, false);

	/* remove all multicast addresses */
	iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num,
				  false);

	adapter->stopped = 1;
	dev->data->dev_started = 0;

	return 0;
}

static int
iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

	dev_info->max_rx_queues = IAVF_MAX_NUM_QUEUES_LV;
	dev_info->max_tx_queues = IAVF_MAX_NUM_QUEUES_LV;
	dev_info->min_rx_bufsize = IAVF_BUF_SIZE_MIN;
	dev_info->max_rx_pktlen = IAVF_FRAME_SIZE_MAX;
	dev_info->max_mtu = dev_info->max_rx_pktlen - IAVF_ETH_OVERHEAD;
	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
	dev_info->hash_key_size = vf->vf_res->rss_key_size;
	dev_info->reta_size = vf->vf_res->rss_lut_size;
	dev_info->flow_type_rss_offloads = IAVF_RSS_OFFLOAD_ALL;
	dev_info->max_mac_addrs = IAVF_NUM_MACADDR_MAX;
	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_QINQ_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_VLAN_FILTER |
		DEV_RX_OFFLOAD_RSS_HASH;

	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_VLAN_INSERT |
		DEV_TX_OFFLOAD_QINQ_INSERT |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_SCTP_CKSUM |
		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_TX_OFFLOAD_TCP_TSO |
		DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
		DEV_TX_OFFLOAD_GRE_TNL_TSO |
		DEV_TX_OFFLOAD_IPIP_TNL_TSO |
		DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
		DEV_TX_OFFLOAD_MULTI_SEGS |
		DEV_TX_OFFLOAD_MBUF_FAST_FREE;

	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_CRC)
		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_KEEP_CRC;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = IAVF_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
		.offloads = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_free_thresh = IAVF_DEFAULT_TX_FREE_THRESH,
		.tx_rs_thresh = IAVF_DEFAULT_TX_RS_THRESH,
		.offloads = 0,
	};

	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = IAVF_MAX_RING_DESC,
		.nb_min = IAVF_MIN_RING_DESC,
		.nb_align = IAVF_ALIGN_RING_DESC,
	};

	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = IAVF_MAX_RING_DESC,
		.nb_min = IAVF_MIN_RING_DESC,
		.nb_align = IAVF_ALIGN_RING_DESC,
	};

	return 0;
}

static const uint32_t *
iavf_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_L4_NONFRAG,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_UNKNOWN
	};
	return ptypes;
}

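/* link_update callback: converts the link speed cached from the PF's
 * LINK_CHANGE virtchnl event into an ETH_SPEED_NUM_* value; no hardware
 * access is made here.
 */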
int
iavf_dev_link_update(struct rte_eth_dev *dev,
		    __rte_unused int wait_to_complete)
{
	struct rte_eth_link new_link;
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

	memset(&new_link, 0, sizeof(new_link));

	/* Only read the link status cached in the VF; it is updated when a
	 * LINK_CHANGE event is received from the PF over virtchnl.
	 */
	switch (vf->link_speed) {
	case 10:
		new_link.link_speed = ETH_SPEED_NUM_10M;
		break;
	case 100:
		new_link.link_speed = ETH_SPEED_NUM_100M;
		break;
	case 1000:
		new_link.link_speed = ETH_SPEED_NUM_1G;
		break;
	case 10000:
		new_link.link_speed = ETH_SPEED_NUM_10G;
		break;
	case 20000:
		new_link.link_speed = ETH_SPEED_NUM_20G;
		break;
	case 25000:
		new_link.link_speed = ETH_SPEED_NUM_25G;
		break;
	case 40000:
		new_link.link_speed = ETH_SPEED_NUM_40G;
		break;
	case 50000:
		new_link.link_speed = ETH_SPEED_NUM_50G;
		break;
	case 100000:
		new_link.link_speed = ETH_SPEED_NUM_100G;
		break;
	default:
		new_link.link_speed = ETH_SPEED_NUM_NONE;
		break;
	}

	new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
	new_link.link_status = vf->link_up ? ETH_LINK_UP :
					     ETH_LINK_DOWN;
	new_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
				ETH_LINK_SPEED_FIXED);

	return rte_eth_linkstatus_set(dev, &new_link);
}

static int
iavf_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);

	return iavf_config_promisc(adapter,
				  true, vf->promisc_multicast_enabled);
}

static int
iavf_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);

	return iavf_config_promisc(adapter,
				  false, vf->promisc_multicast_enabled);
}

static int
iavf_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);

	return iavf_config_promisc(adapter,
				  vf->promisc_unicast_enabled, true);
}

static int
iavf_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);

	return iavf_config_promisc(adapter,
				  vf->promisc_unicast_enabled, false);
}

static int
iavf_dev_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr,
		     __rte_unused uint32_t index,
		     __rte_unused uint32_t pool)
{
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
	int err;

	if (rte_is_zero_ether_addr(addr)) {
		PMD_DRV_LOG(ERR, "Invalid Ethernet Address");
		return -EINVAL;
	}

	err = iavf_add_del_eth_addr(adapter, addr, true, VIRTCHNL_ETHER_ADDR_EXTRA);
	if (err) {
		PMD_DRV_LOG(ERR, "failed to add MAC address");
		return -EIO;
	}

	vf->mac_num++;

	return 0;
}

static void
iavf_dev_del_mac_addr(struct rte_eth_dev *dev, uint32_t index)
{
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
	struct rte_ether_addr *addr;
	int err;

	addr = &dev->data->mac_addrs[index];

	err = iavf_add_del_eth_addr(adapter, addr, false, VIRTCHNL_ETHER_ADDR_EXTRA);
	if (err)
		PMD_DRV_LOG(ERR, "failed to delete MAC address");

	vf->mac_num--;
}

static int
iavf_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
	int err;

	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
		err = iavf_add_del_vlan_v2(adapter, vlan_id, on);
		if (err)
			return -EIO;
		return 0;
	}

	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
		return -ENOTSUP;

	err = iavf_add_del_vlan(adapter, vlan_id, on);
	if (err)
		return -EIO;
	return 0;
}

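/* Replay every VLAN id currently set in the ethdev VLAN filter bitmap
 * (64 ids per uint64_t word) through the VLAN V2 add/del virtchnl op.
 */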
static void
iavf_iterate_vlan_filters_v2(struct rte_eth_dev *dev, bool enable)
{
	struct rte_vlan_filter_conf *vfc = &dev->data->vlan_filter_conf;
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	uint32_t i, j;
	uint64_t ids;

	for (i = 0; i < RTE_DIM(vfc->ids); i++) {
		if (vfc->ids[i] == 0)
			continue;

		ids = vfc->ids[i];
		for (j = 0; ids != 0 && j < 64; j++, ids >>= 1) {
			if (ids & 1)
				iavf_add_del_vlan_v2(adapter,
						     64 * i + j, enable);
		}
	}
}

static int
iavf_dev_vlan_offload_set_v2(struct rte_eth_dev *dev, int mask)
{
	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	bool enable;
	int err;

	if (mask & ETH_VLAN_FILTER_MASK) {
		enable = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER);

		iavf_iterate_vlan_filters_v2(dev, enable);
	}

	if (mask & ETH_VLAN_STRIP_MASK) {
		enable = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);

		err = iavf_config_vlan_strip_v2(adapter, enable);
		/* If not supported, stripping is already disabled by the PF */
		if (err == -ENOTSUP && !enable)
			err = 0;
		if (err)
			return -EIO;
	}

	return 0;
}

static int
iavf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
	int err;

	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2)
		return iavf_dev_vlan_offload_set_v2(dev, mask);

	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
		return -ENOTSUP;

	/* VLAN stripping setting */
	if (mask & ETH_VLAN_STRIP_MASK) {
		/* Enable or disable VLAN stripping */
		if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
			err = iavf_enable_vlan_strip(adapter);
		else
			err = iavf_disable_vlan_strip(adapter);

		if (err)
			return -EIO;
	}
	return 0;
}

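/* reta_update callback: apply the masked entries of reta_conf to the
 * VF's RSS lookup table and push it to the PF, restoring the previous
 * table if the virtchnl op fails.
 */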
static int
iavf_dev_rss_reta_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size)
{
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
	uint8_t *lut;
	uint16_t i, idx, shift;
	int ret;

	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
		return -ENOTSUP;

	if (reta_size != vf->vf_res->rss_lut_size) {
		PMD_DRV_LOG(ERR, "The size of the configured hash lookup "
			"table (%d) doesn't match what the hardware "
			"supports (%d)", reta_size, vf->vf_res->rss_lut_size);
		return -EINVAL;
	}

	lut = rte_zmalloc("rss_lut", reta_size, 0);
	if (!lut) {
		PMD_DRV_LOG(ERR, "No memory can be allocated");
		return -ENOMEM;
	}
	/* keep a copy of the old LUT so it can be restored on failure */
	rte_memcpy(lut, vf->rss_lut, reta_size);

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift))
			vf->rss_lut[i] = reta_conf[idx].reta[shift];
	}

	/* send virtchnl ops to configure RSS */
	ret = iavf_configure_rss_lut(adapter);
	if (ret) /* revert to the saved LUT */
		rte_memcpy(vf->rss_lut, lut, reta_size);
	rte_free(lut);

	return ret;
}

1313static int
1314iavf_dev_rss_reta_query(struct rte_eth_dev *dev,
1315                       struct rte_eth_rss_reta_entry64 *reta_conf,
1316                       uint16_t reta_size)
1317{
1318        struct iavf_adapter *adapter =
1319                IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1320        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1321        uint16_t i, idx, shift;
1322
1323        if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
1324                return -ENOTSUP;
1325
1326        if (reta_size != vf->vf_res->rss_lut_size) {
1327                PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
1328                        "(%d) doesn't match the number of hardware can "
1329                        "support (%d)", reta_size, vf->vf_res->rss_lut_size);
1330                return -EINVAL;
1331        }
1332
1333        for (i = 0; i < reta_size; i++) {
1334                idx = i / RTE_RETA_GROUP_SIZE;
1335                shift = i % RTE_RETA_GROUP_SIZE;
1336                if (reta_conf[idx].mask & (1ULL << shift))
1337                        reta_conf[idx].reta[shift] = vf->rss_lut[i];
1338        }
1339
1340        return 0;
1341}
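
/*
 * Illustrative sketch (not part of the driver): an application rewrites the
 * redirection table through the generic ethdev API, exercising the update
 * handler above. Compiled out behind the hypothetical IAVF_USAGE_EXAMPLES
 * guard; the port id, queue count and 512-entry upper bound are assumptions
 * of the example.
 */
#ifdef IAVF_USAGE_EXAMPLES
static int
example_spread_reta(uint16_t port_id, uint16_t nb_queues)
{
        struct rte_eth_rss_reta_entry64 reta_conf[512 / RTE_RETA_GROUP_SIZE];
        struct rte_eth_dev_info dev_info;
        uint16_t i;
        int ret;

        if (nb_queues == 0)
                return -EINVAL;

        ret = rte_eth_dev_info_get(port_id, &dev_info);
        if (ret != 0)
                return ret;
        if (dev_info.reta_size > 512)
                return -EINVAL;

        memset(reta_conf, 0, sizeof(reta_conf));
        for (i = 0; i < dev_info.reta_size; i++) {
                /* select every entry and point it at queue (i % nb_queues) */
                reta_conf[i / RTE_RETA_GROUP_SIZE].mask |=
                        1ULL << (i % RTE_RETA_GROUP_SIZE);
                reta_conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
                        i % nb_queues;
        }

        return rte_eth_dev_rss_reta_update(port_id, reta_conf,
                                           dev_info.reta_size);
}
#endif /* IAVF_USAGE_EXAMPLES */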

static int
iavf_set_rss_key(struct iavf_adapter *adapter, uint8_t *key, uint8_t key_len)
{
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);

        /* No key supplied: keep the current RSS key unchanged */
        if (!key || key_len == 0) {
                PMD_DRV_LOG(DEBUG, "No key to be configured");
                return 0;
        } else if (key_len != vf->vf_res->rss_key_size) {
                PMD_DRV_LOG(ERR, "The size of the configured hash key "
                        "(%d) doesn't match the size the hardware "
                        "supports (%d)", key_len,
                        vf->vf_res->rss_key_size);
                return -EINVAL;
        }

        rte_memcpy(vf->rss_key, key, key_len);

        return iavf_configure_rss_key(adapter);
}

static int
iavf_dev_rss_hash_update(struct rte_eth_dev *dev,
                        struct rte_eth_rss_conf *rss_conf)
{
        struct iavf_adapter *adapter =
                IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
        int ret;

        adapter->eth_dev->data->dev_conf.rx_adv_conf.rss_conf = *rss_conf;

        if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
                return -ENOTSUP;

        /* Set hash key. */
        ret = iavf_set_rss_key(adapter, rss_conf->rss_key,
                               rss_conf->rss_key_len);
        if (ret)
                return ret;

        if (rss_conf->rss_hf == 0) {
                vf->rss_hf = 0;
                ret = iavf_set_hena(adapter, 0);

                /* It is a workaround, temporarily allow error to be returned
                 * due to possible lack of PF handling for hena = 0.
                 */
                if (ret)
                        PMD_DRV_LOG(WARNING, "fail to clean existing RSS, lack PF support");
                return 0;
        }

        if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF) {
                /* Clear existing RSS. */
                ret = iavf_set_hena(adapter, 0);

                /* It is a workaround, temporarily allow error to be returned
                 * due to possible lack of PF handling for hena = 0.
                 */
                if (ret)
                        PMD_DRV_LOG(WARNING, "fail to clean existing RSS, "
                                    "lack PF support");

                /* Set new RSS configuration. */
                ret = iavf_rss_hash_set(adapter, rss_conf->rss_hf, true);
                if (ret) {
                        PMD_DRV_LOG(ERR, "fail to set new RSS");
                        return ret;
                }
        } else {
                iavf_config_rss_hf(adapter, rss_conf->rss_hf);
        }

        return 0;
}

static int
iavf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
                          struct rte_eth_rss_conf *rss_conf)
{
        struct iavf_adapter *adapter =
                IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);

        if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
                return -ENOTSUP;

        rss_conf->rss_hf = vf->rss_hf;

        if (!rss_conf->rss_key)
                return 0;

        rss_conf->rss_key_len = vf->vf_res->rss_key_size;
        rte_memcpy(rss_conf->rss_key, vf->rss_key, rss_conf->rss_key_len);

        return 0;
}
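
/*
 * Illustrative sketch (not part of the driver): an application installs a
 * new RSS key and hash-type set, then reads the configuration back; this
 * exercises the hash update/get handlers above. Compiled out behind the
 * hypothetical IAVF_USAGE_EXAMPLES guard; the 52-byte key length must match
 * vf_res->rss_key_size and is an assumption of the example.
 */
#ifdef IAVF_USAGE_EXAMPLES
static int
example_rss_rekey(uint16_t port_id)
{
        static uint8_t key[52] = { 0x6d, 0x5a }; /* remaining bytes zero */
        struct rte_eth_rss_conf conf = {
                .rss_key = key,
                .rss_key_len = sizeof(key),
                .rss_hf = ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP,
        };
        int ret;

        ret = rte_eth_dev_rss_hash_update(port_id, &conf);
        if (ret != 0)
                return ret;

        /* read back what the PMD actually programmed */
        return rte_eth_dev_rss_hash_conf_get(port_id, &conf);
}
#endif /* IAVF_USAGE_EXAMPLES */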

static int
iavf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
        uint32_t frame_size = mtu + IAVF_ETH_OVERHEAD;
        int ret = 0;

        if (mtu < RTE_ETHER_MIN_MTU || frame_size > IAVF_FRAME_SIZE_MAX)
                return -EINVAL;

        /* MTU setting is forbidden while the port is started */
        if (dev->data->dev_started) {
                PMD_DRV_LOG(ERR, "port must be stopped before configuration");
                return -EBUSY;
        }

        if (frame_size > IAVF_ETH_MAX_LEN)
                dev->data->dev_conf.rxmode.offloads |=
                                DEV_RX_OFFLOAD_JUMBO_FRAME;
        else
                dev->data->dev_conf.rxmode.offloads &=
                                ~DEV_RX_OFFLOAD_JUMBO_FRAME;

        dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

        return ret;
}
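
/*
 * Illustrative sketch (not part of the driver): since the handler above
 * rejects MTU changes on a started port with -EBUSY, an application stops
 * the port around the change. Compiled out behind the hypothetical
 * IAVF_USAGE_EXAMPLES guard; error recovery is abbreviated.
 */
#ifdef IAVF_USAGE_EXAMPLES
static int
example_set_mtu(uint16_t port_id, uint16_t new_mtu)
{
        int ret;

        ret = rte_eth_dev_stop(port_id);
        if (ret != 0)
                return ret;

        ret = rte_eth_dev_set_mtu(port_id, new_mtu);
        if (ret != 0)
                return ret;

        return rte_eth_dev_start(port_id);
}
#endif /* IAVF_USAGE_EXAMPLES */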

static int
iavf_dev_set_default_mac_addr(struct rte_eth_dev *dev,
                             struct rte_ether_addr *mac_addr)
{
        struct iavf_adapter *adapter =
                IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
        struct rte_ether_addr *old_addr;
        int ret;

        old_addr = (struct rte_ether_addr *)hw->mac.addr;

        if (rte_is_same_ether_addr(old_addr, mac_addr))
                return 0;

        ret = iavf_add_del_eth_addr(adapter, old_addr, false, VIRTCHNL_ETHER_ADDR_PRIMARY);
        if (ret)
                PMD_DRV_LOG(ERR, "Fail to delete old MAC:"
                            " %02X:%02X:%02X:%02X:%02X:%02X",
                            old_addr->addr_bytes[0],
                            old_addr->addr_bytes[1],
                            old_addr->addr_bytes[2],
                            old_addr->addr_bytes[3],
                            old_addr->addr_bytes[4],
                            old_addr->addr_bytes[5]);

        ret = iavf_add_del_eth_addr(adapter, mac_addr, true, VIRTCHNL_ETHER_ADDR_PRIMARY);
        if (ret)
                PMD_DRV_LOG(ERR, "Fail to add new MAC:"
                            " %02X:%02X:%02X:%02X:%02X:%02X",
                            mac_addr->addr_bytes[0],
                            mac_addr->addr_bytes[1],
                            mac_addr->addr_bytes[2],
                            mac_addr->addr_bytes[3],
                            mac_addr->addr_bytes[4],
                            mac_addr->addr_bytes[5]);

        if (ret)
                return -EIO;

        rte_ether_addr_copy(mac_addr, (struct rte_ether_addr *)hw->mac.addr);
        return 0;
}
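
/*
 * Illustrative sketch (not part of the driver): replacing the primary MAC
 * through the ethdev API, which lands in the handler above. Compiled out
 * behind the hypothetical IAVF_USAGE_EXAMPLES guard; the address string is
 * an assumption of the example.
 */
#ifdef IAVF_USAGE_EXAMPLES
static int
example_set_primary_mac(uint16_t port_id)
{
        struct rte_ether_addr addr;

        if (rte_ether_unformat_addr("02:00:00:00:00:01", &addr) != 0)
                return -EINVAL;

        return rte_eth_dev_default_mac_addr_set(port_id, &addr);
}
#endif /* IAVF_USAGE_EXAMPLES */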

/*
 * Subtract the snapshot offset from a 48-bit rolling hardware counter,
 * accounting for wraparound.
 */
static void
iavf_stat_update_48(uint64_t *offset, uint64_t *stat)
{
        if (*stat >= *offset)
                *stat = *stat - *offset;
        else
                *stat = (uint64_t)((*stat +
                        ((uint64_t)1 << IAVF_48_BIT_WIDTH)) - *offset);

        *stat &= IAVF_48_BIT_MASK;
}

/* Same as above, for a 32-bit rolling hardware counter. */
static void
iavf_stat_update_32(uint64_t *offset, uint64_t *stat)
{
        if (*stat >= *offset)
                *stat = (uint64_t)(*stat - *offset);
        else
                *stat = (uint64_t)((*stat +
                        ((uint64_t)1 << IAVF_32_BIT_WIDTH)) - *offset);
}
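
/*
 * Worked example for the wraparound branch above (values are illustrative):
 * with offset = 0xFFFFFFFFFFF0 and a current 48-bit reading of 0x10, the
 * counter has wrapped, so the delta is
 *     (0x10 + 2^48 - 0xFFFFFFFFFFF0) & (2^48 - 1) = 0x20,
 * i.e. 16 counts up to the wrap point plus 16 counts after it.
 */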

static void
iavf_update_stats(struct iavf_vsi *vsi, struct virtchnl_eth_stats *nes)
{
        struct virtchnl_eth_stats *oes = &vsi->eth_stats_offset;

        iavf_stat_update_48(&oes->rx_bytes, &nes->rx_bytes);
        iavf_stat_update_48(&oes->rx_unicast, &nes->rx_unicast);
        iavf_stat_update_48(&oes->rx_multicast, &nes->rx_multicast);
        iavf_stat_update_48(&oes->rx_broadcast, &nes->rx_broadcast);
        iavf_stat_update_32(&oes->rx_discards, &nes->rx_discards);
        iavf_stat_update_48(&oes->tx_bytes, &nes->tx_bytes);
        iavf_stat_update_48(&oes->tx_unicast, &nes->tx_unicast);
        iavf_stat_update_48(&oes->tx_multicast, &nes->tx_multicast);
        iavf_stat_update_48(&oes->tx_broadcast, &nes->tx_broadcast);
        iavf_stat_update_32(&oes->tx_errors, &nes->tx_errors);
        iavf_stat_update_32(&oes->tx_discards, &nes->tx_discards);
}

static int
iavf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
        struct iavf_adapter *adapter =
                IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        struct iavf_vsi *vsi = &vf->vsi;
        struct virtchnl_eth_stats *pstats = NULL;
        int ret;

        ret = iavf_query_stats(adapter, &pstats);
        if (ret == 0) {
                uint8_t crc_stats_len = (dev->data->dev_conf.rxmode.offloads &
                                         DEV_RX_OFFLOAD_KEEP_CRC) ? 0 :
                                         RTE_ETHER_CRC_LEN;
                iavf_update_stats(vsi, pstats);
                stats->ipackets = pstats->rx_unicast + pstats->rx_multicast +
                                pstats->rx_broadcast - pstats->rx_discards;
                stats->opackets = pstats->tx_broadcast + pstats->tx_multicast +
                                                pstats->tx_unicast;
                stats->imissed = pstats->rx_discards;
                stats->oerrors = pstats->tx_errors + pstats->tx_discards;
                stats->ibytes = pstats->rx_bytes;
                stats->ibytes -= stats->ipackets * crc_stats_len;
                stats->obytes = pstats->tx_bytes;
        } else {
                PMD_DRV_LOG(ERR, "Get statistics failed");
        }
        return ret;
}

static int
iavf_dev_stats_reset(struct rte_eth_dev *dev)
{
        int ret;
        struct iavf_adapter *adapter =
                IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        struct iavf_vsi *vsi = &vf->vsi;
        struct virtchnl_eth_stats *pstats = NULL;

        /* take a snapshot of the current stat values */
        ret = iavf_query_stats(adapter, &pstats);
        if (ret != 0)
                return ret;

        /* use the snapshot as the new offset baseline, emulating a reset */
        vsi->eth_stats_offset = *pstats;

        return 0;
}

static int iavf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
                                      struct rte_eth_xstat_name *xstats_names,
                                      __rte_unused unsigned int limit)
{
        unsigned int i;

        if (xstats_names != NULL)
                for (i = 0; i < IAVF_NB_XSTATS; i++) {
                        snprintf(xstats_names[i].name,
                                sizeof(xstats_names[i].name),
                                "%s", rte_iavf_stats_strings[i].name);
                }
        return IAVF_NB_XSTATS;
}

static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
                                 struct rte_eth_xstat *xstats, unsigned int n)
{
        int ret;
        unsigned int i;
        struct iavf_adapter *adapter =
                IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        struct iavf_vsi *vsi = &vf->vsi;
        struct virtchnl_eth_stats *pstats = NULL;

        if (n < IAVF_NB_XSTATS)
                return IAVF_NB_XSTATS;

        ret = iavf_query_stats(adapter, &pstats);
        if (ret != 0)
                return 0;

        if (!xstats)
                return 0;

        iavf_update_stats(vsi, pstats);

        /* loop over xstats array and values from pstats */
        for (i = 0; i < IAVF_NB_XSTATS; i++) {
                xstats[i].id = i;
                xstats[i].value = *(uint64_t *)(((char *)pstats) +
                        rte_iavf_stats_strings[i].offset);
        }

        return IAVF_NB_XSTATS;
}
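
/*
 * Illustrative sketch (not part of the driver): the classic two-pass xstats
 * retrieval; a first call sized 0 returns the required count (this is why
 * the handler above returns IAVF_NB_XSTATS when n is too small), and the
 * second call fetches the values. Compiled out behind the hypothetical
 * IAVF_USAGE_EXAMPLES guard.
 */
#ifdef IAVF_USAGE_EXAMPLES
static int
example_dump_xstats(uint16_t port_id)
{
        struct rte_eth_xstat *xstats;
        int nb, i;

        nb = rte_eth_xstats_get(port_id, NULL, 0);
        if (nb < 0)
                return nb;

        xstats = rte_calloc("example_xstats", nb, sizeof(*xstats), 0);
        if (xstats == NULL)
                return -ENOMEM;

        nb = rte_eth_xstats_get(port_id, xstats, nb);
        for (i = 0; i < nb; i++)
                printf("xstat[%" PRIu64 "] = %" PRIu64 "\n",
                       xstats[i].id, xstats[i].value);

        rte_free(xstats);
        return 0;
}
#endif /* IAVF_USAGE_EXAMPLES */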

static int
iavf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
        struct iavf_adapter *adapter =
                IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
        uint16_t msix_intr;

        msix_intr = pci_dev->intr_handle.intr_vec[queue_id];
        if (msix_intr == IAVF_MISC_VEC_ID) {
                PMD_DRV_LOG(INFO, "MISC is also enabled for control");
                IAVF_WRITE_REG(hw, IAVF_VFINT_DYN_CTL01,
                               IAVF_VFINT_DYN_CTL01_INTENA_MASK |
                               IAVF_VFINT_DYN_CTL01_CLEARPBA_MASK |
                               IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
        } else {
                IAVF_WRITE_REG(hw,
                               IAVF_VFINT_DYN_CTLN1
                                (msix_intr - IAVF_RX_VEC_START),
                               IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
                               IAVF_VFINT_DYN_CTL01_CLEARPBA_MASK |
                               IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
        }

        IAVF_WRITE_FLUSH(hw);

        rte_intr_ack(&pci_dev->intr_handle);

        return 0;
}

static int
iavf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint16_t msix_intr;

        msix_intr = pci_dev->intr_handle.intr_vec[queue_id];
        if (msix_intr == IAVF_MISC_VEC_ID) {
                PMD_DRV_LOG(ERR, "MISC is used for control, cannot disable it");
                return -EIO;
        }

        IAVF_WRITE_REG(hw,
                      IAVF_VFINT_DYN_CTLN1(msix_intr - IAVF_RX_VEC_START),
                      0);

        IAVF_WRITE_FLUSH(hw);
        return 0;
}
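
/*
 * Illustrative sketch (not part of the driver): an application arms the
 * per-queue Rx interrupt before sleeping on it and disarms it when reverting
 * to polling; these calls land in the enable/disable handlers above.
 * Compiled out behind the hypothetical IAVF_USAGE_EXAMPLES guard; it assumes
 * the port was configured with intr_conf.rxq = 1.
 */
#ifdef IAVF_USAGE_EXAMPLES
static int
example_arm_rxq_irq(uint16_t port_id, uint16_t queue_id, bool arm)
{
        if (arm)
                return rte_eth_dev_rx_intr_enable(port_id, queue_id);

        return rte_eth_dev_rx_intr_disable(port_id, queue_id);
}
#endif /* IAVF_USAGE_EXAMPLES */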

static int
iavf_check_vf_reset_done(struct iavf_hw *hw)
{
        int i, reset;

        for (i = 0; i < IAVF_RESET_WAIT_CNT; i++) {
                reset = IAVF_READ_REG(hw, IAVF_VFGEN_RSTAT) &
                        IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
                reset = reset >> IAVF_VFGEN_RSTAT_VFR_STATE_SHIFT;
                if (reset == VIRTCHNL_VFR_VFACTIVE ||
                    reset == VIRTCHNL_VFR_COMPLETED)
                        break;
                rte_delay_ms(20);
        }

        if (i >= IAVF_RESET_WAIT_CNT)
                return -1;

        return 0;
}

static int
iavf_lookup_proto_xtr_type(const char *flex_name)
{
        static struct {
                const char *name;
                enum iavf_proto_xtr_type type;
        } xtr_type_map[] = {
                { "vlan",      IAVF_PROTO_XTR_VLAN      },
                { "ipv4",      IAVF_PROTO_XTR_IPV4      },
                { "ipv6",      IAVF_PROTO_XTR_IPV6      },
                { "ipv6_flow", IAVF_PROTO_XTR_IPV6_FLOW },
                { "tcp",       IAVF_PROTO_XTR_TCP       },
                { "ip_offset", IAVF_PROTO_XTR_IP_OFFSET },
        };
        uint32_t i;

        for (i = 0; i < RTE_DIM(xtr_type_map); i++) {
                if (strcmp(flex_name, xtr_type_map[i].name) == 0)
                        return xtr_type_map[i].type;
        }

        PMD_DRV_LOG(ERR, "wrong proto_xtr type, "
                    "it should be: vlan|ipv4|ipv6|ipv6_flow|tcp|ip_offset");

        return -1;
}

/**
 * Parse an element; the element can be a single number, a range, or a
 * '(' ')' group:
 * 1) A single number element is just a digit string, e.g. 9
 * 2) A range element is two numbers joined by a '-', e.g. 2-6
 * 3) A group element combines multiple 1) or 2) inside '( )', e.g. (0,2-4,6)
 *    Within a group element, '-' is the range separator and
 *                            ',' separates single numbers.
 */
static int
iavf_parse_queue_set(const char *input, int xtr_type,
                     struct iavf_devargs *devargs)
{
        const char *str = input;
        char *end = NULL;
        uint32_t min, max;
        uint32_t idx;

        while (isblank(*str))
                str++;

        if (!isdigit(*str) && *str != '(')
                return -1;

        /* process a single number or a single range of numbers */
        if (*str != '(') {
                errno = 0;
                idx = strtoul(str, &end, 10);
                if (errno || !end || idx >= IAVF_MAX_QUEUE_NUM)
                        return -1;

                while (isblank(*end))
                        end++;

                min = idx;
                max = idx;

                /* process single <number>-<number> */
                if (*end == '-') {
                        end++;
                        while (isblank(*end))
                                end++;
                        if (!isdigit(*end))
                                return -1;

                        errno = 0;
                        idx = strtoul(end, &end, 10);
                        if (errno || !end || idx >= IAVF_MAX_QUEUE_NUM)
                                return -1;

                        max = idx;
                        while (isblank(*end))
                                end++;
                }

                if (*end != ':')
                        return -1;

                for (idx = RTE_MIN(min, max);
                     idx <= RTE_MAX(min, max); idx++)
                        devargs->proto_xtr[idx] = xtr_type;

                return 0;
        }

        /* process a set within brackets */
        str++;
        while (isblank(*str))
                str++;
        if (*str == '\0')
                return -1;

        min = IAVF_MAX_QUEUE_NUM;
        do {
                /* advance to the first digit */
                while (isblank(*str))
                        str++;
                if (!isdigit(*str))
                        return -1;

                /* parse the value */
                errno = 0;
                idx = strtoul(str, &end, 10);
                if (errno || !end || idx >= IAVF_MAX_QUEUE_NUM)
                        return -1;

                /* advance to a separator: '-', ',' or ')' */
                while (isblank(*end))
                        end++;
                if (*end == '-') {
                        if (min == IAVF_MAX_QUEUE_NUM)
                                min = idx;
                        else /* avoid continuous '-' */
                                return -1;
                } else if (*end == ',' || *end == ')') {
                        max = idx;
                        if (min == IAVF_MAX_QUEUE_NUM)
                                min = idx;

                        for (idx = RTE_MIN(min, max);
                             idx <= RTE_MAX(min, max); idx++)
                                devargs->proto_xtr[idx] = xtr_type;

                        min = IAVF_MAX_QUEUE_NUM;
                } else {
                        return -1;
                }

                str = end + 1;
        } while (*end != ')' && *end != '\0');

        return 0;
}

static int
iavf_parse_queue_proto_xtr(const char *queues, struct iavf_devargs *devargs)
{
        const char *queue_start;
        uint32_t idx;
        int xtr_type;
        char flex_name[32];

        while (isblank(*queues))
                queues++;

        if (*queues != '[') {
                xtr_type = iavf_lookup_proto_xtr_type(queues);
                if (xtr_type < 0)
                        return -1;

                devargs->proto_xtr_dflt = xtr_type;

                return 0;
        }

        queues++;
        do {
                while (isblank(*queues))
                        queues++;
                if (*queues == '\0')
                        return -1;

                queue_start = queues;

                /* skip over a complete bracketed group */
                if (*queue_start == '(') {
                        queues += strcspn(queues, ")");
                        if (*queues != ')')
                                return -1;
                }

                /* scan to the ':' separator */
                queues += strcspn(queues, ":");
                if (*queues++ != ':')
                        return -1;
                while (isblank(*queues))
                        queues++;

                for (idx = 0; ; idx++) {
                        if (isblank(queues[idx]) ||
                            queues[idx] == ',' ||
                            queues[idx] == ']' ||
                            queues[idx] == '\0')
                                break;

                        if (idx > sizeof(flex_name) - 2)
                                return -1;

                        flex_name[idx] = queues[idx];
                }
                flex_name[idx] = '\0';
                xtr_type = iavf_lookup_proto_xtr_type(flex_name);
                if (xtr_type < 0)
                        return -1;

                queues += idx;

                while (isblank(*queues) || *queues == ',' || *queues == ']')
                        queues++;

                if (iavf_parse_queue_set(queue_start, xtr_type, devargs) < 0)
                        return -1;
        } while (*queues != '\0');

        return 0;
}
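
/*
 * Examples of devargs strings accepted by the parsers above (illustrative;
 * the PCI addresses are assumptions):
 *   -a 18:01.0,proto_xtr=vlan                   -> default type, all queues
 *   -a 18:01.0,proto_xtr='[(0,2-4):vlan,6:tcp]' -> per-queue-set types
 */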

static int
iavf_handle_proto_xtr_arg(__rte_unused const char *key, const char *value,
                          void *extra_args)
{
        struct iavf_devargs *devargs = extra_args;

        if (!value || !extra_args)
                return -EINVAL;

        if (iavf_parse_queue_proto_xtr(value, devargs) < 0) {
                PMD_DRV_LOG(ERR, "invalid proto_xtr parameter: '%s'",
                            value);
                return -1;
        }

        return 0;
}

static int iavf_parse_devargs(struct rte_eth_dev *dev)
{
        struct iavf_adapter *ad =
                IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct rte_devargs *devargs = dev->device->devargs;
        struct rte_kvargs *kvlist;
        int ret;

        if (!devargs)
                return 0;

        kvlist = rte_kvargs_parse(devargs->args, iavf_valid_args);
        if (!kvlist) {
                PMD_INIT_LOG(ERR, "invalid kvargs key");
                return -EINVAL;
        }

        ad->devargs.proto_xtr_dflt = IAVF_PROTO_XTR_NONE;
        memset(ad->devargs.proto_xtr, IAVF_PROTO_XTR_NONE,
               sizeof(ad->devargs.proto_xtr));

        ret = rte_kvargs_process(kvlist, IAVF_PROTO_XTR_ARG,
                                 &iavf_handle_proto_xtr_arg, &ad->devargs);

        rte_kvargs_free(kvlist);
        return ret;
}

static void
iavf_init_proto_xtr(struct rte_eth_dev *dev)
{
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        struct iavf_adapter *ad =
                        IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        const struct iavf_proto_xtr_ol *xtr_ol;
        bool proto_xtr_enable = false;
        int offset;
        uint16_t i;

        vf->proto_xtr = rte_zmalloc("vf proto xtr",
                                    vf->vsi_res->num_queue_pairs, 0);
        if (unlikely(!(vf->proto_xtr))) {
                PMD_DRV_LOG(ERR, "no memory for setting up the proto_xtr table");
                return;
        }

        for (i = 0; i < vf->vsi_res->num_queue_pairs; i++) {
                vf->proto_xtr[i] = ad->devargs.proto_xtr[i] !=
                                        IAVF_PROTO_XTR_NONE ?
                                        ad->devargs.proto_xtr[i] :
                                        ad->devargs.proto_xtr_dflt;

                if (vf->proto_xtr[i] != IAVF_PROTO_XTR_NONE) {
                        uint8_t type = vf->proto_xtr[i];

                        iavf_proto_xtr_params[type].required = true;
                        proto_xtr_enable = true;
                }
        }

        if (likely(!proto_xtr_enable))
                return;

        offset = rte_mbuf_dynfield_register(&iavf_proto_xtr_metadata_param);
        if (unlikely(offset == -1)) {
                PMD_DRV_LOG(ERR,
                            "failed to register mbuf dynfield for protocol extraction metadata, error %d",
                            -rte_errno);
                return;
        }

        PMD_DRV_LOG(DEBUG,
                    "proto_xtr metadata offset in mbuf is: %d",
                    offset);
        rte_pmd_ifd_dynfield_proto_xtr_metadata_offs = offset;

        for (i = 0; i < RTE_DIM(iavf_proto_xtr_params); i++) {
                xtr_ol = &iavf_proto_xtr_params[i];

                uint8_t rxdid = iavf_proto_xtr_type_to_rxdid((uint8_t)i);

                if (!xtr_ol->required)
                        continue;

                if (!(vf->supported_rxdid & BIT(rxdid))) {
                        PMD_DRV_LOG(ERR,
                                    "rxdid[%u] is not supported in hardware",
                                    rxdid);
                        rte_pmd_ifd_dynfield_proto_xtr_metadata_offs = -1;
                        break;
                }

                offset = rte_mbuf_dynflag_register(&xtr_ol->param);
                if (unlikely(offset == -1)) {
                        PMD_DRV_LOG(ERR,
                                    "failed to register proto_xtr offload '%s', error %d",
                                    xtr_ol->param.name, -rte_errno);

                        rte_pmd_ifd_dynfield_proto_xtr_metadata_offs = -1;
                        break;
                }

                PMD_DRV_LOG(DEBUG,
                            "proto_xtr offload '%s' offset in mbuf is: %d",
                            xtr_ol->param.name, offset);
                *xtr_ol->ol_flag = 1ULL << offset;
        }
}
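
/*
 * Illustrative sketch (not part of the driver): once the dynfield and
 * dynflags registered above are in place, an application reads the extracted
 * metadata from a received mbuf using the public symbols exported through
 * rte_pmd_iavf.h. Compiled out behind the hypothetical IAVF_USAGE_EXAMPLES
 * guard.
 */
#ifdef IAVF_USAGE_EXAMPLES
static void
example_read_proto_xtr(struct rte_mbuf *m)
{
        uint32_t metadata;

        if (rte_pmd_ifd_dynfield_proto_xtr_metadata_offs < 0)
                return; /* protocol extraction not enabled */

        /* the VLAN dynflag marks mbufs carrying VLAN extraction metadata */
        if (m->ol_flags & rte_pmd_ifd_dynflag_proto_xtr_vlan_mask) {
                metadata = *RTE_MBUF_DYNFIELD(m,
                        rte_pmd_ifd_dynfield_proto_xtr_metadata_offs,
                        uint32_t *);
                printf("vlan metadata: 0x%08" PRIx32 "\n", metadata);
        }
}
#endif /* IAVF_USAGE_EXAMPLES */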

static int
iavf_init_vf(struct rte_eth_dev *dev)
{
        int err, bufsz;
        struct iavf_adapter *adapter =
                IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

        err = iavf_parse_devargs(dev);
        if (err) {
                PMD_INIT_LOG(ERR, "Failed to parse devargs");
                goto err;
        }

        err = iavf_set_mac_type(hw);
        if (err) {
                PMD_INIT_LOG(ERR, "set_mac_type failed: %d", err);
                goto err;
        }

        err = iavf_check_vf_reset_done(hw);
        if (err) {
                PMD_INIT_LOG(ERR, "VF is still resetting");
                goto err;
        }

        iavf_init_adminq_parameter(hw);
        err = iavf_init_adminq(hw);
        if (err) {
                PMD_INIT_LOG(ERR, "init_adminq failed: %d", err);
                goto err;
        }

        vf->aq_resp = rte_zmalloc("vf_aq_resp", IAVF_AQ_BUF_SZ, 0);
        if (!vf->aq_resp) {
                PMD_INIT_LOG(ERR, "unable to allocate vf_aq_resp memory");
                goto err_aq;
        }
        if (iavf_check_api_version(adapter) != 0) {
                PMD_INIT_LOG(ERR, "check_api_version failed");
                goto err_api;
        }

        bufsz = sizeof(struct virtchnl_vf_resource) +
                (IAVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource));
        vf->vf_res = rte_zmalloc("vf_res", bufsz, 0);
        if (!vf->vf_res) {
                PMD_INIT_LOG(ERR, "unable to allocate vf_res memory");
                goto err_api;
        }

        if (iavf_get_vf_resource(adapter) != 0) {
                PMD_INIT_LOG(ERR, "iavf_get_vf_resource failed");
                goto err_alloc;
        }
        /* Allocate memory for RSS info */
        if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
                vf->rss_key = rte_zmalloc("rss_key",
                                          vf->vf_res->rss_key_size, 0);
                if (!vf->rss_key) {
                        PMD_INIT_LOG(ERR, "unable to allocate rss_key memory");
                        goto err_rss;
                }
                vf->rss_lut = rte_zmalloc("rss_lut",
                                          vf->vf_res->rss_lut_size, 0);
                if (!vf->rss_lut) {
                        PMD_INIT_LOG(ERR, "unable to allocate rss_lut memory");
                        goto err_rss;
                }
        }

        if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
                if (iavf_get_supported_rxdid(adapter) != 0) {
                        PMD_INIT_LOG(ERR, "failed to get supported rxdid");
                        goto err_rss;
                }
        }

        if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
                if (iavf_get_vlan_offload_caps_v2(adapter) != 0) {
                        PMD_INIT_LOG(ERR, "failed to get VLAN offload v2 capabilities");
                        goto err_rss;
                }
        }

        if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS) {
                bufsz = sizeof(struct virtchnl_qos_cap_list) +
                        IAVF_MAX_TRAFFIC_CLASS *
                        sizeof(struct virtchnl_qos_cap_elem);
                vf->qos_cap = rte_zmalloc("qos_cap", bufsz, 0);
                if (!vf->qos_cap) {
                        PMD_INIT_LOG(ERR, "unable to allocate qos_cap memory");
                        goto err_rss;
                }
                iavf_tm_conf_init(dev);
        }

        iavf_init_proto_xtr(dev);

        return 0;
err_rss:
        rte_free(vf->rss_key);
        rte_free(vf->rss_lut);
err_alloc:
        rte_free(vf->qos_cap);
        rte_free(vf->vf_res);
        vf->vsi_res = NULL;
err_api:
        rte_free(vf->aq_resp);
err_aq:
        iavf_shutdown_adminq(hw);
err:
        return -1;
}

/* Enable default admin queue interrupt setting */
static inline void
iavf_enable_irq0(struct iavf_hw *hw)
{
        /* Enable admin queue interrupt trigger */
        IAVF_WRITE_REG(hw, IAVF_VFINT_ICR0_ENA1,
                       IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK);

        IAVF_WRITE_REG(hw, IAVF_VFINT_DYN_CTL01,
                       IAVF_VFINT_DYN_CTL01_INTENA_MASK |
                       IAVF_VFINT_DYN_CTL01_CLEARPBA_MASK |
                       IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);

        IAVF_WRITE_FLUSH(hw);
}

static inline void
iavf_disable_irq0(struct iavf_hw *hw)
{
        /* Disable all interrupt types */
        IAVF_WRITE_REG(hw, IAVF_VFINT_ICR0_ENA1, 0);
        IAVF_WRITE_REG(hw, IAVF_VFINT_DYN_CTL01,
                       IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
        IAVF_WRITE_FLUSH(hw);
}

static void
iavf_dev_interrupt_handler(void *param)
{
        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
        struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        iavf_disable_irq0(hw);

        iavf_handle_virtchnl_msg(dev);

        iavf_enable_irq0(hw);
}

static int
iavf_dev_flow_ops_get(struct rte_eth_dev *dev,
                      const struct rte_flow_ops **ops)
{
        if (!dev)
                return -EINVAL;

        *ops = &iavf_flow_ops;
        return 0;
}

static void
iavf_default_rss_disable(struct iavf_adapter *adapter)
{
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
        int ret = 0;

        if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
                /* Set hena = 0 to ask PF to cleanup all existing RSS. */
                ret = iavf_set_hena(adapter, 0);
                if (ret)
                        /* It is a workaround, temporarily allow error to be
                         * returned due to possible lack of PF handling for
                         * hena = 0.
                         */
                        PMD_INIT_LOG(WARNING, "fail to disable default RSS, "
                                    "lack PF support");
        }
}

static int
iavf_dev_init(struct rte_eth_dev *eth_dev)
{
        struct iavf_adapter *adapter =
                IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
        struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        int ret = 0;

        PMD_INIT_FUNC_TRACE();

        /* assign ops func pointer */
        eth_dev->dev_ops = &iavf_eth_dev_ops;
        eth_dev->rx_queue_count = iavf_dev_rxq_count;
        eth_dev->rx_descriptor_status = iavf_dev_rx_desc_status;
        eth_dev->tx_descriptor_status = iavf_dev_tx_desc_status;
        eth_dev->rx_pkt_burst = &iavf_recv_pkts;
        eth_dev->tx_pkt_burst = &iavf_xmit_pkts;
        eth_dev->tx_pkt_prepare = &iavf_prep_pkts;

        /* For secondary processes, we don't initialise any further as primary
         * has already done this work. Only check if we need a different RX
         * and TX function.
         */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                iavf_set_rx_function(eth_dev);
                iavf_set_tx_function(eth_dev);
                return 0;
        }
        rte_eth_copy_pci_info(eth_dev, pci_dev);
        eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

        hw->vendor_id = pci_dev->id.vendor_id;
        hw->device_id = pci_dev->id.device_id;
        hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
        hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
        hw->bus.bus_id = pci_dev->addr.bus;
        hw->bus.device = pci_dev->addr.devid;
        hw->bus.func = pci_dev->addr.function;
        hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
        hw->back = IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
        adapter->eth_dev = eth_dev;
        adapter->stopped = 1;

        if (iavf_init_vf(eth_dev) != 0) {
                PMD_INIT_LOG(ERR, "Init vf failed");
                return -1;
        }

        /* set default ptype table */
        adapter->ptype_tbl = iavf_get_default_ptype_table();

        /* copy mac addr */
        eth_dev->data->mac_addrs = rte_zmalloc(
                "iavf_mac", RTE_ETHER_ADDR_LEN * IAVF_NUM_MACADDR_MAX, 0);
        if (!eth_dev->data->mac_addrs) {
                PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to"
                             " store MAC addresses",
                             RTE_ETHER_ADDR_LEN * IAVF_NUM_MACADDR_MAX);
                return -ENOMEM;
        }
        /* If the MAC address is not configured by host,
         * generate a random one.
         */
        if (!rte_is_valid_assigned_ether_addr(
                        (struct rte_ether_addr *)hw->mac.addr))
                rte_eth_random_addr(hw->mac.addr);
        rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
                        &eth_dev->data->mac_addrs[0]);

        /* register callback func to eal lib */
        rte_intr_callback_register(&pci_dev->intr_handle,
                                   iavf_dev_interrupt_handler,
                                   (void *)eth_dev);

        /* enable uio intr after callback register */
        rte_intr_enable(&pci_dev->intr_handle);

        /* configure and enable device interrupt */
        iavf_enable_irq0(hw);

        ret = iavf_flow_init(adapter);
        if (ret) {
                PMD_INIT_LOG(ERR, "Failed to initialize flow");
                return ret;
        }

        iavf_default_rss_disable(adapter);

        return 0;
}

static int
iavf_dev_close(struct rte_eth_dev *dev)
{
        struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        struct iavf_adapter *adapter =
                IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        int ret;

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        ret = iavf_dev_stop(dev);

        iavf_flow_flush(dev, NULL);
        iavf_flow_uninit(adapter);

        /*
         * Disable promiscuous mode before resetting the VF. This is a
         * workaround for coexistence with the kernel PF driver, not part
         * of the normal teardown flow.
         */
        if (vf->promisc_unicast_enabled || vf->promisc_multicast_enabled)
                iavf_config_promisc(adapter, false, false);

        iavf_shutdown_adminq(hw);
        /* disable uio intr before callback unregister */
        rte_intr_disable(intr_handle);

        /* unregister callback func from eal lib */
        rte_intr_callback_unregister(intr_handle,
                                     iavf_dev_interrupt_handler, dev);
        iavf_disable_irq0(hw);

        if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS)
                iavf_tm_conf_uninit(dev);

        if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
                if (vf->rss_lut) {
                        rte_free(vf->rss_lut);
                        vf->rss_lut = NULL;
                }
                if (vf->rss_key) {
                        rte_free(vf->rss_key);
                        vf->rss_key = NULL;
                }
        }

        rte_free(vf->vf_res);
        vf->vsi_res = NULL;
        vf->vf_res = NULL;

        rte_free(vf->aq_resp);
        vf->aq_resp = NULL;

        /*
         * If the VF is reset via VFLR, the device will be knocked out of bus
         * master mode, and the driver will fail to recover from the reset. Fix
         * this by enabling bus mastering after every reset. In a non-VFLR case,
         * the bus master bit will not be disabled, and this call will have no
         * effect.
         */
        if (vf->vf_reset && !rte_pci_set_bus_master(pci_dev, true))
                vf->vf_reset = false;

        return ret;
}

static int
iavf_dev_uninit(struct rte_eth_dev *dev)
{
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return -EPERM;

        iavf_dev_close(dev);

        return 0;
}

/*
 * Reset VF device only to re-initialize resources in PMD layer
 */
static int
iavf_dev_reset(struct rte_eth_dev *dev)
{
        int ret;

        ret = iavf_dev_uninit(dev);
        if (ret)
                return ret;

        return iavf_dev_init(dev);
}
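
/*
 * Illustrative sketch (not part of the driver): an application typically
 * reaches the reset path above after an RTE_ETH_EVENT_INTR_RESET event.
 * Since rte_eth_dev_reset() should not be invoked from within the event
 * callback itself, the sketch merely records the request for the main loop
 * to act on. Compiled out behind the hypothetical IAVF_USAGE_EXAMPLES guard.
 */
#ifdef IAVF_USAGE_EXAMPLES
static volatile bool example_reset_pending;

static int
example_reset_event_cb(uint16_t port_id, enum rte_eth_event_type event,
                       void *cb_arg, void *ret_param)
{
        RTE_SET_USED(port_id);
        RTE_SET_USED(event);
        RTE_SET_USED(cb_arg);
        RTE_SET_USED(ret_param);

        /* handled later from the main loop via rte_eth_dev_reset() */
        example_reset_pending = true;
        return 0;
}
#endif /* IAVF_USAGE_EXAMPLES */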

static int
iavf_dcf_cap_check_handler(__rte_unused const char *key,
                           const char *value, __rte_unused void *opaque)
{
        if (strcmp(value, "dcf"))
                return -1;

        return 0;
}

static int
iavf_dcf_cap_selected(struct rte_devargs *devargs)
{
        struct rte_kvargs *kvlist;
        const char *key = "cap";
        int ret = 0;

        if (devargs == NULL)
                return 0;

        kvlist = rte_kvargs_parse(devargs->args, NULL);
        if (kvlist == NULL)
                return 0;

        if (!rte_kvargs_count(kvlist, key))
                goto exit;

        /* dcf capability selected when there's a key-value pair: cap=dcf */
        if (rte_kvargs_process(kvlist, key,
                               iavf_dcf_cap_check_handler, NULL) < 0)
                goto exit;

        ret = 1;

exit:
        rte_kvargs_free(kvlist);
        return ret;
}

static int
iavf_drv_i40evf_check_handler(__rte_unused const char *key,
                              const char *value, __rte_unused void *opaque)
{
        if (strcmp(value, "i40evf"))
                return -1;

        return 0;
}

static int
iavf_drv_i40evf_selected(struct rte_devargs *devargs, uint16_t device_id)
{
        struct rte_kvargs *kvlist;
        int ret = 0;

        if (device_id != IAVF_DEV_ID_VF &&
            device_id != IAVF_DEV_ID_VF_HV &&
            device_id != IAVF_DEV_ID_X722_VF &&
            device_id != IAVF_DEV_ID_X722_A0_VF)
                return 0;

        if (devargs == NULL)
                return 0;

        kvlist = rte_kvargs_parse(devargs->args, NULL);
        if (kvlist == NULL)
                return 0;

        if (!rte_kvargs_count(kvlist, RTE_DEVARGS_KEY_DRIVER))
                goto exit;

        /* i40evf driver selected when there's a key-value pair:
         * driver=i40evf
         */
        if (rte_kvargs_process(kvlist, RTE_DEVARGS_KEY_DRIVER,
                               iavf_drv_i40evf_check_handler, NULL) < 0)
                goto exit;

        ret = 1;

exit:
        rte_kvargs_free(kvlist);
        return ret;
}
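
/*
 * Example EAL device arguments matched by the two selectors above
 * (illustrative; the PCI addresses are assumptions):
 *   -a 18:01.0,cap=dcf        -> defer the device to the DCF driver
 *   -a 18:02.0,driver=i40evf  -> defer an i40e VF to the legacy i40evf driver
 */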

static int eth_iavf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                             struct rte_pci_device *pci_dev)
{
        if (iavf_dcf_cap_selected(pci_dev->device.devargs) ||
            iavf_drv_i40evf_selected(pci_dev->device.devargs,
                                     pci_dev->id.device_id))
                return 1;

        return rte_eth_dev_pci_generic_probe(pci_dev,
                sizeof(struct iavf_adapter), iavf_dev_init);
}

static int eth_iavf_pci_remove(struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_remove(pci_dev, iavf_dev_uninit);
}

/* Adaptive virtual function driver struct */
static struct rte_pci_driver rte_iavf_pmd = {
        .id_table = pci_id_iavf_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
        .probe = eth_iavf_pci_probe,
        .remove = eth_iavf_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_iavf, rte_iavf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_iavf, pci_id_iavf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_iavf, "* igb_uio | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_iavf, "cap=dcf driver=i40evf");
RTE_LOG_REGISTER_SUFFIX(iavf_logtype_init, init, NOTICE);
RTE_LOG_REGISTER_SUFFIX(iavf_logtype_driver, driver, NOTICE);
#ifdef RTE_ETHDEV_DEBUG_RX
RTE_LOG_REGISTER_SUFFIX(iavf_logtype_rx, rx, DEBUG);
#endif
#ifdef RTE_ETHDEV_DEBUG_TX
RTE_LOG_REGISTER_SUFFIX(iavf_logtype_tx, tx, DEBUG);
#endif