dpdk/drivers/net/hns3/hns3_ethdev_vf.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 HiSilicon Limited.
 */

#include <linux/pci_regs.h>
#include <rte_alarm.h>
#include <ethdev_pci.h>
#include <rte_io.h>
#include <rte_pci.h>
#include <rte_vfio.h>

#include "hns3_ethdev.h"
#include "hns3_common.h"
#include "hns3_logs.h"
#include "hns3_rxtx.h"
#include "hns3_regs.h"
#include "hns3_intr.h"
#include "hns3_dcb.h"
#include "hns3_mp.h"
#include "hns3_flow.h"

#define HNS3VF_KEEP_ALIVE_INTERVAL      2000000 /* us */
#define HNS3VF_SERVICE_INTERVAL         1000000 /* us */

#define HNS3VF_RESET_WAIT_MS    20
#define HNS3VF_RESET_WAIT_CNT   2000

/* Reset related Registers */
#define HNS3_GLOBAL_RESET_BIT           0
#define HNS3_CORE_RESET_BIT             1
#define HNS3_IMP_RESET_BIT              2
#define HNS3_FUN_RST_ING_B              0

enum hns3vf_evt_cause {
        HNS3VF_VECTOR0_EVENT_RST,
        HNS3VF_VECTOR0_EVENT_MBX,
        HNS3VF_VECTOR0_EVENT_OTHER,
};

static enum hns3_reset_level hns3vf_get_reset_level(struct hns3_hw *hw,
                                                    uint64_t *levels);
static int hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int hns3vf_dev_configure_vlan(struct rte_eth_dev *dev);

static int hns3vf_add_mc_mac_addr(struct hns3_hw *hw,
                                  struct rte_ether_addr *mac_addr);
static int hns3vf_remove_mc_mac_addr(struct hns3_hw *hw,
                                     struct rte_ether_addr *mac_addr);
static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
                                   __rte_unused int wait_to_complete);

/* set PCI bus mastering */
static int
hns3vf_set_bus_master(const struct rte_pci_device *device, bool op)
{
        uint16_t reg;
        int ret;

        ret = rte_pci_read_config(device, &reg, sizeof(reg), PCI_COMMAND);
        if (ret < 0) {
                PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x",
                             PCI_COMMAND);
                return ret;
        }

        if (op)
                /* set the master bit */
                reg |= PCI_COMMAND_MASTER;
        else
                reg &= ~(PCI_COMMAND_MASTER);

        return rte_pci_write_config(device, &reg, sizeof(reg), PCI_COMMAND);
}

/**
 * hns3vf_find_pci_capability - lookup a capability in the PCI capability list
 * @device: the PCI device
 * @cap: the capability
 *
 * Return the address of the given capability within the PCI capability list.
 */
static int
hns3vf_find_pci_capability(const struct rte_pci_device *device, int cap)
{
#define MAX_PCIE_CAPABILITY 48
        uint16_t status;
        uint8_t pos;
        uint8_t id;
        int ttl;
        int ret;

        ret = rte_pci_read_config(device, &status, sizeof(status), PCI_STATUS);
        if (ret < 0) {
                PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x", PCI_STATUS);
                return 0;
        }

        if (!(status & PCI_STATUS_CAP_LIST))
                return 0;

        ttl = MAX_PCIE_CAPABILITY;
        ret = rte_pci_read_config(device, &pos, sizeof(pos),
                                  PCI_CAPABILITY_LIST);
        if (ret < 0) {
                PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x",
                             PCI_CAPABILITY_LIST);
                return 0;
        }

        while (ttl-- && pos >= PCI_STD_HEADER_SIZEOF) {
                ret = rte_pci_read_config(device, &id, sizeof(id),
                                          (pos + PCI_CAP_LIST_ID));
                if (ret < 0) {
                        PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x",
                                     (pos + PCI_CAP_LIST_ID));
                        break;
                }

                if (id == 0xFF)
                        break;

                if (id == cap)
                        return (int)pos;

                ret = rte_pci_read_config(device, &pos, sizeof(pos),
                                          (pos + PCI_CAP_LIST_NEXT));
                if (ret < 0) {
                        PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x",
                                     (pos + PCI_CAP_LIST_NEXT));
                        break;
                }
        }
        return 0;
}

static int
hns3vf_enable_msix(const struct rte_pci_device *device, bool op)
{
        uint16_t control;
        int pos;
        int ret;

        pos = hns3vf_find_pci_capability(device, PCI_CAP_ID_MSIX);
        if (pos) {
                ret = rte_pci_read_config(device, &control, sizeof(control),
                                    (pos + PCI_MSIX_FLAGS));
                if (ret < 0) {
                        PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x",
                                     (pos + PCI_MSIX_FLAGS));
                        return -ENXIO;
                }

                if (op)
                        control |= PCI_MSIX_FLAGS_ENABLE;
                else
                        control &= ~PCI_MSIX_FLAGS_ENABLE;
                ret = rte_pci_write_config(device, &control, sizeof(control),
                                          (pos + PCI_MSIX_FLAGS));
                if (ret < 0) {
                        PMD_INIT_LOG(ERR, "Failed to write PCI offset 0x%x",
                                    (pos + PCI_MSIX_FLAGS));
                        return -ENXIO;
                }

                return 0;
        }

        return -ENXIO;
}

static int
hns3vf_add_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
{
        /* mac address was checked by upper level interface */
        char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
        int ret;

        ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST,
                                HNS3_MBX_MAC_VLAN_UC_ADD, mac_addr->addr_bytes,
                                RTE_ETHER_ADDR_LEN, false, NULL, 0);
        if (ret) {
                hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
                                      mac_addr);
                hns3_err(hw, "failed to add uc mac addr(%s), ret = %d",
                         mac_str, ret);
        }
        return ret;
}
static int
hns3vf_remove_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
{
        /* mac address was checked by upper level interface */
        char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
        int ret;

        ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST,
                                HNS3_MBX_MAC_VLAN_UC_REMOVE,
                                mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN,
                                false, NULL, 0);
        if (ret) {
                hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
                                      mac_addr);
                hns3_err(hw, "failed to remove uc mac addr(%s), ret = %d",
                         mac_str, ret);
        }
        return ret;
}

static int
hns3vf_set_default_mac_addr(struct rte_eth_dev *dev,
                            struct rte_ether_addr *mac_addr)
{
#define HNS3_TWO_ETHER_ADDR_LEN (RTE_ETHER_ADDR_LEN * 2)
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_ether_addr *old_addr;
        uint8_t addr_bytes[HNS3_TWO_ETHER_ADDR_LEN]; /* for 2 MAC addresses */
        char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
        int ret;

        /*
         * The input parameter named mac_addr has been guaranteed to be a
         * valid address in the rte layer of the DPDK framework.
         */
        old_addr = (struct rte_ether_addr *)hw->mac.mac_addr;
        rte_spinlock_lock(&hw->lock);
        memcpy(addr_bytes, mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN);
        memcpy(&addr_bytes[RTE_ETHER_ADDR_LEN], old_addr->addr_bytes,
               RTE_ETHER_ADDR_LEN);

        ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST,
                                HNS3_MBX_MAC_VLAN_UC_MODIFY, addr_bytes,
                                HNS3_TWO_ETHER_ADDR_LEN, true, NULL, 0);
        if (ret) {
                /*
                 * The hns3 VF PMD depends on the hns3 PF kernel ethdev
                 * driver. When the user has configured a MAC address for the
                 * VF device by the "ip link set ..." command based on the PF
                 * device, the hns3 PF kernel ethdev driver does not allow the
                 * VF driver to request reconfiguring a different default MAC
                 * address, and returns -EPERM to the VF driver through the
                 * mailbox.
                 */
                if (ret == -EPERM) {
                        hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
                                              old_addr);
                        hns3_warn(hw, "Has permanent mac addr(%s) for vf",
                                  mac_str);
                } else {
                        hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
                                              mac_addr);
                        hns3_err(hw, "Failed to set mac addr(%s) for vf: %d",
                                 mac_str, ret);
                }
        }

        rte_ether_addr_copy(mac_addr,
                            (struct rte_ether_addr *)hw->mac.mac_addr);
        rte_spinlock_unlock(&hw->lock);

        return ret;
}
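
/*
 * Illustrative sketch, not part of this driver and never compiled: how an
 * application might change the VF default MAC address and tolerate the
 * -EPERM case described above. The helper name and the port_id/new_mac
 * parameters are hypothetical; only the public ethdev API is used.
 */
#if 0
#include <rte_ethdev.h>

static int
example_set_vf_default_mac(uint16_t port_id, struct rte_ether_addr *new_mac)
{
        int ret;

        /* Reaches hns3vf_set_default_mac_addr() through the ethdev layer. */
        ret = rte_eth_dev_default_mac_addr_set(port_id, new_mac);
        if (ret == -EPERM) {
                /*
                 * The host administrator pinned a MAC with
                 * "ip link set <pf> vf <id> mac ...": keep using the
                 * administratively assigned address.
                 */
                return 0;
        }

        return ret;
}
#endif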

static int
hns3vf_add_mc_mac_addr(struct hns3_hw *hw,
                       struct rte_ether_addr *mac_addr)
{
        char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
        int ret;

        ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MULTICAST,
                                HNS3_MBX_MAC_VLAN_MC_ADD,
                                mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN, false,
                                NULL, 0);
        if (ret) {
                hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
                                      mac_addr);
                hns3_err(hw, "Failed to add mc mac addr(%s) for vf: %d",
                         mac_str, ret);
        }

        return ret;
}

static int
hns3vf_remove_mc_mac_addr(struct hns3_hw *hw,
                          struct rte_ether_addr *mac_addr)
{
        char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
        int ret;

        ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MULTICAST,
                                HNS3_MBX_MAC_VLAN_MC_REMOVE,
                                mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN, false,
                                NULL, 0);
        if (ret) {
                hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
                                      mac_addr);
                hns3_err(hw, "Failed to remove mc mac addr(%s) for vf: %d",
                         mac_str, ret);
        }

        return ret;
}

static int
hns3vf_set_promisc_mode(struct hns3_hw *hw, bool en_bc_pmc,
                        bool en_uc_pmc, bool en_mc_pmc)
{
        struct hns3_mbx_vf_to_pf_cmd *req;
        struct hns3_cmd_desc desc;
        int ret;

        req = (struct hns3_mbx_vf_to_pf_cmd *)desc.data;

        /*
         * The hns3 VF PMD depends on the hns3 PF kernel ethdev driver,
         * which leads to the following behavior for promiscuous/allmulticast
         * mode in the hns3 VF PMD:
         * 1. The promiscuous/allmulticast mode can be configured successfully
         *    only on a trusted VF device. On a non-trusted VF device,
         *    configuring promiscuous/allmulticast mode will fail.
         *    The hns3 VF device can be configured as a trusted device by the
         *    hns3 PF kernel ethdev driver on the host with the following
         *    command:
         *      "ip link set <eth num> vf <vf id> trust on"
         * 2. After the promiscuous mode is configured successfully, the hns3
         *    VF PMD can receive ingress and outgoing traffic: all the ingress
         *    packets, and all the packets sent from the PF and the other VFs
         *    on the same physical port.
         * 3. Note: because of hardware constraints, the VLAN filter is
         *    enabled by default and cannot be turned off on a VF device, so
         *    the VLAN filter is still effective even in promiscuous mode. If
         *    upper applications don't call the rte_eth_dev_vlan_filter() API
         *    to add VLANs on the VF device, the hns3 VF PMD cannot receive
         *    VLAN-tagged packets in promiscuous mode.
         */
        hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MBX_VF_TO_PF, false);
        req->msg[0] = HNS3_MBX_SET_PROMISC_MODE;
        req->msg[1] = en_bc_pmc ? 1 : 0;
        req->msg[2] = en_uc_pmc ? 1 : 0;
        req->msg[3] = en_mc_pmc ? 1 : 0;
        req->msg[4] = hw->promisc_mode == HNS3_LIMIT_PROMISC_MODE ? 1 : 0;

        ret = hns3_cmd_send(hw, &desc, 1);
        if (ret)
                hns3_err(hw, "Set promisc mode fail, ret = %d", ret);

        return ret;
}
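
/*
 * Illustrative sketch, not part of this driver and never compiled: since the
 * VLAN filter stays effective on a VF even in promiscuous mode (note 3
 * above), an application that wants tagged traffic must add each VLAN
 * explicitly. The helper name and parameters are hypothetical.
 */
#if 0
#include <rte_ethdev.h>

static int
example_promisc_with_vlan(uint16_t port_id, uint16_t vlan_id)
{
        int ret;

        /* Requires a trusted VF: "ip link set <pf> vf <id> trust on". */
        ret = rte_eth_promiscuous_enable(port_id);
        if (ret != 0)
                return ret;

        /* Without this, tagged packets are still dropped by the VLAN filter. */
        return rte_eth_dev_vlan_filter(port_id, vlan_id, 1);
}
#endif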

static int
hns3vf_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_hw *hw = &hns->hw;
        int ret;

        ret = hns3vf_set_promisc_mode(hw, true, true, true);
        if (ret)
                hns3_err(hw, "Failed to enable promiscuous mode, ret = %d",
                        ret);
        return ret;
}

static int
hns3vf_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
        bool allmulti = dev->data->all_multicast ? true : false;
        struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_hw *hw = &hns->hw;
        int ret;

        ret = hns3vf_set_promisc_mode(hw, true, false, allmulti);
        if (ret)
                hns3_err(hw, "Failed to disable promiscuous mode, ret = %d",
                        ret);
        return ret;
}

static int
hns3vf_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_hw *hw = &hns->hw;
        int ret;

        if (dev->data->promiscuous)
                return 0;

        ret = hns3vf_set_promisc_mode(hw, true, false, true);
        if (ret)
                hns3_err(hw, "Failed to enable allmulticast mode, ret = %d",
                        ret);
        return ret;
}

static int
hns3vf_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_hw *hw = &hns->hw;
        int ret;

        if (dev->data->promiscuous)
                return 0;

        ret = hns3vf_set_promisc_mode(hw, true, false, false);
        if (ret)
                hns3_err(hw, "Failed to disable allmulticast mode, ret = %d",
                        ret);
        return ret;
}

static int
hns3vf_restore_promisc(struct hns3_adapter *hns)
{
        struct hns3_hw *hw = &hns->hw;
        bool allmulti = hw->data->all_multicast ? true : false;

        if (hw->data->promiscuous)
                return hns3vf_set_promisc_mode(hw, true, true, true);

        return hns3vf_set_promisc_mode(hw, true, false, allmulti);
}

static int
hns3vf_bind_ring_with_vector(struct hns3_hw *hw, uint16_t vector_id,
                             bool mmap, enum hns3_ring_type queue_type,
                             uint16_t queue_id)
{
        struct hns3_vf_bind_vector_msg bind_msg;
        const char *op_str;
        uint16_t code;
        int ret;

        memset(&bind_msg, 0, sizeof(bind_msg));
        code = mmap ? HNS3_MBX_MAP_RING_TO_VECTOR :
                HNS3_MBX_UNMAP_RING_TO_VECTOR;
        bind_msg.vector_id = (uint8_t)vector_id;

        if (queue_type == HNS3_RING_TYPE_RX)
                bind_msg.param[0].int_gl_index = HNS3_RING_GL_RX;
        else
                bind_msg.param[0].int_gl_index = HNS3_RING_GL_TX;

        bind_msg.param[0].ring_type = queue_type;
        bind_msg.ring_num = 1;
        bind_msg.param[0].tqp_index = queue_id;
        op_str = mmap ? "Map" : "Unmap";
        ret = hns3_send_mbx_msg(hw, code, 0, (uint8_t *)&bind_msg,
                                sizeof(bind_msg), false, NULL, 0);
        if (ret)
                hns3_err(hw, "%s TQP %u fail, vector_id is %u, ret is %d.",
                         op_str, queue_id, bind_msg.vector_id, ret);

        return ret;
}

static int
hns3vf_dev_configure(struct rte_eth_dev *dev)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_hw *hw = &hns->hw;
        struct rte_eth_conf *conf = &dev->data->dev_conf;
        enum rte_eth_rx_mq_mode mq_mode = conf->rxmode.mq_mode;
        uint16_t nb_rx_q = dev->data->nb_rx_queues;
        uint16_t nb_tx_q = dev->data->nb_tx_queues;
        struct rte_eth_rss_conf rss_conf;
        bool gro_en;
        int ret;

        hw->cfg_max_queues = RTE_MAX(nb_rx_q, nb_tx_q);

        /*
         * Some versions of the hardware network engine do not support
         * individually enabling/disabling/resetting the Tx or Rx queue. These
         * devices must enable/disable/reset Tx and Rx queues at the same
         * time. When the numbers of Tx queues allocated by upper applications
         * are not equal to the numbers of Rx queues, the driver needs to set
         * up fake Tx or Rx queues to adjust the numbers of Tx/Rx queues;
         * otherwise, the network engine cannot work as usual. These fake
         * queues are imperceptible to, and cannot be used by, upper
         * applications.
         */
        ret = hns3_set_fake_rx_or_tx_queues(dev, nb_rx_q, nb_tx_q);
        if (ret) {
                hns3_err(hw, "fail to set Rx/Tx fake queues, ret = %d.", ret);
                hw->cfg_max_queues = 0;
                return ret;
        }

        hw->adapter_state = HNS3_NIC_CONFIGURING;
        if (conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
                hns3_err(hw, "setting link speed/duplex not supported");
                ret = -EINVAL;
                goto cfg_err;
        }

        /* When RSS is not configured, packets are directed to queue 0. */
        if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
                conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
                hw->rss_dis_flag = false;
                rss_conf = conf->rx_adv_conf.rss_conf;
                ret = hns3_dev_rss_hash_update(dev, &rss_conf);
                if (ret)
                        goto cfg_err;
        }

        ret = hns3vf_dev_mtu_set(dev, conf->rxmode.mtu);
        if (ret != 0)
                goto cfg_err;

        ret = hns3vf_dev_configure_vlan(dev);
        if (ret)
                goto cfg_err;

        /* config hardware GRO */
        gro_en = conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO ? true : false;
        ret = hns3_config_gro(hw, gro_en);
        if (ret)
                goto cfg_err;

        hns3_init_rx_ptype_tble(dev);

        hw->adapter_state = HNS3_NIC_CONFIGURED;
        return 0;

cfg_err:
        hw->cfg_max_queues = 0;
        (void)hns3_set_fake_rx_or_tx_queues(dev, 0, 0);
        hw->adapter_state = HNS3_NIC_INITIALIZED;

        return ret;
}
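
/*
 * Illustrative sketch, not part of this driver and never compiled: an
 * application-side configuration that exercises the RSS branch above.
 * Unequal Rx/Tx queue counts are allowed; the driver pads the difference
 * with fake queues. The helper name and parameters are hypothetical.
 */
#if 0
#include <rte_ethdev.h>

static int
example_configure_rss(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
        struct rte_eth_conf conf = {
                .rxmode = { .mq_mode = RTE_ETH_MQ_RX_RSS },
                .rx_adv_conf.rss_conf.rss_hf = RTE_ETH_RSS_IP,
        };

        /* Reaches hns3vf_dev_configure() through the ethdev layer. */
        return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
}
#endif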

static int
hns3vf_config_mtu(struct hns3_hw *hw, uint16_t mtu)
{
        int ret;

        ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MTU, 0, (const uint8_t *)&mtu,
                                sizeof(mtu), true, NULL, 0);
        if (ret)
                hns3_err(hw, "Failed to set mtu (%u) for vf: %d", mtu, ret);

        return ret;
}
static int
hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t frame_size = mtu + HNS3_ETH_OVERHEAD;
        int ret;

        /*
         * The hns3 PF/VF devices on the same port share the hardware MTU
         * configuration. Currently, we send a mailbox message to the hns3 PF
         * kernel ethdev driver to finish the hardware MTU configuration for
         * the hns3 VF PMD, so there is no need to stop the port for the hns3
         * VF device. The MTU value issued by the hns3 VF PMD must be less
         * than or equal to the PF's MTU.
         */
        if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
                hns3_err(hw, "Failed to set mtu during resetting");
                return -EIO;
        }

        /*
         * When Rx of scattered packets is off, the hns3 PMD may use the
         * vector Rx process function or the simple Rx function. If the input
         * MTU is increased so that the maximum length of received packets is
         * greater than the length of one Rx buffer, the hardware network
         * engine needs multiple BDs and buffers to store such packets, which
         * breaks the vector and simple Rx functions. So, while Rx of
         * scattered packets is off and the device is started, it is not
         * permitted to increase the MTU beyond the Rx buffer length.
         */
        if (dev->data->dev_started && !dev->data->scattered_rx &&
            frame_size > hw->rx_buf_len) {
                hns3_err(hw, "failed to set mtu: device is started with "
                        "scattered Rx off and frame size exceeds Rx buffer length");
                return -EOPNOTSUPP;
        }

        rte_spinlock_lock(&hw->lock);
        ret = hns3vf_config_mtu(hw, mtu);
        if (ret) {
                rte_spinlock_unlock(&hw->lock);
                return ret;
        }
        rte_spinlock_unlock(&hw->lock);

        return 0;
}
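
/*
 * Illustrative sketch, not part of this driver and never compiled: the
 * ordering constraint the check above imposes on applications. The helper
 * name and parameters are hypothetical; only the public ethdev API is used.
 */
#if 0
#include <rte_ethdev.h>

static int
example_grow_mtu(uint16_t port_id, uint16_t new_mtu)
{
        /*
         * Call before rte_eth_dev_start(), or run with scattered Rx
         * enabled; otherwise an MTU whose frame no longer fits in one Rx
         * buffer is rejected with -EOPNOTSUPP. The value must also not
         * exceed the PF's MTU.
         */
        return rte_eth_dev_set_mtu(port_id, new_mtu);
}
#endif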

static void
hns3vf_clear_event_cause(struct hns3_hw *hw, uint32_t regclr)
{
        hns3_write_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG, regclr);
}

static void
hns3vf_disable_irq0(struct hns3_hw *hw)
{
        hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 0);
}

static void
hns3vf_enable_irq0(struct hns3_hw *hw)
{
        hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 1);
}

static enum hns3vf_evt_cause
hns3vf_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval)
{
        struct hns3_hw *hw = &hns->hw;
        enum hns3vf_evt_cause ret;
        uint32_t cmdq_stat_reg;
        uint32_t rst_ing_reg;
        uint32_t val;

        /* Fetch the events from their corresponding regs */
        cmdq_stat_reg = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_STAT_REG);
        if (BIT(HNS3_VECTOR0_RST_INT_B) & cmdq_stat_reg) {
                rst_ing_reg = hns3_read_dev(hw, HNS3_FUN_RST_ING);
                hns3_warn(hw, "resetting reg: 0x%x", rst_ing_reg);
                hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
                __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
                val = hns3_read_dev(hw, HNS3_VF_RST_ING);
                hns3_write_dev(hw, HNS3_VF_RST_ING, val | HNS3_VF_RST_ING_BIT);
                val = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RST_INT_B);
                if (clearval) {
                        hw->reset.stats.global_cnt++;
                        hns3_warn(hw, "Global reset detected, clear reset status");
                } else {
                        hns3_schedule_delayed_reset(hns);
                        hns3_warn(hw, "Global reset detected, don't clear reset status");
                }

                ret = HNS3VF_VECTOR0_EVENT_RST;
                goto out;
        }

        /* Check for vector0 mailbox(=CMDQ RX) event source */
        if (BIT(HNS3_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) {
                val = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RX_CMDQ_INT_B);
                ret = HNS3VF_VECTOR0_EVENT_MBX;
                goto out;
        }

        val = 0;
        ret = HNS3VF_VECTOR0_EVENT_OTHER;
out:
        if (clearval)
                *clearval = val;
        return ret;
}

static void
hns3vf_interrupt_handler(void *param)
{
        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
        struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_hw *hw = &hns->hw;
        enum hns3vf_evt_cause event_cause;
        uint32_t clearval;

        /* Disable interrupt */
        hns3vf_disable_irq0(hw);

        /* Read out interrupt causes */
        event_cause = hns3vf_check_event_cause(hns, &clearval);
        /* Clear interrupt causes */
        hns3vf_clear_event_cause(hw, clearval);

        switch (event_cause) {
        case HNS3VF_VECTOR0_EVENT_RST:
                hns3_schedule_reset(hns);
                break;
        case HNS3VF_VECTOR0_EVENT_MBX:
                hns3_dev_handle_mbx_msg(hw);
                break;
        default:
                break;
        }

        /* Enable interrupt */
        hns3vf_enable_irq0(hw);
}

static void
hns3vf_set_default_dev_specifications(struct hns3_hw *hw)
{
        hw->max_non_tso_bd_num = HNS3_MAX_NON_TSO_BD_PER_PKT;
        hw->rss_ind_tbl_size = HNS3_RSS_IND_TBL_SIZE;
        hw->rss_key_size = HNS3_RSS_KEY_SIZE;
        hw->intr.int_ql_max = HNS3_INTR_QL_NONE;
}

static void
hns3vf_parse_dev_specifications(struct hns3_hw *hw, struct hns3_cmd_desc *desc)
{
        struct hns3_dev_specs_0_cmd *req0;

        req0 = (struct hns3_dev_specs_0_cmd *)desc[0].data;

        hw->max_non_tso_bd_num = req0->max_non_tso_bd_num;
        hw->rss_ind_tbl_size = rte_le_to_cpu_16(req0->rss_ind_tbl_size);
        hw->rss_key_size = rte_le_to_cpu_16(req0->rss_key_size);
        hw->intr.int_ql_max = rte_le_to_cpu_16(req0->intr_ql_max);
}
static int
hns3vf_check_dev_specifications(struct hns3_hw *hw)
{
        if (hw->rss_ind_tbl_size == 0 ||
            hw->rss_ind_tbl_size > HNS3_RSS_IND_TBL_SIZE_MAX) {
                hns3_warn(hw, "the size of the configured hash lookup table "
                              "(%u) is invalid: it must be non-zero and no "
                              "more than the maximum (%u)",
                              hw->rss_ind_tbl_size,
                              HNS3_RSS_IND_TBL_SIZE_MAX);
                return -EINVAL;
        }

        return 0;
}

static int
hns3vf_query_dev_specifications(struct hns3_hw *hw)
{
        struct hns3_cmd_desc desc[HNS3_QUERY_DEV_SPECS_BD_NUM];
        int ret;
        int i;

        for (i = 0; i < HNS3_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
                hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS,
                                          true);
                desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
        }
        hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS, true);

        ret = hns3_cmd_send(hw, desc, HNS3_QUERY_DEV_SPECS_BD_NUM);
        if (ret)
                return ret;

        hns3vf_parse_dev_specifications(hw, desc);

        return hns3vf_check_dev_specifications(hw);
}

void
hns3vf_update_push_lsc_cap(struct hns3_hw *hw, bool supported)
{
        uint16_t val = supported ? HNS3_PF_PUSH_LSC_CAP_SUPPORTED :
                                   HNS3_PF_PUSH_LSC_CAP_NOT_SUPPORTED;
        uint16_t exp = HNS3_PF_PUSH_LSC_CAP_UNKNOWN;
        struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);

        if (vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_UNKNOWN)
                __atomic_compare_exchange(&vf->pf_push_lsc_cap, &exp, &val, 0,
                                          __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
}

static void
hns3vf_get_push_lsc_cap(struct hns3_hw *hw)
{
#define HNS3_CHECK_PUSH_LSC_CAP_TIMEOUT_MS      500

        struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
        int32_t remain_ms = HNS3_CHECK_PUSH_LSC_CAP_TIMEOUT_MS;
        uint16_t val = HNS3_PF_PUSH_LSC_CAP_NOT_SUPPORTED;
        uint16_t exp = HNS3_PF_PUSH_LSC_CAP_UNKNOWN;
        struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);

        __atomic_store_n(&vf->pf_push_lsc_cap, HNS3_PF_PUSH_LSC_CAP_UNKNOWN,
                         __ATOMIC_RELEASE);

        (void)hns3_send_mbx_msg(hw, HNS3_MBX_GET_LINK_STATUS, 0, NULL, 0, false,
                                NULL, 0);

        while (remain_ms > 0) {
                rte_delay_ms(HNS3_POLL_RESPONE_MS);
                if (__atomic_load_n(&vf->pf_push_lsc_cap, __ATOMIC_ACQUIRE) !=
                        HNS3_PF_PUSH_LSC_CAP_UNKNOWN)
                        break;
                remain_ms--;
        }

        /*
         * On exit from the above loop, pf_push_lsc_cap can be in one of
         * three states: unknown (the PF has not acked), not_supported, or
         * supported. Configure it as 'not_supported' when it is still in
         * the 'unknown' state.
         */
        __atomic_compare_exchange(&vf->pf_push_lsc_cap, &exp, &val, 0,
                                  __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);

        if (__atomic_load_n(&vf->pf_push_lsc_cap, __ATOMIC_ACQUIRE) ==
                HNS3_PF_PUSH_LSC_CAP_SUPPORTED) {
                hns3_info(hw, "detected that the PF supports pushing link status changes!");
        } else {
                /*
                 * The framework already set the RTE_ETH_DEV_INTR_LSC bit
                 * because the driver declared RTE_PCI_DRV_INTR_LSC in
                 * drv_flags, so clear the RTE_ETH_DEV_INTR_LSC capability
                 * here.
                 */
                dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
        }
}
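
/*
 * Illustrative sketch, not part of this driver and never compiled: when
 * RTE_ETH_DEV_INTR_LSC survives the capability check above, an application
 * can subscribe to the pushed link status changes instead of polling.
 * The callback name is hypothetical; only the public ethdev API is used.
 */
#if 0
#include <stdio.h>
#include <rte_common.h>
#include <rte_ethdev.h>

static int
example_lsc_callback(uint16_t port_id, enum rte_eth_event_type event,
                     void *cb_arg, void *ret_param)
{
        struct rte_eth_link link;

        RTE_SET_USED(event);
        RTE_SET_USED(cb_arg);
        RTE_SET_USED(ret_param);

        if (rte_eth_link_get_nowait(port_id, &link) == 0)
                printf("port %u link is %s\n", port_id,
                       link.link_status ? "up" : "down");

        return 0;
}

/*
 * Registration, e.g. at init time:
 *   rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *                                 example_lsc_callback, NULL);
 */
#endif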

static int
hns3vf_get_capability(struct hns3_hw *hw)
{
        struct rte_pci_device *pci_dev;
        struct rte_eth_dev *eth_dev;
        uint8_t revision;
        int ret;

        eth_dev = &rte_eth_devices[hw->data->port_id];
        pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

        /* Get PCI revision id */
        ret = rte_pci_read_config(pci_dev, &revision, HNS3_PCI_REVISION_ID_LEN,
                                  HNS3_PCI_REVISION_ID);
        if (ret != HNS3_PCI_REVISION_ID_LEN) {
                PMD_INIT_LOG(ERR, "failed to read pci revision id, ret = %d",
                             ret);
                return -EIO;
        }
        hw->revision = revision;

        if (revision < PCI_REVISION_ID_HIP09_A) {
                hns3vf_set_default_dev_specifications(hw);
                hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_RSV_ONE;
                hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_2US;
                hw->tso_mode = HNS3_TSO_SW_CAL_PSEUDO_H_CSUM;
                hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE1;
                hw->min_tx_pkt_len = HNS3_HIP08_MIN_TX_PKT_LEN;
                hw->rss_info.ipv6_sctp_offload_supported = false;
                hw->promisc_mode = HNS3_UNLIMIT_PROMISC_MODE;
                return 0;
        }

        ret = hns3vf_query_dev_specifications(hw);
        if (ret) {
                PMD_INIT_LOG(ERR,
                             "failed to query dev specifications, ret = %d",
                             ret);
                return ret;
        }

        hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_ALL;
        hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_1US;
        hw->tso_mode = HNS3_TSO_HW_CAL_PSEUDO_H_CSUM;
        hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE2;
        hw->min_tx_pkt_len = HNS3_HIP09_MIN_TX_PKT_LEN;
        hw->rss_info.ipv6_sctp_offload_supported = true;
        hw->promisc_mode = HNS3_LIMIT_PROMISC_MODE;

        return 0;
}

static int
hns3vf_check_tqp_info(struct hns3_hw *hw)
{
        if (hw->tqps_num == 0) {
                PMD_INIT_LOG(ERR, "Get invalid tqps_num(0) from PF.");
                return -EINVAL;
        }

        if (hw->rss_size_max == 0) {
                PMD_INIT_LOG(ERR, "Get invalid rss_size_max(0) from PF.");
                return -EINVAL;
        }

        hw->tqps_num = RTE_MIN(hw->rss_size_max, hw->tqps_num);

        return 0;
}
static int
hns3vf_get_port_base_vlan_filter_state(struct hns3_hw *hw)
{
        uint8_t resp_msg;
        int ret;

        ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN,
                                HNS3_MBX_GET_PORT_BASE_VLAN_STATE, NULL, 0,
                                true, &resp_msg, sizeof(resp_msg));
        if (ret) {
                if (ret == -ETIME) {
                        /*
                         * Getting the current port-based VLAN state from the
                         * PF driver does not affect the VF driver's basic
                         * function. Because the VF driver relies on the hns3
                         * PF kernel ethdev driver, and to avoid introducing
                         * compatibility issues with older versions of the PF
                         * driver, no failure is returned when the return
                         * value is -ETIME. This return value covers the
                         * following scenarios:
                         * 1) the firmware didn't return the result in time
                         * 2) the result returned by the firmware timed out
                         * 3) an older version of the kernel-side PF driver
                         *    does not support this mailbox message.
                         * For scenarios 1 and 2, it is most likely that a
                         * hardware error or a hardware reset has occurred;
                         * in that case, these errors will be caught by other
                         * functions.
                         */
                        PMD_INIT_LOG(WARNING,
                                "failed to get PVID state for timeout, maybe "
                                "kernel side PF driver doesn't support this "
                                "mailbox message, or firmware didn't respond.");
                        resp_msg = HNS3_PORT_BASE_VLAN_DISABLE;
                } else {
                        PMD_INIT_LOG(ERR, "failed to get port based VLAN state,"
                                " ret = %d", ret);
                        return ret;
                }
        }
        hw->port_base_vlan_cfg.state = resp_msg ?
                HNS3_PORT_BASE_VLAN_ENABLE : HNS3_PORT_BASE_VLAN_DISABLE;
        return 0;
}

static int
hns3vf_get_queue_info(struct hns3_hw *hw)
{
#define HNS3VF_TQPS_RSS_INFO_LEN        6
        uint8_t resp_msg[HNS3VF_TQPS_RSS_INFO_LEN];
        int ret;

        ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_QINFO, 0, NULL, 0, true,
                                resp_msg, HNS3VF_TQPS_RSS_INFO_LEN);
        if (ret) {
                PMD_INIT_LOG(ERR, "Failed to get tqp info from PF: %d", ret);
                return ret;
        }

        memcpy(&hw->tqps_num, &resp_msg[0], sizeof(uint16_t));
        memcpy(&hw->rss_size_max, &resp_msg[2], sizeof(uint16_t));

        return hns3vf_check_tqp_info(hw);
}

static int
hns3vf_get_queue_depth(struct hns3_hw *hw)
{
#define HNS3VF_TQPS_DEPTH_INFO_LEN      4
        uint8_t resp_msg[HNS3VF_TQPS_DEPTH_INFO_LEN];
        int ret;

        ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_QDEPTH, 0, NULL, 0, true,
                                resp_msg, HNS3VF_TQPS_DEPTH_INFO_LEN);
        if (ret) {
                PMD_INIT_LOG(ERR, "Failed to get tqp depth info from PF: %d",
                             ret);
                return ret;
        }

        memcpy(&hw->num_tx_desc, &resp_msg[0], sizeof(uint16_t));
        memcpy(&hw->num_rx_desc, &resp_msg[2], sizeof(uint16_t));

        return 0;
}

static void
hns3vf_update_caps(struct hns3_hw *hw, uint32_t caps)
{
        if (hns3_get_bit(caps, HNS3VF_CAPS_VLAN_FLT_MOD_B))
                hns3_set_bit(hw->capability,
                                HNS3_DEV_SUPPORT_VF_VLAN_FLT_MOD_B, 1);
}

static int
hns3vf_get_num_tc(struct hns3_hw *hw)
{
        uint8_t num_tc = 0;
        uint32_t i;

        for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
                if (hw->hw_tc_map & BIT(i))
                        num_tc++;
        }
        return num_tc;
}

static int
hns3vf_get_basic_info(struct hns3_hw *hw)
{
        uint8_t resp_msg[HNS3_MBX_MAX_RESP_DATA_SIZE];
        struct hns3_basic_info *basic_info;
        int ret;

        ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_BASIC_INFO, 0, NULL, 0,
                                true, resp_msg, sizeof(resp_msg));
        if (ret) {
                hns3_err(hw, "failed to get basic info from PF, ret = %d.",
                                ret);
                return ret;
        }

        basic_info = (struct hns3_basic_info *)resp_msg;
        hw->hw_tc_map = basic_info->hw_tc_map;
        hw->num_tc = hns3vf_get_num_tc(hw);
        hw->pf_vf_if_version = basic_info->pf_vf_if_version;
        hns3vf_update_caps(hw, basic_info->caps);

        return 0;
}

static int
hns3vf_get_host_mac_addr(struct hns3_hw *hw)
{
        uint8_t host_mac[RTE_ETHER_ADDR_LEN];
        int ret;

        ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_MAC_ADDR, 0, NULL, 0,
                                true, host_mac, RTE_ETHER_ADDR_LEN);
        if (ret) {
                hns3_err(hw, "Failed to get mac addr from PF: %d", ret);
                return ret;
        }

        memcpy(hw->mac.mac_addr, host_mac, RTE_ETHER_ADDR_LEN);

        return 0;
}

static int
hns3vf_get_configuration(struct hns3_hw *hw)
{
        int ret;

        hw->mac.media_type = HNS3_MEDIA_TYPE_NONE;
        hw->rss_dis_flag = false;

        /* Get device capability */
        ret = hns3vf_get_capability(hw);
        if (ret) {
                PMD_INIT_LOG(ERR, "failed to get device capability: %d.", ret);
                return ret;
        }

        hns3vf_get_push_lsc_cap(hw);

        /* Get basic info from PF */
        ret = hns3vf_get_basic_info(hw);
        if (ret)
                return ret;

        /* Get queue configuration from PF */
        ret = hns3vf_get_queue_info(hw);
        if (ret)
                return ret;

        /* Get queue depth info from PF */
        ret = hns3vf_get_queue_depth(hw);
        if (ret)
                return ret;

        /* Get user defined VF MAC addr from PF */
        ret = hns3vf_get_host_mac_addr(hw);
        if (ret)
                return ret;

        return hns3vf_get_port_base_vlan_filter_state(hw);
}

static int
hns3vf_set_tc_queue_mapping(struct hns3_adapter *hns, uint16_t nb_rx_q,
                            uint16_t nb_tx_q)
{
        struct hns3_hw *hw = &hns->hw;

        return hns3_queue_to_tc_mapping(hw, nb_rx_q, nb_tx_q);
}

static void
hns3vf_request_link_info(struct hns3_hw *hw)
{
        struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);
        bool send_req;
        int ret;

        if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
                return;

        send_req = vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_NOT_SUPPORTED ||
                   vf->req_link_info_cnt > 0;
        if (!send_req)
                return;

        ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_LINK_STATUS, 0, NULL, 0, false,
                                NULL, 0);
        if (ret) {
                hns3_err(hw, "failed to fetch link status, ret = %d", ret);
                return;
        }

        if (vf->req_link_info_cnt > 0)
                vf->req_link_info_cnt--;
}

void
hns3vf_update_link_status(struct hns3_hw *hw, uint8_t link_status,
                          uint32_t link_speed, uint8_t link_duplex)
{
        struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
        struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);
        struct hns3_mac *mac = &hw->mac;
        int ret;

        /*
         * The PF kernel driver may push the link status while the VF driver
         * is resetting; the driver stops the polling job in that case and
         * starts it again after the reset is done. When the polling job is
         * started, the driver gets the initial link status by sending a
         * request to the PF kernel driver, and can then update the link
         * status by processing the PF kernel driver's link status mailbox
         * messages.
         */
        if (!__atomic_load_n(&vf->poll_job_started, __ATOMIC_RELAXED))
                return;

        if (hw->adapter_state != HNS3_NIC_STARTED)
                return;

        mac->link_status = link_status;
        mac->link_speed = link_speed;
        mac->link_duplex = link_duplex;
        ret = hns3vf_dev_link_update(dev, 0);
        if (ret == 0 && dev->data->dev_conf.intr_conf.lsc != 0)
                hns3_start_report_lse(dev);
}

static int
hns3vf_vlan_filter_configure(struct hns3_adapter *hns, uint16_t vlan_id, int on)
{
#define HNS3VF_VLAN_MBX_MSG_LEN 5
        struct hns3_hw *hw = &hns->hw;
        uint8_t msg_data[HNS3VF_VLAN_MBX_MSG_LEN];
        uint16_t proto = htons(RTE_ETHER_TYPE_VLAN);
        uint8_t is_kill = on ? 0 : 1;

        msg_data[0] = is_kill;
        memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
        memcpy(&msg_data[3], &proto, sizeof(proto));

        return hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN, HNS3_MBX_VLAN_FILTER,
                                 msg_data, HNS3VF_VLAN_MBX_MSG_LEN, true, NULL,
                                 0);
}

static int
hns3vf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_hw *hw = &hns->hw;
        int ret;

        if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
                hns3_err(hw,
                         "vf set vlan id failed during resetting, vlan_id =%u",
                         vlan_id);
                return -EIO;
        }
        rte_spinlock_lock(&hw->lock);
        ret = hns3vf_vlan_filter_configure(hns, vlan_id, on);
        rte_spinlock_unlock(&hw->lock);
        if (ret)
                hns3_err(hw, "vf set vlan id failed, vlan_id =%u, ret =%d",
                         vlan_id, ret);

        return ret;
}

static int
hns3vf_en_vlan_filter(struct hns3_hw *hw, bool enable)
{
        uint8_t msg_data;
        int ret;

        if (!hns3_dev_get_support(hw, VF_VLAN_FLT_MOD))
                return 0;

        msg_data = enable ? 1 : 0;
        ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN,
                        HNS3_MBX_ENABLE_VLAN_FILTER, &msg_data,
                        sizeof(msg_data), true, NULL, 0);
        if (ret)
                hns3_err(hw, "%s vlan filter failed, ret = %d.",
                                enable ? "enable" : "disable", ret);

        return ret;
}

static int
hns3vf_en_hw_strip_rxvtag(struct hns3_hw *hw, bool enable)
{
        uint8_t msg_data;
        int ret;

        msg_data = enable ? 1 : 0;
        ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN, HNS3_MBX_VLAN_RX_OFF_CFG,
                                &msg_data, sizeof(msg_data), false, NULL, 0);
        if (ret)
                hns3_err(hw, "vf %s strip failed, ret = %d.",
                                enable ? "enable" : "disable", ret);

        return ret;
}

static int
hns3vf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
        unsigned int tmp_mask;
        int ret = 0;

        if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
                hns3_err(hw, "vf set vlan offload failed during resetting, "
                             "mask = 0x%x", mask);
                return -EIO;
        }

        tmp_mask = (unsigned int)mask;

        if (tmp_mask & RTE_ETH_VLAN_FILTER_MASK) {
                rte_spinlock_lock(&hw->lock);
                /* Enable or disable VLAN filter */
                if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
                        ret = hns3vf_en_vlan_filter(hw, true);
                else
                        ret = hns3vf_en_vlan_filter(hw, false);
                rte_spinlock_unlock(&hw->lock);
                if (ret)
                        return ret;
        }

        /* Vlan stripping setting */
        if (tmp_mask & RTE_ETH_VLAN_STRIP_MASK) {
                rte_spinlock_lock(&hw->lock);
                /* Enable or disable VLAN stripping */
                if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
                        ret = hns3vf_en_hw_strip_rxvtag(hw, true);
                else
                        ret = hns3vf_en_hw_strip_rxvtag(hw, false);
                rte_spinlock_unlock(&hw->lock);
        }

        return ret;
}
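
/*
 * Illustrative sketch, not part of this driver and never compiled: the
 * application-side entry point for the two branches above. The ethdev layer
 * computes the changed-offload mask and calls hns3vf_vlan_offload_set().
 * The helper name is hypothetical.
 */
#if 0
#include <rte_ethdev.h>

static int
example_enable_vlan_offloads(uint16_t port_id)
{
        /* Both settings are forwarded to the PF over the mailbox. */
        return rte_eth_dev_set_vlan_offload(port_id,
                        RTE_ETH_VLAN_STRIP_OFFLOAD |
                        RTE_ETH_VLAN_FILTER_OFFLOAD);
}
#endif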

static int
hns3vf_handle_all_vlan_table(struct hns3_adapter *hns, int on)
{
        struct rte_vlan_filter_conf *vfc;
        struct hns3_hw *hw = &hns->hw;
        uint16_t vlan_id;
        uint64_t vbit;
        uint64_t ids;
        int ret = 0;
        uint32_t i;

        vfc = &hw->data->vlan_filter_conf;
        for (i = 0; i < RTE_DIM(vfc->ids); i++) {
                if (vfc->ids[i] == 0)
                        continue;
                ids = vfc->ids[i];
                while (ids) {
                        /*
                         * 64 is the number of bits in one ids word; each bit
                         * corresponds to one VLAN id.
                         */
                        vlan_id = 64 * i;
                        /* mask of the bits below the least significant set bit */
                        vbit = ~ids & (ids - 1);
                        /* clear the least significant set bit */
                        ids ^= (ids ^ (ids - 1)) ^ vbit;
                        /* count the trailing zeroes to get the VLAN id */
                        for (; vbit;) {
                                vbit >>= 1;
                                vlan_id++;
                        }
                        ret = hns3vf_vlan_filter_configure(hns, vlan_id, on);
                        if (ret) {
                                hns3_err(hw,
                                         "VF handle vlan table failed, ret =%d, on = %d",
                                         ret, on);
                                return ret;
                        }
                }
        }

        return ret;
}
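
/*
 * Illustrative sketch, not part of this driver and never compiled: an
 * equivalent, more direct walk of one vfc->ids[] word using a
 * count-trailing-zeros builtin in place of the mask arithmetic above.
 */
#if 0
#include <stdint.h>

static void
example_walk_vlan_word(uint64_t ids, uint32_t word_idx)
{
        while (ids != 0) {
                uint16_t vlan_id = word_idx * 64 + __builtin_ctzll(ids);

                /* ... hns3vf_vlan_filter_configure(hns, vlan_id, on) ... */
                ids &= ids - 1; /* clear the lowest set bit */
        }
}
#endif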

static int
hns3vf_remove_all_vlan_table(struct hns3_adapter *hns)
{
        return hns3vf_handle_all_vlan_table(hns, 0);
}

static int
hns3vf_restore_vlan_conf(struct hns3_adapter *hns)
{
        struct hns3_hw *hw = &hns->hw;
        struct rte_eth_conf *dev_conf;
        bool en;
        int ret;

        dev_conf = &hw->data->dev_conf;
        en = dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP ? true
                                                                   : false;
        ret = hns3vf_en_hw_strip_rxvtag(hw, en);
        if (ret)
                hns3_err(hw, "VF restore vlan conf fail, en =%d, ret =%d", en,
                         ret);
        return ret;
}

static int
hns3vf_dev_configure_vlan(struct rte_eth_dev *dev)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        struct rte_eth_dev_data *data = dev->data;
        struct hns3_hw *hw = &hns->hw;
        int ret;

        if (data->dev_conf.txmode.hw_vlan_reject_tagged ||
            data->dev_conf.txmode.hw_vlan_reject_untagged ||
            data->dev_conf.txmode.hw_vlan_insert_pvid) {
                hns3_warn(hw, "hw_vlan_reject_tagged, hw_vlan_reject_untagged "
                              "and hw_vlan_insert_pvid are not supported!");
        }

        /* Apply vlan offload setting */
        ret = hns3vf_vlan_offload_set(dev, RTE_ETH_VLAN_STRIP_MASK |
                                        RTE_ETH_VLAN_FILTER_MASK);
        if (ret)
                hns3_err(hw, "dev config vlan offload failed, ret = %d.", ret);

        return ret;
}

static int
hns3vf_set_alive(struct hns3_hw *hw, bool alive)
{
        uint8_t msg_data;

        msg_data = alive ? 1 : 0;
        return hns3_send_mbx_msg(hw, HNS3_MBX_SET_ALIVE, 0, &msg_data,
                                 sizeof(msg_data), false, NULL, 0);
}

static void
hns3vf_keep_alive_handler(void *param)
{
        struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
        struct hns3_adapter *hns = eth_dev->data->dev_private;
        struct hns3_hw *hw = &hns->hw;
        int ret;

        ret = hns3_send_mbx_msg(hw, HNS3_MBX_KEEP_ALIVE, 0, NULL, 0,
                                false, NULL, 0);
        if (ret)
                hns3_err(hw, "VF failed to send keep alive cmd, ret = %d",
                         ret);

        rte_eal_alarm_set(HNS3VF_KEEP_ALIVE_INTERVAL, hns3vf_keep_alive_handler,
                          eth_dev);
}

static void
hns3vf_service_handler(void *param)
{
        struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
        struct hns3_adapter *hns = eth_dev->data->dev_private;
        struct hns3_hw *hw = &hns->hw;

        /*
         * The query link status and reset processing are executed in the
         * interrupt thread. When the IMP reset occurs, IMP will not respond,
         * and the query operation will timeout after 30ms. In the case of
         * multiple PF/VFs, each query failure timeout causes the IMP reset
         * interrupt to fail to respond within 100ms.
         * Before querying the link status, check whether there is a reset
         * pending, and if so, abandon the query.
         */
        if (!hns3vf_is_reset_pending(hns))
                hns3vf_request_link_info(hw);
        else
                hns3_warn(hw, "Cancel the query when reset is pending");

        rte_eal_alarm_set(HNS3VF_SERVICE_INTERVAL, hns3vf_service_handler,
                          eth_dev);
}

static void
hns3vf_start_poll_job(struct rte_eth_dev *dev)
{
#define HNS3_REQUEST_LINK_INFO_REMAINS_CNT      3

        struct hns3_vf *vf = HNS3_DEV_PRIVATE_TO_VF(dev->data->dev_private);

        if (vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_SUPPORTED)
                vf->req_link_info_cnt = HNS3_REQUEST_LINK_INFO_REMAINS_CNT;

        __atomic_store_n(&vf->poll_job_started, 1, __ATOMIC_RELAXED);

        hns3vf_service_handler(dev);
}

static void
hns3vf_stop_poll_job(struct rte_eth_dev *dev)
{
        struct hns3_vf *vf = HNS3_DEV_PRIVATE_TO_VF(dev->data->dev_private);

        rte_eal_alarm_cancel(hns3vf_service_handler, dev);

        __atomic_store_n(&vf->poll_job_started, 0, __ATOMIC_RELAXED);
}

static int
hns3_query_vf_resource(struct hns3_hw *hw)
{
        struct hns3_vf_res_cmd *req;
        struct hns3_cmd_desc desc;
        uint16_t num_msi;
        int ret;

        hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_VF_RSRC, true);
        ret = hns3_cmd_send(hw, &desc, 1);
        if (ret) {
                hns3_err(hw, "query vf resource failed, ret = %d", ret);
                return ret;
        }

        req = (struct hns3_vf_res_cmd *)desc.data;
        num_msi = hns3_get_field(rte_le_to_cpu_16(req->vf_intr_vector_number),
                                 HNS3_VF_VEC_NUM_M, HNS3_VF_VEC_NUM_S);
        if (num_msi < HNS3_MIN_VECTOR_NUM) {
                hns3_err(hw, "Just %u msi resources, not enough for vf(min:%d)",
                         num_msi, HNS3_MIN_VECTOR_NUM);
                return -EINVAL;
        }

        hw->num_msi = num_msi;

        return 0;
}

static int
hns3vf_init_hardware(struct hns3_adapter *hns)
{
        struct hns3_hw *hw = &hns->hw;
        uint16_t mtu = hw->data->mtu;
        int ret;

        ret = hns3vf_set_promisc_mode(hw, true, false, false);
        if (ret)
                return ret;

        ret = hns3vf_config_mtu(hw, mtu);
        if (ret)
                goto err_init_hardware;

        ret = hns3vf_vlan_filter_configure(hns, 0, 1);
        if (ret) {
                PMD_INIT_LOG(ERR, "Failed to initialize VLAN config: %d", ret);
                goto err_init_hardware;
        }

        ret = hns3_config_gro(hw, false);
        if (ret) {
                PMD_INIT_LOG(ERR, "Failed to config gro: %d", ret);
                goto err_init_hardware;
        }
1477
1478        /*
1479         * During initialization, all hardware mappings between queues and
1480         * interrupt vectors must be cleared, so that errors caused by
1481         * residual configurations, such as unexpected interrupts, can be
1482         * avoided.
1483         */
1484        ret = hns3_init_ring_with_vector(hw);
1485        if (ret) {
1486                PMD_INIT_LOG(ERR, "Failed to init ring intr vector: %d", ret);
1487                goto err_init_hardware;
1488        }
1489
1490        return 0;
1491
1492err_init_hardware:
1493        (void)hns3vf_set_promisc_mode(hw, false, false, false);
1494        return ret;
1495}
1496
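/* Request that the PF clears this VF's entries from the vport list table. */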
1497static int
1498hns3vf_clear_vport_list(struct hns3_hw *hw)
1499{
1500        return hns3_send_mbx_msg(hw, HNS3_MBX_HANDLE_VF_TBL,
1501                                 HNS3_MBX_VPORT_LIST_CLEAR, NULL, 0, false,
1502                                 NULL, 0);
1503}
1504
1505static int
1506hns3vf_init_vf(struct rte_eth_dev *eth_dev)
1507{
1508        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1509        struct hns3_adapter *hns = eth_dev->data->dev_private;
1510        struct hns3_hw *hw = &hns->hw;
1511        int ret;
1512
1513        PMD_INIT_FUNC_TRACE();
1514
1515        /* Get the hardware I/O base address from PCIe BAR2 I/O space */
1516        hw->io_base = pci_dev->mem_resource[2].addr;
1517
1518        /* Initialize the firmware command queue */
1519        ret = hns3_cmd_init_queue(hw);
1520        if (ret) {
1521                PMD_INIT_LOG(ERR, "Failed to init cmd queue: %d", ret);
1522                goto err_cmd_init_queue;
1523        }
1524
1525        /* Initialize firmware command handling */
1526        ret = hns3_cmd_init(hw);
1527        if (ret) {
1528                PMD_INIT_LOG(ERR, "Failed to init cmd: %d", ret);
1529                goto err_cmd_init;
1530        }
1531
1532        hns3_tx_push_init(eth_dev);
1533
1534        /* Get VF resource */
1535        ret = hns3_query_vf_resource(hw);
1536        if (ret)
1537                goto err_cmd_init;
1538
1539        rte_spinlock_init(&hw->mbx_resp.lock);
1540
1541        hns3vf_clear_event_cause(hw, 0);
1542
1543        ret = rte_intr_callback_register(pci_dev->intr_handle,
1544                                         hns3vf_interrupt_handler, eth_dev);
1545        if (ret) {
1546                PMD_INIT_LOG(ERR, "Failed to register intr: %d", ret);
1547                goto err_intr_callback_register;
1548        }
1549
1550        /* Enable interrupt */
1551        rte_intr_enable(pci_dev->intr_handle);
1552        hns3vf_enable_irq0(hw);
1553
1554        /* Get configuration from PF */
1555        ret = hns3vf_get_configuration(hw);
1556        if (ret) {
1557                PMD_INIT_LOG(ERR, "Failed to fetch configuration: %d", ret);
1558                goto err_get_config;
1559        }
1560
1561        ret = hns3_tqp_stats_init(hw);
1562        if (ret)
1563                goto err_get_config;
1564
1565        /* Clear the hardware statistics of the imissed registers. */
1566        ret = hns3_update_imissed_stats(hw, true);
1567        if (ret) {
1568                hns3_err(hw, "clear imissed stats failed, ret = %d", ret);
1569                goto err_set_tc_queue;
1570        }
1571
1572        ret = hns3vf_set_tc_queue_mapping(hns, hw->tqps_num, hw->tqps_num);
1573        if (ret) {
1574                PMD_INIT_LOG(ERR, "failed to set tc info, ret = %d.", ret);
1575                goto err_set_tc_queue;
1576        }
1577
1578        ret = hns3vf_clear_vport_list(hw);
1579        if (ret) {
1580                PMD_INIT_LOG(ERR, "Failed to clear tbl list: %d", ret);
1581                goto err_set_tc_queue;
1582        }
1583
1584        ret = hns3vf_init_hardware(hns);
1585        if (ret)
1586                goto err_set_tc_queue;
1587
1588        hns3_rss_set_default_args(hw);
1589
1590        ret = hns3vf_set_alive(hw, true);
1591        if (ret) {
1592                PMD_INIT_LOG(ERR, "Failed to send alive to PF: %d", ret);
1593                goto err_set_tc_queue;
1594        }
1595
1596        return 0;
1597
1598err_set_tc_queue:
1599        hns3_tqp_stats_uninit(hw);
1600
1601err_get_config:
1602        hns3vf_disable_irq0(hw);
1603        rte_intr_disable(pci_dev->intr_handle);
1604        hns3_intr_unregister(pci_dev->intr_handle, hns3vf_interrupt_handler,
1605                             eth_dev);
1606err_intr_callback_register:
1607err_cmd_init:
1608        hns3_cmd_uninit(hw);
1609        hns3_cmd_destroy_queue(hw);
1610err_cmd_init_queue:
1611        hw->io_base = NULL;
1612
1613        return ret;
1614}
1615
1616static void
1617hns3vf_uninit_vf(struct rte_eth_dev *eth_dev)
1618{
1619        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1620        struct hns3_adapter *hns = eth_dev->data->dev_private;
1621        struct hns3_hw *hw = &hns->hw;
1622
1623        PMD_INIT_FUNC_TRACE();
1624
1625        hns3_rss_uninit(hns);
1626        (void)hns3_config_gro(hw, false);
1627        (void)hns3vf_set_alive(hw, false);
1628        (void)hns3vf_set_promisc_mode(hw, false, false, false);
1629        hns3_flow_uninit(eth_dev);
1630        hns3_tqp_stats_uninit(hw);
1631        hns3vf_disable_irq0(hw);
1632        rte_intr_disable(pci_dev->intr_handle);
1633        hns3_intr_unregister(pci_dev->intr_handle, hns3vf_interrupt_handler,
1634                             eth_dev);
1635        hns3_cmd_uninit(hw);
1636        hns3_cmd_destroy_queue(hw);
1637        hw->io_base = NULL;
1638}
1639
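/*
 * Common stop path shared with the reset .stop_service hook: mark the link
 * down, release mbufs only when no reset is in progress, and remove MAC
 * addresses and reset all TQPs while the command queue is still usable.
 */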
1640static int
1641hns3vf_do_stop(struct hns3_adapter *hns)
1642{
1643        struct hns3_hw *hw = &hns->hw;
1644        int ret;
1645
1646        hw->mac.link_status = RTE_ETH_LINK_DOWN;
1647
1648        /*
1649         * The "hns3vf_do_stop" function will also be called by .stop_service to
1650         * prepare a reset. During a global or IMP reset, the command to stop
1651         * the Tx/Rx queues cannot be sent. The mbufs in the Tx/Rx queues may
1652         * still be accessed during the reset process, so they cannot be
1653         * released during the reset and must instead be released after the
1654         * reset is completed.
1655         */
1656        if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0)
1657                hns3_dev_release_mbufs(hns);
1658
1659        if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) {
1660                hns3_configure_all_mac_addr(hns, true);
1661                ret = hns3_reset_all_tqps(hns);
1662                if (ret) {
1663                        hns3_err(hw, "failed to reset all queues, ret = %d",
1664                                 ret);
1665                        return ret;
1666                }
1667        }
1668        return 0;
1669}
1670
1671static int
1672hns3vf_dev_stop(struct rte_eth_dev *dev)
1673{
1674        struct hns3_adapter *hns = dev->data->dev_private;
1675        struct hns3_hw *hw = &hns->hw;
1676
1677        PMD_INIT_FUNC_TRACE();
1678        dev->data->dev_started = 0;
1679
1680        hw->adapter_state = HNS3_NIC_STOPPING;
1681        hns3_set_rxtx_function(dev);
1682        rte_wmb();
1683        /* Disable datapath on secondary process. */
1684        hns3_mp_req_stop_rxtx(dev);
1685        /* Prevent crashes when queues are still in use. */
1686        rte_delay_ms(hw->cfg_max_queues);
1687
1688        rte_spinlock_lock(&hw->lock);
1689        if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
1690                hns3_stop_tqps(hw);
1691                hns3vf_do_stop(hns);
1692                hns3_unmap_rx_interrupt(dev);
1693                hw->adapter_state = HNS3_NIC_CONFIGURED;
1694        }
1695        hns3_rx_scattered_reset(dev);
1696        hns3vf_stop_poll_job(dev);
1697        hns3_stop_report_lse(dev);
1698        rte_spinlock_unlock(&hw->lock);
1699
1700        return 0;
1701}
1702
1703static int
1704hns3vf_dev_close(struct rte_eth_dev *eth_dev)
1705{
1706        struct hns3_adapter *hns = eth_dev->data->dev_private;
1707        struct hns3_hw *hw = &hns->hw;
1708        int ret = 0;
1709
1710        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1711                hns3_mp_uninit(eth_dev);
1712                return 0;
1713        }
1714
1715        if (hw->adapter_state == HNS3_NIC_STARTED)
1716                ret = hns3vf_dev_stop(eth_dev);
1717
1718        hw->adapter_state = HNS3_NIC_CLOSING;
1719        hns3_reset_abort(hns);
1720        hw->adapter_state = HNS3_NIC_CLOSED;
1721        rte_eal_alarm_cancel(hns3vf_keep_alive_handler, eth_dev);
1722        hns3_configure_all_mc_mac_addr(hns, true);
1723        hns3vf_remove_all_vlan_table(hns);
1724        hns3vf_uninit_vf(eth_dev);
1725        hns3_free_all_queues(eth_dev);
1726        rte_free(hw->reset.wait_data);
1727        hns3_mp_uninit(eth_dev);
1728        hns3_warn(hw, "Close port %u finished", hw->data->port_id);
1729
1730        return ret;
1731}
1732
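/*
 * Report the link state cached from the PF: known speeds are reported as-is
 * while the link is up, unknown speeds as RTE_ETH_SPEED_NUM_UNKNOWN, and
 * the speed is cleared once the link is down.
 */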
1733static int
1734hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
1735                       __rte_unused int wait_to_complete)
1736{
1737        struct hns3_adapter *hns = eth_dev->data->dev_private;
1738        struct hns3_hw *hw = &hns->hw;
1739        struct hns3_mac *mac = &hw->mac;
1740        struct rte_eth_link new_link;
1741
1742        memset(&new_link, 0, sizeof(new_link));
1743        switch (mac->link_speed) {
1744        case RTE_ETH_SPEED_NUM_10M:
1745        case RTE_ETH_SPEED_NUM_100M:
1746        case RTE_ETH_SPEED_NUM_1G:
1747        case RTE_ETH_SPEED_NUM_10G:
1748        case RTE_ETH_SPEED_NUM_25G:
1749        case RTE_ETH_SPEED_NUM_40G:
1750        case RTE_ETH_SPEED_NUM_50G:
1751        case RTE_ETH_SPEED_NUM_100G:
1752        case RTE_ETH_SPEED_NUM_200G:
1753                if (mac->link_status)
1754                        new_link.link_speed = mac->link_speed;
1755                break;
1756        default:
1757                if (mac->link_status)
1758                        new_link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
1759                break;
1760        }
1761
1762        if (!mac->link_status)
1763                new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1764
1765        new_link.link_duplex = mac->link_duplex;
1766        new_link.link_status = mac->link_status ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
1767        new_link.link_autoneg =
1768            !(eth_dev->data->dev_conf.link_speeds & RTE_ETH_LINK_SPEED_FIXED);
1769
1770        return rte_eth_linkstatus_set(eth_dev, &new_link);
1771}
1772
1773static int
1774hns3vf_do_start(struct hns3_adapter *hns, bool reset_queue)
1775{
1776        struct hns3_hw *hw = &hns->hw;
1777        uint16_t nb_rx_q = hw->data->nb_rx_queues;
1778        uint16_t nb_tx_q = hw->data->nb_tx_queues;
1779        int ret;
1780
1781        ret = hns3vf_set_tc_queue_mapping(hns, nb_rx_q, nb_tx_q);
1782        if (ret)
1783                return ret;
1784
1785        hns3_enable_rxd_adv_layout(hw);
1786
1787        ret = hns3_init_queues(hns, reset_queue);
1788        if (ret)
1789                hns3_err(hw, "failed to init queues, ret = %d.", ret);
1790
1791        return ret;
1792}
1793
1794static void
1795hns3vf_restore_filter(struct rte_eth_dev *dev)
1796{
1797        hns3_restore_rss_filter(dev);
1798}
1799
1800static int
1801hns3vf_dev_start(struct rte_eth_dev *dev)
1802{
1803        struct hns3_adapter *hns = dev->data->dev_private;
1804        struct hns3_hw *hw = &hns->hw;
1805        int ret;
1806
1807        PMD_INIT_FUNC_TRACE();
1808        if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
1809                return -EBUSY;
1810
1811        rte_spinlock_lock(&hw->lock);
1812        hw->adapter_state = HNS3_NIC_STARTING;
1813        ret = hns3vf_do_start(hns, true);
1814        if (ret) {
1815                hw->adapter_state = HNS3_NIC_CONFIGURED;
1816                rte_spinlock_unlock(&hw->lock);
1817                return ret;
1818        }
1819        ret = hns3_map_rx_interrupt(dev);
1820        if (ret)
1821                goto map_rx_inter_err;
1822
1823        /*
1824         * There are three registers used to control the status of a TQP
1825         * (which contains a pair of Tx and Rx queues) in the new version of
1826         * the network engine. One controls the enabling of the Tx queue,
1827         * another controls the enabling of the Rx queue, and the last is the
1828         * master switch controlling the enabling of the whole TQP. The Tx
1829         * register and the TQP register must both be enabled to enable a Tx
1830         * queue; the same applies to the Rx queue. For the older network
1831         * engine, this function only refreshes the enabled flag, which is
1832         * used to update the queue status in the DPDK framework.
1833         */
1834        ret = hns3_start_all_txqs(dev);
1835        if (ret)
1836                goto map_rx_inter_err;
1837
1838        ret = hns3_start_all_rxqs(dev);
1839        if (ret)
1840                goto start_all_rxqs_fail;
1841
1842        hw->adapter_state = HNS3_NIC_STARTED;
1843        rte_spinlock_unlock(&hw->lock);
1844
1845        hns3_rx_scattered_calc(dev);
1846        hns3_set_rxtx_function(dev);
1847        hns3_mp_req_start_rxtx(dev);
1848
1849        hns3vf_restore_filter(dev);
1850
1851        /* Enable interrupt of all rx queues before enabling queues */
1852        hns3_dev_all_rx_queue_intr_enable(hw, true);
1853        hns3_start_tqps(hw);
1854
1855        if (dev->data->dev_conf.intr_conf.lsc != 0)
1856                hns3vf_dev_link_update(dev, 0);
1857        hns3vf_start_poll_job(dev);
1858
1859        return ret;
1860
1861start_all_rxqs_fail:
1862        hns3_stop_all_txqs(dev);
1863map_rx_inter_err:
1864        (void)hns3vf_do_stop(hns);
1865        hw->adapter_state = HNS3_NIC_CONFIGURED;
1866        rte_spinlock_unlock(&hw->lock);
1867
1868        return ret;
1869}
1870
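/*
 * Check the reset-in-progress registers to decide whether the hardware has
 * completed the current reset: a VF reset is tracked in HNS3_VF_RST_ING,
 * the other levels in the HNS3_FUN_RST_ING status bits.
 */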
1871static bool
1872is_vf_reset_done(struct hns3_hw *hw)
1873{
1874#define HNS3_FUN_RST_ING_BITS \
1875        (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) | \
1876         BIT(HNS3_VECTOR0_CORERESET_INT_B) | \
1877         BIT(HNS3_VECTOR0_IMPRESET_INT_B) | \
1878         BIT(HNS3_VECTOR0_FUNCRESET_INT_B))
1879
1880        uint32_t val;
1881
1882        if (hw->reset.level == HNS3_VF_RESET) {
1883                val = hns3_read_dev(hw, HNS3_VF_RST_ING);
1884                if (val & HNS3_VF_RST_ING_BIT)
1885                        return false;
1886        } else {
1887                val = hns3_read_dev(hw, HNS3_FUN_RST_ING);
1888                if (val & HNS3_FUN_RST_ING_BITS)
1889                        return false;
1890        }
1891        return true;
1892}
1893
1894bool
1895hns3vf_is_reset_pending(struct hns3_adapter *hns)
1896{
1897        struct hns3_hw *hw = &hns->hw;
1898        enum hns3_reset_level reset;
1899
1900        /*
1901         * According to the PCIe specification, an FLR on a PF device resets
1902         * the PF state as well as the SR-IOV extended capability, including
1903         * VF Enable, which means that the VFs no longer exist.
1904         *
1905         * HNS3_VF_FULL_RESET means the PF device is in an FLR reset. While
1906         * the PF is in the FLR stage, the register state of the VF device
1907         * is not reliable, so register state detection cannot be carried
1908         * out. In this case, just ignore the register states and return
1909         * false: there are no other reset states for the driver to process.
1910         */
1911        if (hw->reset.level == HNS3_VF_FULL_RESET)
1912                return false;
1913
1914        /* Check the registers to confirm whether there is reset pending */
1915        hns3vf_check_event_cause(hns, NULL);
1916        reset = hns3vf_get_reset_level(hw, &hw->reset.pending);
1917        if (hw->reset.level != HNS3_NONE_RESET && reset != HNS3_NONE_RESET &&
1918            hw->reset.level < reset) {
1919                hns3_warn(hw, "High level reset %d is pending", reset);
1920                return true;
1921        }
1922        return false;
1923}
1924
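/*
 * Drive the alarm-based wait_data state machine that polls for reset
 * completion: arm the check on the first call, return -EAGAIN while the
 * wait is pending, and add an extra delay after success when needed so
 * the PF can finish its own reset processing.
 */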
1925static int
1926hns3vf_wait_hardware_ready(struct hns3_adapter *hns)
1927{
1928        struct hns3_hw *hw = &hns->hw;
1929        struct hns3_wait_data *wait_data = hw->reset.wait_data;
1930        struct timeval tv;
1931
1932        if (wait_data->result == HNS3_WAIT_SUCCESS) {
1933                /*
1934                 * After the VF reset is ready, the PF may not yet have
1935                 * completed its reset processing. A mailbox message sent to
1936                 * the PF may fail during the PF reset, so add an extra delay.
1937                 */
1938                if (hw->reset.level == HNS3_VF_FUNC_RESET ||
1939                    hw->reset.level == HNS3_FLR_RESET)
1940                        return 0;
1941                /* Reset retry process, no need to add extra delay. */
1942                if (hw->reset.attempts)
1943                        return 0;
1944                if (wait_data->check_completion == NULL)
1945                        return 0;
1946
1947                wait_data->check_completion = NULL;
1948                wait_data->interval = 1 * MSEC_PER_SEC * USEC_PER_MSEC;
1949                wait_data->count = 1;
1950                wait_data->result = HNS3_WAIT_REQUEST;
1951                rte_eal_alarm_set(wait_data->interval, hns3_wait_callback,
1952                                  wait_data);
1953                hns3_warn(hw, "hardware is ready, delay 1 sec for PF reset to complete");
1954                return -EAGAIN;
1955        } else if (wait_data->result == HNS3_WAIT_TIMEOUT) {
1956                hns3_clock_gettime(&tv);
1957                hns3_warn(hw, "Reset step4 hardware not ready after reset time=%ld.%.6ld",
1958                          tv.tv_sec, tv.tv_usec);
1959                return -ETIME;
1960        } else if (wait_data->result == HNS3_WAIT_REQUEST)
1961                return -EAGAIN;
1962
1963        wait_data->hns = hns;
1964        wait_data->check_completion = is_vf_reset_done;
1965        wait_data->end_ms = (uint64_t)HNS3VF_RESET_WAIT_CNT *
1966                                HNS3VF_RESET_WAIT_MS + hns3_clock_gettime_ms();
1967        wait_data->interval = HNS3VF_RESET_WAIT_MS * USEC_PER_MSEC;
1968        wait_data->count = HNS3VF_RESET_WAIT_CNT;
1969        wait_data->result = HNS3_WAIT_REQUEST;
1970        rte_eal_alarm_set(wait_data->interval, hns3_wait_callback, wait_data);
1971        return -EAGAIN;
1972}
1973
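/*
 * Notify the PF of a VF function reset through the mailbox, then disable
 * the command queue so that no new commands are issued during the reset.
 */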
1974static int
1975hns3vf_prepare_reset(struct hns3_adapter *hns)
1976{
1977        struct hns3_hw *hw = &hns->hw;
1978        int ret;
1979
1980        if (hw->reset.level == HNS3_VF_FUNC_RESET) {
1981                ret = hns3_send_mbx_msg(hw, HNS3_MBX_RESET, 0, NULL,
1982                                        0, true, NULL, 0);
1983                if (ret)
1984                        return ret;
1985        }
1986        __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
1987
1988        return 0;
1989}
1990
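/* Quiesce the port before a reset: stop polling, the data path and queues. */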
1991static int
1992hns3vf_stop_service(struct hns3_adapter *hns)
1993{
1994        struct hns3_hw *hw = &hns->hw;
1995        struct rte_eth_dev *eth_dev;
1996
1997        eth_dev = &rte_eth_devices[hw->data->port_id];
1998        if (hw->adapter_state == HNS3_NIC_STARTED) {
1999                /*
2000                 * Make sure to update the link status before calling
2001                 * hns3vf_stop_poll_job, since the update relies on the job.
2002                 */
2003                hns3vf_update_link_status(hw, RTE_ETH_LINK_DOWN, hw->mac.link_speed,
2004                                          hw->mac.link_duplex);
2005                hns3vf_stop_poll_job(eth_dev);
2006        }
2007        hw->mac.link_status = RTE_ETH_LINK_DOWN;
2008
2009        hns3_set_rxtx_function(eth_dev);
2010        rte_wmb();
2011        /* Disable datapath on secondary process. */
2012        hns3_mp_req_stop_rxtx(eth_dev);
2013        rte_delay_ms(hw->cfg_max_queues);
2014
2015        rte_spinlock_lock(&hw->lock);
2016        if (hw->adapter_state == HNS3_NIC_STARTED ||
2017            hw->adapter_state == HNS3_NIC_STOPPING) {
2018                hns3_enable_all_queues(hw, false);
2019                hns3vf_do_stop(hns);
2020                hw->reset.mbuf_deferred_free = true;
2021        } else
2022                hw->reset.mbuf_deferred_free = false;
2023
2024        /*
2025         * It is cumbersome for hardware to pick-and-choose entries for deletion
2026         * from table space. Hence, for a function reset, software
2027         * intervention is required to delete the entries.
2028         */
2029        if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0)
2030                hns3_configure_all_mc_mac_addr(hns, true);
2031        rte_spinlock_unlock(&hw->lock);
2032
2033        return 0;
2034}
2035
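/* Resume the data path and per-queue enable state after a reset completes. */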
2036static int
2037hns3vf_start_service(struct hns3_adapter *hns)
2038{
2039        struct hns3_hw *hw = &hns->hw;
2040        struct rte_eth_dev *eth_dev;
2041
2042        eth_dev = &rte_eth_devices[hw->data->port_id];
2043        hns3_set_rxtx_function(eth_dev);
2044        hns3_mp_req_start_rxtx(eth_dev);
2045        if (hw->adapter_state == HNS3_NIC_STARTED) {
2046                hns3vf_start_poll_job(eth_dev);
2047
2048                /* Enable interrupt of all rx queues before enabling queues */
2049                hns3_dev_all_rx_queue_intr_enable(hw, true);
2050                /*
2051                 * The enable state of each rxq and txq will be recovered
2052                 * after reset, so restore it before enabling all tqps.
2053                 */
2054                hns3_restore_tqp_enable_state(hw);
2055                /*
2056                 * When finished the initialization, enable queues to receive
2057                 * and transmit packets.
2058                 */
2059                hns3_enable_all_queues(hw, true);
2060        }
2061
2062        return 0;
2063}
2064
2065static int
2066hns3vf_check_default_mac_change(struct hns3_hw *hw)
2067{
2068        char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
2069        struct rte_ether_addr *hw_mac;
2070        int ret;
2071
2072        /*
2073         * The hns3 PF kernel ethdev driver supports setting the VF MAC
2074         * address on the host via the "ip link set ..." command. If the PF
2075         * kernel driver sets the MAC address for a VF device after that VF
2076         * has been initialized, the PF driver notifies the VF driver to
2077         * reset the VF device so that the new MAC address takes effect
2078         * immediately. The hns3 VF PMD should therefore check whether the
2079         * MAC address has been changed by the PF kernel driver; if so, the
2080         * VF driver should configure the hardware with the new MAC address
2081         * during the hardware configuration recovery stage of the reset.
2082         */
2083        ret = hns3vf_get_host_mac_addr(hw);
2084        if (ret)
2085                return ret;
2086
2087        hw_mac = (struct rte_ether_addr *)hw->mac.mac_addr;
2088        ret = rte_is_zero_ether_addr(hw_mac);
2089        if (ret) {
2090                rte_ether_addr_copy(&hw->data->mac_addrs[0], hw_mac);
2091        } else {
2092                ret = rte_is_same_ether_addr(&hw->data->mac_addrs[0], hw_mac);
2093                if (!ret) {
2094                        rte_ether_addr_copy(hw_mac, &hw->data->mac_addrs[0]);
2095                        hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
2096                                              &hw->data->mac_addrs[0]);
2097                        hns3_warn(hw, "Default MAC address has been changed to:"
2098                                  " %s by the host PF kernel ethdev driver",
2099                                  mac_str);
2100                }
2101        }
2102
2103        return 0;
2104}
2105
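/*
 * Recover the hardware configuration after a reset: MAC addresses,
 * promiscuous and VLAN settings, Rx interrupts and GRO, then restart the
 * queues if the port was running and announce the VF as alive again.
 */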
2106static int
2107hns3vf_restore_conf(struct hns3_adapter *hns)
2108{
2109        struct hns3_hw *hw = &hns->hw;
2110        int ret;
2111
2112        ret = hns3vf_check_default_mac_change(hw);
2113        if (ret)
2114                return ret;
2115
2116        ret = hns3_configure_all_mac_addr(hns, false);
2117        if (ret)
2118                return ret;
2119
2120        ret = hns3_configure_all_mc_mac_addr(hns, false);
2121        if (ret)
2122                goto err_mc_mac;
2123
2124        ret = hns3vf_restore_promisc(hns);
2125        if (ret)
2126                goto err_vlan_table;
2127
2128        ret = hns3vf_restore_vlan_conf(hns);
2129        if (ret)
2130                goto err_vlan_table;
2131
2132        ret = hns3vf_get_port_base_vlan_filter_state(hw);
2133        if (ret)
2134                goto err_vlan_table;
2135
2136        ret = hns3_restore_rx_interrupt(hw);
2137        if (ret)
2138                goto err_vlan_table;
2139
2140        ret = hns3_restore_gro_conf(hw);
2141        if (ret)
2142                goto err_vlan_table;
2143
2144        if (hw->adapter_state == HNS3_NIC_STARTED) {
2145                ret = hns3vf_do_start(hns, false);
2146                if (ret)
2147                        goto err_vlan_table;
2148                hns3_info(hw, "hns3vf dev restarted successfully!");
2149        } else if (hw->adapter_state == HNS3_NIC_STOPPING)
2150                hw->adapter_state = HNS3_NIC_CONFIGURED;
2151
2152        ret = hns3vf_set_alive(hw, true);
2153        if (ret) {
2154                hns3_err(hw, "failed to send alive to PF: %d", ret);
2155                goto err_vlan_table;
2156        }
2157
2158        return 0;
2159
2160err_vlan_table:
2161        hns3_configure_all_mc_mac_addr(hns, true);
2162err_mc_mac:
2163        hns3_configure_all_mac_addr(hns, true);
2164        return ret;
2165}
2166
2167static enum hns3_reset_level
2168hns3vf_get_reset_level(struct hns3_hw *hw, uint64_t *levels)
2169{
2170        enum hns3_reset_level reset_level;
2171
2172        /* return the highest priority reset level amongst all */
2173        if (hns3_atomic_test_bit(HNS3_VF_RESET, levels))
2174                reset_level = HNS3_VF_RESET;
2175        else if (hns3_atomic_test_bit(HNS3_VF_FULL_RESET, levels))
2176                reset_level = HNS3_VF_FULL_RESET;
2177        else if (hns3_atomic_test_bit(HNS3_VF_PF_FUNC_RESET, levels))
2178                reset_level = HNS3_VF_PF_FUNC_RESET;
2179        else if (hns3_atomic_test_bit(HNS3_VF_FUNC_RESET, levels))
2180                reset_level = HNS3_VF_FUNC_RESET;
2181        else if (hns3_atomic_test_bit(HNS3_FLR_RESET, levels))
2182                reset_level = HNS3_FLR_RESET;
2183        else
2184                reset_level = HNS3_NONE_RESET;
2185
2186        if (hw->reset.level != HNS3_NONE_RESET && reset_level < hw->reset.level)
2187                return HNS3_NONE_RESET;
2188
2189        return reset_level;
2190}
2191
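/*
 * Delayed reset task: recover a possibly lost interrupt when the task was
 * deferred, then process the highest pending reset level and warn when the
 * handling exceeds HNS3_RESET_PROCESS_MS.
 */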
2192static void
2193hns3vf_reset_service(void *param)
2194{
2195        struct hns3_adapter *hns = (struct hns3_adapter *)param;
2196        struct hns3_hw *hw = &hns->hw;
2197        enum hns3_reset_level reset_level;
2198        struct timeval tv_delta;
2199        struct timeval tv_start;
2200        struct timeval tv;
2201        uint64_t msec;
2202
2203        /*
2204         * If the interrupt was not triggered within the delay time, it may
2205         * have been lost. In that case, it is necessary to handle the
2206         * interrupt here to recover from the error.
2207         */
2208        if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
2209                            SCHEDULE_DEFERRED) {
2210                __atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED,
2211                                 __ATOMIC_RELAXED);
2212                hns3_err(hw, "Handling interrupts in delayed tasks");
2213                hns3vf_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
2214                reset_level = hns3vf_get_reset_level(hw, &hw->reset.pending);
2215                if (reset_level == HNS3_NONE_RESET) {
2216                        hns3_err(hw, "No reset level is set, try global reset");
2217                        hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
2218                }
2219        }
2220        __atomic_store_n(&hw->reset.schedule, SCHEDULE_NONE, __ATOMIC_RELAXED);
2221
2222        /*
2223         * Hardware reset has been notified, we now have to poll & check if
2224         * hardware has actually completed the reset sequence.
2225         */
2226        reset_level = hns3vf_get_reset_level(hw, &hw->reset.pending);
2227        if (reset_level != HNS3_NONE_RESET) {
2228                hns3_clock_gettime(&tv_start);
2229                hns3_reset_process(hns, reset_level);
2230                hns3_clock_gettime(&tv);
2231                timersub(&tv, &tv_start, &tv_delta);
2232                msec = hns3_clock_calctime_ms(&tv_delta);
2233                if (msec > HNS3_RESET_PROCESS_MS)
2234                        hns3_err(hw, "reset level %d handling took a long time: %" PRIu64
2235                                 " ms, time=%ld.%.6ld",
2236                                 hw->reset.level, msec, tv.tv_sec, tv.tv_usec);
2237        }
2238}
2239
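/*
 * Reinitialize the device after a reset: restore bus mastering and MSI-X
 * for a full VF reset, reinitialize the command queue, reset all TQPs and
 * reapply the baseline hardware configuration.
 */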
2240static int
2241hns3vf_reinit_dev(struct hns3_adapter *hns)
2242{
2243        struct rte_eth_dev *eth_dev = &rte_eth_devices[hns->hw.data->port_id];
2244        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2245        struct hns3_hw *hw = &hns->hw;
2246        int ret;
2247
2248        if (hw->reset.level == HNS3_VF_FULL_RESET) {
2249                rte_intr_disable(pci_dev->intr_handle);
2250                ret = hns3vf_set_bus_master(pci_dev, true);
2251                if (ret < 0) {
2252                        hns3_err(hw, "failed to set pci bus mastering, ret = %d", ret);
2253                        return ret;
2254                }
2255        }
2256
2257        /* Initialize firmware command handling */
2258        ret = hns3_cmd_init(hw);
2259        if (ret) {
2260                hns3_err(hw, "Failed to init cmd: %d", ret);
2261                return ret;
2262        }
2263
2264        if (hw->reset.level == HNS3_VF_FULL_RESET) {
2265                /*
2266                 * UIO enables MSI-X by writing the PCIe configuration space,
2267                 * while vfio_pci enables MSI-X in rte_intr_enable.
2268                 */
2269                if (pci_dev->kdrv == RTE_PCI_KDRV_IGB_UIO ||
2270                    pci_dev->kdrv == RTE_PCI_KDRV_UIO_GENERIC) {
2271                        if (hns3vf_enable_msix(pci_dev, true))
2272                                hns3_err(hw, "Failed to enable msix");
2273                }
2274
2275                rte_intr_enable(pci_dev->intr_handle);
2276        }
2277
2278        ret = hns3_reset_all_tqps(hns);
2279        if (ret) {
2280                hns3_err(hw, "Failed to reset all queues: %d", ret);
2281                return ret;
2282        }
2283
2284        ret = hns3vf_init_hardware(hns);
2285        if (ret) {
2286                hns3_err(hw, "Failed to init hardware: %d", ret);
2287                return ret;
2288        }
2289
2290        return 0;
2291}
2292
2293static const struct eth_dev_ops hns3vf_eth_dev_ops = {
2294        .dev_configure      = hns3vf_dev_configure,
2295        .dev_start          = hns3vf_dev_start,
2296        .dev_stop           = hns3vf_dev_stop,
2297        .dev_close          = hns3vf_dev_close,
2298        .mtu_set            = hns3vf_dev_mtu_set,
2299        .promiscuous_enable = hns3vf_dev_promiscuous_enable,
2300        .promiscuous_disable = hns3vf_dev_promiscuous_disable,
2301        .allmulticast_enable = hns3vf_dev_allmulticast_enable,
2302        .allmulticast_disable = hns3vf_dev_allmulticast_disable,
2303        .stats_get          = hns3_stats_get,
2304        .stats_reset        = hns3_stats_reset,
2305        .xstats_get         = hns3_dev_xstats_get,
2306        .xstats_get_names   = hns3_dev_xstats_get_names,
2307        .xstats_reset       = hns3_dev_xstats_reset,
2308        .xstats_get_by_id   = hns3_dev_xstats_get_by_id,
2309        .xstats_get_names_by_id = hns3_dev_xstats_get_names_by_id,
2310        .dev_infos_get      = hns3_dev_infos_get,
2311        .fw_version_get     = hns3_fw_version_get,
2312        .rx_queue_setup     = hns3_rx_queue_setup,
2313        .tx_queue_setup     = hns3_tx_queue_setup,
2314        .rx_queue_release   = hns3_dev_rx_queue_release,
2315        .tx_queue_release   = hns3_dev_tx_queue_release,
2316        .rx_queue_start     = hns3_dev_rx_queue_start,
2317        .rx_queue_stop      = hns3_dev_rx_queue_stop,
2318        .tx_queue_start     = hns3_dev_tx_queue_start,
2319        .tx_queue_stop      = hns3_dev_tx_queue_stop,
2320        .rx_queue_intr_enable   = hns3_dev_rx_queue_intr_enable,
2321        .rx_queue_intr_disable  = hns3_dev_rx_queue_intr_disable,
2322        .rxq_info_get       = hns3_rxq_info_get,
2323        .txq_info_get       = hns3_txq_info_get,
2324        .rx_burst_mode_get  = hns3_rx_burst_mode_get,
2325        .tx_burst_mode_get  = hns3_tx_burst_mode_get,
2326        .mac_addr_add       = hns3_add_mac_addr,
2327        .mac_addr_remove    = hns3_remove_mac_addr,
2328        .mac_addr_set       = hns3vf_set_default_mac_addr,
2329        .set_mc_addr_list   = hns3_set_mc_mac_addr_list,
2330        .link_update        = hns3vf_dev_link_update,
2331        .rss_hash_update    = hns3_dev_rss_hash_update,
2332        .rss_hash_conf_get  = hns3_dev_rss_hash_conf_get,
2333        .reta_update        = hns3_dev_rss_reta_update,
2334        .reta_query         = hns3_dev_rss_reta_query,
2335        .flow_ops_get       = hns3_dev_flow_ops_get,
2336        .vlan_filter_set    = hns3vf_vlan_filter_set,
2337        .vlan_offload_set   = hns3vf_vlan_offload_set,
2338        .get_reg            = hns3_get_regs,
2339        .dev_supported_ptypes_get = hns3_dev_supported_ptypes_get,
2340        .tx_done_cleanup    = hns3_tx_done_cleanup,
2341};
2342
2343static const struct hns3_reset_ops hns3vf_reset_ops = {
2344        .reset_service       = hns3vf_reset_service,
2345        .stop_service        = hns3vf_stop_service,
2346        .prepare_reset       = hns3vf_prepare_reset,
2347        .wait_hardware_ready = hns3vf_wait_hardware_ready,
2348        .reinit_dev          = hns3vf_reinit_dev,
2349        .restore_conf        = hns3vf_restore_conf,
2350        .start_service       = hns3vf_start_service,
2351};
2352
2353static void
2354hns3vf_init_hw_ops(struct hns3_hw *hw)
2355{
2356        hw->ops.add_mc_mac_addr = hns3vf_add_mc_mac_addr;
2357        hw->ops.del_mc_mac_addr = hns3vf_remove_mc_mac_addr;
2358        hw->ops.add_uc_mac_addr = hns3vf_add_uc_mac_addr;
2359        hw->ops.del_uc_mac_addr = hns3vf_remove_uc_mac_addr;
2360        hw->ops.bind_ring_with_vector = hns3vf_bind_ring_with_vector;
2361}
2362
2363static int
2364hns3vf_dev_init(struct rte_eth_dev *eth_dev)
2365{
2366        struct hns3_adapter *hns = eth_dev->data->dev_private;
2367        struct hns3_hw *hw = &hns->hw;
2368        int ret;
2369
2370        PMD_INIT_FUNC_TRACE();
2371
2372        hns3_flow_init(eth_dev);
2373
2374        hns3_set_rxtx_function(eth_dev);
2375        eth_dev->dev_ops = &hns3vf_eth_dev_ops;
2376        eth_dev->rx_queue_count = hns3_rx_queue_count;
2377        ret = hns3_mp_init(eth_dev);
2378        if (ret)
2379                goto err_mp_init;
2380
2381        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2382                hns3_tx_push_init(eth_dev);
2383                return 0;
2384        }
2385
2386        hw->adapter_state = HNS3_NIC_UNINITIALIZED;
2387        hns->is_vf = true;
2388        hw->data = eth_dev->data;
2389        hns3_parse_devargs(eth_dev);
2390
2391        ret = hns3_reset_init(hw);
2392        if (ret)
2393                goto err_init_reset;
2394        hw->reset.ops = &hns3vf_reset_ops;
2395
2396        hns3vf_init_hw_ops(hw);
2397        ret = hns3vf_init_vf(eth_dev);
2398        if (ret) {
2399                PMD_INIT_LOG(ERR, "Failed to init vf: %d", ret);
2400                goto err_init_vf;
2401        }
2402
2403        /* Allocate memory for storing MAC addresses */
2404        eth_dev->data->mac_addrs = rte_zmalloc("hns3vf-mac",
2405                                               sizeof(struct rte_ether_addr) *
2406                                               HNS3_VF_UC_MACADDR_NUM, 0);
2407        if (eth_dev->data->mac_addrs == NULL) {
2408                PMD_INIT_LOG(ERR, "Failed to allocate %zu bytes needed "
2409                             "to store MAC addresses",
2410                             sizeof(struct rte_ether_addr) *
2411                             HNS3_VF_UC_MACADDR_NUM);
2412                ret = -ENOMEM;
2413                goto err_rte_zmalloc;
2414        }
2415
2416        /*
2417         * The hns3 PF kernel ethdev driver supports setting the VF MAC
2418         * address on the host via the "ip link set ..." command. To avoid
2419         * failure scenarios, for example the hns3 VF PMD failing to receive
2420         * and send packets after the user configures the MAC address with
2421         * that command, the hns3 VF PMD keeps the same MAC address strategy
2422         * as the hns3 kernel ethdev driver during initialization: if the
2423         * user has configured a MAC address for the VF device with the ip
2424         * command, the hns3 VF PMD starts with it; otherwise it starts
2425         * with a random MAC address.
2426         */
2427        if (rte_is_zero_ether_addr((struct rte_ether_addr *)hw->mac.mac_addr))
2428                rte_eth_random_addr(hw->mac.mac_addr);
2429        rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.mac_addr,
2430                            &eth_dev->data->mac_addrs[0]);
2431
2432        hw->adapter_state = HNS3_NIC_INITIALIZED;
2433
2434        if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
2435                            SCHEDULE_PENDING) {
2436                hns3_err(hw, "Reschedule reset service after dev_init");
2437                hns3_schedule_reset(hns);
2438        } else {
2439                /* IMP will wait for the ready flag before resetting */
2440                hns3_notify_reset_ready(hw, false);
2441        }
2442        rte_eal_alarm_set(HNS3VF_KEEP_ALIVE_INTERVAL, hns3vf_keep_alive_handler,
2443                          eth_dev);
2444        return 0;
2445
2446err_rte_zmalloc:
2447        hns3vf_uninit_vf(eth_dev);
2448
2449err_init_vf:
2450        rte_free(hw->reset.wait_data);
2451
2452err_init_reset:
2453        hns3_mp_uninit(eth_dev);
2454
2455err_mp_init:
2456        eth_dev->dev_ops = NULL;
2457        eth_dev->rx_pkt_burst = NULL;
2458        eth_dev->rx_descriptor_status = NULL;
2459        eth_dev->tx_pkt_burst = NULL;
2460        eth_dev->tx_pkt_prepare = NULL;
2461        eth_dev->tx_descriptor_status = NULL;
2462
2463        return ret;
2464}
2465
2466static int
2467hns3vf_dev_uninit(struct rte_eth_dev *eth_dev)
2468{
2469        struct hns3_adapter *hns = eth_dev->data->dev_private;
2470        struct hns3_hw *hw = &hns->hw;
2471
2472        PMD_INIT_FUNC_TRACE();
2473
2474        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2475                __atomic_fetch_sub(&hw->secondary_cnt, 1, __ATOMIC_RELAXED);
2476                hns3_mp_uninit(eth_dev);
2477                return 0;
2478        }
2479
2480        if (hw->adapter_state < HNS3_NIC_CLOSING)
2481                hns3vf_dev_close(eth_dev);
2482
2483        hw->adapter_state = HNS3_NIC_REMOVED;
2484        return 0;
2485}
2486
2487static int
2488eth_hns3vf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
2489                     struct rte_pci_device *pci_dev)
2490{
2491        return rte_eth_dev_pci_generic_probe(pci_dev,
2492                                             sizeof(struct hns3_adapter),
2493                                             hns3vf_dev_init);
2494}
2495
2496static int
2497eth_hns3vf_pci_remove(struct rte_pci_device *pci_dev)
2498{
2499        return rte_eth_dev_pci_generic_remove(pci_dev, hns3vf_dev_uninit);
2500}
2501
2502static const struct rte_pci_id pci_id_hns3vf_map[] = {
2503        { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_VF) },
2504        { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_RDMA_PFC_VF) },
2505        { .vendor_id = 0, }, /* sentinel */
2506};
2507
2508static struct rte_pci_driver rte_hns3vf_pmd = {
2509        .id_table = pci_id_hns3vf_map,
2510        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
2511        .probe = eth_hns3vf_pci_probe,
2512        .remove = eth_hns3vf_pci_remove,
2513};
2514
2515RTE_PMD_REGISTER_PCI(net_hns3_vf, rte_hns3vf_pmd);
2516RTE_PMD_REGISTER_PCI_TABLE(net_hns3_vf, pci_id_hns3vf_map);
2517RTE_PMD_REGISTER_KMOD_DEP(net_hns3_vf, "* igb_uio | vfio-pci");
2518RTE_PMD_REGISTER_PARAM_STRING(net_hns3_vf,
2519                HNS3_DEVARG_RX_FUNC_HINT "=vec|sve|simple|common "
2520                HNS3_DEVARG_TX_FUNC_HINT "=vec|sve|simple|common "
2521                HNS3_DEVARG_DEV_CAPS_MASK "=<1-65535> "
2522                HNS3_DEVARG_MBX_TIME_LIMIT_MS "=<uint16_t> ");
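/*
 * A hypothetical usage sketch for the devargs above (the PCI address is a
 * placeholder, not taken from this file): bind the VF to vfio-pci, then
 * start an application with explicit Rx/Tx burst function hints, e.g.
 *   dpdk-testpmd -a 0000:bd:01.0,rx_func_hint=vec,tx_func_hint=common -- -i
 */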
2523