dpdk/drivers/net/hns3/hns3_ethdev_vf.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 HiSilicon Limited.
 */

#include <linux/pci_regs.h>
#include <rte_alarm.h>
#include <ethdev_pci.h>
#include <rte_io.h>
#include <rte_pci.h>
#include <rte_vfio.h>

#include "hns3_ethdev.h"
#include "hns3_logs.h"
#include "hns3_rxtx.h"
#include "hns3_regs.h"
#include "hns3_intr.h"
#include "hns3_dcb.h"
#include "hns3_mp.h"

#define HNS3VF_KEEP_ALIVE_INTERVAL      2000000 /* us */
#define HNS3VF_SERVICE_INTERVAL         1000000 /* us */

#define HNS3VF_RESET_WAIT_MS    20
#define HNS3VF_RESET_WAIT_CNT   2000

/* Reset related Registers */
#define HNS3_GLOBAL_RESET_BIT           0
#define HNS3_CORE_RESET_BIT             1
#define HNS3_IMP_RESET_BIT              2
#define HNS3_FUN_RST_ING_B              0

enum hns3vf_evt_cause {
        HNS3VF_VECTOR0_EVENT_RST,
        HNS3VF_VECTOR0_EVENT_MBX,
        HNS3VF_VECTOR0_EVENT_OTHER,
};

static enum hns3_reset_level hns3vf_get_reset_level(struct hns3_hw *hw,
                                                    uint64_t *levels);
static int hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int hns3vf_dev_configure_vlan(struct rte_eth_dev *dev);

static int hns3vf_add_mc_mac_addr(struct hns3_hw *hw,
                                  struct rte_ether_addr *mac_addr);
static int hns3vf_remove_mc_mac_addr(struct hns3_hw *hw,
                                     struct rte_ether_addr *mac_addr);
static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
                                   __rte_unused int wait_to_complete);

/* set PCI bus mastering */
static int
hns3vf_set_bus_master(const struct rte_pci_device *device, bool op)
{
        uint16_t reg;
        int ret;

        ret = rte_pci_read_config(device, &reg, sizeof(reg), PCI_COMMAND);
        if (ret < 0) {
                PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x",
                             PCI_COMMAND);
                return ret;
        }

        if (op)
                /* set the master bit */
                reg |= PCI_COMMAND_MASTER;
        else
                reg &= ~(PCI_COMMAND_MASTER);

        return rte_pci_write_config(device, &reg, sizeof(reg), PCI_COMMAND);
}

/**
 * hns3vf_find_pci_capability - lookup a capability in the PCI capability list
 * @device: the PCI device
 * @cap: the capability ID to look for
 *
 * Return the offset of the given capability within the PCI capability list,
 * or 0 if it is not found.
 */
static int
hns3vf_find_pci_capability(const struct rte_pci_device *device, int cap)
{
#define MAX_PCIE_CAPABILITY 48
        uint16_t status;
        uint8_t pos;
        uint8_t id;
        int ttl;
        int ret;

        ret = rte_pci_read_config(device, &status, sizeof(status), PCI_STATUS);
        if (ret < 0) {
                PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x", PCI_STATUS);
                return 0;
        }

        if (!(status & PCI_STATUS_CAP_LIST))
                return 0;

        ttl = MAX_PCIE_CAPABILITY;
        ret = rte_pci_read_config(device, &pos, sizeof(pos),
                                  PCI_CAPABILITY_LIST);
        if (ret < 0) {
                PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x",
                             PCI_CAPABILITY_LIST);
                return 0;
        }

        while (ttl-- && pos >= PCI_STD_HEADER_SIZEOF) {
                ret = rte_pci_read_config(device, &id, sizeof(id),
                                          (pos + PCI_CAP_LIST_ID));
                if (ret < 0) {
                        PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x",
                                     (pos + PCI_CAP_LIST_ID));
                        break;
                }

                if (id == 0xFF)
                        break;

                if (id == cap)
                        return (int)pos;

                ret = rte_pci_read_config(device, &pos, sizeof(pos),
                                          (pos + PCI_CAP_LIST_NEXT));
                if (ret < 0) {
                        PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x",
                                     (pos + PCI_CAP_LIST_NEXT));
                        break;
                }
        }
        return 0;
}

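/*
 * Enable or disable MSI-X for the device by toggling the enable bit in the
 * message control word of the MSI-X capability in PCI config space.
 */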
static int
hns3vf_enable_msix(const struct rte_pci_device *device, bool op)
{
        uint16_t control;
        int pos;
        int ret;

        pos = hns3vf_find_pci_capability(device, PCI_CAP_ID_MSIX);
        if (pos) {
                ret = rte_pci_read_config(device, &control, sizeof(control),
                                    (pos + PCI_MSIX_FLAGS));
                if (ret < 0) {
                        PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x",
                                     (pos + PCI_MSIX_FLAGS));
                        return -ENXIO;
                }

                if (op)
                        control |= PCI_MSIX_FLAGS_ENABLE;
                else
                        control &= ~PCI_MSIX_FLAGS_ENABLE;
                ret = rte_pci_write_config(device, &control, sizeof(control),
                                          (pos + PCI_MSIX_FLAGS));
                if (ret < 0) {
                        PMD_INIT_LOG(ERR, "failed to write PCI offset 0x%x",
                                    (pos + PCI_MSIX_FLAGS));
                        return -ENXIO;
                }

                return 0;
        }

        return -ENXIO;
}

static int
hns3vf_add_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
{
        /* mac address was checked by upper level interface */
        char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
        int ret;

        ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST,
                                HNS3_MBX_MAC_VLAN_UC_ADD, mac_addr->addr_bytes,
                                RTE_ETHER_ADDR_LEN, false, NULL, 0);
        if (ret) {
                hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
                                      mac_addr);
                hns3_err(hw, "failed to add uc mac addr(%s), ret = %d",
                         mac_str, ret);
        }
        return ret;
}

static int
hns3vf_remove_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
{
        /* mac address was checked by upper level interface */
        char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
        int ret;

        ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST,
                                HNS3_MBX_MAC_VLAN_UC_REMOVE,
                                mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN,
                                false, NULL, 0);
        if (ret) {
                hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
                                      mac_addr);
                hns3_err(hw, "failed to remove uc mac addr(%s), ret = %d",
                         mac_str, ret);
        }
        return ret;
}

static int
hns3vf_add_mc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
{
        char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
        struct rte_ether_addr *addr;
        int ret;
        int i;

        for (i = 0; i < hw->mc_addrs_num; i++) {
                addr = &hw->mc_addrs[i];
                /* Check if there are duplicate addresses */
                if (rte_is_same_ether_addr(addr, mac_addr)) {
                        hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
                                              addr);
                        hns3_err(hw, "failed to add mc mac addr, the same "
                                 "addr(%s) has already been added by the "
                                 "set_mc_mac_addr_list API", mac_str);
                        return -EINVAL;
                }
        }

        ret = hns3vf_add_mc_mac_addr(hw, mac_addr);
        if (ret) {
                hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
                                      mac_addr);
                hns3_err(hw, "failed to add mc mac addr(%s), ret = %d",
                         mac_str, ret);
        }
        return ret;
}

static int
hns3vf_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
                    __rte_unused uint32_t idx,
                    __rte_unused uint32_t pool)
{
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
        int ret;

        rte_spinlock_lock(&hw->lock);

        /*
         * In the hns3 network engine, UC and MC MAC addresses are added with
         * different firmware commands, so we need to determine whether the
         * input address is a UC or a MC address in order to issue the right
         * command. Note that it is recommended to set MC MAC addresses with
         * the rte_eth_dev_set_mc_addr_list API function, because setting a MC
         * address through the rte_eth_dev_mac_addr_add API function may reduce
         * the number of UC MAC address entries available (see the usage
         * sketch after this function).
         */
        if (rte_is_multicast_ether_addr(mac_addr))
                ret = hns3vf_add_mc_addr_common(hw, mac_addr);
        else
                ret = hns3vf_add_uc_mac_addr(hw, mac_addr);

        rte_spinlock_unlock(&hw->lock);
        if (ret) {
                hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
                                      mac_addr);
                hns3_err(hw, "failed to add mac addr(%s), ret = %d", mac_str,
                         ret);
        }

        return ret;
}

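/*
 * Usage sketch (application side) for the recommendation in
 * hns3vf_add_mac_addr() above. This is a minimal, hypothetical example;
 * port_id and the multicast addresses are placeholders:
 *
 *     struct rte_ether_addr mc_addrs[2];
 *
 *     rte_ether_unformat_addr("01:00:5E:00:00:01", &mc_addrs[0]);
 *     rte_ether_unformat_addr("01:00:5E:00:00:02", &mc_addrs[1]);
 *     ret = rte_eth_dev_set_mc_addr_list(port_id, mc_addrs, 2);
 */
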
static void
hns3vf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t idx)
{
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        /* index will be checked by upper level rte interface */
        struct rte_ether_addr *mac_addr = &dev->data->mac_addrs[idx];
        char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
        int ret;

        rte_spinlock_lock(&hw->lock);

        if (rte_is_multicast_ether_addr(mac_addr))
                ret = hns3vf_remove_mc_mac_addr(hw, mac_addr);
        else
                ret = hns3vf_remove_uc_mac_addr(hw, mac_addr);

        rte_spinlock_unlock(&hw->lock);
        if (ret) {
                hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
                                      mac_addr);
                hns3_err(hw, "failed to remove mac addr(%s), ret = %d",
                         mac_str, ret);
        }
}

static int
hns3vf_set_default_mac_addr(struct rte_eth_dev *dev,
                            struct rte_ether_addr *mac_addr)
{
#define HNS3_TWO_ETHER_ADDR_LEN (RTE_ETHER_ADDR_LEN * 2)
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_ether_addr *old_addr;
        uint8_t addr_bytes[HNS3_TWO_ETHER_ADDR_LEN]; /* for 2 MAC addresses */
        char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
        int ret;

        /*
         * The input parameter named mac_addr is guaranteed to be a valid
         * address by the rte layer of the DPDK framework.
         */
        old_addr = (struct rte_ether_addr *)hw->mac.mac_addr;
        rte_spinlock_lock(&hw->lock);
        memcpy(addr_bytes, mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN);
        memcpy(&addr_bytes[RTE_ETHER_ADDR_LEN], old_addr->addr_bytes,
               RTE_ETHER_ADDR_LEN);

        ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST,
                                HNS3_MBX_MAC_VLAN_UC_MODIFY, addr_bytes,
                                HNS3_TWO_ETHER_ADDR_LEN, true, NULL, 0);
        if (ret) {
                /*
                 * The hns3 VF PMD driver depends on the hns3 PF kernel ethdev
                 * driver. When the user has configured a MAC address for the
                 * VF device by the "ip link set ..." command based on the PF
                 * device, the hns3 PF kernel ethdev driver does not allow the
                 * VF driver to request reconfiguring a different default MAC
                 * address, and returns -EPERM to the VF driver through the
                 * mailbox (see the host-side example after this function).
                 */
                if (ret == -EPERM) {
                        hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
                                              old_addr);
                        hns3_warn(hw, "Has permanent mac addr(%s) for vf",
                                  mac_str);
                } else {
                        hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
                                              mac_addr);
                        hns3_err(hw, "Failed to set mac addr(%s) for vf: %d",
                                 mac_str, ret);
                }
        }

        rte_ether_addr_copy(mac_addr,
                            (struct rte_ether_addr *)hw->mac.mac_addr);
        rte_spinlock_unlock(&hw->lock);

        return ret;
}

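/*
 * Host-side example of the PF configuration described in
 * hns3vf_set_default_mac_addr() above; the interface name, VF id and MAC
 * address are hypothetical. Once the administrator has pinned the VF MAC
 * this way, the mailbox request above fails with -EPERM:
 *
 *     ip link set eth0 vf 0 mac 00:11:22:33:44:55
 */
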
static int
hns3vf_configure_mac_addr(struct hns3_adapter *hns, bool del)
{
        struct hns3_hw *hw = &hns->hw;
        struct rte_ether_addr *addr;
        char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
        int err = 0;
        int ret;
        int i;

        for (i = 0; i < HNS3_VF_UC_MACADDR_NUM; i++) {
                addr = &hw->data->mac_addrs[i];
                if (rte_is_zero_ether_addr(addr))
                        continue;
                if (rte_is_multicast_ether_addr(addr))
                        ret = del ? hns3vf_remove_mc_mac_addr(hw, addr) :
                              hns3vf_add_mc_mac_addr(hw, addr);
                else
                        ret = del ? hns3vf_remove_uc_mac_addr(hw, addr) :
                              hns3vf_add_uc_mac_addr(hw, addr);

                if (ret) {
                        err = ret;
                        hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
                                              addr);
                        hns3_err(hw, "failed to %s mac addr(%s) index:%d "
                                 "ret = %d.", del ? "remove" : "restore",
                                 mac_str, i, ret);
                }
        }
        return err;
}

static int
hns3vf_add_mc_mac_addr(struct hns3_hw *hw,
                       struct rte_ether_addr *mac_addr)
{
        char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
        int ret;

        ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MULTICAST,
                                HNS3_MBX_MAC_VLAN_MC_ADD,
                                mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN, false,
                                NULL, 0);
        if (ret) {
                hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
                                      mac_addr);
                hns3_err(hw, "Failed to add mc mac addr(%s) for vf: %d",
                         mac_str, ret);
        }

        return ret;
}

static int
hns3vf_remove_mc_mac_addr(struct hns3_hw *hw,
                          struct rte_ether_addr *mac_addr)
{
        char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
        int ret;

        ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MULTICAST,
                                HNS3_MBX_MAC_VLAN_MC_REMOVE,
                                mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN, false,
                                NULL, 0);
        if (ret) {
                hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
                                      mac_addr);
                hns3_err(hw, "Failed to remove mc mac addr(%s) for vf: %d",
                         mac_str, ret);
        }

        return ret;
}

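/*
 * Validate a candidate multicast address list before it is applied: the list
 * must not exceed HNS3_MC_MACADDR_NUM entries, every entry must be a
 * multicast address, the list must not contain duplicates, and no entry may
 * collide with an address already configured through the MAC address add API.
 */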
static int
hns3vf_set_mc_addr_chk_param(struct hns3_hw *hw,
                             struct rte_ether_addr *mc_addr_set,
                             uint32_t nb_mc_addr)
{
        char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
        struct rte_ether_addr *addr;
        uint32_t i;
        uint32_t j;

        if (nb_mc_addr > HNS3_MC_MACADDR_NUM) {
                hns3_err(hw, "failed to set mc mac addr, nb_mc_addr(%u) "
                         "invalid. valid range: 0~%d",
                         nb_mc_addr, HNS3_MC_MACADDR_NUM);
                return -EINVAL;
        }

        /* Check if input mac addresses are valid */
        for (i = 0; i < nb_mc_addr; i++) {
                addr = &mc_addr_set[i];
                if (!rte_is_multicast_ether_addr(addr)) {
                        hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
                                              addr);
                        hns3_err(hw,
                                 "failed to set mc mac addr, addr(%s) invalid.",
                                 mac_str);
                        return -EINVAL;
                }

                /* Check if there are duplicate addresses */
                for (j = i + 1; j < nb_mc_addr; j++) {
                        if (rte_is_same_ether_addr(addr, &mc_addr_set[j])) {
                                hns3_ether_format_addr(mac_str,
                                                      RTE_ETHER_ADDR_FMT_SIZE,
                                                      addr);
                                hns3_err(hw, "failed to set mc mac addr, "
                                         "addrs invalid. two same addrs(%s).",
                                         mac_str);
                                return -EINVAL;
                        }
                }

                /*
                 * Check if there are duplicate addresses between mac_addrs
                 * and mc_addr_set
                 */
                for (j = 0; j < HNS3_VF_UC_MACADDR_NUM; j++) {
                        if (rte_is_same_ether_addr(addr,
                                                   &hw->data->mac_addrs[j])) {
                                hns3_ether_format_addr(mac_str,
                                                      RTE_ETHER_ADDR_FMT_SIZE,
                                                      addr);
                                hns3_err(hw, "failed to set mc mac addr, "
                                         "addrs invalid. addr(%s) has already "
                                         "been configured by the mac_addr "
                                         "add API", mac_str);
                                return -EINVAL;
                        }
                }
        }

        return 0;
}

static int
hns3vf_set_mc_mac_addr_list(struct rte_eth_dev *dev,
                            struct rte_ether_addr *mc_addr_set,
                            uint32_t nb_mc_addr)
{
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_ether_addr *addr;
        int cur_addr_num;
        int set_addr_num;
        int num;
        int ret;
        int i;

        ret = hns3vf_set_mc_addr_chk_param(hw, mc_addr_set, nb_mc_addr);
        if (ret)
                return ret;

        rte_spinlock_lock(&hw->lock);
        cur_addr_num = hw->mc_addrs_num;
        for (i = 0; i < cur_addr_num; i++) {
                num = cur_addr_num - i - 1;
                addr = &hw->mc_addrs[num];
                ret = hns3vf_remove_mc_mac_addr(hw, addr);
                if (ret) {
                        rte_spinlock_unlock(&hw->lock);
                        return ret;
                }

                hw->mc_addrs_num--;
        }

        set_addr_num = (int)nb_mc_addr;
        for (i = 0; i < set_addr_num; i++) {
                addr = &mc_addr_set[i];
                ret = hns3vf_add_mc_mac_addr(hw, addr);
                if (ret) {
                        rte_spinlock_unlock(&hw->lock);
                        return ret;
                }

                rte_ether_addr_copy(addr, &hw->mc_addrs[hw->mc_addrs_num]);
                hw->mc_addrs_num++;
        }
        rte_spinlock_unlock(&hw->lock);

        return 0;
}

static int
hns3vf_configure_all_mc_mac_addr(struct hns3_adapter *hns, bool del)
{
        char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
        struct hns3_hw *hw = &hns->hw;
        struct rte_ether_addr *addr;
        int err = 0;
        int ret;
        int i;

        for (i = 0; i < hw->mc_addrs_num; i++) {
                addr = &hw->mc_addrs[i];
                if (!rte_is_multicast_ether_addr(addr))
                        continue;
                if (del)
                        ret = hns3vf_remove_mc_mac_addr(hw, addr);
                else
                        ret = hns3vf_add_mc_mac_addr(hw, addr);
                if (ret) {
                        err = ret;
                        hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
                                              addr);
                        hns3_err(hw, "Failed to %s mc mac addr: %s for vf: %d",
                                 del ? "Remove" : "Restore", mac_str, ret);
                }
        }
        return err;
}

static int
hns3vf_set_promisc_mode(struct hns3_hw *hw, bool en_bc_pmc,
                        bool en_uc_pmc, bool en_mc_pmc)
{
        struct hns3_mbx_vf_to_pf_cmd *req;
        struct hns3_cmd_desc desc;
        int ret;

        req = (struct hns3_mbx_vf_to_pf_cmd *)desc.data;

        /*
         * The hns3 VF PMD driver depends on the hns3 PF kernel ethdev driver,
         * which leads to the following behavior of promiscuous/allmulticast
         * mode in the hns3 VF PMD driver:
         * 1. Promiscuous/allmulticast mode can be configured successfully
         *    only on a trusted VF device. Configuring it on a non-trusted VF
         *    device will fail. The hns3 VF device can be configured as a
         *    trusted device by the hns3 PF kernel ethdev driver on the host
         *    with the following command:
         *      "ip link set <eth num> vf <vf id> trust on"
         * 2. After promiscuous mode is configured successfully, the hns3 VF
         *    PMD driver can receive both ingress and outgoing traffic, that
         *    is, all the ingress packets and all the packets sent from the PF
         *    and the other VFs on the same physical port.
         * 3. Note: because of a hardware constraint, the VLAN filter is
         *    enabled by default and cannot be turned off on a VF device, so
         *    the VLAN filter is still effective even in promiscuous mode. If
         *    upper applications don't call the rte_eth_dev_vlan_filter API
         *    function to set a VLAN on the VF device, the hns3 VF PMD driver
         *    will be unable to receive VLAN-tagged packets in promiscuous
         *    mode (see the application-side sketch after this function).
         */
        hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MBX_VF_TO_PF, false);
        req->msg[0] = HNS3_MBX_SET_PROMISC_MODE;
        req->msg[1] = en_bc_pmc ? 1 : 0;
        req->msg[2] = en_uc_pmc ? 1 : 0;
        req->msg[3] = en_mc_pmc ? 1 : 0;
        req->msg[4] = hw->promisc_mode == HNS3_LIMIT_PROMISC_MODE ? 1 : 0;

        ret = hns3_cmd_send(hw, &desc, 1);
        if (ret)
                hns3_err(hw, "Set promisc mode fail, ret = %d", ret);

        return ret;
}

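/*
 * Application-side sketch matching note 1 in hns3vf_set_promisc_mode() above:
 * enabling promiscuous mode is expected to fail unless the host administrator
 * has trusted the VF. port_id is a hypothetical placeholder:
 *
 *     ret = rte_eth_promiscuous_enable(port_id);
 *     if (ret != 0)
 *             printf("enable promisc failed (VF not trusted?): %d\n", ret);
 */
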
static int
hns3vf_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_hw *hw = &hns->hw;
        int ret;

        ret = hns3vf_set_promisc_mode(hw, true, true, true);
        if (ret)
                hns3_err(hw, "Failed to enable promiscuous mode, ret = %d",
                        ret);
        return ret;
}

static int
hns3vf_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
        bool allmulti = dev->data->all_multicast ? true : false;
        struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_hw *hw = &hns->hw;
        int ret;

        ret = hns3vf_set_promisc_mode(hw, true, false, allmulti);
        if (ret)
                hns3_err(hw, "Failed to disable promiscuous mode, ret = %d",
                        ret);
        return ret;
}

static int
hns3vf_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_hw *hw = &hns->hw;
        int ret;

        if (dev->data->promiscuous)
                return 0;

        ret = hns3vf_set_promisc_mode(hw, true, false, true);
        if (ret)
                hns3_err(hw, "Failed to enable allmulticast mode, ret = %d",
                        ret);
        return ret;
}

static int
hns3vf_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_hw *hw = &hns->hw;
        int ret;

        if (dev->data->promiscuous)
                return 0;

        ret = hns3vf_set_promisc_mode(hw, true, false, false);
        if (ret)
                hns3_err(hw, "Failed to disable allmulticast mode, ret = %d",
                        ret);
        return ret;
}

static int
hns3vf_restore_promisc(struct hns3_adapter *hns)
{
        struct hns3_hw *hw = &hns->hw;
        bool allmulti = hw->data->all_multicast ? true : false;

        if (hw->data->promiscuous)
                return hns3vf_set_promisc_mode(hw, true, true, true);

        return hns3vf_set_promisc_mode(hw, true, false, allmulti);
}

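/*
 * Ask the PF through the mailbox to map (mmap == true) or unmap one Rx or Tx
 * ring to the given MSI-X vector; the GL index selects whether the Rx or the
 * Tx coalesce setting applies to the mapping.
 */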
static int
hns3vf_bind_ring_with_vector(struct hns3_hw *hw, uint8_t vector_id,
                             bool mmap, enum hns3_ring_type queue_type,
                             uint16_t queue_id)
{
        struct hns3_vf_bind_vector_msg bind_msg;
        const char *op_str;
        uint16_t code;
        int ret;

        memset(&bind_msg, 0, sizeof(bind_msg));
        code = mmap ? HNS3_MBX_MAP_RING_TO_VECTOR :
                HNS3_MBX_UNMAP_RING_TO_VECTOR;
        bind_msg.vector_id = vector_id;

        if (queue_type == HNS3_RING_TYPE_RX)
                bind_msg.param[0].int_gl_index = HNS3_RING_GL_RX;
        else
                bind_msg.param[0].int_gl_index = HNS3_RING_GL_TX;

        bind_msg.param[0].ring_type = queue_type;
        bind_msg.ring_num = 1;
        bind_msg.param[0].tqp_index = queue_id;
        op_str = mmap ? "Map" : "Unmap";
        ret = hns3_send_mbx_msg(hw, code, 0, (uint8_t *)&bind_msg,
                                sizeof(bind_msg), false, NULL, 0);
        if (ret)
                hns3_err(hw, "%s TQP %u fail, vector_id is %u, ret is %d.",
                         op_str, queue_id, bind_msg.vector_id, ret);

        return ret;
}

static int
hns3vf_init_ring_with_vector(struct hns3_hw *hw)
{
        uint16_t vec;
        int ret;
        int i;

        /*
         * In the hns3 network engine, vector 0 is always the misc interrupt
         * of this function and vectors 1~N can be used for the queues of the
         * function. Tx and Rx queues with the same number share one interrupt
         * vector. During initialization, all the hardware mappings between
         * queues and interrupt vectors need to be cleared, so that errors
         * caused by residual configurations, such as unexpected Tx
         * interrupts, can be avoided (see the configuration sketch after
         * this function).
         */
        vec = hw->num_msi - 1; /* vector 0 for misc interrupt, not for queue */
        if (hw->intr.mapping_mode == HNS3_INTR_MAPPING_VEC_RSV_ONE)
                vec = vec - 1; /* the last interrupt is reserved */
        hw->intr_tqps_num = RTE_MIN(vec, hw->tqps_num);
        for (i = 0; i < hw->intr_tqps_num; i++) {
                /*
                 * Set gap limiter/rate limiter/quantity limiter algorithm
                 * configuration for interrupt coalesce of queue's interrupt.
                 */
                hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_RX,
                                       HNS3_TQP_INTR_GL_DEFAULT);
                hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_TX,
                                       HNS3_TQP_INTR_GL_DEFAULT);
                hns3_set_queue_intr_rl(hw, i, HNS3_TQP_INTR_RL_DEFAULT);
                /*
                 * QL (quantity limiter) is not used currently, so just set 0
                 * to disable it.
                 */
                hns3_set_queue_intr_ql(hw, i, HNS3_TQP_INTR_QL_DEFAULT);

                ret = hns3vf_bind_ring_with_vector(hw, vec, false,
                                                   HNS3_RING_TYPE_TX, i);
                if (ret) {
                        PMD_INIT_LOG(ERR, "VF fail to unbind TX ring(%d) with "
                                          "vector: %u, ret=%d", i, vec, ret);
                        return ret;
                }

                ret = hns3vf_bind_ring_with_vector(hw, vec, false,
                                                   HNS3_RING_TYPE_RX, i);
                if (ret) {
                        PMD_INIT_LOG(ERR, "VF fail to unbind RX ring(%d) with "
                                          "vector: %u, ret=%d", i, vec, ret);
                        return ret;
                }
        }

        return 0;
}

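/*
 * For reference, an application opts into Rx queue interrupts (which the
 * queue/vector bindings above serve) via intr_conf before configuring the
 * device. A minimal, hypothetical sketch; port_id and the queue counts are
 * placeholders:
 *
 *     struct rte_eth_conf port_conf = { 0 };
 *
 *     port_conf.intr_conf.rxq = 1;
 *     ret = rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
 */
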
static int
hns3vf_dev_configure(struct rte_eth_dev *dev)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_hw *hw = &hns->hw;
        struct rte_eth_conf *conf = &dev->data->dev_conf;
        enum rte_eth_rx_mq_mode mq_mode = conf->rxmode.mq_mode;
        uint16_t nb_rx_q = dev->data->nb_rx_queues;
        uint16_t nb_tx_q = dev->data->nb_tx_queues;
        struct rte_eth_rss_conf rss_conf;
        uint32_t max_rx_pkt_len;
        uint16_t mtu;
        bool gro_en;
        int ret;

        hw->cfg_max_queues = RTE_MAX(nb_rx_q, nb_tx_q);

        /*
         * Some versions of the hardware network engine do not support
         * individually enabling/disabling/resetting the Tx or Rx queue. These
         * devices must enable/disable/reset Tx and Rx queues at the same
         * time. When the number of Tx queues allocated by upper applications
         * is not equal to the number of Rx queues, the driver needs to set up
         * fake Tx or Rx queues to adjust the number of Tx/Rx queues;
         * otherwise, the network engine cannot work as usual. These fake
         * queues are imperceptible to, and cannot be used by, upper
         * applications.
         */
        ret = hns3_set_fake_rx_or_tx_queues(dev, nb_rx_q, nb_tx_q);
        if (ret) {
                hns3_err(hw, "fail to set Rx/Tx fake queues, ret = %d.", ret);
                hw->cfg_max_queues = 0;
                return ret;
        }

        hw->adapter_state = HNS3_NIC_CONFIGURING;
        if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
                hns3_err(hw, "setting link speed/duplex not supported");
                ret = -EINVAL;
                goto cfg_err;
        }

        /* When RSS is not configured, packets are directed to queue 0 */
        if ((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG) {
                conf->rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
                hw->rss_dis_flag = false;
                rss_conf = conf->rx_adv_conf.rss_conf;
                ret = hns3_dev_rss_hash_update(dev, &rss_conf);
                if (ret)
                        goto cfg_err;
        }

        /*
         * If jumbo frames are enabled, the MTU needs to be refreshed
         * according to the maximum Rx packet length.
         */
        if (conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
                max_rx_pkt_len = conf->rxmode.max_rx_pkt_len;
                if (max_rx_pkt_len > HNS3_MAX_FRAME_LEN ||
                    max_rx_pkt_len <= HNS3_DEFAULT_FRAME_LEN) {
                        hns3_err(hw, "maximum Rx packet length must be greater "
                                 "than %u and not greater than %u when jumbo "
                                 "frame enabled.",
                                 (uint16_t)HNS3_DEFAULT_FRAME_LEN,
                                 (uint16_t)HNS3_MAX_FRAME_LEN);
                        ret = -EINVAL;
                        goto cfg_err;
                }

                mtu = (uint16_t)HNS3_PKTLEN_TO_MTU(max_rx_pkt_len);
                ret = hns3vf_dev_mtu_set(dev, mtu);
                if (ret)
                        goto cfg_err;
                dev->data->mtu = mtu;
        }

        ret = hns3vf_dev_configure_vlan(dev);
        if (ret)
                goto cfg_err;

        /* config hardware GRO */
        gro_en = conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO ? true : false;
        ret = hns3_config_gro(hw, gro_en);
        if (ret)
                goto cfg_err;

        hns3_init_rx_ptype_tble(dev);

        hw->adapter_state = HNS3_NIC_CONFIGURED;
        return 0;

cfg_err:
        hw->cfg_max_queues = 0;
        (void)hns3_set_fake_rx_or_tx_queues(dev, 0, 0);
        hw->adapter_state = HNS3_NIC_INITIALIZED;

        return ret;
}

static int
hns3vf_config_mtu(struct hns3_hw *hw, uint16_t mtu)
{
        int ret;

        ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MTU, 0, (const uint8_t *)&mtu,
                                sizeof(mtu), true, NULL, 0);
        if (ret)
                hns3_err(hw, "Failed to set mtu (%u) for vf: %d", mtu, ret);

        return ret;
}

static int
hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t frame_size = mtu + HNS3_ETH_OVERHEAD;
        int ret;

        /*
         * The hns3 PF/VF devices on the same port share the hardware MTU
         * configuration. Currently, we send a mailbox message to ask the hns3
         * PF kernel ethdev driver to finish the hardware MTU configuration on
         * behalf of the hns3 VF PMD driver, so there is no need to stop the
         * port for the hns3 VF device. Note that the MTU value issued by the
         * hns3 VF PMD driver must be less than or equal to the PF's MTU.
         */
        if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
                hns3_err(hw, "Failed to set mtu during resetting");
                return -EIO;
        }

        /*
         * When Rx of scattered packets is off, the hns3 PMD driver may use
         * the vector Rx process function or the simple Rx function to receive
         * packets. If the input MTU is increased so that the maximum length
         * of received packets is greater than the length of one Rx buffer,
         * the hardware network engine needs to use multiple BDs and buffers
         * to store these packets, which causes problems for the vector and
         * simple Rx functions. So, when Rx of scattered packets is off and
         * the device is started, it is not permitted to increase the MTU so
         * that the maximum length of Rx packets exceeds the Rx buffer length
         * (see the application-side sketch after this function).
         */
        if (dev->data->dev_started && !dev->data->scattered_rx &&
            frame_size > hw->rx_buf_len) {
                hns3_err(hw, "failed to set mtu because current is "
                        "not scattered rx mode");
                return -EOPNOTSUPP;
        }

        rte_spinlock_lock(&hw->lock);
        ret = hns3vf_config_mtu(hw, mtu);
        if (ret) {
                rte_spinlock_unlock(&hw->lock);
                return ret;
        }
        if (mtu > RTE_ETHER_MTU)
                dev->data->dev_conf.rxmode.offloads |=
                                                DEV_RX_OFFLOAD_JUMBO_FRAME;
        else
                dev->data->dev_conf.rxmode.offloads &=
                                                ~DEV_RX_OFFLOAD_JUMBO_FRAME;
        dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
        rte_spinlock_unlock(&hw->lock);

        return 0;
}

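/*
 * Application-side sketch of the constraint in hns3vf_dev_mtu_set() above:
 * when scattered Rx is off, enlarge the MTU only while the port is stopped.
 * port_id and the MTU value are hypothetical placeholders:
 *
 *     ret = rte_eth_dev_stop(port_id);
 *     if (ret == 0)
 *             ret = rte_eth_dev_set_mtu(port_id, 9000);
 */
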
static int
hns3vf_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
{
        struct hns3_adapter *hns = eth_dev->data->dev_private;
        struct hns3_hw *hw = &hns->hw;
        uint16_t q_num = hw->tqps_num;

        /*
         * In interrupt mode, 'max_rx_queues' is set based on the number of
         * MSI-X interrupt resources of the hardware.
         */
        if (hw->data->dev_conf.intr_conf.rxq == 1)
                q_num = hw->intr_tqps_num;

        info->max_rx_queues = q_num;
        info->max_tx_queues = hw->tqps_num;
        info->max_rx_pktlen = HNS3_MAX_FRAME_LEN; /* CRC included */
        info->min_rx_bufsize = HNS3_MIN_BD_BUF_SIZE;
        info->max_mac_addrs = HNS3_VF_UC_MACADDR_NUM;
        info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD;
        info->max_lro_pkt_size = HNS3_MAX_LRO_SIZE;

        info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
                                 DEV_RX_OFFLOAD_UDP_CKSUM |
                                 DEV_RX_OFFLOAD_TCP_CKSUM |
                                 DEV_RX_OFFLOAD_SCTP_CKSUM |
                                 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
                                 DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
                                 DEV_RX_OFFLOAD_SCATTER |
                                 DEV_RX_OFFLOAD_VLAN_STRIP |
                                 DEV_RX_OFFLOAD_VLAN_FILTER |
                                 DEV_RX_OFFLOAD_JUMBO_FRAME |
                                 DEV_RX_OFFLOAD_RSS_HASH |
                                 DEV_RX_OFFLOAD_TCP_LRO);
        info->tx_offload_capa = (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
                                 DEV_TX_OFFLOAD_IPV4_CKSUM |
                                 DEV_TX_OFFLOAD_TCP_CKSUM |
                                 DEV_TX_OFFLOAD_UDP_CKSUM |
                                 DEV_TX_OFFLOAD_SCTP_CKSUM |
                                 DEV_TX_OFFLOAD_MULTI_SEGS |
                                 DEV_TX_OFFLOAD_TCP_TSO |
                                 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
                                 DEV_TX_OFFLOAD_GRE_TNL_TSO |
                                 DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
                                 DEV_TX_OFFLOAD_MBUF_FAST_FREE |
                                 hns3_txvlan_cap_get(hw));

        if (hns3_dev_outer_udp_cksum_supported(hw))
                info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;

        if (hns3_dev_indep_txrx_supported(hw))
                info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
                                 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;

        info->rx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = HNS3_MAX_RING_DESC,
                .nb_min = HNS3_MIN_RING_DESC,
                .nb_align = HNS3_ALIGN_RING_DESC,
        };

        info->tx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = HNS3_MAX_RING_DESC,
                .nb_min = HNS3_MIN_RING_DESC,
                .nb_align = HNS3_ALIGN_RING_DESC,
                .nb_seg_max = HNS3_MAX_TSO_BD_PER_PKT,
                .nb_mtu_seg_max = hw->max_non_tso_bd_num,
        };

        info->default_rxconf = (struct rte_eth_rxconf) {
                .rx_free_thresh = HNS3_DEFAULT_RX_FREE_THRESH,
                /*
                 * On the hns3 network engine, if there are no available Rx
                 * buffer descriptors, incoming packets are always dropped by
                 * hardware.
                 */
                .rx_drop_en = 1,
                .offloads = 0,
        };
        info->default_txconf = (struct rte_eth_txconf) {
                .tx_rs_thresh = HNS3_DEFAULT_TX_RS_THRESH,
                .offloads = 0,
        };

        info->reta_size = hw->rss_ind_tbl_size;
        info->hash_key_size = HNS3_RSS_KEY_SIZE;
        info->flow_type_rss_offloads = HNS3_ETH_RSS_SUPPORT;

        info->default_rxportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE;
        info->default_txportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE;
        info->default_rxportconf.nb_queues = HNS3_DEFAULT_PORT_CONF_QUEUES_NUM;
        info->default_txportconf.nb_queues = HNS3_DEFAULT_PORT_CONF_QUEUES_NUM;
        info->default_rxportconf.ring_size = HNS3_DEFAULT_RING_DESC;
        info->default_txportconf.ring_size = HNS3_DEFAULT_RING_DESC;

        return 0;
}

static void
hns3vf_clear_event_cause(struct hns3_hw *hw, uint32_t regclr)
{
        hns3_write_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG, regclr);
}

static void
hns3vf_disable_irq0(struct hns3_hw *hw)
{
        hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 0);
}

static void
hns3vf_enable_irq0(struct hns3_hw *hw)
{
        hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 1);
}

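/*
 * Decode the pending vector0 interrupt source. Reset events take priority
 * over mailbox (CMDQ RX) events; *clearval receives the bits the caller
 * should write back to clear the handled source.
 */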
static enum hns3vf_evt_cause
hns3vf_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval)
{
        struct hns3_hw *hw = &hns->hw;
        enum hns3vf_evt_cause ret;
        uint32_t cmdq_stat_reg;
        uint32_t rst_ing_reg;
        uint32_t val;

        /* Fetch the events from their corresponding regs */
        cmdq_stat_reg = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_STAT_REG);
        if (BIT(HNS3_VECTOR0_RST_INT_B) & cmdq_stat_reg) {
                rst_ing_reg = hns3_read_dev(hw, HNS3_FUN_RST_ING);
                hns3_warn(hw, "resetting reg: 0x%x", rst_ing_reg);
                hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
                __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
                val = hns3_read_dev(hw, HNS3_VF_RST_ING);
                hns3_write_dev(hw, HNS3_VF_RST_ING, val | HNS3_VF_RST_ING_BIT);
                val = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RST_INT_B);
                if (clearval) {
                        hw->reset.stats.global_cnt++;
                        hns3_warn(hw, "Global reset detected, clear reset status");
                } else {
                        hns3_schedule_delayed_reset(hns);
                        hns3_warn(hw, "Global reset detected, don't clear reset status");
                }

                ret = HNS3VF_VECTOR0_EVENT_RST;
                goto out;
        }

        /* Check for vector0 mailbox(=CMDQ RX) event source */
        if (BIT(HNS3_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) {
                val = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RX_CMDQ_INT_B);
                ret = HNS3VF_VECTOR0_EVENT_MBX;
                goto out;
        }

        val = 0;
        ret = HNS3VF_VECTOR0_EVENT_OTHER;
out:
        if (clearval)
                *clearval = val;
        return ret;
}

static void
hns3vf_interrupt_handler(void *param)
{
        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
        struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_hw *hw = &hns->hw;
        enum hns3vf_evt_cause event_cause;
        uint32_t clearval;

        /* Disable interrupt */
        hns3vf_disable_irq0(hw);

        /* Read out interrupt causes */
        event_cause = hns3vf_check_event_cause(hns, &clearval);
        /* Clear interrupt causes */
        hns3vf_clear_event_cause(hw, clearval);

        switch (event_cause) {
        case HNS3VF_VECTOR0_EVENT_RST:
                hns3_schedule_reset(hns);
                break;
        case HNS3VF_VECTOR0_EVENT_MBX:
                hns3_dev_handle_mbx_msg(hw);
                break;
        default:
                break;
        }

        /* Enable interrupt */
        hns3vf_enable_irq0(hw);
}

static void
hns3vf_set_default_dev_specifications(struct hns3_hw *hw)
{
        hw->max_non_tso_bd_num = HNS3_MAX_NON_TSO_BD_PER_PKT;
        hw->rss_ind_tbl_size = HNS3_RSS_IND_TBL_SIZE;
        hw->rss_key_size = HNS3_RSS_KEY_SIZE;
        hw->intr.int_ql_max = HNS3_INTR_QL_NONE;
}

static void
hns3vf_parse_dev_specifications(struct hns3_hw *hw, struct hns3_cmd_desc *desc)
{
        struct hns3_dev_specs_0_cmd *req0;

        req0 = (struct hns3_dev_specs_0_cmd *)desc[0].data;

        hw->max_non_tso_bd_num = req0->max_non_tso_bd_num;
        hw->rss_ind_tbl_size = rte_le_to_cpu_16(req0->rss_ind_tbl_size);
        hw->rss_key_size = rte_le_to_cpu_16(req0->rss_key_size);
        hw->intr.int_ql_max = rte_le_to_cpu_16(req0->intr_ql_max);
}

static int
hns3vf_check_dev_specifications(struct hns3_hw *hw)
{
        if (hw->rss_ind_tbl_size == 0 ||
            hw->rss_ind_tbl_size > HNS3_RSS_IND_TBL_SIZE_MAX) {
                hns3_warn(hw, "the size of the hash lookup table configured "
                              "(%u) is 0 or exceeds the maximum(%u)",
                              hw->rss_ind_tbl_size,
                              HNS3_RSS_IND_TBL_SIZE_MAX);
                return -EINVAL;
        }

        return 0;
}

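/*
 * Query device specifications from firmware. The reply spans
 * HNS3_QUERY_DEV_SPECS_BD_NUM descriptors, so every descriptor except the
 * last is flagged with HNS3_CMD_FLAG_NEXT to chain them into one command.
 */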
static int
hns3vf_query_dev_specifications(struct hns3_hw *hw)
{
        struct hns3_cmd_desc desc[HNS3_QUERY_DEV_SPECS_BD_NUM];
        int ret;
        int i;

        for (i = 0; i < HNS3_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
                hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS,
                                          true);
                desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
        }
        hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS, true);

        ret = hns3_cmd_send(hw, desc, HNS3_QUERY_DEV_SPECS_BD_NUM);
        if (ret)
                return ret;

        hns3vf_parse_dev_specifications(hw, desc);

        return hns3vf_check_dev_specifications(hw);
}

void
hns3vf_update_push_lsc_cap(struct hns3_hw *hw, bool supported)
{
        uint16_t val = supported ? HNS3_PF_PUSH_LSC_CAP_SUPPORTED :
                                   HNS3_PF_PUSH_LSC_CAP_NOT_SUPPORTED;
        uint16_t exp = HNS3_PF_PUSH_LSC_CAP_UNKNOWN;
        struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);

        if (vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_UNKNOWN)
                __atomic_compare_exchange(&vf->pf_push_lsc_cap, &exp, &val, 0,
                                          __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
}

static void
hns3vf_get_push_lsc_cap(struct hns3_hw *hw)
{
#define HNS3_CHECK_PUSH_LSC_CAP_TIMEOUT_MS      500

        struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
        int32_t remain_ms = HNS3_CHECK_PUSH_LSC_CAP_TIMEOUT_MS;
        uint16_t val = HNS3_PF_PUSH_LSC_CAP_NOT_SUPPORTED;
        uint16_t exp = HNS3_PF_PUSH_LSC_CAP_UNKNOWN;
        struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);

        __atomic_store_n(&vf->pf_push_lsc_cap, HNS3_PF_PUSH_LSC_CAP_UNKNOWN,
                         __ATOMIC_RELEASE);

        (void)hns3_send_mbx_msg(hw, HNS3_MBX_GET_LINK_STATUS, 0, NULL, 0, false,
                                NULL, 0);

        while (remain_ms > 0) {
                rte_delay_ms(HNS3_POLL_RESPONE_MS);
                if (__atomic_load_n(&vf->pf_push_lsc_cap, __ATOMIC_ACQUIRE) !=
                        HNS3_PF_PUSH_LSC_CAP_UNKNOWN)
                        break;
                remain_ms--;
        }

        /*
         * When the above loop exits, pf_push_lsc_cap can be in one of three
         * states: unknown (the PF has not acked), not_supported, or
         * supported. Configure it as 'not_supported' when it is still in the
         * 'unknown' state.
         */
        __atomic_compare_exchange(&vf->pf_push_lsc_cap, &exp, &val, 0,
                                  __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);

        if (__atomic_load_n(&vf->pf_push_lsc_cap, __ATOMIC_ACQUIRE) ==
                HNS3_PF_PUSH_LSC_CAP_SUPPORTED) {
                hns3_info(hw, "detected that PF supports push link status change!");
        } else {
                /*
                 * The framework already set the RTE_ETH_DEV_INTR_LSC bit
                 * because the driver declared RTE_PCI_DRV_INTR_LSC in
                 * drv_flags, so clear the RTE_ETH_DEV_INTR_LSC capability
                 * here.
                 */
                dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
        }
}

static int
hns3vf_get_capability(struct hns3_hw *hw)
{
        struct rte_pci_device *pci_dev;
        struct rte_eth_dev *eth_dev;
        uint8_t revision;
        int ret;

        eth_dev = &rte_eth_devices[hw->data->port_id];
        pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

        /* Get PCI revision id */
        ret = rte_pci_read_config(pci_dev, &revision, HNS3_PCI_REVISION_ID_LEN,
                                  HNS3_PCI_REVISION_ID);
        if (ret != HNS3_PCI_REVISION_ID_LEN) {
                PMD_INIT_LOG(ERR, "failed to read pci revision id, ret = %d",
                             ret);
                return -EIO;
        }
        hw->revision = revision;

        if (revision < PCI_REVISION_ID_HIP09_A) {
                hns3vf_set_default_dev_specifications(hw);
                hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_RSV_ONE;
                hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_2US;
                hw->tso_mode = HNS3_TSO_SW_CAL_PSEUDO_H_CSUM;
                hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE1;
                hw->min_tx_pkt_len = HNS3_HIP08_MIN_TX_PKT_LEN;
                hw->rss_info.ipv6_sctp_offload_supported = false;
                hw->promisc_mode = HNS3_UNLIMIT_PROMISC_MODE;
                return 0;
        }

        ret = hns3vf_query_dev_specifications(hw);
        if (ret) {
                PMD_INIT_LOG(ERR,
                             "failed to query dev specifications, ret = %d",
                             ret);
                return ret;
        }

        hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_ALL;
        hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_1US;
        hw->tso_mode = HNS3_TSO_HW_CAL_PSEUDO_H_CSUM;
        hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE2;
        hw->min_tx_pkt_len = HNS3_HIP09_MIN_TX_PKT_LEN;
        hw->rss_info.ipv6_sctp_offload_supported = true;
        hw->promisc_mode = HNS3_LIMIT_PROMISC_MODE;

        return 0;
}

static int
hns3vf_check_tqp_info(struct hns3_hw *hw)
{
        if (hw->tqps_num == 0) {
                PMD_INIT_LOG(ERR, "Get invalid tqps_num(0) from PF.");
                return -EINVAL;
        }

        if (hw->rss_size_max == 0) {
                PMD_INIT_LOG(ERR, "Get invalid rss_size_max(0) from PF.");
                return -EINVAL;
        }

        hw->tqps_num = RTE_MIN(hw->rss_size_max, hw->tqps_num);

        return 0;
}

static int
hns3vf_get_port_base_vlan_filter_state(struct hns3_hw *hw)
{
        uint8_t resp_msg;
        int ret;

        ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN,
                                HNS3_MBX_GET_PORT_BASE_VLAN_STATE, NULL, 0,
                                true, &resp_msg, sizeof(resp_msg));
        if (ret) {
                if (ret == -ETIME) {
                        /*
                         * Getting the current port based VLAN state from the
                         * PF driver will not affect the VF driver's basic
                         * functions. Because the VF driver relies on the hns3
                         * PF kernel ethdev driver, and to avoid introducing
                         * compatibility issues with older versions of the PF
                         * driver, no failure is returned when the return
                         * value is -ETIME. This return value covers the
                         * following scenarios:
                         * 1) the firmware didn't return the result in time
                         * 2) the result returned by the firmware is a timeout
                         * 3) an older version of the kernel side PF driver
                         *    does not support this mailbox message.
                         * For scenarios 1 and 2, it is most likely that a
                         * hardware error or a hardware reset has occurred; in
                         * that case, these errors will be caught by other
                         * functions.
                         */
                        PMD_INIT_LOG(WARNING,
                                "failed to get PVID state for timeout, maybe "
                                "kernel side PF driver doesn't support this "
                                "mailbox message, or firmware didn't respond.");
                        resp_msg = HNS3_PORT_BASE_VLAN_DISABLE;
                } else {
                        PMD_INIT_LOG(ERR, "failed to get port based VLAN state,"
                                " ret = %d", ret);
                        return ret;
                }
        }
        hw->port_base_vlan_cfg.state = resp_msg ?
                HNS3_PORT_BASE_VLAN_ENABLE : HNS3_PORT_BASE_VLAN_DISABLE;
        return 0;
}

1369static int
1370hns3vf_get_queue_info(struct hns3_hw *hw)
1371{
1372#define HNS3VF_TQPS_RSS_INFO_LEN        6
1373        uint8_t resp_msg[HNS3VF_TQPS_RSS_INFO_LEN];
1374        int ret;
1375
1376        ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_QINFO, 0, NULL, 0, true,
1377                                resp_msg, HNS3VF_TQPS_RSS_INFO_LEN);
1378        if (ret) {
1379                PMD_INIT_LOG(ERR, "Failed to get tqp info from PF: %d", ret);
1380                return ret;
1381        }
1382
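            /*
             * Response layout (inferred from the copies below): bytes 0-1
             * hold tqps_num and bytes 2-3 hold rss_size_max; the remaining
             * bytes of the 6-byte message are not used by this driver.
             */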
1383        memcpy(&hw->tqps_num, &resp_msg[0], sizeof(uint16_t));
1384        memcpy(&hw->rss_size_max, &resp_msg[2], sizeof(uint16_t));
1385
1386        return hns3vf_check_tqp_info(hw);
1387}
1388
1389static int
1390hns3vf_get_queue_depth(struct hns3_hw *hw)
1391{
1392#define HNS3VF_TQPS_DEPTH_INFO_LEN      4
1393        uint8_t resp_msg[HNS3VF_TQPS_DEPTH_INFO_LEN];
1394        int ret;
1395
1396        ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_QDEPTH, 0, NULL, 0, true,
1397                                resp_msg, HNS3VF_TQPS_DEPTH_INFO_LEN);
1398        if (ret) {
1399                PMD_INIT_LOG(ERR, "Failed to get tqp depth info from PF: %d",
1400                             ret);
1401                return ret;
1402        }
1403
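            /* Bytes 0-1 carry the Tx ring depth and bytes 2-3 the Rx ring depth. */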
1404        memcpy(&hw->num_tx_desc, &resp_msg[0], sizeof(uint16_t));
1405        memcpy(&hw->num_rx_desc, &resp_msg[2], sizeof(uint16_t));
1406
1407        return 0;
1408}
1409
1410static void
1411hns3vf_update_caps(struct hns3_hw *hw, uint32_t caps)
1412{
1413        if (hns3_get_bit(caps, HNS3VF_CAPS_VLAN_FLT_MOD_B))
1414                hns3_set_bit(hw->capability,
1415                                HNS3_DEV_SUPPORT_VF_VLAN_FLT_MOD_B, 1);
1416}
1417
1418static int
1419hns3vf_get_num_tc(struct hns3_hw *hw)
1420{
1421        uint8_t num_tc = 0;
1422        uint32_t i;
1423
1424        for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
1425                if (hw->hw_tc_map & BIT(i))
1426                        num_tc++;
1427        }
1428        return num_tc;
1429}
1430
1431static int
1432hns3vf_get_basic_info(struct hns3_hw *hw)
1433{
1434        uint8_t resp_msg[HNS3_MBX_MAX_RESP_DATA_SIZE];
1435        struct hns3_basic_info *basic_info;
1436        int ret;
1437
1438        ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_BASIC_INFO, 0, NULL, 0,
1439                                true, resp_msg, sizeof(resp_msg));
1440        if (ret) {
1441                hns3_err(hw, "failed to get basic info from PF, ret = %d.",
1442                                ret);
1443                return ret;
1444        }
1445
1446        basic_info = (struct hns3_basic_info *)resp_msg;
1447        hw->hw_tc_map = basic_info->hw_tc_map;
1448        hw->num_tc = hns3vf_get_num_tc(hw);
1449        hw->pf_vf_if_version = basic_info->pf_vf_if_version;
1450        hns3vf_update_caps(hw, basic_info->caps);
1451
1452        return 0;
1453}
1454
1455static int
1456hns3vf_get_host_mac_addr(struct hns3_hw *hw)
1457{
1458        uint8_t host_mac[RTE_ETHER_ADDR_LEN];
1459        int ret;
1460
1461        ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_MAC_ADDR, 0, NULL, 0,
1462                                true, host_mac, RTE_ETHER_ADDR_LEN);
1463        if (ret) {
1464                hns3_err(hw, "Failed to get mac addr from PF: %d", ret);
1465                return ret;
1466        }
1467
1468        memcpy(hw->mac.mac_addr, host_mac, RTE_ETHER_ADDR_LEN);
1469
1470        return 0;
1471}
1472
1473static int
1474hns3vf_get_configuration(struct hns3_hw *hw)
1475{
1476        int ret;
1477
1478        hw->mac.media_type = HNS3_MEDIA_TYPE_NONE;
1479        hw->rss_dis_flag = false;
1480
1481        /* Get device capability */
1482        ret = hns3vf_get_capability(hw);
1483        if (ret) {
1484                PMD_INIT_LOG(ERR, "failed to get device capability: %d.", ret);
1485                return ret;
1486        }
1487
1488        hns3vf_get_push_lsc_cap(hw);
1489
1490        /* Get basic info from PF */
1491        ret = hns3vf_get_basic_info(hw);
1492        if (ret)
1493                return ret;
1494
1495        /* Get queue configuration from PF */
1496        ret = hns3vf_get_queue_info(hw);
1497        if (ret)
1498                return ret;
1499
1500        /* Get queue depth info from PF */
1501        ret = hns3vf_get_queue_depth(hw);
1502        if (ret)
1503                return ret;
1504
1505        /* Get user defined VF MAC addr from PF */
1506        ret = hns3vf_get_host_mac_addr(hw);
1507        if (ret)
1508                return ret;
1509
1510        return hns3vf_get_port_base_vlan_filter_state(hw);
1511}
1512
1513static int
1514hns3vf_set_tc_queue_mapping(struct hns3_adapter *hns, uint16_t nb_rx_q,
1515                            uint16_t nb_tx_q)
1516{
1517        struct hns3_hw *hw = &hns->hw;
1518
1519        return hns3_queue_to_tc_mapping(hw, nb_rx_q, nb_tx_q);
1520}
1521
1522static void
1523hns3vf_request_link_info(struct hns3_hw *hw)
1524{
1525        struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);
1526        bool send_req;
1527        int ret;
1528
1529        if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
1530                return;
1531
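            /*
             * Poll the PF for link status only when the PF cannot push link
             * change events, or while initial requests remain outstanding.
             */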
1532        send_req = vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_NOT_SUPPORTED ||
1533                   vf->req_link_info_cnt > 0;
1534        if (!send_req)
1535                return;
1536
1537        ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_LINK_STATUS, 0, NULL, 0, false,
1538                                NULL, 0);
1539        if (ret) {
1540                hns3_err(hw, "failed to fetch link status, ret = %d", ret);
1541                return;
1542        }
1543
1544        if (vf->req_link_info_cnt > 0)
1545                vf->req_link_info_cnt--;
1546}
1547
1548void
1549hns3vf_update_link_status(struct hns3_hw *hw, uint8_t link_status,
1550                          uint32_t link_speed, uint8_t link_duplex)
1551{
1552        struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
1553        struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);
1554        struct hns3_mac *mac = &hw->mac;
1555        int ret;
1556
1557        /*
1558         * The PF kernel driver may push the link status while the VF driver
1559         * is resetting; the driver stops the polling job in this case and
1560         * restarts it after the reset completes.
1561         * Once the polling job has started, the driver gets the initial link
1562         * status by sending a request to the PF kernel driver, and then keeps
1563         * it updated by processing the PF's link status mailbox messages.
1564         */
1565        if (!__atomic_load_n(&vf->poll_job_started, __ATOMIC_RELAXED))
1566                return;
1567
1568        if (hw->adapter_state != HNS3_NIC_STARTED)
1569                return;
1570
1571        mac->link_status = link_status;
1572        mac->link_speed = link_speed;
1573        mac->link_duplex = link_duplex;
1574        ret = hns3vf_dev_link_update(dev, 0);
1575        if (ret == 0 && dev->data->dev_conf.intr_conf.lsc != 0)
1576                hns3_start_report_lse(dev);
1577}
1578
1579static int
1580hns3vf_vlan_filter_configure(struct hns3_adapter *hns, uint16_t vlan_id, int on)
1581{
1582#define HNS3VF_VLAN_MBX_MSG_LEN 5
1583        struct hns3_hw *hw = &hns->hw;
1584        uint8_t msg_data[HNS3VF_VLAN_MBX_MSG_LEN];
1585        uint16_t proto = htons(RTE_ETHER_TYPE_VLAN);
1586        uint8_t is_kill = on ? 0 : 1;
1587
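            /* Payload: byte 0 = is_kill, bytes 1-2 = vlan_id, bytes 3-4 = proto. */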
1588        msg_data[0] = is_kill;
1589        memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
1590        memcpy(&msg_data[3], &proto, sizeof(proto));
1591
1592        return hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN, HNS3_MBX_VLAN_FILTER,
1593                                 msg_data, HNS3VF_VLAN_MBX_MSG_LEN, true, NULL,
1594                                 0);
1595}
1596
1597static int
1598hns3vf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1599{
1600        struct hns3_adapter *hns = dev->data->dev_private;
1601        struct hns3_hw *hw = &hns->hw;
1602        int ret;
1603
1604        if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
1605                hns3_err(hw,
1606                         "vf set vlan id failed during resetting, vlan_id =%u",
1607                         vlan_id);
1608                return -EIO;
1609        }
1610        rte_spinlock_lock(&hw->lock);
1611        ret = hns3vf_vlan_filter_configure(hns, vlan_id, on);
1612        rte_spinlock_unlock(&hw->lock);
1613        if (ret)
1614                hns3_err(hw, "vf set vlan id failed, vlan_id =%u, ret =%d",
1615                         vlan_id, ret);
1616
1617        return ret;
1618}
1619
1620static int
1621hns3vf_en_vlan_filter(struct hns3_hw *hw, bool enable)
1622{
1623        uint8_t msg_data;
1624        int ret;
1625
1626        if (!hns3_dev_vf_vlan_flt_supported(hw))
1627                return 0;
1628
1629        msg_data = enable ? 1 : 0;
1630        ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN,
1631                        HNS3_MBX_ENABLE_VLAN_FILTER, &msg_data,
1632                        sizeof(msg_data), true, NULL, 0);
1633        if (ret)
1634                hns3_err(hw, "%s vlan filter failed, ret = %d.",
1635                                enable ? "enable" : "disable", ret);
1636
1637        return ret;
1638}
1639
1640static int
1641hns3vf_en_hw_strip_rxvtag(struct hns3_hw *hw, bool enable)
1642{
1643        uint8_t msg_data;
1644        int ret;
1645
1646        msg_data = enable ? 1 : 0;
1647        ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN, HNS3_MBX_VLAN_RX_OFF_CFG,
1648                                &msg_data, sizeof(msg_data), false, NULL, 0);
1649        if (ret)
1650                hns3_err(hw, "vf %s strip failed, ret = %d.",
1651                                enable ? "enable" : "disable", ret);
1652
1653        return ret;
1654}
1655
1656static int
1657hns3vf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1658{
1659        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1660        struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
1661        unsigned int tmp_mask;
1662        int ret = 0;
1663
1664        if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
1665                hns3_err(hw, "vf set vlan offload failed during resetting, "
1666                             "mask = 0x%x", mask);
1667                return -EIO;
1668        }
1669
1670        tmp_mask = (unsigned int)mask;
1671
1672        if (tmp_mask & ETH_VLAN_FILTER_MASK) {
1673                rte_spinlock_lock(&hw->lock);
1674                /* Enable or disable VLAN filter */
1675                if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
1676                        ret = hns3vf_en_vlan_filter(hw, true);
1677                else
1678                        ret = hns3vf_en_vlan_filter(hw, false);
1679                rte_spinlock_unlock(&hw->lock);
1680                if (ret)
1681                        return ret;
1682        }
1683
1684        /* Vlan stripping setting */
1685        if (tmp_mask & ETH_VLAN_STRIP_MASK) {
1686                rte_spinlock_lock(&hw->lock);
1687                /* Enable or disable VLAN stripping */
1688                if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
1689                        ret = hns3vf_en_hw_strip_rxvtag(hw, true);
1690                else
1691                        ret = hns3vf_en_hw_strip_rxvtag(hw, false);
1692                rte_spinlock_unlock(&hw->lock);
1693        }
1694
1695        return ret;
1696}
1697
1698static int
1699hns3vf_handle_all_vlan_table(struct hns3_adapter *hns, int on)
1700{
1701        struct rte_vlan_filter_conf *vfc;
1702        struct hns3_hw *hw = &hns->hw;
1703        uint16_t vlan_id;
1704        uint64_t vbit;
1705        uint64_t ids;
1706        int ret = 0;
1707        uint32_t i;
1708
1709        vfc = &hw->data->vlan_filter_conf;
1710        for (i = 0; i < RTE_DIM(vfc->ids); i++) {
1711                if (vfc->ids[i] == 0)
1712                        continue;
1713                ids = vfc->ids[i];
1714                while (ids) {
1715                        /*
1716                         * ids is 64 bits wide; each bit corresponds to
1717                         * one vlan id.
1718                         */
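                            /*
                             * Worked example (illustrative): for ids = 0b10100,
                             * vbit = ~ids & (ids - 1) = 0b00011, which has two bits
                             * set, so vlan_id becomes 64 * i + 2; the xor below then
                             * clears bit 2, leaving ids = 0b10000.
                             */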
1719                        vlan_id = 64 * i;
1720                        /* mask of the zero bits below the lowest set bit */
1721                        vbit = ~ids & (ids - 1);
1722                        /* clear the least significant set bit in ids */
1723                        ids ^= (ids ^ (ids - 1)) ^ vbit;
1724                        for (; vbit;) {
1725                                vbit >>= 1;
1726                                vlan_id++;
1727                        }
1728                        ret = hns3vf_vlan_filter_configure(hns, vlan_id, on);
1729                        if (ret) {
1730                                hns3_err(hw,
1731                                         "VF handle vlan table failed, ret =%d, on = %d",
1732                                         ret, on);
1733                                return ret;
1734                        }
1735                }
1736        }
1737
1738        return ret;
1739}
1740
1741static int
1742hns3vf_remove_all_vlan_table(struct hns3_adapter *hns)
1743{
1744        return hns3vf_handle_all_vlan_table(hns, 0);
1745}
1746
1747static int
1748hns3vf_restore_vlan_conf(struct hns3_adapter *hns)
1749{
1750        struct hns3_hw *hw = &hns->hw;
1751        struct rte_eth_conf *dev_conf;
1752        bool en;
1753        int ret;
1754
1755        dev_conf = &hw->data->dev_conf;
1756        en = dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP ? true
1757                                                                   : false;
1758        ret = hns3vf_en_hw_strip_rxvtag(hw, en);
1759        if (ret)
1760                hns3_err(hw, "VF restore vlan conf fail, en =%d, ret =%d", en,
1761                         ret);
1762        return ret;
1763}
1764
1765static int
1766hns3vf_dev_configure_vlan(struct rte_eth_dev *dev)
1767{
1768        struct hns3_adapter *hns = dev->data->dev_private;
1769        struct rte_eth_dev_data *data = dev->data;
1770        struct hns3_hw *hw = &hns->hw;
1771        int ret;
1772
1773        if (data->dev_conf.txmode.hw_vlan_reject_tagged ||
1774            data->dev_conf.txmode.hw_vlan_reject_untagged ||
1775            data->dev_conf.txmode.hw_vlan_insert_pvid) {
1776                hns3_warn(hw, "hw_vlan_reject_tagged, hw_vlan_reject_untagged "
1777                              "or hw_vlan_insert_pvid is not supported!");
1778        }
1779
1780        /* Apply vlan offload setting */
1781        ret = hns3vf_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK |
1782                                        ETH_VLAN_FILTER_MASK);
1783        if (ret)
1784                hns3_err(hw, "dev config vlan offload failed, ret = %d.", ret);
1785
1786        return ret;
1787}
1788
1789static int
1790hns3vf_set_alive(struct hns3_hw *hw, bool alive)
1791{
1792        uint8_t msg_data;
1793
1794        msg_data = alive ? 1 : 0;
1795        return hns3_send_mbx_msg(hw, HNS3_MBX_SET_ALIVE, 0, &msg_data,
1796                                 sizeof(msg_data), false, NULL, 0);
1797}
1798
1799static void
1800hns3vf_keep_alive_handler(void *param)
1801{
1802        struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
1803        struct hns3_adapter *hns = eth_dev->data->dev_private;
1804        struct hns3_hw *hw = &hns->hw;
1805        int ret;
1806
1807        ret = hns3_send_mbx_msg(hw, HNS3_MBX_KEEP_ALIVE, 0, NULL, 0,
1808                                false, NULL, 0);
1809        if (ret)
1810                hns3_err(hw, "VF failed to send keep-alive cmd, ret = %d",
1811                         ret);
1812
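            /* Re-arm so the keep-alive message repeats every HNS3VF_KEEP_ALIVE_INTERVAL. */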
1813        rte_eal_alarm_set(HNS3VF_KEEP_ALIVE_INTERVAL, hns3vf_keep_alive_handler,
1814                          eth_dev);
1815}
1816
1817static void
1818hns3vf_service_handler(void *param)
1819{
1820        struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
1821        struct hns3_adapter *hns = eth_dev->data->dev_private;
1822        struct hns3_hw *hw = &hns->hw;
1823
1824        /*
1825         * The link status query and the reset processing are both executed
1826         * in the interrupt thread. When an IMP reset occurs, the IMP does
1827         * not respond, and each query operation times out after 30ms. With
1828         * multiple PF/VFs, these query timeouts can delay servicing of the
1829         * IMP reset interrupt beyond 100ms.
1830         * Before querying the link status, check whether a reset is
1831         * pending, and if so, abandon the query.
1832         */
1833        if (!hns3vf_is_reset_pending(hns))
1834                hns3vf_request_link_info(hw);
1835        else
1836                hns3_warn(hw, "Cancel the query when reset is pending");
1837
1838        rte_eal_alarm_set(HNS3VF_SERVICE_INTERVAL, hns3vf_service_handler,
1839                          eth_dev);
1840}
1841
1842static void
1843hns3vf_start_poll_job(struct rte_eth_dev *dev)
1844{
1845#define HNS3_REQUEST_LINK_INFO_REMAINS_CNT      3
1846
1847        struct hns3_vf *vf = HNS3_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1848
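            /*
             * Even when the PF can push link status changes, request the link
             * info a few times after starting, presumably so the initial
             * status arrives even if an early push is missed.
             */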
1849        if (vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_SUPPORTED)
1850                vf->req_link_info_cnt = HNS3_REQUEST_LINK_INFO_REMAINS_CNT;
1851
1852        __atomic_store_n(&vf->poll_job_started, 1, __ATOMIC_RELAXED);
1853
1854        hns3vf_service_handler(dev);
1855}
1856
1857static void
1858hns3vf_stop_poll_job(struct rte_eth_dev *dev)
1859{
1860        struct hns3_vf *vf = HNS3_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1861
1862        rte_eal_alarm_cancel(hns3vf_service_handler, dev);
1863
1864        __atomic_store_n(&vf->poll_job_started, 0, __ATOMIC_RELAXED);
1865}
1866
1867static int
1868hns3_query_vf_resource(struct hns3_hw *hw)
1869{
1870        struct hns3_vf_res_cmd *req;
1871        struct hns3_cmd_desc desc;
1872        uint16_t num_msi;
1873        int ret;
1874
1875        hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_VF_RSRC, true);
1876        ret = hns3_cmd_send(hw, &desc, 1);
1877        if (ret) {
1878                hns3_err(hw, "query vf resource failed, ret = %d", ret);
1879                return ret;
1880        }
1881
1882        req = (struct hns3_vf_res_cmd *)desc.data;
1883        num_msi = hns3_get_field(rte_le_to_cpu_16(req->vf_intr_vector_number),
1884                                 HNS3_VF_VEC_NUM_M, HNS3_VF_VEC_NUM_S);
1885        if (num_msi < HNS3_MIN_VECTOR_NUM) {
1886                hns3_err(hw, "only %u msi resources, not enough for vf(min:%d)",
1887                         num_msi, HNS3_MIN_VECTOR_NUM);
1888                return -EINVAL;
1889        }
1890
1891        hw->num_msi = num_msi;
1892
1893        return 0;
1894}
1895
1896static int
1897hns3vf_init_hardware(struct hns3_adapter *hns)
1898{
1899        struct hns3_hw *hw = &hns->hw;
1900        uint16_t mtu = hw->data->mtu;
1901        int ret;
1902
1903        ret = hns3vf_set_promisc_mode(hw, true, false, false);
1904        if (ret)
1905                return ret;
1906
1907        ret = hns3vf_config_mtu(hw, mtu);
1908        if (ret)
1909                goto err_init_hardware;
1910
1911        ret = hns3vf_vlan_filter_configure(hns, 0, 1);
1912        if (ret) {
1913                PMD_INIT_LOG(ERR, "Failed to initialize VLAN config: %d", ret);
1914                goto err_init_hardware;
1915        }
1916
1917        ret = hns3_config_gro(hw, false);
1918        if (ret) {
1919                PMD_INIT_LOG(ERR, "Failed to config gro: %d", ret);
1920                goto err_init_hardware;
1921        }
1922
1923        /*
1924         * During initialization, all hardware mappings between queues and
1925         * interrupt vectors need to be cleared, so that errors caused by
1926         * residual configurations, such as unexpected interrupts, can be
1927         * avoided.
1928         */
1929        ret = hns3vf_init_ring_with_vector(hw);
1930        if (ret) {
1931                PMD_INIT_LOG(ERR, "Failed to init ring intr vector: %d", ret);
1932                goto err_init_hardware;
1933        }
1934
1935        return 0;
1936
1937err_init_hardware:
1938        (void)hns3vf_set_promisc_mode(hw, false, false, false);
1939        return ret;
1940}
1941
1942static int
1943hns3vf_clear_vport_list(struct hns3_hw *hw)
1944{
1945        return hns3_send_mbx_msg(hw, HNS3_MBX_HANDLE_VF_TBL,
1946                                 HNS3_MBX_VPORT_LIST_CLEAR, NULL, 0, false,
1947                                 NULL, 0);
1948}
1949
1950static int
1951hns3vf_init_vf(struct rte_eth_dev *eth_dev)
1952{
1953        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1954        struct hns3_adapter *hns = eth_dev->data->dev_private;
1955        struct hns3_hw *hw = &hns->hw;
1956        int ret;
1957
1958        PMD_INIT_FUNC_TRACE();
1959
1960        /* Get hardware io base address from pcie BAR2 IO space */
1961        hw->io_base = pci_dev->mem_resource[2].addr;
1962
1963        /* Firmware command queue initialize */
1964        ret = hns3_cmd_init_queue(hw);
1965        if (ret) {
1966                PMD_INIT_LOG(ERR, "Failed to init cmd queue: %d", ret);
1967                goto err_cmd_init_queue;
1968        }
1969
1970        /* Firmware command initialize */
1971        ret = hns3_cmd_init(hw);
1972        if (ret) {
1973                PMD_INIT_LOG(ERR, "Failed to init cmd: %d", ret);
1974                goto err_cmd_init;
1975        }
1976
1977        hns3_tx_push_init(eth_dev);
1978
1979        /* Get VF resource */
1980        ret = hns3_query_vf_resource(hw);
1981        if (ret)
1982                goto err_cmd_init;
1983
1984        rte_spinlock_init(&hw->mbx_resp.lock);
1985
1986        hns3vf_clear_event_cause(hw, 0);
1987
1988        ret = rte_intr_callback_register(&pci_dev->intr_handle,
1989                                         hns3vf_interrupt_handler, eth_dev);
1990        if (ret) {
1991                PMD_INIT_LOG(ERR, "Failed to register intr: %d", ret);
1992                goto err_intr_callback_register;
1993        }
1994
1995        /* Enable interrupt */
1996        rte_intr_enable(&pci_dev->intr_handle);
1997        hns3vf_enable_irq0(hw);
1998
1999        /* Get configuration from PF */
2000        ret = hns3vf_get_configuration(hw);
2001        if (ret) {
2002                PMD_INIT_LOG(ERR, "Failed to fetch configuration: %d", ret);
2003                goto err_get_config;
2004        }
2005
2006        ret = hns3_tqp_stats_init(hw);
2007        if (ret)
2008                goto err_get_config;
2009
2010        /* Clear the hardware statistics recorded in the imissed registers. */
2011        ret = hns3_update_imissed_stats(hw, true);
2012        if (ret) {
2013                hns3_err(hw, "clear imissed stats failed, ret = %d", ret);
2014                goto err_set_tc_queue;
2015        }
2016
2017        ret = hns3vf_set_tc_queue_mapping(hns, hw->tqps_num, hw->tqps_num);
2018        if (ret) {
2019                PMD_INIT_LOG(ERR, "failed to set tc info, ret = %d.", ret);
2020                goto err_set_tc_queue;
2021        }
2022
2023        ret = hns3vf_clear_vport_list(hw);
2024        if (ret) {
2025                PMD_INIT_LOG(ERR, "Failed to clear tbl list: %d", ret);
2026                goto err_set_tc_queue;
2027        }
2028
2029        ret = hns3vf_init_hardware(hns);
2030        if (ret)
2031                goto err_set_tc_queue;
2032
2033        hns3_rss_set_default_args(hw);
2034
2035        ret = hns3vf_set_alive(hw, true);
2036        if (ret) {
2037                PMD_INIT_LOG(ERR, "Failed to send alive msg to PF: %d", ret);
2038                goto err_set_tc_queue;
2039        }
2040
2041        return 0;
2042
2043err_set_tc_queue:
2044        hns3_tqp_stats_uninit(hw);
2045
2046err_get_config:
2047        hns3vf_disable_irq0(hw);
2048        rte_intr_disable(&pci_dev->intr_handle);
2049        hns3_intr_unregister(&pci_dev->intr_handle, hns3vf_interrupt_handler,
2050                             eth_dev);
2051err_intr_callback_register:
2052err_cmd_init:
2053        hns3_cmd_uninit(hw);
2054        hns3_cmd_destroy_queue(hw);
2055err_cmd_init_queue:
2056        hw->io_base = NULL;
2057
2058        return ret;
2059}
2060
2061static void
2062hns3vf_uninit_vf(struct rte_eth_dev *eth_dev)
2063{
2064        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2065        struct hns3_adapter *hns = eth_dev->data->dev_private;
2066        struct hns3_hw *hw = &hns->hw;
2067
2068        PMD_INIT_FUNC_TRACE();
2069
2070        hns3_rss_uninit(hns);
2071        (void)hns3_config_gro(hw, false);
2072        (void)hns3vf_set_alive(hw, false);
2073        (void)hns3vf_set_promisc_mode(hw, false, false, false);
2074        hns3_flow_uninit(eth_dev);
2075        hns3_tqp_stats_uninit(hw);
2076        hns3vf_disable_irq0(hw);
2077        rte_intr_disable(&pci_dev->intr_handle);
2078        hns3_intr_unregister(&pci_dev->intr_handle, hns3vf_interrupt_handler,
2079                             eth_dev);
2080        hns3_cmd_uninit(hw);
2081        hns3_cmd_destroy_queue(hw);
2082        hw->io_base = NULL;
2083}
2084
2085static int
2086hns3vf_do_stop(struct hns3_adapter *hns)
2087{
2088        struct hns3_hw *hw = &hns->hw;
2089        int ret;
2090
2091        hw->mac.link_status = ETH_LINK_DOWN;
2092
2093        /*
2094         * The "hns3vf_do_stop" function is also called by .stop_service to
2095         * prepare a reset. During a global or IMP reset, the command to stop
2096         * the tx/rx queues cannot be sent, and the mbufs in the Tx/Rx queues
2097         * may still be accessed during the reset process. Therefore, the
2098         * mbufs cannot be released during the reset and must be released
2099         * after the reset is completed.
2100         */
2101        if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0)
2102                hns3_dev_release_mbufs(hns);
2103
2104        if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) {
2105                hns3vf_configure_mac_addr(hns, true);
2106                ret = hns3_reset_all_tqps(hns);
2107                if (ret) {
2108                        hns3_err(hw, "failed to reset all queues ret = %d",
2109                                 ret);
2110                        return ret;
2111                }
2112        }
2113        return 0;
2114}
2115
2116static void
2117hns3vf_unmap_rx_interrupt(struct rte_eth_dev *dev)
2118{
2119        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2120        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2121        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2122        uint8_t base = RTE_INTR_VEC_ZERO_OFFSET;
2123        uint8_t vec = RTE_INTR_VEC_ZERO_OFFSET;
2124        uint16_t q_id;
2125
2126        if (dev->data->dev_conf.intr_conf.rxq == 0)
2127                return;
2128
2129        /* unmap the ring with vector */
2130        if (rte_intr_allow_others(intr_handle)) {
2131                vec = RTE_INTR_VEC_RXTX_OFFSET;
2132                base = RTE_INTR_VEC_RXTX_OFFSET;
2133        }
2134        if (rte_intr_dp_is_en(intr_handle)) {
2135                for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
2136                        (void)hns3vf_bind_ring_with_vector(hw, vec, false,
2137                                                           HNS3_RING_TYPE_RX,
2138                                                           q_id);
2139                        if (vec < base + intr_handle->nb_efd - 1)
2140                                vec++;
2141                }
2142        }
2143        /* Clean datapath event and queue/vec mapping */
2144        rte_intr_efd_disable(intr_handle);
2145        if (intr_handle->intr_vec) {
2146                rte_free(intr_handle->intr_vec);
2147                intr_handle->intr_vec = NULL;
2148        }
2149}
2150
2151static int
2152hns3vf_dev_stop(struct rte_eth_dev *dev)
2153{
2154        struct hns3_adapter *hns = dev->data->dev_private;
2155        struct hns3_hw *hw = &hns->hw;
2156
2157        PMD_INIT_FUNC_TRACE();
2158        dev->data->dev_started = 0;
2159
2160        hw->adapter_state = HNS3_NIC_STOPPING;
2161        hns3_set_rxtx_function(dev);
2162        rte_wmb();
2163        /* Disable datapath on secondary process. */
2164        hns3_mp_req_stop_rxtx(dev);
2165        /* Prevent crashes when queues are still in use. */
2166        rte_delay_ms(hw->cfg_max_queues);
2167
2168        rte_spinlock_lock(&hw->lock);
2169        if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
2170                hns3_stop_tqps(hw);
2171                hns3vf_do_stop(hns);
2172                hns3vf_unmap_rx_interrupt(dev);
2173                hw->adapter_state = HNS3_NIC_CONFIGURED;
2174        }
2175        hns3_rx_scattered_reset(dev);
2176        hns3vf_stop_poll_job(dev);
2177        hns3_stop_report_lse(dev);
2178        rte_spinlock_unlock(&hw->lock);
2179
2180        return 0;
2181}
2182
2183static int
2184hns3vf_dev_close(struct rte_eth_dev *eth_dev)
2185{
2186        struct hns3_adapter *hns = eth_dev->data->dev_private;
2187        struct hns3_hw *hw = &hns->hw;
2188        int ret = 0;
2189
2190        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2191                return 0;
2192
2193        if (hw->adapter_state == HNS3_NIC_STARTED)
2194                ret = hns3vf_dev_stop(eth_dev);
2195
2196        hw->adapter_state = HNS3_NIC_CLOSING;
2197        hns3_reset_abort(hns);
2198        hw->adapter_state = HNS3_NIC_CLOSED;
2199        rte_eal_alarm_cancel(hns3vf_keep_alive_handler, eth_dev);
2200        hns3vf_configure_all_mc_mac_addr(hns, true);
2201        hns3vf_remove_all_vlan_table(hns);
2202        hns3vf_uninit_vf(eth_dev);
2203        hns3_free_all_queues(eth_dev);
2204        rte_free(hw->reset.wait_data);
2205        hns3_mp_uninit_primary();
2206        hns3_warn(hw, "Close port %u finished", hw->data->port_id);
2207
2208        return ret;
2209}
2210
2211static int
2212hns3vf_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version,
2213                      size_t fw_size)
2214{
2215        struct hns3_adapter *hns = eth_dev->data->dev_private;
2216        struct hns3_hw *hw = &hns->hw;
2217        uint32_t version = hw->fw_version;
2218        int ret;
2219
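            /*
             * The 32-bit firmware version packs one component per byte
             * (byte3.byte2.byte1.byte0); the field masks below extract them.
             */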
2220        ret = snprintf(fw_version, fw_size, "%lu.%lu.%lu.%lu",
2221                       hns3_get_field(version, HNS3_FW_VERSION_BYTE3_M,
2222                                      HNS3_FW_VERSION_BYTE3_S),
2223                       hns3_get_field(version, HNS3_FW_VERSION_BYTE2_M,
2224                                      HNS3_FW_VERSION_BYTE2_S),
2225                       hns3_get_field(version, HNS3_FW_VERSION_BYTE1_M,
2226                                      HNS3_FW_VERSION_BYTE1_S),
2227                       hns3_get_field(version, HNS3_FW_VERSION_BYTE0_M,
2228                                      HNS3_FW_VERSION_BYTE0_S));
2229        if (ret < 0)
2230                return -EINVAL;
2231
2232        ret += 1; /* add the size of '\0' */
2233        if (fw_size < (size_t)ret)
2234                return ret;
2235        else
2236                return 0;
2237}
2238
2239static int
2240hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
2241                       __rte_unused int wait_to_complete)
2242{
2243        struct hns3_adapter *hns = eth_dev->data->dev_private;
2244        struct hns3_hw *hw = &hns->hw;
2245        struct hns3_mac *mac = &hw->mac;
2246        struct rte_eth_link new_link;
2247
2248        memset(&new_link, 0, sizeof(new_link));
2249        switch (mac->link_speed) {
2250        case ETH_SPEED_NUM_10M:
2251        case ETH_SPEED_NUM_100M:
2252        case ETH_SPEED_NUM_1G:
2253        case ETH_SPEED_NUM_10G:
2254        case ETH_SPEED_NUM_25G:
2255        case ETH_SPEED_NUM_40G:
2256        case ETH_SPEED_NUM_50G:
2257        case ETH_SPEED_NUM_100G:
2258        case ETH_SPEED_NUM_200G:
2259                if (mac->link_status)
2260                        new_link.link_speed = mac->link_speed;
2261                break;
2262        default:
2263                if (mac->link_status)
2264                        new_link.link_speed = ETH_SPEED_NUM_UNKNOWN;
2265                break;
2266        }
2267
2268        if (!mac->link_status)
2269                new_link.link_speed = ETH_SPEED_NUM_NONE;
2270
2271        new_link.link_duplex = mac->link_duplex;
2272        new_link.link_status = mac->link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
2273        new_link.link_autoneg =
2274            !(eth_dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED);
2275
2276        return rte_eth_linkstatus_set(eth_dev, &new_link);
2277}
2278
2279static int
2280hns3vf_do_start(struct hns3_adapter *hns, bool reset_queue)
2281{
2282        struct hns3_hw *hw = &hns->hw;
2283        uint16_t nb_rx_q = hw->data->nb_rx_queues;
2284        uint16_t nb_tx_q = hw->data->nb_tx_queues;
2285        int ret;
2286
2287        ret = hns3vf_set_tc_queue_mapping(hns, nb_rx_q, nb_tx_q);
2288        if (ret)
2289                return ret;
2290
2291        hns3_enable_rxd_adv_layout(hw);
2292
2293        ret = hns3_init_queues(hns, reset_queue);
2294        if (ret)
2295                hns3_err(hw, "failed to init queues, ret = %d.", ret);
2296
2297        return ret;
2298}
2299
2300static int
2301hns3vf_map_rx_interrupt(struct rte_eth_dev *dev)
2302{
2303        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2304        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2305        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2306        uint8_t base = RTE_INTR_VEC_ZERO_OFFSET;
2307        uint8_t vec = RTE_INTR_VEC_ZERO_OFFSET;
2308        uint32_t intr_vector;
2309        uint16_t q_id;
2310        int ret;
2311
2312        /*
2313         * hns3 needs a separate interrupt to be used as the event interrupt,
2314         * which cannot be shared with the task queue pairs, so the kernel
2315         * driver needs to support multiple interrupt vectors.
2316         */
2317        if (dev->data->dev_conf.intr_conf.rxq == 0 ||
2318            !rte_intr_cap_multiple(intr_handle))
2319                return 0;
2320
2321        rte_intr_disable(intr_handle);
2322        intr_vector = hw->used_rx_queues;
2323        /* It creates event fd for each intr vector when MSIX is used */
2324        if (rte_intr_efd_enable(intr_handle, intr_vector))
2325                return -EINVAL;
2326
2327        if (intr_handle->intr_vec == NULL) {
2328                intr_handle->intr_vec =
2329                        rte_zmalloc("intr_vec",
2330                                    hw->used_rx_queues * sizeof(int), 0);
2331                if (intr_handle->intr_vec == NULL) {
2332                        hns3_err(hw, "Failed to allocate %u rx_queues"
2333                                     " intr_vec", hw->used_rx_queues);
2334                        ret = -ENOMEM;
2335                        goto vf_alloc_intr_vec_error;
2336                }
2337        }
2338
2339        if (rte_intr_allow_others(intr_handle)) {
2340                vec = RTE_INTR_VEC_RXTX_OFFSET;
2341                base = RTE_INTR_VEC_RXTX_OFFSET;
2342        }
2343
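            /*
             * With rte_intr_allow_others(), vector 0 stays dedicated to the
             * misc/event interrupt, so queue vectors start at the Rx/Tx offset.
             */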
2344        for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
2345                ret = hns3vf_bind_ring_with_vector(hw, vec, true,
2346                                                   HNS3_RING_TYPE_RX, q_id);
2347                if (ret)
2348                        goto vf_bind_vector_error;
2349                intr_handle->intr_vec[q_id] = vec;
2350                /*
2351                 * If there are not enough efds (e.g. not enough interrupts),
2352                 * the remaining queues will be bound to the last interrupt.
2353                 */
2354                if (vec < base + intr_handle->nb_efd - 1)
2355                        vec++;
2356        }
2357        rte_intr_enable(intr_handle);
2358        return 0;
2359
2360vf_bind_vector_error:
2361        rte_free(intr_handle->intr_vec);
2362        intr_handle->intr_vec = NULL;
2363vf_alloc_intr_vec_error:
2364        rte_intr_efd_disable(intr_handle);
2365        return ret;
2366}
2367
2368static int
2369hns3vf_restore_rx_interrupt(struct hns3_hw *hw)
2370{
2371        struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
2372        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2373        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2374        uint16_t q_id;
2375        int ret;
2376
2377        if (dev->data->dev_conf.intr_conf.rxq == 0)
2378                return 0;
2379
2380        if (rte_intr_dp_is_en(intr_handle)) {
2381                for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
2382                        ret = hns3vf_bind_ring_with_vector(hw,
2383                                        intr_handle->intr_vec[q_id], true,
2384                                        HNS3_RING_TYPE_RX, q_id);
2385                        if (ret)
2386                                return ret;
2387                }
2388        }
2389
2390        return 0;
2391}
2392
2393static void
2394hns3vf_restore_filter(struct rte_eth_dev *dev)
2395{
2396        hns3_restore_rss_filter(dev);
2397}
2398
2399static int
2400hns3vf_dev_start(struct rte_eth_dev *dev)
2401{
2402        struct hns3_adapter *hns = dev->data->dev_private;
2403        struct hns3_hw *hw = &hns->hw;
2404        int ret;
2405
2406        PMD_INIT_FUNC_TRACE();
2407        if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
2408                return -EBUSY;
2409
2410        rte_spinlock_lock(&hw->lock);
2411        hw->adapter_state = HNS3_NIC_STARTING;
2412        ret = hns3vf_do_start(hns, true);
2413        if (ret) {
2414                hw->adapter_state = HNS3_NIC_CONFIGURED;
2415                rte_spinlock_unlock(&hw->lock);
2416                return ret;
2417        }
2418        ret = hns3vf_map_rx_interrupt(dev);
2419        if (ret)
2420                goto map_rx_inter_err;
2421
2422        /*
2423         * Three registers control the status of a TQP (which contains a pair
2424         * of Tx queue and Rx queue) in the new version of the network engine.
2425         * One controls the enabling of the Tx queue, another controls the
2426         * enabling of the Rx queue, and the last is the master switch that
2427         * enables the TQP as a whole. The Tx register and the TQP register
2428         * must both be enabled to enable a Tx queue; the same applies to the
2429         * Rx queue. For the older network engine, this function only
2430         * refreshes the enabled flag, and it is used to update the status of
2431         * the queue in the dpdk framework.
2432         */
2433        ret = hns3_start_all_txqs(dev);
2434        if (ret)
2435                goto map_rx_inter_err;
2436
2437        ret = hns3_start_all_rxqs(dev);
2438        if (ret)
2439                goto start_all_rxqs_fail;
2440
2441        hw->adapter_state = HNS3_NIC_STARTED;
2442        rte_spinlock_unlock(&hw->lock);
2443
2444        hns3_rx_scattered_calc(dev);
2445        hns3_set_rxtx_function(dev);
2446        hns3_mp_req_start_rxtx(dev);
2447
2448        hns3vf_restore_filter(dev);
2449
2450        /* Enable interrupt of all rx queues before enabling queues */
2451        hns3_dev_all_rx_queue_intr_enable(hw, true);
2452        hns3_start_tqps(hw);
2453
2454        if (dev->data->dev_conf.intr_conf.lsc != 0)
2455                hns3vf_dev_link_update(dev, 0);
2456        hns3vf_start_poll_job(dev);
2457
2458        return ret;
2459
2460start_all_rxqs_fail:
2461        hns3_stop_all_txqs(dev);
2462map_rx_inter_err:
2463        (void)hns3vf_do_stop(hns);
2464        hw->adapter_state = HNS3_NIC_CONFIGURED;
2465        rte_spinlock_unlock(&hw->lock);
2466
2467        return ret;
2468}
2469
2470static bool
2471is_vf_reset_done(struct hns3_hw *hw)
2472{
2473#define HNS3_FUN_RST_ING_BITS \
2474        (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) | \
2475         BIT(HNS3_VECTOR0_CORERESET_INT_B) | \
2476         BIT(HNS3_VECTOR0_IMPRESET_INT_B) | \
2477         BIT(HNS3_VECTOR0_FUNCRESET_INT_B))
2478
2479        uint32_t val;
2480
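            /*
             * A VF reset is tracked in the dedicated VF_RST_ING register;
             * other reset levels use the function-reset status register.
             */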
2481        if (hw->reset.level == HNS3_VF_RESET) {
2482                val = hns3_read_dev(hw, HNS3_VF_RST_ING);
2483                if (val & HNS3_VF_RST_ING_BIT)
2484                        return false;
2485        } else {
2486                val = hns3_read_dev(hw, HNS3_FUN_RST_ING);
2487                if (val & HNS3_FUN_RST_ING_BITS)
2488                        return false;
2489        }
2490        return true;
2491}
2492
2493bool
2494hns3vf_is_reset_pending(struct hns3_adapter *hns)
2495{
2496        struct hns3_hw *hw = &hns->hw;
2497        enum hns3_reset_level reset;
2498
2499        /*
2500         * According to the protocol of PCIe, FLR to a PF device resets the PF
2501         * state as well as the SR-IOV extended capability including VF Enable
2502         * which means that VFs no longer exist.
2503         *
2504         * HNS3_VF_FULL_RESET means the PF device is in FLR reset. When the
2505         * PF device is in the FLR stage, the register state of the VF device
2506         * is not reliable, so register state detection cannot be carried out.
2507         * In this case, we just ignore the register states and return false
2508         * to indicate that there are no other reset states for the driver to process.
2509         */
2510        if (hw->reset.level == HNS3_VF_FULL_RESET)
2511                return false;
2512
2513        /* Check the registers to confirm whether there is reset pending */
2514        hns3vf_check_event_cause(hns, NULL);
2515        reset = hns3vf_get_reset_level(hw, &hw->reset.pending);
2516        if (hw->reset.level != HNS3_NONE_RESET && reset != HNS3_NONE_RESET &&
2517            hw->reset.level < reset) {
2518                hns3_warn(hw, "High level reset %d is pending", reset);
2519                return true;
2520        }
2521        return false;
2522}
2523
2524static int
2525hns3vf_wait_hardware_ready(struct hns3_adapter *hns)
2526{
2527        struct hns3_hw *hw = &hns->hw;
2528        struct hns3_wait_data *wait_data = hw->reset.wait_data;
2529        struct timeval tv;
2530
2531        if (wait_data->result == HNS3_WAIT_SUCCESS) {
2532                /*
2533                 * After the vf reset completes, the PF may not have finished
2534                 * its own reset processing. Sending a mbox to the PF may fail
2535                 * during the pf reset, so it is better to add an extra delay.
2536                 */
2537                if (hw->reset.level == HNS3_VF_FUNC_RESET ||
2538                    hw->reset.level == HNS3_FLR_RESET)
2539                        return 0;
2540                /* Reset retry process, no need to add extra delay. */
2541                if (hw->reset.attempts)
2542                        return 0;
2543                if (wait_data->check_completion == NULL)
2544                        return 0;
2545
2546                wait_data->check_completion = NULL;
2547                wait_data->interval = 1 * MSEC_PER_SEC * USEC_PER_MSEC;
2548                wait_data->count = 1;
2549                wait_data->result = HNS3_WAIT_REQUEST;
2550                rte_eal_alarm_set(wait_data->interval, hns3_wait_callback,
2551                                  wait_data);
2552                hns3_warn(hw, "hardware is ready, delay 1 sec for PF reset complete");
2553                return -EAGAIN;
2554        } else if (wait_data->result == HNS3_WAIT_TIMEOUT) {
2555                hns3_clock_gettime(&tv);
2556                hns3_warn(hw, "Reset step4 hardware not ready after reset time=%ld.%.6ld",
2557                          tv.tv_sec, tv.tv_usec);
2558                return -ETIME;
2559        } else if (wait_data->result == HNS3_WAIT_REQUEST)
2560                return -EAGAIN;
2561
2562        wait_data->hns = hns;
2563        wait_data->check_completion = is_vf_reset_done;
2564        wait_data->end_ms = (uint64_t)HNS3VF_RESET_WAIT_CNT *
2565                                HNS3VF_RESET_WAIT_MS + hns3_clock_gettime_ms();
2566        wait_data->interval = HNS3VF_RESET_WAIT_MS * USEC_PER_MSEC;
2567        wait_data->count = HNS3VF_RESET_WAIT_CNT;
2568        wait_data->result = HNS3_WAIT_REQUEST;
2569        rte_eal_alarm_set(wait_data->interval, hns3_wait_callback, wait_data);
2570        return -EAGAIN;
2571}
2572
2573static int
2574hns3vf_prepare_reset(struct hns3_adapter *hns)
2575{
2576        struct hns3_hw *hw = &hns->hw;
2577        int ret;
2578
2579        if (hw->reset.level == HNS3_VF_FUNC_RESET) {
2580                ret = hns3_send_mbx_msg(hw, HNS3_MBX_RESET, 0, NULL,
2581                                        0, true, NULL, 0);
2582                if (ret)
2583                        return ret;
2584        }
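            /* Block further use of the command queue while the reset is in progress. */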
2585        __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
2586
2587        return 0;
2588}
2589
2590static int
2591hns3vf_stop_service(struct hns3_adapter *hns)
2592{
2593        struct hns3_hw *hw = &hns->hw;
2594        struct rte_eth_dev *eth_dev;
2595
2596        eth_dev = &rte_eth_devices[hw->data->port_id];
2597        if (hw->adapter_state == HNS3_NIC_STARTED) {
2598                /*
2599                 * Make sure to update the link status before stopping the poll
2600                 * job, because updating the link status depends on it running.
2601                 */
2602                hns3vf_update_link_status(hw, ETH_LINK_DOWN, hw->mac.link_speed,
2603                                          hw->mac.link_duplex);
2604                hns3vf_stop_poll_job(eth_dev);
2605        }
2606        hw->mac.link_status = ETH_LINK_DOWN;
2607
2608        hns3_set_rxtx_function(eth_dev);
2609        rte_wmb();
2610        /* Disable datapath on secondary process. */
2611        hns3_mp_req_stop_rxtx(eth_dev);
2612        rte_delay_ms(hw->cfg_max_queues);
2613
2614        rte_spinlock_lock(&hw->lock);
2615        if (hw->adapter_state == HNS3_NIC_STARTED ||
2616            hw->adapter_state == HNS3_NIC_STOPPING) {
2617                hns3_enable_all_queues(hw, false);
2618                hns3vf_do_stop(hns);
2619                hw->reset.mbuf_deferred_free = true;
2620        } else
2621                hw->reset.mbuf_deferred_free = false;
2622
2623        /*
2624         * It is cumbersome for hardware to pick-and-choose entries for deletion
2625         * from table space. Hence, for a function reset, software intervention is
2626         * required to delete the entries.
2627         */
2628        if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0)
2629                hns3vf_configure_all_mc_mac_addr(hns, true);
2630        rte_spinlock_unlock(&hw->lock);
2631
2632        return 0;
2633}
2634
2635static int
2636hns3vf_start_service(struct hns3_adapter *hns)
2637{
2638        struct hns3_hw *hw = &hns->hw;
2639        struct rte_eth_dev *eth_dev;
2640
2641        eth_dev = &rte_eth_devices[hw->data->port_id];
2642        hns3_set_rxtx_function(eth_dev);
2643        hns3_mp_req_start_rxtx(eth_dev);
2644        if (hw->adapter_state == HNS3_NIC_STARTED) {
2645                hns3vf_start_poll_job(eth_dev);
2646
2647                /* Enable interrupt of all rx queues before enabling queues */
2648                hns3_dev_all_rx_queue_intr_enable(hw, true);
2649                /*
2650                 * Enable state of each rxq and txq will be recovered after
2651                 * reset, so we need to restore them before enable all tqps;
2652                 */
2653                hns3_restore_tqp_enable_state(hw);
2654                /*
2655                 * When finished the initialization, enable queues to receive
2656                 * and transmit packets.
2657                 */
2658                hns3_enable_all_queues(hw, true);
2659        }
2660
2661        return 0;
2662}
2663
2664static int
2665hns3vf_check_default_mac_change(struct hns3_hw *hw)
2666{
2667        char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
2668        struct rte_ether_addr *hw_mac;
2669        int ret;
2670
2671        /*
2672         * The hns3 PF kernel ethdev driver supports setting the VF MAC
2673         * address on the host with the "ip link set ..." command. If the PF
2674         * kernel driver sets the MAC address for a VF device after that VF
2675         * device has been initialized, the PF driver notifies the VF driver
2676         * to reset the VF device so that the new MAC address takes effect
2677         * immediately. The hns3 VF PMD driver should check whether the MAC
2678         * address has been changed by the PF kernel ethdev driver; if so,
2679         * the VF driver should configure the hardware with the new MAC
2680         * address during the hardware-recovery stage of the reset process.
2681         */
2682        ret = hns3vf_get_host_mac_addr(hw);
2683        if (ret)
2684                return ret;
2685
2686        hw_mac = (struct rte_ether_addr *)hw->mac.mac_addr;
2687        ret = rte_is_zero_ether_addr(hw_mac);
2688        if (ret) {
2689                rte_ether_addr_copy(&hw->data->mac_addrs[0], hw_mac);
2690        } else {
2691                ret = rte_is_same_ether_addr(&hw->data->mac_addrs[0], hw_mac);
2692                if (!ret) {
2693                        rte_ether_addr_copy(hw_mac, &hw->data->mac_addrs[0]);
2694                        hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
2695                                              &hw->data->mac_addrs[0]);
2696                        hns3_warn(hw, "Default MAC address has been changed to:"
2697                                  " %s by the host PF kernel ethdev driver",
2698                                  mac_str);
2699                }
2700        }
2701
2702        return 0;
2703}
2704
2705static int
2706hns3vf_restore_conf(struct hns3_adapter *hns)
2707{
2708        struct hns3_hw *hw = &hns->hw;
2709        int ret;
2710
2711        ret = hns3vf_check_default_mac_change(hw);
2712        if (ret)
2713                return ret;
2714
2715        ret = hns3vf_configure_mac_addr(hns, false);
2716        if (ret)
2717                return ret;
2718
2719        ret = hns3vf_configure_all_mc_mac_addr(hns, false);
2720        if (ret)
2721                goto err_mc_mac;
2722
2723        ret = hns3vf_restore_promisc(hns);
2724        if (ret)
2725                goto err_vlan_table;
2726
2727        ret = hns3vf_restore_vlan_conf(hns);
2728        if (ret)
2729                goto err_vlan_table;
2730
2731        ret = hns3vf_get_port_base_vlan_filter_state(hw);
2732        if (ret)
2733                goto err_vlan_table;
2734
2735        ret = hns3vf_restore_rx_interrupt(hw);
2736        if (ret)
2737                goto err_vlan_table;
2738
2739        ret = hns3_restore_gro_conf(hw);
2740        if (ret)
2741                goto err_vlan_table;
2742
2743        if (hw->adapter_state == HNS3_NIC_STARTED) {
2744                ret = hns3vf_do_start(hns, false);
2745                if (ret)
2746                        goto err_vlan_table;
2747                hns3_info(hw, "hns3vf dev restart successful!");
2748        } else if (hw->adapter_state == HNS3_NIC_STOPPING)
2749                hw->adapter_state = HNS3_NIC_CONFIGURED;
2750
2751        ret = hns3vf_set_alive(hw, true);
2752        if (ret) {
2753                hns3_err(hw, "failed to send alive msg to PF: %d", ret);
2754                goto err_vlan_table;
2755        }
2756
2757        return 0;
2758
2759err_vlan_table:
2760        hns3vf_configure_all_mc_mac_addr(hns, true);
2761err_mc_mac:
2762        hns3vf_configure_mac_addr(hns, true);
2763        return ret;
2764}
2765
2766static enum hns3_reset_level
2767hns3vf_get_reset_level(struct hns3_hw *hw, uint64_t *levels)
2768{
2769        enum hns3_reset_level reset_level;
2770
2771        /* return the highest priority reset level amongst all */
2772        if (hns3_atomic_test_bit(HNS3_VF_RESET, levels))
2773                reset_level = HNS3_VF_RESET;
2774        else if (hns3_atomic_test_bit(HNS3_VF_FULL_RESET, levels))
2775                reset_level = HNS3_VF_FULL_RESET;
2776        else if (hns3_atomic_test_bit(HNS3_VF_PF_FUNC_RESET, levels))
2777                reset_level = HNS3_VF_PF_FUNC_RESET;
2778        else if (hns3_atomic_test_bit(HNS3_VF_FUNC_RESET, levels))
2779                reset_level = HNS3_VF_FUNC_RESET;
2780        else if (hns3_atomic_test_bit(HNS3_FLR_RESET, levels))
2781                reset_level = HNS3_FLR_RESET;
2782        else
2783                reset_level = HNS3_NONE_RESET;
2784
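            /* Ignore pending resets of lower priority than the one in progress. */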
2785        if (hw->reset.level != HNS3_NONE_RESET && reset_level < hw->reset.level)
2786                return HNS3_NONE_RESET;
2787
2788        return reset_level;
2789}
2790
2791static void
2792hns3vf_reset_service(void *param)
2793{
2794        struct hns3_adapter *hns = (struct hns3_adapter *)param;
2795        struct hns3_hw *hw = &hns->hw;
2796        enum hns3_reset_level reset_level;
2797        struct timeval tv_delta;
2798        struct timeval tv_start;
2799        struct timeval tv;
2800        uint64_t msec;
2801
2802        /*
2803         * The interrupt is not triggered within the delay time.
2804         * The interrupt may have been lost. It is necessary to handle
2805         * the interrupt to recover from the error.
2806         */
2807        if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
2808                            SCHEDULE_DEFERRED) {
2809                __atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED,
2810                                 __ATOMIC_RELAXED);
2811                hns3_err(hw, "Handling interrupts in delayed tasks");
2812                hns3vf_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
2813                reset_level = hns3vf_get_reset_level(hw, &hw->reset.pending);
2814                if (reset_level == HNS3_NONE_RESET) {
2815                        hns3_err(hw, "No reset level is set, try VF reset");
2816                        hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
2817                }
2818        }
2819        __atomic_store_n(&hw->reset.schedule, SCHEDULE_NONE, __ATOMIC_RELAXED);
2820
2821        /*
2822         * Hardware reset has been notified, we now have to poll & check if
2823         * hardware has actually completed the reset sequence.
2824         */
2825        reset_level = hns3vf_get_reset_level(hw, &hw->reset.pending);
2826        if (reset_level != HNS3_NONE_RESET) {
2827                hns3_clock_gettime(&tv_start);
2828                hns3_reset_process(hns, reset_level);
2829                hns3_clock_gettime(&tv);
2830                timersub(&tv, &tv_start, &tv_delta);
2831                msec = hns3_clock_calctime_ms(&tv_delta);
2832                if (msec > HNS3_RESET_PROCESS_MS)
2833                        hns3_err(hw, "%d handle long time delta %" PRIu64
2834                                 " ms time=%ld.%.6ld",
2835                                 hw->reset.level, msec, tv.tv_sec, tv.tv_usec);
2836        }
2837}
2838
2839static int
2840hns3vf_reinit_dev(struct hns3_adapter *hns)
2841{
2842        struct rte_eth_dev *eth_dev = &rte_eth_devices[hns->hw.data->port_id];
2843        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2844        struct hns3_hw *hw = &hns->hw;
2845        int ret;
2846
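            /* An FLR clears PCI config state, so bus mastering must be re-enabled. */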
2847        if (hw->reset.level == HNS3_VF_FULL_RESET) {
2848                rte_intr_disable(&pci_dev->intr_handle);
2849                ret = hns3vf_set_bus_master(pci_dev, true);
2850                if (ret < 0) {
2851                        hns3_err(hw, "failed to set pci bus, ret = %d", ret);
2852                        return ret;
2853                }
2854        }
2855
2856        /* Firmware command initialize */
2857        ret = hns3_cmd_init(hw);
2858        if (ret) {
2859                hns3_err(hw, "Failed to init cmd: %d", ret);
2860                return ret;
2861        }
2862
2863        if (hw->reset.level == HNS3_VF_FULL_RESET) {
2864                /*
2865                 * UIO enables msix by writing the pcie configuration space,
2866                 * while vfio_pci enables msix in rte_intr_enable.
2867                 */
2868                if (pci_dev->kdrv == RTE_PCI_KDRV_IGB_UIO ||
2869                    pci_dev->kdrv == RTE_PCI_KDRV_UIO_GENERIC) {
2870                        if (hns3vf_enable_msix(pci_dev, true))
2871                                hns3_err(hw, "Failed to enable msix");
2872                }
2873
2874                rte_intr_enable(&pci_dev->intr_handle);
2875        }
2876
2877        ret = hns3_reset_all_tqps(hns);
2878        if (ret) {
2879                hns3_err(hw, "Failed to reset all queues: %d", ret);
2880                return ret;
2881        }
2882
2883        ret = hns3vf_init_hardware(hns);
2884        if (ret) {
2885                hns3_err(hw, "Failed to init hardware: %d", ret);
2886                return ret;
2887        }
2888
2889        return 0;
2890}
2891
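/*
 * Generic control-path ethdev callbacks for the VF. The Rx/Tx burst
 * functions are not listed here; they are selected at runtime by
 * hns3_set_rxtx_function() during hns3vf_dev_init().
 */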
static const struct eth_dev_ops hns3vf_eth_dev_ops = {
        .dev_configure      = hns3vf_dev_configure,
        .dev_start          = hns3vf_dev_start,
        .dev_stop           = hns3vf_dev_stop,
        .dev_close          = hns3vf_dev_close,
        .mtu_set            = hns3vf_dev_mtu_set,
        .promiscuous_enable = hns3vf_dev_promiscuous_enable,
        .promiscuous_disable = hns3vf_dev_promiscuous_disable,
        .allmulticast_enable = hns3vf_dev_allmulticast_enable,
        .allmulticast_disable = hns3vf_dev_allmulticast_disable,
        .stats_get          = hns3_stats_get,
        .stats_reset        = hns3_stats_reset,
        .xstats_get         = hns3_dev_xstats_get,
        .xstats_get_names   = hns3_dev_xstats_get_names,
        .xstats_reset       = hns3_dev_xstats_reset,
        .xstats_get_by_id   = hns3_dev_xstats_get_by_id,
        .xstats_get_names_by_id = hns3_dev_xstats_get_names_by_id,
        .dev_infos_get      = hns3vf_dev_infos_get,
        .fw_version_get     = hns3vf_fw_version_get,
        .rx_queue_setup     = hns3_rx_queue_setup,
        .tx_queue_setup     = hns3_tx_queue_setup,
        .rx_queue_release   = hns3_dev_rx_queue_release,
        .tx_queue_release   = hns3_dev_tx_queue_release,
        .rx_queue_start     = hns3_dev_rx_queue_start,
        .rx_queue_stop      = hns3_dev_rx_queue_stop,
        .tx_queue_start     = hns3_dev_tx_queue_start,
        .tx_queue_stop      = hns3_dev_tx_queue_stop,
        .rx_queue_intr_enable   = hns3_dev_rx_queue_intr_enable,
        .rx_queue_intr_disable  = hns3_dev_rx_queue_intr_disable,
        .rxq_info_get       = hns3_rxq_info_get,
        .txq_info_get       = hns3_txq_info_get,
        .rx_burst_mode_get  = hns3_rx_burst_mode_get,
        .tx_burst_mode_get  = hns3_tx_burst_mode_get,
        .mac_addr_add       = hns3vf_add_mac_addr,
        .mac_addr_remove    = hns3vf_remove_mac_addr,
        .mac_addr_set       = hns3vf_set_default_mac_addr,
        .set_mc_addr_list   = hns3vf_set_mc_mac_addr_list,
        .link_update        = hns3vf_dev_link_update,
        .rss_hash_update    = hns3_dev_rss_hash_update,
        .rss_hash_conf_get  = hns3_dev_rss_hash_conf_get,
        .reta_update        = hns3_dev_rss_reta_update,
        .reta_query         = hns3_dev_rss_reta_query,
        .flow_ops_get       = hns3_dev_flow_ops_get,
        .vlan_filter_set    = hns3vf_vlan_filter_set,
        .vlan_offload_set   = hns3vf_vlan_offload_set,
        .get_reg            = hns3_get_regs,
        .dev_supported_ptypes_get = hns3_dev_supported_ptypes_get,
        .tx_done_cleanup    = hns3_tx_done_cleanup,
};

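/* Hooks driven by the common reset state machine (hns3_reset_process()). */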
static const struct hns3_reset_ops hns3vf_reset_ops = {
        .reset_service       = hns3vf_reset_service,
        .stop_service        = hns3vf_stop_service,
        .prepare_reset       = hns3vf_prepare_reset,
        .wait_hardware_ready = hns3vf_wait_hardware_ready,
        .reinit_dev          = hns3vf_reinit_dev,
        .restore_conf        = hns3vf_restore_conf,
        .start_service       = hns3vf_start_service,
};

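/*
 * Main ethdev init entry. A secondary process only attaches to the shared
 * data and installs the Rx/Tx functions; the primary process performs the
 * full VF hardware and reset-framework initialization.
 */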
static int
hns3vf_dev_init(struct rte_eth_dev *eth_dev)
{
        struct hns3_adapter *hns = eth_dev->data->dev_private;
        struct hns3_hw *hw = &hns->hw;
        int ret;

        PMD_INIT_FUNC_TRACE();

        hns3_flow_init(eth_dev);

        hns3_set_rxtx_function(eth_dev);
        eth_dev->dev_ops = &hns3vf_eth_dev_ops;
        eth_dev->rx_queue_count = hns3_rx_queue_count;
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                ret = hns3_mp_init_secondary();
                if (ret) {
                        PMD_INIT_LOG(ERR, "Failed to init for secondary "
                                          "process, ret = %d", ret);
                        goto err_mp_init_secondary;
                }
                hw->secondary_cnt++;
                hns3_tx_push_init(eth_dev);
                return 0;
        }

        ret = hns3_mp_init_primary();
        if (ret) {
                PMD_INIT_LOG(ERR,
                             "Failed to init for primary process, ret = %d",
                             ret);
                goto err_mp_init_primary;
        }

        hw->adapter_state = HNS3_NIC_UNINITIALIZED;
        hns->is_vf = true;
        hw->data = eth_dev->data;
        hns3_parse_devargs(eth_dev);

        ret = hns3_reset_init(hw);
        if (ret)
                goto err_init_reset;
        hw->reset.ops = &hns3vf_reset_ops;

        ret = hns3vf_init_vf(eth_dev);
        if (ret) {
                PMD_INIT_LOG(ERR, "Failed to init vf: %d", ret);
                goto err_init_vf;
        }

        /* Allocate memory for storing MAC addresses */
        eth_dev->data->mac_addrs = rte_zmalloc("hns3vf-mac",
                                               sizeof(struct rte_ether_addr) *
                                               HNS3_VF_UC_MACADDR_NUM, 0);
        if (eth_dev->data->mac_addrs == NULL) {
                PMD_INIT_LOG(ERR, "Failed to allocate %zu bytes needed "
                             "to store MAC addresses",
                             sizeof(struct rte_ether_addr) *
                             HNS3_VF_UC_MACADDR_NUM);
                ret = -ENOMEM;
                goto err_rte_zmalloc;
        }

        /*
         * The hns3 kernel PF driver supports setting a VF MAC address from
         * the host with the "ip link set ..." command. To avoid problems
         * such as the VF PMD failing to send and receive packets after the
         * user changes the MAC address this way, the VF PMD follows the
         * same MAC address policy as the kernel driver during
         * initialization: if the host assigned a MAC address to the VF via
         * the ip command, start with that address; otherwise start with a
         * randomly generated one.
         */
        if (rte_is_zero_ether_addr((struct rte_ether_addr *)hw->mac.mac_addr))
                rte_eth_random_addr(hw->mac.mac_addr);
        rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.mac_addr,
                            &eth_dev->data->mac_addrs[0]);

        hw->adapter_state = HNS3_NIC_INITIALIZED;

        if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
                            SCHEDULE_PENDING) {
                hns3_err(hw, "Reschedule reset service after dev_init");
                hns3_schedule_reset(hns);
        } else {
                /* The IMP waits for this ready flag before starting a reset */
                hns3_notify_reset_ready(hw, false);
        }
        rte_eal_alarm_set(HNS3VF_KEEP_ALIVE_INTERVAL, hns3vf_keep_alive_handler,
                          eth_dev);
        return 0;

err_rte_zmalloc:
        hns3vf_uninit_vf(eth_dev);

err_init_vf:
        rte_free(hw->reset.wait_data);

err_init_reset:
        hns3_mp_uninit_primary();

err_mp_init_primary:
err_mp_init_secondary:
        eth_dev->dev_ops = NULL;
        eth_dev->rx_pkt_burst = NULL;
        eth_dev->rx_descriptor_status = NULL;
        eth_dev->tx_pkt_burst = NULL;
        eth_dev->tx_pkt_prepare = NULL;
        eth_dev->tx_descriptor_status = NULL;

        return ret;
}

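/*
 * Ethdev uninit entry. Only the primary process tears the device down;
 * the device is closed first if the application has not already done so.
 */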
static int
hns3vf_dev_uninit(struct rte_eth_dev *eth_dev)
{
        struct hns3_adapter *hns = eth_dev->data->dev_private;
        struct hns3_hw *hw = &hns->hw;

        PMD_INIT_FUNC_TRACE();

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        if (hw->adapter_state < HNS3_NIC_CLOSING)
                hns3vf_dev_close(eth_dev);

        hw->adapter_state = HNS3_NIC_REMOVED;
        return 0;
}

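/* PCI probe/remove entry points wired into the rte_pci_driver below. */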
static int
eth_hns3vf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                     struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_probe(pci_dev,
                                             sizeof(struct hns3_adapter),
                                             hns3vf_dev_init);
}

static int
eth_hns3vf_pci_remove(struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_remove(pci_dev, hns3vf_dev_uninit);
}

static const struct rte_pci_id pci_id_hns3vf_map[] = {
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_VF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_RDMA_PFC_VF) },
        { .vendor_id = 0, }, /* sentinel */
};

static struct rte_pci_driver rte_hns3vf_pmd = {
        .id_table = pci_id_hns3vf_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
        .probe = eth_hns3vf_pci_probe,
        .remove = eth_hns3vf_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_hns3_vf, rte_hns3vf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_hns3_vf, pci_id_hns3vf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_hns3_vf, "* igb_uio | vfio-pci");
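/*
 * Devargs accepted by this driver. An illustrative invocation, assuming
 * HNS3_DEVARG_RX_FUNC_HINT / HNS3_DEVARG_TX_FUNC_HINT expand to
 * "rx_func_hint" / "tx_func_hint" and using a placeholder PCI address:
 *
 *   dpdk-testpmd -a 0000:7d:01.0,rx_func_hint=vec,tx_func_hint=common -- -i
 */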
RTE_PMD_REGISTER_PARAM_STRING(net_hns3_vf,
                HNS3_DEVARG_RX_FUNC_HINT "=vec|sve|simple|common "
                HNS3_DEVARG_TX_FUNC_HINT "=vec|sve|simple|common "
                HNS3_DEVARG_DEV_CAPS_MASK "=<1-65535> ");
