/* dpdk/drivers/net/hns3/hns3_ethdev.c */
   1/* SPDX-License-Identifier: BSD-3-Clause
   2 * Copyright(c) 2018-2021 HiSilicon Limited.
   3 */
   4
   5#include <rte_alarm.h>
   6#include <rte_bus_pci.h>
   7#include <ethdev_pci.h>
   8#include <rte_pci.h>
   9#include <rte_kvargs.h>
  10
  11#include "hns3_ethdev.h"
  12#include "hns3_logs.h"
  13#include "hns3_rxtx.h"
  14#include "hns3_intr.h"
  15#include "hns3_regs.h"
  16#include "hns3_dcb.h"
  17#include "hns3_mp.h"
  18
  19#define HNS3_SERVICE_INTERVAL           1000000 /* us */
  20#define HNS3_SERVICE_QUICK_INTERVAL     10
  21#define HNS3_INVALID_PVID               0xFFFF
  22
  23#define HNS3_FILTER_TYPE_VF             0
  24#define HNS3_FILTER_TYPE_PORT           1
  25#define HNS3_FILTER_FE_EGRESS_V1_B      BIT(0)
  26#define HNS3_FILTER_FE_NIC_INGRESS_B    BIT(0)
  27#define HNS3_FILTER_FE_NIC_EGRESS_B     BIT(1)
  28#define HNS3_FILTER_FE_ROCE_INGRESS_B   BIT(2)
  29#define HNS3_FILTER_FE_ROCE_EGRESS_B    BIT(3)
  30#define HNS3_FILTER_FE_EGRESS           (HNS3_FILTER_FE_NIC_EGRESS_B \
  31                                        | HNS3_FILTER_FE_ROCE_EGRESS_B)
  32#define HNS3_FILTER_FE_INGRESS          (HNS3_FILTER_FE_NIC_INGRESS_B \
  33                                        | HNS3_FILTER_FE_ROCE_INGRESS_B)
  34
  35/* Reset related Registers */
  36#define HNS3_GLOBAL_RESET_BIT           0
  37#define HNS3_CORE_RESET_BIT             1
  38#define HNS3_IMP_RESET_BIT              2
  39#define HNS3_FUN_RST_ING_B              0
  40
  41#define HNS3_VECTOR0_IMP_RESET_INT_B    1
  42#define HNS3_VECTOR0_IMP_CMDQ_ERR_B     4U
  43#define HNS3_VECTOR0_IMP_RD_POISON_B    5U
  44#define HNS3_VECTOR0_ALL_MSIX_ERR_B     6U
  45
  46#define HNS3_RESET_WAIT_MS      100
  47#define HNS3_RESET_WAIT_CNT     200
  48
  49/* FEC mode order defined in HNS3 hardware */
  50#define HNS3_HW_FEC_MODE_NOFEC  0
  51#define HNS3_HW_FEC_MODE_BASER  1
  52#define HNS3_HW_FEC_MODE_RS     2
  53
/*
 * Event sources demultiplexed from the misc (vector 0) interrupt by
 * hns3_check_event_cause(). Listed roughly in handling priority order.
 */
enum hns3_evt_cause {
	HNS3_VECTOR0_EVENT_RST,		/* IMP or global reset interrupt */
	HNS3_VECTOR0_EVENT_MBX,		/* mailbox (CMDQ RX) interrupt */
	HNS3_VECTOR0_EVENT_ERR,		/* RAS / MSI-X hardware error */
	HNS3_VECTOR0_EVENT_PTP,		/* 1588 (PTP) interrupt */
	HNS3_VECTOR0_EVENT_OTHER,	/* unrecognized source */
};
  61
/*
 * FEC capabilities advertised per link speed. Each entry maps a link speed
 * to the bitmask of FEC modes the hardware supports at that speed; used to
 * answer the ethdev FEC capability query.
 */
static const struct rte_eth_fec_capa speed_fec_capa_tbl[] = {
	{ ETH_SPEED_NUM_10G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
			     RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
			     RTE_ETH_FEC_MODE_CAPA_MASK(BASER) },

	{ ETH_SPEED_NUM_25G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
			     RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
			     RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
			     RTE_ETH_FEC_MODE_CAPA_MASK(RS) },

	{ ETH_SPEED_NUM_40G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
			     RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
			     RTE_ETH_FEC_MODE_CAPA_MASK(BASER) },

	{ ETH_SPEED_NUM_50G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
			     RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
			     RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
			     RTE_ETH_FEC_MODE_CAPA_MASK(RS) },

	{ ETH_SPEED_NUM_100G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
			      RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
			      RTE_ETH_FEC_MODE_CAPA_MASK(RS) },

	{ ETH_SPEED_NUM_200G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
			      RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
			      RTE_ETH_FEC_MODE_CAPA_MASK(RS) }
};
  89
  90static enum hns3_reset_level hns3_get_reset_level(struct hns3_adapter *hns,
  91                                                 uint64_t *levels);
  92static int hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
  93static int hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid,
  94                                    int on);
  95static int hns3_update_link_info(struct rte_eth_dev *eth_dev);
  96static bool hns3_update_link_status(struct hns3_hw *hw);
  97
  98static int hns3_add_mc_addr(struct hns3_hw *hw,
  99                            struct rte_ether_addr *mac_addr);
 100static int hns3_remove_mc_addr(struct hns3_hw *hw,
 101                            struct rte_ether_addr *mac_addr);
 102static int hns3_restore_fec(struct hns3_hw *hw);
 103static int hns3_query_dev_fec_info(struct hns3_hw *hw);
 104static int hns3_do_stop(struct hns3_adapter *hns);
 105static int hns3_check_port_speed(struct hns3_hw *hw, uint32_t link_speeds);
 106
 107void hns3_ether_format_addr(char *buf, uint16_t size,
 108                            const struct rte_ether_addr *ether_addr)
 109{
 110        snprintf(buf, size, "%02X:**:**:**:%02X:%02X",
 111                ether_addr->addr_bytes[0],
 112                ether_addr->addr_bytes[4],
 113                ether_addr->addr_bytes[5]);
 114}
 115
/* Mask the misc (vector 0) interrupt by writing 0 to its enable register. */
static void
hns3_pf_disable_irq0(struct hns3_hw *hw)
{
	hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 0);
}
 121
/* Unmask the misc (vector 0) interrupt by writing 1 to its enable register. */
static void
hns3_pf_enable_irq0(struct hns3_hw *hw)
{
	hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 1);
}
 127
 128static enum hns3_evt_cause
 129hns3_proc_imp_reset_event(struct hns3_adapter *hns, bool is_delay,
 130                          uint32_t *vec_val)
 131{
 132        struct hns3_hw *hw = &hns->hw;
 133
 134        __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
 135        hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
 136        *vec_val = BIT(HNS3_VECTOR0_IMPRESET_INT_B);
 137        if (!is_delay) {
 138                hw->reset.stats.imp_cnt++;
 139                hns3_warn(hw, "IMP reset detected, clear reset status");
 140        } else {
 141                hns3_schedule_delayed_reset(hns);
 142                hns3_warn(hw, "IMP reset detected, don't clear reset status");
 143        }
 144
 145        return HNS3_VECTOR0_EVENT_RST;
 146}
 147
 148static enum hns3_evt_cause
 149hns3_proc_global_reset_event(struct hns3_adapter *hns, bool is_delay,
 150                             uint32_t *vec_val)
 151{
 152        struct hns3_hw *hw = &hns->hw;
 153
 154        __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
 155        hns3_atomic_set_bit(HNS3_GLOBAL_RESET, &hw->reset.pending);
 156        *vec_val = BIT(HNS3_VECTOR0_GLOBALRESET_INT_B);
 157        if (!is_delay) {
 158                hw->reset.stats.global_cnt++;
 159                hns3_warn(hw, "Global reset detected, clear reset status");
 160        } else {
 161                hns3_schedule_delayed_reset(hns);
 162                hns3_warn(hw,
 163                          "Global reset detected, don't clear reset status");
 164        }
 165
 166        return HNS3_VECTOR0_EVENT_RST;
 167}
 168
/*
 * Decode the misc (vector 0) interrupt into a single event cause.
 *
 * The candidate sources are checked in strict priority order: IMP reset,
 * global reset, 1588/PTP, MSI-X/RAS error, mailbox, then "other". Only the
 * highest-priority pending source is reported; its clear mask is returned
 * through @clearval. Passing @clearval == NULL tells the reset helpers the
 * caller will not clear status, so resets are scheduled on the delayed path.
 */
static enum hns3_evt_cause
hns3_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval)
{
	struct hns3_hw *hw = &hns->hw;
	uint32_t vector0_int_stats;
	uint32_t cmdq_src_val;
	uint32_t hw_err_src_reg;
	uint32_t val;
	enum hns3_evt_cause ret;
	bool is_delay;

	/* fetch the events from their corresponding regs */
	vector0_int_stats = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG);
	cmdq_src_val = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG);
	hw_err_src_reg = hns3_read_dev(hw, HNS3_RAS_PF_OTHER_INT_STS_REG);

	/* No clearval: the caller cannot clear status, so delay any reset. */
	is_delay = clearval == NULL ? true : false;
	/*
	 * Assumption: If by any chance reset and mailbox events are reported
	 * together then we will only process reset event and defer the
	 * processing of the mailbox events. Since, we would have not cleared
	 * RX CMDQ event this time we would receive again another interrupt
	 * from H/W just for the mailbox.
	 */
	if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_int_stats) { /* IMP */
		ret = hns3_proc_imp_reset_event(hns, is_delay, &val);
		goto out;
	}

	/* Global reset */
	if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_int_stats) {
		ret = hns3_proc_global_reset_event(hns, is_delay, &val);
		goto out;
	}

	/* Check for vector0 1588 event source */
	if (BIT(HNS3_VECTOR0_1588_INT_B) & vector0_int_stats) {
		val = BIT(HNS3_VECTOR0_1588_INT_B);
		ret = HNS3_VECTOR0_EVENT_PTP;
		goto out;
	}

	/* check for vector0 msix event source */
	if (vector0_int_stats & HNS3_VECTOR0_REG_MSIX_MASK ||
	    hw_err_src_reg & HNS3_RAS_REG_NFE_MASK) {
		val = vector0_int_stats | hw_err_src_reg;
		ret = HNS3_VECTOR0_EVENT_ERR;
		goto out;
	}

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HNS3_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_val) {
		cmdq_src_val &= ~BIT(HNS3_VECTOR0_RX_CMDQ_INT_B);
		val = cmdq_src_val;
		ret = HNS3_VECTOR0_EVENT_MBX;
		goto out;
	}

	/* Fall back: report raw vector0 status as an unclassified event. */
	val = vector0_int_stats;
	ret = HNS3_VECTOR0_EVENT_OTHER;
out:

	if (clearval)
		*clearval = val;
	return ret;
}
 235
 236static bool
 237hns3_is_1588_event_type(uint32_t event_type)
 238{
 239        return (event_type == HNS3_VECTOR0_EVENT_PTP);
 240}
 241
 242static void
 243hns3_clear_event_cause(struct hns3_hw *hw, uint32_t event_type, uint32_t regclr)
 244{
 245        if (event_type == HNS3_VECTOR0_EVENT_RST ||
 246            hns3_is_1588_event_type(event_type))
 247                hns3_write_dev(hw, HNS3_MISC_RESET_STS_REG, regclr);
 248        else if (event_type == HNS3_VECTOR0_EVENT_MBX)
 249                hns3_write_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG, regclr);
 250}
 251
/*
 * Clear every vector0 event source (reset, mailbox, PTP) during probe so a
 * stale interrupt latched before the driver attached cannot fire later.
 * If a reset interrupt is already asserted we only warn; the clear below
 * still acknowledges it.
 */
static void
hns3_clear_all_event_cause(struct hns3_hw *hw)
{
	uint32_t vector0_int_stats;

	vector0_int_stats = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG);
	if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_int_stats)
		hns3_warn(hw, "Probe during IMP reset interrupt");

	if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_int_stats)
		hns3_warn(hw, "Probe during Global reset interrupt");

	hns3_clear_event_cause(hw, HNS3_VECTOR0_EVENT_RST,
			       BIT(HNS3_VECTOR0_IMPRESET_INT_B) |
			       BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) |
			       BIT(HNS3_VECTOR0_CORERESET_INT_B));
	hns3_clear_event_cause(hw, HNS3_VECTOR0_EVENT_MBX, 0);
	hns3_clear_event_cause(hw, HNS3_VECTOR0_EVENT_PTP,
				BIT(HNS3_VECTOR0_1588_INT_B));
}
 272
 273static void
 274hns3_handle_mac_tnl(struct hns3_hw *hw)
 275{
 276        struct hns3_cmd_desc desc;
 277        uint32_t status;
 278        int ret;
 279
 280        /* query and clear mac tnl interrupt */
 281        hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_MAC_TNL_INT, true);
 282        ret = hns3_cmd_send(hw, &desc, 1);
 283        if (ret) {
 284                hns3_err(hw, "failed to query mac tnl int, ret = %d.", ret);
 285                return;
 286        }
 287
 288        status = rte_le_to_cpu_32(desc.data[0]);
 289        if (status) {
 290                hns3_warn(hw, "mac tnl int occurs, status = 0x%x.", status);
 291                hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CLEAR_MAC_TNL_INT,
 292                                          false);
 293                desc.data[0] = rte_cpu_to_le_32(HNS3_MAC_TNL_INT_CLR);
 294                ret = hns3_cmd_send(hw, &desc, 1);
 295                if (ret)
 296                        hns3_err(hw, "failed to clear mac tnl int, ret = %d.",
 297                                 ret);
 298        }
 299}
 300
/*
 * Misc (vector 0) interrupt handler, shared by reset, mailbox, RAS/MSI-X
 * error and PTP sources. The vector is masked for the duration of the
 * handler, the cause is decoded and acknowledged, then the matching action
 * is dispatched. Reset work itself runs later in the reset service; only
 * scheduling happens here.
 */
static void
hns3_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	enum hns3_evt_cause event_cause;
	uint32_t clearval = 0;
	uint32_t vector0_int;
	uint32_t ras_int;
	uint32_t cmdq_int;

	/* Disable interrupt */
	hns3_pf_disable_irq0(hw);

	event_cause = hns3_check_event_cause(hns, &clearval);
	/* Raw status snapshots are read only for the diagnostics logs below. */
	vector0_int = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG);
	ras_int = hns3_read_dev(hw, HNS3_RAS_PF_OTHER_INT_STS_REG);
	cmdq_int = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG);
	hns3_clear_event_cause(hw, event_cause, clearval);
	/* vector 0 interrupt is shared with reset and mailbox source events. */
	if (event_cause == HNS3_VECTOR0_EVENT_ERR) {
		hns3_warn(hw, "received interrupt: vector0_int_stat:0x%x "
			  "ras_int_stat:0x%x cmdq_int_stat:0x%x",
			  vector0_int, ras_int, cmdq_int);
		hns3_handle_mac_tnl(hw);
		hns3_handle_error(hns);
	} else if (event_cause == HNS3_VECTOR0_EVENT_RST) {
		hns3_warn(hw, "received reset interrupt");
		hns3_schedule_reset(hns);
	} else if (event_cause == HNS3_VECTOR0_EVENT_MBX) {
		hns3_dev_handle_mbx_msg(hw);
	} else {
		hns3_warn(hw, "received unknown event: vector0_int_stat:0x%x "
			  "ras_int_stat:0x%x cmdq_int_stat:0x%x",
			  vector0_int, ras_int, cmdq_int);
	}

	/* Re-enable vector0 unconditionally; reset handling is deferred. */
	hns3_pf_enable_irq0(hw);
}
 342
 343static int
 344hns3_set_port_vlan_filter(struct hns3_adapter *hns, uint16_t vlan_id, int on)
 345{
 346#define HNS3_VLAN_ID_OFFSET_STEP        160
 347#define HNS3_VLAN_BYTE_SIZE             8
 348        struct hns3_vlan_filter_pf_cfg_cmd *req;
 349        struct hns3_hw *hw = &hns->hw;
 350        uint8_t vlan_offset_byte_val;
 351        struct hns3_cmd_desc desc;
 352        uint8_t vlan_offset_byte;
 353        uint8_t vlan_offset_base;
 354        int ret;
 355
 356        hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_FILTER_PF_CFG, false);
 357
 358        vlan_offset_base = vlan_id / HNS3_VLAN_ID_OFFSET_STEP;
 359        vlan_offset_byte = (vlan_id % HNS3_VLAN_ID_OFFSET_STEP) /
 360                           HNS3_VLAN_BYTE_SIZE;
 361        vlan_offset_byte_val = 1 << (vlan_id % HNS3_VLAN_BYTE_SIZE);
 362
 363        req = (struct hns3_vlan_filter_pf_cfg_cmd *)desc.data;
 364        req->vlan_offset = vlan_offset_base;
 365        req->vlan_cfg = on ? 0 : 1;
 366        req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
 367
 368        ret = hns3_cmd_send(hw, &desc, 1);
 369        if (ret)
 370                hns3_err(hw, "set port vlan id failed, vlan_id =%u, ret =%d",
 371                         vlan_id, ret);
 372
 373        return ret;
 374}
 375
 376static void
 377hns3_rm_dev_vlan_table(struct hns3_adapter *hns, uint16_t vlan_id)
 378{
 379        struct hns3_user_vlan_table *vlan_entry;
 380        struct hns3_pf *pf = &hns->pf;
 381
 382        LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
 383                if (vlan_entry->vlan_id == vlan_id) {
 384                        if (vlan_entry->hd_tbl_status)
 385                                hns3_set_port_vlan_filter(hns, vlan_id, 0);
 386                        LIST_REMOVE(vlan_entry, next);
 387                        rte_free(vlan_entry);
 388                        break;
 389                }
 390        }
 391}
 392
 393static void
 394hns3_add_dev_vlan_table(struct hns3_adapter *hns, uint16_t vlan_id,
 395                        bool writen_to_tbl)
 396{
 397        struct hns3_user_vlan_table *vlan_entry;
 398        struct hns3_hw *hw = &hns->hw;
 399        struct hns3_pf *pf = &hns->pf;
 400
 401        LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
 402                if (vlan_entry->vlan_id == vlan_id)
 403                        return;
 404        }
 405
 406        vlan_entry = rte_zmalloc("hns3_vlan_tbl", sizeof(*vlan_entry), 0);
 407        if (vlan_entry == NULL) {
 408                hns3_err(hw, "Failed to malloc hns3 vlan table");
 409                return;
 410        }
 411
 412        vlan_entry->hd_tbl_status = writen_to_tbl;
 413        vlan_entry->vlan_id = vlan_id;
 414
 415        LIST_INSERT_HEAD(&pf->vlan_list, vlan_entry, next);
 416}
 417
 418static int
 419hns3_restore_vlan_table(struct hns3_adapter *hns)
 420{
 421        struct hns3_user_vlan_table *vlan_entry;
 422        struct hns3_hw *hw = &hns->hw;
 423        struct hns3_pf *pf = &hns->pf;
 424        uint16_t vlan_id;
 425        int ret = 0;
 426
 427        if (hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_ENABLE)
 428                return hns3_vlan_pvid_configure(hns,
 429                                                hw->port_base_vlan_cfg.pvid, 1);
 430
 431        LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
 432                if (vlan_entry->hd_tbl_status) {
 433                        vlan_id = vlan_entry->vlan_id;
 434                        ret = hns3_set_port_vlan_filter(hns, vlan_id, 1);
 435                        if (ret)
 436                                break;
 437                }
 438        }
 439
 440        return ret;
 441}
 442
 443static int
 444hns3_vlan_filter_configure(struct hns3_adapter *hns, uint16_t vlan_id, int on)
 445{
 446        struct hns3_hw *hw = &hns->hw;
 447        bool writen_to_tbl = false;
 448        int ret = 0;
 449
 450        /*
 451         * When vlan filter is enabled, hardware regards packets without vlan
 452         * as packets with vlan 0. So, to receive packets without vlan, vlan id
 453         * 0 is not allowed to be removed by rte_eth_dev_vlan_filter.
 454         */
 455        if (on == 0 && vlan_id == 0)
 456                return 0;
 457
 458        /*
 459         * When port base vlan enabled, we use port base vlan as the vlan
 460         * filter condition. In this case, we don't update vlan filter table
 461         * when user add new vlan or remove exist vlan, just update the
 462         * vlan list. The vlan id in vlan list will be written in vlan filter
 463         * table until port base vlan disabled
 464         */
 465        if (hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_DISABLE) {
 466                ret = hns3_set_port_vlan_filter(hns, vlan_id, on);
 467                writen_to_tbl = true;
 468        }
 469
 470        if (ret == 0) {
 471                if (on)
 472                        hns3_add_dev_vlan_table(hns, vlan_id, writen_to_tbl);
 473                else
 474                        hns3_rm_dev_vlan_table(hns, vlan_id);
 475        }
 476        return ret;
 477}
 478
/*
 * ethdev vlan_filter_set callback: add/remove @vlan_id under the hw lock.
 * Returns 0 on success or a negative errno-style code from the configure
 * helper.
 */
static int
hns3_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	rte_spinlock_lock(&hw->lock);
	ret = hns3_vlan_filter_configure(hns, vlan_id, on);
	rte_spinlock_unlock(&hw->lock);
	return ret;
}
 491
 492static int
 493hns3_vlan_tpid_configure(struct hns3_adapter *hns, enum rte_vlan_type vlan_type,
 494                         uint16_t tpid)
 495{
 496        struct hns3_rx_vlan_type_cfg_cmd *rx_req;
 497        struct hns3_tx_vlan_type_cfg_cmd *tx_req;
 498        struct hns3_hw *hw = &hns->hw;
 499        struct hns3_cmd_desc desc;
 500        int ret;
 501
 502        if ((vlan_type != ETH_VLAN_TYPE_INNER &&
 503             vlan_type != ETH_VLAN_TYPE_OUTER)) {
 504                hns3_err(hw, "Unsupported vlan type, vlan_type =%d", vlan_type);
 505                return -EINVAL;
 506        }
 507
 508        if (tpid != RTE_ETHER_TYPE_VLAN) {
 509                hns3_err(hw, "Unsupported vlan tpid, vlan_type =%d", vlan_type);
 510                return -EINVAL;
 511        }
 512
 513        hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_TYPE_ID, false);
 514        rx_req = (struct hns3_rx_vlan_type_cfg_cmd *)desc.data;
 515
 516        if (vlan_type == ETH_VLAN_TYPE_OUTER) {
 517                rx_req->ot_fst_vlan_type = rte_cpu_to_le_16(tpid);
 518                rx_req->ot_sec_vlan_type = rte_cpu_to_le_16(tpid);
 519        } else if (vlan_type == ETH_VLAN_TYPE_INNER) {
 520                rx_req->ot_fst_vlan_type = rte_cpu_to_le_16(tpid);
 521                rx_req->ot_sec_vlan_type = rte_cpu_to_le_16(tpid);
 522                rx_req->in_fst_vlan_type = rte_cpu_to_le_16(tpid);
 523                rx_req->in_sec_vlan_type = rte_cpu_to_le_16(tpid);
 524        }
 525
 526        ret = hns3_cmd_send(hw, &desc, 1);
 527        if (ret) {
 528                hns3_err(hw, "Send rxvlan protocol type command fail, ret =%d",
 529                         ret);
 530                return ret;
 531        }
 532
 533        hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_INSERT, false);
 534
 535        tx_req = (struct hns3_tx_vlan_type_cfg_cmd *)desc.data;
 536        tx_req->ot_vlan_type = rte_cpu_to_le_16(tpid);
 537        tx_req->in_vlan_type = rte_cpu_to_le_16(tpid);
 538
 539        ret = hns3_cmd_send(hw, &desc, 1);
 540        if (ret)
 541                hns3_err(hw, "Send txvlan protocol type command fail, ret =%d",
 542                         ret);
 543        return ret;
 544}
 545
/*
 * ethdev vlan_tpid_set callback: configure the vlan TPID under the hw lock.
 * Returns 0 on success or a negative code from the configure helper.
 */
static int
hns3_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
		   uint16_t tpid)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	rte_spinlock_lock(&hw->lock);
	ret = hns3_vlan_tpid_configure(hns, vlan_type, tpid);
	rte_spinlock_unlock(&hw->lock);
	return ret;
}
 559
 560static int
 561hns3_set_vlan_rx_offload_cfg(struct hns3_adapter *hns,
 562                             struct hns3_rx_vtag_cfg *vcfg)
 563{
 564        struct hns3_vport_vtag_rx_cfg_cmd *req;
 565        struct hns3_hw *hw = &hns->hw;
 566        struct hns3_cmd_desc desc;
 567        uint16_t vport_id;
 568        uint8_t bitmap;
 569        int ret;
 570
 571        hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_PORT_RX_CFG, false);
 572
 573        req = (struct hns3_vport_vtag_rx_cfg_cmd *)desc.data;
 574        hns3_set_bit(req->vport_vlan_cfg, HNS3_REM_TAG1_EN_B,
 575                     vcfg->strip_tag1_en ? 1 : 0);
 576        hns3_set_bit(req->vport_vlan_cfg, HNS3_REM_TAG2_EN_B,
 577                     vcfg->strip_tag2_en ? 1 : 0);
 578        hns3_set_bit(req->vport_vlan_cfg, HNS3_SHOW_TAG1_EN_B,
 579                     vcfg->vlan1_vlan_prionly ? 1 : 0);
 580        hns3_set_bit(req->vport_vlan_cfg, HNS3_SHOW_TAG2_EN_B,
 581                     vcfg->vlan2_vlan_prionly ? 1 : 0);
 582
 583        /* firmwall will ignore this configuration for PCI_REVISION_ID_HIP08 */
 584        hns3_set_bit(req->vport_vlan_cfg, HNS3_DISCARD_TAG1_EN_B,
 585                     vcfg->strip_tag1_discard_en ? 1 : 0);
 586        hns3_set_bit(req->vport_vlan_cfg, HNS3_DISCARD_TAG2_EN_B,
 587                     vcfg->strip_tag2_discard_en ? 1 : 0);
 588        /*
 589         * In current version VF is not supported when PF is driven by DPDK
 590         * driver, just need to configure parameters for PF vport.
 591         */
 592        vport_id = HNS3_PF_FUNC_ID;
 593        req->vf_offset = vport_id / HNS3_VF_NUM_PER_CMD;
 594        bitmap = 1 << (vport_id % HNS3_VF_NUM_PER_BYTE);
 595        req->vf_bitmap[req->vf_offset] = bitmap;
 596
 597        ret = hns3_cmd_send(hw, &desc, 1);
 598        if (ret)
 599                hns3_err(hw, "Send port rxvlan cfg command fail, ret =%d", ret);
 600        return ret;
 601}
 602
/* Cache the last RX vlan tag configuration written to hardware in the PF. */
static void
hns3_update_rx_offload_cfg(struct hns3_adapter *hns,
			   struct hns3_rx_vtag_cfg *vcfg)
{
	struct hns3_pf *pf = &hns->pf;
	memcpy(&pf->vtag_config.rx_vcfg, vcfg, sizeof(pf->vtag_config.rx_vcfg));
}
 610
/* Cache the last TX vlan tag configuration written to hardware in the PF. */
static void
hns3_update_tx_offload_cfg(struct hns3_adapter *hns,
			   struct hns3_tx_vtag_cfg *vcfg)
{
	struct hns3_pf *pf = &hns->pf;
	memcpy(&pf->vtag_config.tx_vcfg, vcfg, sizeof(pf->vtag_config.tx_vcfg));
}
 618
 619static int
 620hns3_en_hw_strip_rxvtag(struct hns3_adapter *hns, bool enable)
 621{
 622        struct hns3_rx_vtag_cfg rxvlan_cfg;
 623        struct hns3_hw *hw = &hns->hw;
 624        int ret;
 625
 626        if (hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_DISABLE) {
 627                rxvlan_cfg.strip_tag1_en = false;
 628                rxvlan_cfg.strip_tag2_en = enable;
 629                rxvlan_cfg.strip_tag2_discard_en = false;
 630        } else {
 631                rxvlan_cfg.strip_tag1_en = enable;
 632                rxvlan_cfg.strip_tag2_en = true;
 633                rxvlan_cfg.strip_tag2_discard_en = true;
 634        }
 635
 636        rxvlan_cfg.strip_tag1_discard_en = false;
 637        rxvlan_cfg.vlan1_vlan_prionly = false;
 638        rxvlan_cfg.vlan2_vlan_prionly = false;
 639        rxvlan_cfg.rx_vlan_offload_en = enable;
 640
 641        ret = hns3_set_vlan_rx_offload_cfg(hns, &rxvlan_cfg);
 642        if (ret) {
 643                hns3_err(hw, "%s strip rx vtag failed, ret = %d.",
 644                                enable ? "enable" : "disable", ret);
 645                return ret;
 646        }
 647
 648        hns3_update_rx_offload_cfg(hns, &rxvlan_cfg);
 649
 650        return ret;
 651}
 652
 653static int
 654hns3_set_vlan_filter_ctrl(struct hns3_hw *hw, uint8_t vlan_type,
 655                          uint8_t fe_type, bool filter_en, uint8_t vf_id)
 656{
 657        struct hns3_vlan_filter_ctrl_cmd *req;
 658        struct hns3_cmd_desc desc;
 659        int ret;
 660
 661        hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_FILTER_CTRL, false);
 662
 663        req = (struct hns3_vlan_filter_ctrl_cmd *)desc.data;
 664        req->vlan_type = vlan_type;
 665        req->vlan_fe = filter_en ? fe_type : 0;
 666        req->vf_id = vf_id;
 667
 668        ret = hns3_cmd_send(hw, &desc, 1);
 669        if (ret)
 670                hns3_err(hw, "set vlan filter fail, ret =%d", ret);
 671
 672        return ret;
 673}
 674
 675static int
 676hns3_vlan_filter_init(struct hns3_adapter *hns)
 677{
 678        struct hns3_hw *hw = &hns->hw;
 679        int ret;
 680
 681        ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_VF,
 682                                        HNS3_FILTER_FE_EGRESS, false,
 683                                        HNS3_PF_FUNC_ID);
 684        if (ret) {
 685                hns3_err(hw, "failed to init vf vlan filter, ret = %d", ret);
 686                return ret;
 687        }
 688
 689        ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_PORT,
 690                                        HNS3_FILTER_FE_INGRESS, false,
 691                                        HNS3_PF_FUNC_ID);
 692        if (ret)
 693                hns3_err(hw, "failed to init port vlan filter, ret = %d", ret);
 694
 695        return ret;
 696}
 697
 698static int
 699hns3_enable_vlan_filter(struct hns3_adapter *hns, bool enable)
 700{
 701        struct hns3_hw *hw = &hns->hw;
 702        int ret;
 703
 704        ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_PORT,
 705                                        HNS3_FILTER_FE_INGRESS, enable,
 706                                        HNS3_PF_FUNC_ID);
 707        if (ret)
 708                hns3_err(hw, "failed to %s port vlan filter, ret = %d",
 709                         enable ? "enable" : "disable", ret);
 710
 711        return ret;
 712}
 713
 714static int
 715hns3_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 716{
 717        struct hns3_adapter *hns = dev->data->dev_private;
 718        struct hns3_hw *hw = &hns->hw;
 719        struct rte_eth_rxmode *rxmode;
 720        unsigned int tmp_mask;
 721        bool enable;
 722        int ret = 0;
 723
 724        rte_spinlock_lock(&hw->lock);
 725        rxmode = &dev->data->dev_conf.rxmode;
 726        tmp_mask = (unsigned int)mask;
 727        if (tmp_mask & ETH_VLAN_FILTER_MASK) {
 728                /* ignore vlan filter configuration during promiscuous mode */
 729                if (!dev->data->promiscuous) {
 730                        /* Enable or disable VLAN filter */
 731                        enable = rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER ?
 732                                 true : false;
 733
 734                        ret = hns3_enable_vlan_filter(hns, enable);
 735                        if (ret) {
 736                                rte_spinlock_unlock(&hw->lock);
 737                                hns3_err(hw, "failed to %s rx filter, ret = %d",
 738                                         enable ? "enable" : "disable", ret);
 739                                return ret;
 740                        }
 741                }
 742        }
 743
 744        if (tmp_mask & ETH_VLAN_STRIP_MASK) {
 745                /* Enable or disable VLAN stripping */
 746                enable = rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP ?
 747                    true : false;
 748
 749                ret = hns3_en_hw_strip_rxvtag(hns, enable);
 750                if (ret) {
 751                        rte_spinlock_unlock(&hw->lock);
 752                        hns3_err(hw, "failed to %s rx strip, ret = %d",
 753                                 enable ? "enable" : "disable", ret);
 754                        return ret;
 755                }
 756        }
 757
 758        rte_spinlock_unlock(&hw->lock);
 759
 760        return ret;
 761}
 762
/*
 * Write the Tx VLAN tag handling rules in @vcfg (tag acceptance, tag
 * insertion enables and the default tags) to hardware for the PF vport.
 *
 * Returns 0 on success or a negative error code from the command queue.
 */
static int
hns3_set_vlan_tx_offload_cfg(struct hns3_adapter *hns,
                             struct hns3_tx_vtag_cfg *vcfg)
{
        struct hns3_vport_vtag_tx_cfg_cmd *req;
        struct hns3_cmd_desc desc;
        struct hns3_hw *hw = &hns->hw;
        uint16_t vport_id;
        uint8_t bitmap;
        int ret;

        hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_PORT_TX_CFG, false);

        req = (struct hns3_vport_vtag_tx_cfg_cmd *)desc.data;
        req->def_vlan_tag1 = vcfg->default_tag1;
        req->def_vlan_tag2 = vcfg->default_tag2;
        /* Map each boolean in @vcfg onto its bit in vport_vlan_cfg. */
        hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_TAG1_B,
                     vcfg->accept_tag1 ? 1 : 0);
        hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_UNTAG1_B,
                     vcfg->accept_untag1 ? 1 : 0);
        hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_TAG2_B,
                     vcfg->accept_tag2 ? 1 : 0);
        hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_UNTAG2_B,
                     vcfg->accept_untag2 ? 1 : 0);
        hns3_set_bit(req->vport_vlan_cfg, HNS3_PORT_INS_TAG1_EN_B,
                     vcfg->insert_tag1_en ? 1 : 0);
        hns3_set_bit(req->vport_vlan_cfg, HNS3_PORT_INS_TAG2_EN_B,
                     vcfg->insert_tag2_en ? 1 : 0);
        hns3_set_bit(req->vport_vlan_cfg, HNS3_CFG_NIC_ROCE_SEL_B, 0);

        /* firmware will ignore this configuration for PCI_REVISION_ID_HIP08 */
        hns3_set_bit(req->vport_vlan_cfg, HNS3_TAG_SHIFT_MODE_EN_B,
                     vcfg->tag_shift_mode_en ? 1 : 0);

        /*
         * In current version VF is not supported when PF is driven by DPDK
         * driver, just need to configure parameters for PF vport.
         */
        vport_id = HNS3_PF_FUNC_ID;
        req->vf_offset = vport_id / HNS3_VF_NUM_PER_CMD;
        bitmap = 1 << (vport_id % HNS3_VF_NUM_PER_BYTE);
        req->vf_bitmap[req->vf_offset] = bitmap;

        ret = hns3_cmd_send(hw, &desc, 1);
        if (ret)
                hns3_err(hw, "Send port txvlan cfg command fail, ret =%d", ret);

        return ret;
}
 812
 813static int
 814hns3_vlan_txvlan_cfg(struct hns3_adapter *hns, uint16_t port_base_vlan_state,
 815                     uint16_t pvid)
 816{
 817        struct hns3_hw *hw = &hns->hw;
 818        struct hns3_tx_vtag_cfg txvlan_cfg;
 819        int ret;
 820
 821        if (port_base_vlan_state == HNS3_PORT_BASE_VLAN_DISABLE) {
 822                txvlan_cfg.accept_tag1 = true;
 823                txvlan_cfg.insert_tag1_en = false;
 824                txvlan_cfg.default_tag1 = 0;
 825        } else {
 826                txvlan_cfg.accept_tag1 =
 827                        hw->vlan_mode == HNS3_HW_SHIFT_AND_DISCARD_MODE;
 828                txvlan_cfg.insert_tag1_en = true;
 829                txvlan_cfg.default_tag1 = pvid;
 830        }
 831
 832        txvlan_cfg.accept_untag1 = true;
 833        txvlan_cfg.accept_tag2 = true;
 834        txvlan_cfg.accept_untag2 = true;
 835        txvlan_cfg.insert_tag2_en = false;
 836        txvlan_cfg.default_tag2 = 0;
 837        txvlan_cfg.tag_shift_mode_en = true;
 838
 839        ret = hns3_set_vlan_tx_offload_cfg(hns, &txvlan_cfg);
 840        if (ret) {
 841                hns3_err(hw, "pf vlan set pvid failed, pvid =%u ,ret =%d", pvid,
 842                         ret);
 843                return ret;
 844        }
 845
 846        hns3_update_tx_offload_cfg(hns, &txvlan_cfg);
 847        return ret;
 848}
 849
 850
 851static void
 852hns3_rm_all_vlan_table(struct hns3_adapter *hns, bool is_del_list)
 853{
 854        struct hns3_user_vlan_table *vlan_entry;
 855        struct hns3_pf *pf = &hns->pf;
 856
 857        LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
 858                if (vlan_entry->hd_tbl_status) {
 859                        hns3_set_port_vlan_filter(hns, vlan_entry->vlan_id, 0);
 860                        vlan_entry->hd_tbl_status = false;
 861                }
 862        }
 863
 864        if (is_del_list) {
 865                vlan_entry = LIST_FIRST(&pf->vlan_list);
 866                while (vlan_entry) {
 867                        LIST_REMOVE(vlan_entry, next);
 868                        rte_free(vlan_entry);
 869                        vlan_entry = LIST_FIRST(&pf->vlan_list);
 870                }
 871        }
 872}
 873
 874static void
 875hns3_add_all_vlan_table(struct hns3_adapter *hns)
 876{
 877        struct hns3_user_vlan_table *vlan_entry;
 878        struct hns3_pf *pf = &hns->pf;
 879
 880        LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
 881                if (!vlan_entry->hd_tbl_status) {
 882                        hns3_set_port_vlan_filter(hns, vlan_entry->vlan_id, 1);
 883                        vlan_entry->hd_tbl_status = true;
 884                }
 885        }
 886}
 887
 888static void
 889hns3_remove_all_vlan_table(struct hns3_adapter *hns)
 890{
 891        struct hns3_hw *hw = &hns->hw;
 892        int ret;
 893
 894        hns3_rm_all_vlan_table(hns, true);
 895        if (hw->port_base_vlan_cfg.pvid != HNS3_INVALID_PVID) {
 896                ret = hns3_set_port_vlan_filter(hns,
 897                                                hw->port_base_vlan_cfg.pvid, 0);
 898                if (ret) {
 899                        hns3_err(hw, "Failed to remove all vlan table, ret =%d",
 900                                 ret);
 901                        return;
 902                }
 903        }
 904}
 905
 906static int
 907hns3_update_vlan_filter_entries(struct hns3_adapter *hns,
 908                        uint16_t port_base_vlan_state, uint16_t new_pvid)
 909{
 910        struct hns3_hw *hw = &hns->hw;
 911        uint16_t old_pvid;
 912        int ret;
 913
 914        if (port_base_vlan_state == HNS3_PORT_BASE_VLAN_ENABLE) {
 915                old_pvid = hw->port_base_vlan_cfg.pvid;
 916                if (old_pvid != HNS3_INVALID_PVID) {
 917                        ret = hns3_set_port_vlan_filter(hns, old_pvid, 0);
 918                        if (ret) {
 919                                hns3_err(hw, "failed to remove old pvid %u, "
 920                                                "ret = %d", old_pvid, ret);
 921                                return ret;
 922                        }
 923                }
 924
 925                hns3_rm_all_vlan_table(hns, false);
 926                ret = hns3_set_port_vlan_filter(hns, new_pvid, 1);
 927                if (ret) {
 928                        hns3_err(hw, "failed to add new pvid %u, ret = %d",
 929                                        new_pvid, ret);
 930                        return ret;
 931                }
 932        } else {
 933                ret = hns3_set_port_vlan_filter(hns, new_pvid, 0);
 934                if (ret) {
 935                        hns3_err(hw, "failed to remove pvid %u, ret = %d",
 936                                        new_pvid, ret);
 937                        return ret;
 938                }
 939
 940                hns3_add_all_vlan_table(hns);
 941        }
 942        return 0;
 943}
 944
 945static int
 946hns3_en_pvid_strip(struct hns3_adapter *hns, int on)
 947{
 948        struct hns3_rx_vtag_cfg *old_cfg = &hns->pf.vtag_config.rx_vcfg;
 949        struct hns3_rx_vtag_cfg rx_vlan_cfg;
 950        bool rx_strip_en;
 951        int ret;
 952
 953        rx_strip_en = old_cfg->rx_vlan_offload_en;
 954        if (on) {
 955                rx_vlan_cfg.strip_tag1_en = rx_strip_en;
 956                rx_vlan_cfg.strip_tag2_en = true;
 957                rx_vlan_cfg.strip_tag2_discard_en = true;
 958        } else {
 959                rx_vlan_cfg.strip_tag1_en = false;
 960                rx_vlan_cfg.strip_tag2_en = rx_strip_en;
 961                rx_vlan_cfg.strip_tag2_discard_en = false;
 962        }
 963        rx_vlan_cfg.strip_tag1_discard_en = false;
 964        rx_vlan_cfg.vlan1_vlan_prionly = false;
 965        rx_vlan_cfg.vlan2_vlan_prionly = false;
 966        rx_vlan_cfg.rx_vlan_offload_en = old_cfg->rx_vlan_offload_en;
 967
 968        ret = hns3_set_vlan_rx_offload_cfg(hns, &rx_vlan_cfg);
 969        if (ret)
 970                return ret;
 971
 972        hns3_update_rx_offload_cfg(hns, &rx_vlan_cfg);
 973        return ret;
 974}
 975
/*
 * Apply or clear the port based VLAN (PVID) configuration.
 * @pvid: the VLAN id to use as PVID; HNS3_INVALID_PVID means the VLAN
 *        filter table needs no update.
 * @on: non-zero enables PVID insertion/strip, zero disables it.
 *
 * Configures Tx tag insertion first, then Rx PVID strip, then the VLAN
 * filter entries; if a later step fails, the earlier steps are rolled
 * back to the state still cached in hw->port_base_vlan_cfg.
 *
 * Returns 0 on success or a negative error code.
 */
static int
hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid, int on)
{
        struct hns3_hw *hw = &hns->hw;
        uint16_t port_base_vlan_state;
        int ret, err;

        /* Disabling a PVID other than the currently set one is a no-op. */
        if (on == 0 && pvid != hw->port_base_vlan_cfg.pvid) {
                if (hw->port_base_vlan_cfg.pvid != HNS3_INVALID_PVID)
                        hns3_warn(hw, "Invalid operation! As current pvid set "
                                  "is %u, disable pvid %u is invalid",
                                  hw->port_base_vlan_cfg.pvid, pvid);
                return 0;
        }

        port_base_vlan_state = on ? HNS3_PORT_BASE_VLAN_ENABLE :
                                    HNS3_PORT_BASE_VLAN_DISABLE;
        ret = hns3_vlan_txvlan_cfg(hns, port_base_vlan_state, pvid);
        if (ret) {
                hns3_err(hw, "failed to config tx vlan for pvid, ret = %d",
                         ret);
                return ret;
        }

        ret = hns3_en_pvid_strip(hns, on);
        if (ret) {
                hns3_err(hw, "failed to config rx vlan strip for pvid, "
                         "ret = %d", ret);
                goto pvid_vlan_strip_fail;
        }

        if (pvid == HNS3_INVALID_PVID)
                goto out;
        ret = hns3_update_vlan_filter_entries(hns, port_base_vlan_state, pvid);
        if (ret) {
                hns3_err(hw, "failed to update vlan filter entries, ret = %d",
                         ret);
                goto vlan_filter_set_fail;
        }

out:
        /* Cache the new state only after every step succeeded. */
        hw->port_base_vlan_cfg.state = port_base_vlan_state;
        hw->port_base_vlan_cfg.pvid = on ? pvid : HNS3_INVALID_PVID;
        return ret;

vlan_filter_set_fail:
        /* Roll Rx strip back to the still-cached previous state. */
        err = hns3_en_pvid_strip(hns, hw->port_base_vlan_cfg.state ==
                                        HNS3_PORT_BASE_VLAN_ENABLE);
        if (err)
                hns3_err(hw, "fail to rollback pvid strip, ret = %d", err);

pvid_vlan_strip_fail:
        /* Roll Tx tag insertion back to the still-cached previous state. */
        err = hns3_vlan_txvlan_cfg(hns, hw->port_base_vlan_cfg.state,
                                        hw->port_base_vlan_cfg.pvid);
        if (err)
                hns3_err(hw, "fail to rollback txvlan status, ret = %d", err);

        return ret;
}
1035
1036static int
1037hns3_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
1038{
1039        struct hns3_adapter *hns = dev->data->dev_private;
1040        struct hns3_hw *hw = &hns->hw;
1041        bool pvid_en_state_change;
1042        uint16_t pvid_state;
1043        int ret;
1044
1045        if (pvid > RTE_ETHER_MAX_VLAN_ID) {
1046                hns3_err(hw, "Invalid vlan_id = %u > %d", pvid,
1047                         RTE_ETHER_MAX_VLAN_ID);
1048                return -EINVAL;
1049        }
1050
1051        /*
1052         * If PVID configuration state change, should refresh the PVID
1053         * configuration state in struct hns3_tx_queue/hns3_rx_queue.
1054         */
1055        pvid_state = hw->port_base_vlan_cfg.state;
1056        if ((on && pvid_state == HNS3_PORT_BASE_VLAN_ENABLE) ||
1057            (!on && pvid_state == HNS3_PORT_BASE_VLAN_DISABLE))
1058                pvid_en_state_change = false;
1059        else
1060                pvid_en_state_change = true;
1061
1062        rte_spinlock_lock(&hw->lock);
1063        ret = hns3_vlan_pvid_configure(hns, pvid, on);
1064        rte_spinlock_unlock(&hw->lock);
1065        if (ret)
1066                return ret;
1067        /*
1068         * Only in HNS3_SW_SHIFT_AND_MODE the PVID related operation in Tx/Rx
1069         * need be processed by PMD driver.
1070         */
1071        if (pvid_en_state_change &&
1072            hw->vlan_mode == HNS3_SW_SHIFT_AND_DISCARD_MODE)
1073                hns3_update_all_queues_pvid_proc_en(hw);
1074
1075        return 0;
1076}
1077
1078static int
1079hns3_default_vlan_config(struct hns3_adapter *hns)
1080{
1081        struct hns3_hw *hw = &hns->hw;
1082        int ret;
1083
1084        /*
1085         * When vlan filter is enabled, hardware regards packets without vlan
1086         * as packets with vlan 0. Therefore, if vlan 0 is not in the vlan
1087         * table, packets without vlan won't be received. So, add vlan 0 as
1088         * the default vlan.
1089         */
1090        ret = hns3_vlan_filter_configure(hns, 0, 1);
1091        if (ret)
1092                hns3_err(hw, "default vlan 0 config failed, ret =%d", ret);
1093        return ret;
1094}
1095
/*
 * Initialize the PF VLAN configuration: VLAN filter, inner TPID, and
 * (outside of a reset) the PVID/Rx-strip defaults and the implicit
 * default VLAN 0.
 *
 * Returns 0 on success or a negative error code.
 */
static int
hns3_init_vlan_config(struct hns3_adapter *hns)
{
        struct hns3_hw *hw = &hns->hw;
        int ret;

        /*
         * This function can be called in the initialization and reset process,
         * when in reset process, it means that hardware had been reseted
         * successfully and we need to restore the hardware configuration to
         * ensure that the hardware configuration remains unchanged before and
         * after reset.
         */
        if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
                hw->port_base_vlan_cfg.state = HNS3_PORT_BASE_VLAN_DISABLE;
                hw->port_base_vlan_cfg.pvid = HNS3_INVALID_PVID;
        }

        ret = hns3_vlan_filter_init(hns);
        if (ret) {
                hns3_err(hw, "vlan init fail in pf, ret =%d", ret);
                return ret;
        }

        ret = hns3_vlan_tpid_configure(hns, ETH_VLAN_TYPE_INNER,
                                       RTE_ETHER_TYPE_VLAN);
        if (ret) {
                hns3_err(hw, "tpid set fail in pf, ret =%d", ret);
                return ret;
        }

        /*
         * When in the reinit dev stage of the reset process, the following
         * vlan-related configurations may differ from those at initialization,
         * we will restore configurations to hardware in hns3_restore_vlan_table
         * and hns3_restore_vlan_conf later.
         */
        if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
                ret = hns3_vlan_pvid_configure(hns, HNS3_INVALID_PVID, 0);
                if (ret) {
                        hns3_err(hw, "pvid set fail in pf, ret =%d", ret);
                        return ret;
                }

                ret = hns3_en_hw_strip_rxvtag(hns, false);
                if (ret) {
                        hns3_err(hw, "rx strip configure fail in pf, ret =%d",
                                 ret);
                        return ret;
                }
        }

        return hns3_default_vlan_config(hns);
}
1150
1151static int
1152hns3_restore_vlan_conf(struct hns3_adapter *hns)
1153{
1154        struct hns3_pf *pf = &hns->pf;
1155        struct hns3_hw *hw = &hns->hw;
1156        uint64_t offloads;
1157        bool enable;
1158        int ret;
1159
1160        if (!hw->data->promiscuous) {
1161                /* restore vlan filter states */
1162                offloads = hw->data->dev_conf.rxmode.offloads;
1163                enable = offloads & DEV_RX_OFFLOAD_VLAN_FILTER ? true : false;
1164                ret = hns3_enable_vlan_filter(hns, enable);
1165                if (ret) {
1166                        hns3_err(hw, "failed to restore vlan rx filter conf, "
1167                                 "ret = %d", ret);
1168                        return ret;
1169                }
1170        }
1171
1172        ret = hns3_set_vlan_rx_offload_cfg(hns, &pf->vtag_config.rx_vcfg);
1173        if (ret) {
1174                hns3_err(hw, "failed to restore vlan rx conf, ret = %d", ret);
1175                return ret;
1176        }
1177
1178        ret = hns3_set_vlan_tx_offload_cfg(hns, &pf->vtag_config.tx_vcfg);
1179        if (ret)
1180                hns3_err(hw, "failed to restore vlan tx conf, ret = %d", ret);
1181
1182        return ret;
1183}
1184
1185static int
1186hns3_dev_configure_vlan(struct rte_eth_dev *dev)
1187{
1188        struct hns3_adapter *hns = dev->data->dev_private;
1189        struct rte_eth_dev_data *data = dev->data;
1190        struct rte_eth_txmode *txmode;
1191        struct hns3_hw *hw = &hns->hw;
1192        int mask;
1193        int ret;
1194
1195        txmode = &data->dev_conf.txmode;
1196        if (txmode->hw_vlan_reject_tagged || txmode->hw_vlan_reject_untagged)
1197                hns3_warn(hw,
1198                          "hw_vlan_reject_tagged or hw_vlan_reject_untagged "
1199                          "configuration is not supported! Ignore these two "
1200                          "parameters: hw_vlan_reject_tagged(%u), "
1201                          "hw_vlan_reject_untagged(%u)",
1202                          txmode->hw_vlan_reject_tagged,
1203                          txmode->hw_vlan_reject_untagged);
1204
1205        /* Apply vlan offload setting */
1206        mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK;
1207        ret = hns3_vlan_offload_set(dev, mask);
1208        if (ret) {
1209                hns3_err(hw, "dev config rx vlan offload failed, ret = %d",
1210                         ret);
1211                return ret;
1212        }
1213
1214        /*
1215         * If pvid config is not set in rte_eth_conf, driver needn't to set
1216         * VLAN pvid related configuration to hardware.
1217         */
1218        if (txmode->pvid == 0 && txmode->hw_vlan_insert_pvid == 0)
1219                return 0;
1220
1221        /* Apply pvid setting */
1222        ret = hns3_vlan_pvid_set(dev, txmode->pvid,
1223                                 txmode->hw_vlan_insert_pvid);
1224        if (ret)
1225                hns3_err(hw, "dev config vlan pvid(%u) failed, ret = %d",
1226                         txmode->pvid, ret);
1227
1228        return ret;
1229}
1230
/*
 * Program the generic TSO MSS limits (@tso_mss_min/@tso_mss_max) into
 * hardware. Returns 0 on success or a negative command-queue error.
 *
 * NOTE(review): HNS3_TSO_MSS_MIN_M/_S is also used when packing
 * tso_mss_max; the min and max fields appear to share the same
 * in-field bit layout (the kernel hclge driver packs them the same
 * way) -- confirm against the hardware spec before "fixing" this.
 */
static int
hns3_config_tso(struct hns3_hw *hw, unsigned int tso_mss_min,
                unsigned int tso_mss_max)
{
        struct hns3_cfg_tso_status_cmd *req;
        struct hns3_cmd_desc desc;
        uint16_t tso_mss;

        hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TSO_GENERIC_CONFIG, false);

        req = (struct hns3_cfg_tso_status_cmd *)desc.data;

        tso_mss = 0;
        hns3_set_field(tso_mss, HNS3_TSO_MSS_MIN_M, HNS3_TSO_MSS_MIN_S,
                       tso_mss_min);
        req->tso_mss_min = rte_cpu_to_le_16(tso_mss);

        tso_mss = 0;
        hns3_set_field(tso_mss, HNS3_TSO_MSS_MIN_M, HNS3_TSO_MSS_MIN_S,
                       tso_mss_max);
        req->tso_mss_max = rte_cpu_to_le_16(tso_mss);

        return hns3_cmd_send(hw, &desc, 1);
}
1255
/*
 * Allocate or free UMV (unicast MAC vlan table) space in hardware.
 * @space_size: number of entries to allocate or free.
 * @allocated_size: when allocating and non-NULL, receives the size the
 *                  firmware actually granted.
 * @is_alloc: true to allocate, false to free.
 *
 * Returns 0 on success or a negative command-queue error.
 */
static int
hns3_set_umv_space(struct hns3_hw *hw, uint16_t space_size,
                   uint16_t *allocated_size, bool is_alloc)
{
        struct hns3_umv_spc_alc_cmd *req;
        struct hns3_cmd_desc desc;
        int ret;

        req = (struct hns3_umv_spc_alc_cmd *)desc.data;
        hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_ALLOCATE, false);
        /*
         * NOTE(review): the bit is written 0 for alloc and 1 for free, so
         * HNS3_UMV_SPC_ALC_B presumably means "free" -- confirm in the
         * command spec.
         */
        hns3_set_bit(req->allocate, HNS3_UMV_SPC_ALC_B, is_alloc ? 0 : 1);
        req->space_size = rte_cpu_to_le_32(space_size);

        ret = hns3_cmd_send(hw, &desc, 1);
        if (ret) {
                PMD_INIT_LOG(ERR, "%s umv space failed for cmd_send, ret =%d",
                             is_alloc ? "allocate" : "free", ret);
                return ret;
        }

        /* Firmware reports the granted size in the second data word. */
        if (is_alloc && allocated_size)
                *allocated_size = rte_le_to_cpu_32(desc.data[1]);

        return 0;
}
1281
1282static int
1283hns3_init_umv_space(struct hns3_hw *hw)
1284{
1285        struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
1286        struct hns3_pf *pf = &hns->pf;
1287        uint16_t allocated_size = 0;
1288        int ret;
1289
1290        ret = hns3_set_umv_space(hw, pf->wanted_umv_size, &allocated_size,
1291                                 true);
1292        if (ret)
1293                return ret;
1294
1295        if (allocated_size < pf->wanted_umv_size)
1296                PMD_INIT_LOG(WARNING, "Alloc umv space failed, want %u, get %u",
1297                             pf->wanted_umv_size, allocated_size);
1298
1299        pf->max_umv_size = (!!allocated_size) ? allocated_size :
1300                                                pf->wanted_umv_size;
1301        pf->used_umv_size = 0;
1302        return 0;
1303}
1304
1305static int
1306hns3_uninit_umv_space(struct hns3_hw *hw)
1307{
1308        struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
1309        struct hns3_pf *pf = &hns->pf;
1310        int ret;
1311
1312        if (pf->max_umv_size == 0)
1313                return 0;
1314
1315        ret = hns3_set_umv_space(hw, pf->max_umv_size, NULL, false);
1316        if (ret)
1317                return ret;
1318
1319        pf->max_umv_size = 0;
1320
1321        return 0;
1322}
1323
1324static bool
1325hns3_is_umv_space_full(struct hns3_hw *hw)
1326{
1327        struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
1328        struct hns3_pf *pf = &hns->pf;
1329        bool is_full;
1330
1331        is_full = (pf->used_umv_size >= pf->max_umv_size);
1332
1333        return is_full;
1334}
1335
1336static void
1337hns3_update_umv_space(struct hns3_hw *hw, bool is_free)
1338{
1339        struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
1340        struct hns3_pf *pf = &hns->pf;
1341
1342        if (is_free) {
1343                if (pf->used_umv_size > 0)
1344                        pf->used_umv_size--;
1345        } else
1346                pf->used_umv_size++;
1347}
1348
1349static void
1350hns3_prepare_mac_addr(struct hns3_mac_vlan_tbl_entry_cmd *new_req,
1351                      const uint8_t *addr, bool is_mc)
1352{
1353        const unsigned char *mac_addr = addr;
1354        uint32_t high_val = ((uint32_t)mac_addr[3] << 24) |
1355                            ((uint32_t)mac_addr[2] << 16) |
1356                            ((uint32_t)mac_addr[1] << 8) |
1357                            (uint32_t)mac_addr[0];
1358        uint32_t low_val = ((uint32_t)mac_addr[5] << 8) | (uint32_t)mac_addr[4];
1359
1360        hns3_set_bit(new_req->flags, HNS3_MAC_VLAN_BIT0_EN_B, 1);
1361        if (is_mc) {
1362                hns3_set_bit(new_req->entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
1363                hns3_set_bit(new_req->entry_type, HNS3_MAC_VLAN_BIT1_EN_B, 1);
1364                hns3_set_bit(new_req->mc_mac_en, HNS3_MAC_VLAN_BIT0_EN_B, 1);
1365        }
1366
1367        new_req->mac_addr_hi32 = rte_cpu_to_le_32(high_val);
1368        new_req->mac_addr_lo16 = rte_cpu_to_le_16(low_val & 0xffff);
1369}
1370
1371static int
1372hns3_get_mac_vlan_cmd_status(struct hns3_hw *hw, uint16_t cmdq_resp,
1373                             uint8_t resp_code,
1374                             enum hns3_mac_vlan_tbl_opcode op)
1375{
1376        if (cmdq_resp) {
1377                hns3_err(hw, "cmdq execute failed for get_mac_vlan_cmd_status,status=%u",
1378                         cmdq_resp);
1379                return -EIO;
1380        }
1381
1382        if (op == HNS3_MAC_VLAN_ADD) {
1383                if (resp_code == 0 || resp_code == 1) {
1384                        return 0;
1385                } else if (resp_code == HNS3_ADD_UC_OVERFLOW) {
1386                        hns3_err(hw, "add mac addr failed for uc_overflow");
1387                        return -ENOSPC;
1388                } else if (resp_code == HNS3_ADD_MC_OVERFLOW) {
1389                        hns3_err(hw, "add mac addr failed for mc_overflow");
1390                        return -ENOSPC;
1391                }
1392
1393                hns3_err(hw, "add mac addr failed for undefined, code=%u",
1394                         resp_code);
1395                return -EIO;
1396        } else if (op == HNS3_MAC_VLAN_REMOVE) {
1397                if (resp_code == 0) {
1398                        return 0;
1399                } else if (resp_code == 1) {
1400                        hns3_dbg(hw, "remove mac addr failed for miss");
1401                        return -ENOENT;
1402                }
1403
1404                hns3_err(hw, "remove mac addr failed for undefined, code=%u",
1405                         resp_code);
1406                return -EIO;
1407        } else if (op == HNS3_MAC_VLAN_LKUP) {
1408                if (resp_code == 0) {
1409                        return 0;
1410                } else if (resp_code == 1) {
1411                        hns3_dbg(hw, "lookup mac addr failed for miss");
1412                        return -ENOENT;
1413                }
1414
1415                hns3_err(hw, "lookup mac addr failed for undefined, code=%u",
1416                         resp_code);
1417                return -EIO;
1418        }
1419
1420        hns3_err(hw, "unknown opcode for get_mac_vlan_cmd_status, opcode=%u",
1421                 op);
1422
1423        return -EINVAL;
1424}
1425
/*
 * Look up @req's MAC address in the hardware MAC_VLAN table.
 * A multicast lookup (@is_mc) uses a chain of
 * HNS3_MC_MAC_VLAN_ADD_DESC_NUM descriptors, a unicast lookup needs
 * one; @desc must provide room for that many descriptors.
 *
 * Returns 0 when the entry exists, -ENOENT on miss, or another
 * negative error code on command failure.
 */
static int
hns3_lookup_mac_vlan_tbl(struct hns3_hw *hw,
                         struct hns3_mac_vlan_tbl_entry_cmd *req,
                         struct hns3_cmd_desc *desc, bool is_mc)
{
        uint8_t resp_code;
        uint16_t retval;
        int ret;

        hns3_cmd_setup_basic_desc(&desc[0], HNS3_OPC_MAC_VLAN_ADD, true);
        if (is_mc) {
                /* Chain the descriptors: FLAG_NEXT set on all but the last. */
                desc[0].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
                memcpy(desc[0].data, req,
                           sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
                hns3_cmd_setup_basic_desc(&desc[1], HNS3_OPC_MAC_VLAN_ADD,
                                          true);
                desc[1].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
                hns3_cmd_setup_basic_desc(&desc[2], HNS3_OPC_MAC_VLAN_ADD,
                                          true);
                ret = hns3_cmd_send(hw, desc, HNS3_MC_MAC_VLAN_ADD_DESC_NUM);
        } else {
                memcpy(desc[0].data, req,
                       sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
                ret = hns3_cmd_send(hw, desc, 1);
        }
        if (ret) {
                hns3_err(hw, "lookup mac addr failed for cmd_send, ret =%d.",
                         ret);
                return ret;
        }
        /* resp_code is byte 1 of the first data word. */
        resp_code = (rte_le_to_cpu_32(desc[0].data[0]) >> 8) & 0xff;
        retval = rte_le_to_cpu_16(desc[0].retval);

        return hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
                                            HNS3_MAC_VLAN_LKUP);
}
1462
/*
 * Add the MAC_VLAN table entry described by @req to hardware.
 * When @mc_desc is NULL, a single-descriptor (unicast) add is issued;
 * otherwise the three multicast descriptors in @mc_desc (prepared by a
 * prior lookup) are reused for the add.
 *
 * Returns 0 on success, or a negative error code from the command send
 * or from decoding the hardware response.
 */
static int
hns3_add_mac_vlan_tbl(struct hns3_hw *hw,
                      struct hns3_mac_vlan_tbl_entry_cmd *req,
                      struct hns3_cmd_desc *mc_desc)
{
        uint8_t resp_code;
        uint16_t retval;
        int cfg_status;
        int ret;

        if (mc_desc == NULL) {
                struct hns3_cmd_desc desc;

                hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_ADD, false);
                memcpy(desc.data, req,
                       sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
                ret = hns3_cmd_send(hw, &desc, 1);
                resp_code = (rte_le_to_cpu_32(desc.data[0]) >> 8) & 0xff;
                retval = rte_le_to_cpu_16(desc.retval);

                cfg_status = hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
                                                          HNS3_MAC_VLAN_ADD);
        } else {
                /* Re-link the chain: FLAG_NEXT on all but the last desc. */
                hns3_cmd_reuse_desc(&mc_desc[0], false);
                mc_desc[0].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
                hns3_cmd_reuse_desc(&mc_desc[1], false);
                mc_desc[1].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
                hns3_cmd_reuse_desc(&mc_desc[2], false);
                mc_desc[2].flag &= rte_cpu_to_le_16(~HNS3_CMD_FLAG_NEXT);
                memcpy(mc_desc[0].data, req,
                       sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
                mc_desc[0].retval = 0;
                ret = hns3_cmd_send(hw, mc_desc, HNS3_MC_MAC_VLAN_ADD_DESC_NUM);
                resp_code = (rte_le_to_cpu_32(mc_desc[0].data[0]) >> 8) & 0xff;
                retval = rte_le_to_cpu_16(mc_desc[0].retval);

                cfg_status = hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
                                                          HNS3_MAC_VLAN_ADD);
        }

        /* A transport failure takes precedence over the decoded status. */
        if (ret) {
                hns3_err(hw, "add mac addr failed for cmd_send, ret =%d", ret);
                return ret;
        }

        return cfg_status;
}
1510
1511static int
1512hns3_remove_mac_vlan_tbl(struct hns3_hw *hw,
1513                         struct hns3_mac_vlan_tbl_entry_cmd *req)
1514{
1515        struct hns3_cmd_desc desc;
1516        uint8_t resp_code;
1517        uint16_t retval;
1518        int ret;
1519
1520        hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_REMOVE, false);
1521
1522        memcpy(desc.data, req, sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
1523
1524        ret = hns3_cmd_send(hw, &desc, 1);
1525        if (ret) {
1526                hns3_err(hw, "del mac addr failed for cmd_send, ret =%d", ret);
1527                return ret;
1528        }
1529        resp_code = (rte_le_to_cpu_32(desc.data[0]) >> 8) & 0xff;
1530        retval = rte_le_to_cpu_16(desc.retval);
1531
1532        return hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
1533                                            HNS3_MAC_VLAN_REMOVE);
1534}
1535
/*
 * Add a unicast MAC address to the hardware MAC_VLAN table for the PF.
 * The table is looked up first: a duplicate entry is accepted silently
 * (returns 0), a miss is added when UMV space remains, and -ENOSPC is
 * returned when the UC table is full. -EINVAL for an invalid address.
 */
static int
hns3_add_uc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
{
        struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
        struct hns3_mac_vlan_tbl_entry_cmd req;
        struct hns3_pf *pf = &hns->pf;
        struct hns3_cmd_desc desc[3];
        char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
        uint16_t egress_port = 0;
        uint8_t vf_id;
        int ret;

        /* check if mac addr is valid */
        if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
                hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
                                      mac_addr);
                hns3_err(hw, "Add unicast mac addr err! addr(%s) invalid",
                         mac_str);
                return -EINVAL;
        }

        memset(&req, 0, sizeof(req));

        /*
         * In current version VF is not supported when PF is driven by DPDK
         * driver, just need to configure parameters for PF vport.
         */
        vf_id = HNS3_PF_FUNC_ID;
        hns3_set_field(egress_port, HNS3_MAC_EPORT_VFID_M,
                       HNS3_MAC_EPORT_VFID_S, vf_id);

        req.egress_port = rte_cpu_to_le_16(egress_port);

        hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, false);

        /*
         * Lookup the mac address in the mac_vlan table, and add
         * it if the entry is inexistent. Repeated unicast entry
         * is not allowed in the mac vlan table.
         */
        ret = hns3_lookup_mac_vlan_tbl(hw, &req, desc, false);
        if (ret == -ENOENT) {
                /* Not present yet: add it if the UMV table has room. */
                if (!hns3_is_umv_space_full(hw)) {
                        ret = hns3_add_mac_vlan_tbl(hw, &req, NULL);
                        if (!ret)
                                hns3_update_umv_space(hw, false);
                        return ret;
                }

                hns3_err(hw, "UC MAC table full(%u)", pf->used_umv_size);

                return -ENOSPC;
        }

        hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, mac_addr);

        /* check if we just hit the duplicate */
        if (ret == 0) {
                hns3_dbg(hw, "mac addr(%s) has been in the MAC table", mac_str);
                return 0;
        }

        hns3_err(hw, "PF failed to add unicast entry(%s) in the MAC table",
                 mac_str);

        return ret;
}
1603
1604static int
1605hns3_add_mc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
1606{
1607        char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
1608        struct rte_ether_addr *addr;
1609        int ret;
1610        int i;
1611
1612        for (i = 0; i < hw->mc_addrs_num; i++) {
1613                addr = &hw->mc_addrs[i];
1614                /* Check if there are duplicate addresses */
1615                if (rte_is_same_ether_addr(addr, mac_addr)) {
1616                        hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1617                                              addr);
1618                        hns3_err(hw, "failed to add mc mac addr, same addrs"
1619                                 "(%s) is added by the set_mc_mac_addr_list "
1620                                 "API", mac_str);
1621                        return -EINVAL;
1622                }
1623        }
1624
1625        ret = hns3_add_mc_addr(hw, mac_addr);
1626        if (ret) {
1627                hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1628                                      mac_addr);
1629                hns3_err(hw, "failed to add mc mac addr(%s), ret = %d",
1630                         mac_str, ret);
1631        }
1632        return ret;
1633}
1634
1635static int
1636hns3_remove_mc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
1637{
1638        char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
1639        int ret;
1640
1641        ret = hns3_remove_mc_addr(hw, mac_addr);
1642        if (ret) {
1643                hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1644                                      mac_addr);
1645                hns3_err(hw, "failed to remove mc mac addr(%s), ret = %d",
1646                         mac_str, ret);
1647        }
1648        return ret;
1649}
1650
1651static int
1652hns3_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
1653                  uint32_t idx, __rte_unused uint32_t pool)
1654{
1655        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1656        char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
1657        int ret;
1658
1659        rte_spinlock_lock(&hw->lock);
1660
1661        /*
1662         * In hns3 network engine adding UC and MC mac address with different
1663         * commands with firmware. We need to determine whether the input
1664         * address is a UC or a MC address to call different commands.
1665         * By the way, it is recommended calling the API function named
1666         * rte_eth_dev_set_mc_addr_list to set the MC mac address, because
1667         * using the rte_eth_dev_mac_addr_add API function to set MC mac address
1668         * may affect the specifications of UC mac addresses.
1669         */
1670        if (rte_is_multicast_ether_addr(mac_addr))
1671                ret = hns3_add_mc_addr_common(hw, mac_addr);
1672        else
1673                ret = hns3_add_uc_addr_common(hw, mac_addr);
1674
1675        if (ret) {
1676                rte_spinlock_unlock(&hw->lock);
1677                hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1678                                      mac_addr);
1679                hns3_err(hw, "failed to add mac addr(%s), ret = %d", mac_str,
1680                         ret);
1681                return ret;
1682        }
1683
1684        if (idx == 0)
1685                hw->mac.default_addr_setted = true;
1686        rte_spinlock_unlock(&hw->lock);
1687
1688        return ret;
1689}
1690
1691static int
1692hns3_remove_uc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
1693{
1694        struct hns3_mac_vlan_tbl_entry_cmd req;
1695        char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
1696        int ret;
1697
1698        /* check if mac addr is valid */
1699        if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
1700                hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1701                                      mac_addr);
1702                hns3_err(hw, "remove unicast mac addr err! addr(%s) invalid",
1703                         mac_str);
1704                return -EINVAL;
1705        }
1706
1707        memset(&req, 0, sizeof(req));
1708        hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
1709        hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, false);
1710        ret = hns3_remove_mac_vlan_tbl(hw, &req);
1711        if (ret == -ENOENT) /* mac addr isn't existent in the mac vlan table. */
1712                return 0;
1713        else if (ret == 0)
1714                hns3_update_umv_space(hw, true);
1715
1716        return ret;
1717}
1718
1719static void
1720hns3_remove_mac_addr(struct rte_eth_dev *dev, uint32_t idx)
1721{
1722        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1723        /* index will be checked by upper level rte interface */
1724        struct rte_ether_addr *mac_addr = &dev->data->mac_addrs[idx];
1725        char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
1726        int ret;
1727
1728        rte_spinlock_lock(&hw->lock);
1729
1730        if (rte_is_multicast_ether_addr(mac_addr))
1731                ret = hns3_remove_mc_addr_common(hw, mac_addr);
1732        else
1733                ret = hns3_remove_uc_addr_common(hw, mac_addr);
1734        rte_spinlock_unlock(&hw->lock);
1735        if (ret) {
1736                hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1737                                      mac_addr);
1738                hns3_err(hw, "failed to remove mac addr(%s), ret = %d", mac_str,
1739                         ret);
1740        }
1741}
1742
/*
 * .mac_addr_set ethdev callback: replace the device's default MAC
 * address.
 *
 * Sequence: remove the old default UC entry (if one was set), add the
 * new UC entry, then update the MAC pause (flow control) address to
 * match. On failure the already-applied steps are rolled back via the
 * two error labels below, and the old address is restored if possible.
 */
static int
hns3_set_default_mac_addr(struct rte_eth_dev *dev,
			  struct rte_ether_addr *mac_addr)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_ether_addr *oaddr;
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	bool default_addr_setted;
	int ret, ret_val;

	/*
	 * It has been guaranteed that input parameter named mac_addr is valid
	 * address in the rte layer of DPDK framework.
	 */
	oaddr = (struct rte_ether_addr *)hw->mac.mac_addr;
	default_addr_setted = hw->mac.default_addr_setted;
	/* Requested address already installed: nothing to do. */
	if (default_addr_setted && !!rte_is_same_ether_addr(mac_addr, oaddr))
		return 0;

	rte_spinlock_lock(&hw->lock);
	if (default_addr_setted) {
		/* Remove the old default entry before adding the new one. */
		ret = hns3_remove_uc_addr_common(hw, oaddr);
		if (ret) {
			hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
					      oaddr);
			hns3_warn(hw, "Remove old uc mac address(%s) fail: %d",
				  mac_str, ret);

			rte_spinlock_unlock(&hw->lock);
			return ret;
		}
	}

	ret = hns3_add_uc_addr_common(hw, mac_addr);
	if (ret) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				      mac_addr);
		hns3_err(hw, "Failed to set mac addr(%s): %d", mac_str, ret);
		goto err_add_uc_addr;
	}

	/* Keep the MAC pause address in sync with the default address. */
	ret = hns3_pause_addr_cfg(hw, mac_addr->addr_bytes);
	if (ret) {
		hns3_err(hw, "Failed to configure mac pause address: %d", ret);
		goto err_pause_addr_cfg;
	}

	rte_ether_addr_copy(mac_addr,
			    (struct rte_ether_addr *)hw->mac.mac_addr);
	hw->mac.default_addr_setted = true;
	rte_spinlock_unlock(&hw->lock);

	return 0;

err_pause_addr_cfg:
	/* Undo the UC add performed above. */
	ret_val = hns3_remove_uc_addr_common(hw, mac_addr);
	if (ret_val) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				      mac_addr);
		hns3_warn(hw,
			  "Failed to roll back to del setted mac addr(%s): %d",
			  mac_str, ret_val);
	}

err_add_uc_addr:
	/* Best-effort restore of the previously removed old address. */
	ret_val = hns3_add_uc_addr_common(hw, oaddr);
	if (ret_val) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, oaddr);
		hns3_warn(hw, "Failed to restore old uc mac addr(%s): %d",
				  mac_str, ret_val);
		hw->mac.default_addr_setted = false;
	}
	rte_spinlock_unlock(&hw->lock);

	return ret;
}
1819
1820static int
1821hns3_configure_all_mac_addr(struct hns3_adapter *hns, bool del)
1822{
1823        char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
1824        struct hns3_hw *hw = &hns->hw;
1825        struct rte_ether_addr *addr;
1826        int err = 0;
1827        int ret;
1828        int i;
1829
1830        for (i = 0; i < HNS3_UC_MACADDR_NUM; i++) {
1831                addr = &hw->data->mac_addrs[i];
1832                if (rte_is_zero_ether_addr(addr))
1833                        continue;
1834                if (rte_is_multicast_ether_addr(addr))
1835                        ret = del ? hns3_remove_mc_addr(hw, addr) :
1836                              hns3_add_mc_addr(hw, addr);
1837                else
1838                        ret = del ? hns3_remove_uc_addr_common(hw, addr) :
1839                              hns3_add_uc_addr_common(hw, addr);
1840
1841                if (ret) {
1842                        err = ret;
1843                        hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1844                                              addr);
1845                        hns3_err(hw, "failed to %s mac addr(%s) index:%d "
1846                                 "ret = %d.", del ? "remove" : "restore",
1847                                 mac_str, i, ret);
1848                }
1849        }
1850        return err;
1851}
1852
1853static void
1854hns3_update_desc_vfid(struct hns3_cmd_desc *desc, uint8_t vfid, bool clr)
1855{
1856#define HNS3_VF_NUM_IN_FIRST_DESC 192
1857        uint8_t word_num;
1858        uint8_t bit_num;
1859
1860        if (vfid < HNS3_VF_NUM_IN_FIRST_DESC) {
1861                word_num = vfid / 32;
1862                bit_num = vfid % 32;
1863                if (clr)
1864                        desc[1].data[word_num] &=
1865                            rte_cpu_to_le_32(~(1UL << bit_num));
1866                else
1867                        desc[1].data[word_num] |=
1868                            rte_cpu_to_le_32(1UL << bit_num);
1869        } else {
1870                word_num = (vfid - HNS3_VF_NUM_IN_FIRST_DESC) / 32;
1871                bit_num = vfid % 32;
1872                if (clr)
1873                        desc[2].data[word_num] &=
1874                            rte_cpu_to_le_32(~(1UL << bit_num));
1875                else
1876                        desc[2].data[word_num] |=
1877                            rte_cpu_to_le_32(1UL << bit_num);
1878        }
1879}
1880
1881static int
1882hns3_add_mc_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
1883{
1884        struct hns3_mac_vlan_tbl_entry_cmd req;
1885        struct hns3_cmd_desc desc[3];
1886        char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
1887        uint8_t vf_id;
1888        int ret;
1889
1890        /* Check if mac addr is valid */
1891        if (!rte_is_multicast_ether_addr(mac_addr)) {
1892                hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1893                                      mac_addr);
1894                hns3_err(hw, "failed to add mc mac addr, addr(%s) invalid",
1895                         mac_str);
1896                return -EINVAL;
1897        }
1898
1899        memset(&req, 0, sizeof(req));
1900        hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
1901        hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, true);
1902        ret = hns3_lookup_mac_vlan_tbl(hw, &req, desc, true);
1903        if (ret) {
1904                /* This mac addr do not exist, add new entry for it */
1905                memset(desc[0].data, 0, sizeof(desc[0].data));
1906                memset(desc[1].data, 0, sizeof(desc[0].data));
1907                memset(desc[2].data, 0, sizeof(desc[0].data));
1908        }
1909
1910        /*
1911         * In current version VF is not supported when PF is driven by DPDK
1912         * driver, just need to configure parameters for PF vport.
1913         */
1914        vf_id = HNS3_PF_FUNC_ID;
1915        hns3_update_desc_vfid(desc, vf_id, false);
1916        ret = hns3_add_mac_vlan_tbl(hw, &req, desc);
1917        if (ret) {
1918                if (ret == -ENOSPC)
1919                        hns3_err(hw, "mc mac vlan table is full");
1920                hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1921                                      mac_addr);
1922                hns3_err(hw, "failed to add mc mac addr(%s): %d", mac_str, ret);
1923        }
1924
1925        return ret;
1926}
1927
/*
 * Remove a multicast MAC address from the hardware MAC_VLAN table.
 *
 * The entry is looked up first; if present, the PF's VF bit is cleared
 * and the whole entry is deleted. An address that is absent from the
 * table is treated as success. Returns 0 or a negative errno.
 */
static int
hns3_remove_mc_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
{
	struct hns3_mac_vlan_tbl_entry_cmd req;
	struct hns3_cmd_desc desc[3];
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	uint8_t vf_id;
	int ret;

	/* Check if mac addr is valid */
	if (!rte_is_multicast_ether_addr(mac_addr)) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				      mac_addr);
		hns3_err(hw, "Failed to rm mc mac addr, addr(%s) invalid",
			 mac_str);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
	hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, true);
	ret = hns3_lookup_mac_vlan_tbl(hw, &req, desc, true);
	if (ret == 0) {
		/*
		 * This mac addr exist, remove this handle's VFID for it.
		 * In current version VF is not supported when PF is driven by
		 * DPDK driver, just need to configure parameters for PF vport.
		 */
		vf_id = HNS3_PF_FUNC_ID;
		hns3_update_desc_vfid(desc, vf_id, true);

		/* All the vfid is zero, so need to delete this entry */
		ret = hns3_remove_mac_vlan_tbl(hw, &req);
	} else if (ret == -ENOENT) {
		/* This mac addr doesn't exist. */
		return 0;
	}

	/* Covers both a failed lookup and a failed table removal. */
	if (ret) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				      mac_addr);
		hns3_err(hw, "Failed to rm mc mac addr(%s): %d", mac_str, ret);
	}

	return ret;
}
1974
1975static int
1976hns3_set_mc_addr_chk_param(struct hns3_hw *hw,
1977                           struct rte_ether_addr *mc_addr_set,
1978                           uint32_t nb_mc_addr)
1979{
1980        char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
1981        struct rte_ether_addr *addr;
1982        uint32_t i;
1983        uint32_t j;
1984
1985        if (nb_mc_addr > HNS3_MC_MACADDR_NUM) {
1986                hns3_err(hw, "failed to set mc mac addr, nb_mc_addr(%u) "
1987                         "invalid. valid range: 0~%d",
1988                         nb_mc_addr, HNS3_MC_MACADDR_NUM);
1989                return -EINVAL;
1990        }
1991
1992        /* Check if input mac addresses are valid */
1993        for (i = 0; i < nb_mc_addr; i++) {
1994                addr = &mc_addr_set[i];
1995                if (!rte_is_multicast_ether_addr(addr)) {
1996                        hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1997                                              addr);
1998                        hns3_err(hw,
1999                                 "failed to set mc mac addr, addr(%s) invalid.",
2000                                 mac_str);
2001                        return -EINVAL;
2002                }
2003
2004                /* Check if there are duplicate addresses */
2005                for (j = i + 1; j < nb_mc_addr; j++) {
2006                        if (rte_is_same_ether_addr(addr, &mc_addr_set[j])) {
2007                                hns3_ether_format_addr(mac_str,
2008                                                      RTE_ETHER_ADDR_FMT_SIZE,
2009                                                      addr);
2010                                hns3_err(hw, "failed to set mc mac addr, "
2011                                         "addrs invalid. two same addrs(%s).",
2012                                         mac_str);
2013                                return -EINVAL;
2014                        }
2015                }
2016
2017                /*
2018                 * Check if there are duplicate addresses between mac_addrs
2019                 * and mc_addr_set
2020                 */
2021                for (j = 0; j < HNS3_UC_MACADDR_NUM; j++) {
2022                        if (rte_is_same_ether_addr(addr,
2023                                                   &hw->data->mac_addrs[j])) {
2024                                hns3_ether_format_addr(mac_str,
2025                                                      RTE_ETHER_ADDR_FMT_SIZE,
2026                                                      addr);
2027                                hns3_err(hw, "failed to set mc mac addr, "
2028                                         "addrs invalid. addrs(%s) has already "
2029                                         "configured in mac_addr add API",
2030                                         mac_str);
2031                                return -EINVAL;
2032                        }
2033                }
2034        }
2035
2036        return 0;
2037}
2038
/*
 * Diff the requested MC list (mc_addr_set) against the list currently
 * maintained by the driver (hw->mc_addrs).
 *
 * Outputs:
 *   reserved_addr_list / *reserved_addr_num - present in both lists;
 *   add_addr_list / *add_addr_num           - only in mc_addr_set;
 *   rm_addr_list / *rm_addr_num             - only in hw->mc_addrs.
 *
 * hw->mc_addrs is also reordered in place: kept addresses are packed to
 * the front and to-be-removed ones moved to the tail, so the caller can
 * adjust hw->mc_addrs_num incrementally while applying the diff.
 */
static void
hns3_set_mc_addr_calc_addr(struct hns3_hw *hw,
			   struct rte_ether_addr *mc_addr_set,
			   int mc_addr_num,
			   struct rte_ether_addr *reserved_addr_list,
			   int *reserved_addr_num,
			   struct rte_ether_addr *add_addr_list,
			   int *add_addr_num,
			   struct rte_ether_addr *rm_addr_list,
			   int *rm_addr_num)
{
	struct rte_ether_addr *addr;
	int current_addr_num;
	int reserved_num = 0;
	int add_num = 0;
	int rm_num = 0;
	int num;
	int i;
	int j;
	bool same_addr;

	/* Calculate the mc mac address list that should be removed */
	current_addr_num = hw->mc_addrs_num;
	for (i = 0; i < current_addr_num; i++) {
		addr = &hw->mc_addrs[i];
		same_addr = false;
		for (j = 0; j < mc_addr_num; j++) {
			if (rte_is_same_ether_addr(addr, &mc_addr_set[j])) {
				same_addr = true;
				break;
			}
		}

		/* Not requested any more -> remove; requested -> keep. */
		if (!same_addr) {
			rte_ether_addr_copy(addr, &rm_addr_list[rm_num]);
			rm_num++;
		} else {
			rte_ether_addr_copy(addr,
					    &reserved_addr_list[reserved_num]);
			reserved_num++;
		}
	}

	/* Calculate the mc mac address list that should be added */
	for (i = 0; i < mc_addr_num; i++) {
		addr = &mc_addr_set[i];
		same_addr = false;
		for (j = 0; j < current_addr_num; j++) {
			if (rte_is_same_ether_addr(addr, &hw->mc_addrs[j])) {
				same_addr = true;
				break;
			}
		}

		if (!same_addr) {
			rte_ether_addr_copy(addr, &add_addr_list[add_num]);
			add_num++;
		}
	}

	/* Reorder the mc mac address list maintained by driver */
	for (i = 0; i < reserved_num; i++)
		rte_ether_addr_copy(&reserved_addr_list[i], &hw->mc_addrs[i]);

	/* Removed entries go to the tail so the caller can pop them off. */
	for (i = 0; i < rm_num; i++) {
		num = reserved_num + i;
		rte_ether_addr_copy(&rm_addr_list[i], &hw->mc_addrs[num]);
	}

	*reserved_addr_num = reserved_num;
	*add_addr_num = add_num;
	*rm_addr_num = rm_num;
}
2112
/*
 * .set_mc_addr_list ethdev callback: make the hardware MC table match
 * mc_addr_set.
 *
 * After validation, the requested list is diffed against the driver's
 * current list; stale entries are removed (from the tail of the
 * reordered hw->mc_addrs, newest first) and new entries are added,
 * updating hw->mc_addrs / hw->mc_addrs_num step by step under hw->lock.
 *
 * NOTE(review): on a mid-loop firmware failure this returns early, so
 * the table may end up partially updated (hw->mc_addrs stays consistent
 * with what was actually applied) — presumably intentional best-effort;
 * confirm against callers.
 */
static int
hns3_set_mc_mac_addr_list(struct rte_eth_dev *dev,
			  struct rte_ether_addr *mc_addr_set,
			  uint32_t nb_mc_addr)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_ether_addr reserved_addr_list[HNS3_MC_MACADDR_NUM];
	struct rte_ether_addr add_addr_list[HNS3_MC_MACADDR_NUM];
	struct rte_ether_addr rm_addr_list[HNS3_MC_MACADDR_NUM];
	struct rte_ether_addr *addr;
	int reserved_addr_num;
	int add_addr_num;
	int rm_addr_num;
	int mc_addr_num;
	int num;
	int ret;
	int i;

	/* Check if input parameters are valid */
	ret = hns3_set_mc_addr_chk_param(hw, mc_addr_set, nb_mc_addr);
	if (ret)
		return ret;

	rte_spinlock_lock(&hw->lock);

	/*
	 * Calculate the mc mac address lists those should be removed and be
	 * added, Reorder the mc mac address list maintained by driver.
	 */
	mc_addr_num = (int)nb_mc_addr;
	hns3_set_mc_addr_calc_addr(hw, mc_addr_set, mc_addr_num,
				   reserved_addr_list, &reserved_addr_num,
				   add_addr_list, &add_addr_num,
				   rm_addr_list, &rm_addr_num);

	/* Remove mc mac addresses */
	for (i = 0; i < rm_addr_num; i++) {
		num = rm_addr_num - i - 1;
		addr = &rm_addr_list[num];
		ret = hns3_remove_mc_addr(hw, addr);
		if (ret) {
			rte_spinlock_unlock(&hw->lock);
			return ret;
		}
		/* Entry was at the tail of hw->mc_addrs after the reorder. */
		hw->mc_addrs_num--;
	}

	/* Add mc mac addresses */
	for (i = 0; i < add_addr_num; i++) {
		addr = &add_addr_list[i];
		ret = hns3_add_mc_addr(hw, addr);
		if (ret) {
			rte_spinlock_unlock(&hw->lock);
			return ret;
		}

		/* Append after the reserved (kept) entries. */
		num = reserved_addr_num + i;
		rte_ether_addr_copy(addr, &hw->mc_addrs[num]);
		hw->mc_addrs_num++;
	}
	rte_spinlock_unlock(&hw->lock);

	return 0;
}
2177
2178static int
2179hns3_configure_all_mc_mac_addr(struct hns3_adapter *hns, bool del)
2180{
2181        char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
2182        struct hns3_hw *hw = &hns->hw;
2183        struct rte_ether_addr *addr;
2184        int err = 0;
2185        int ret;
2186        int i;
2187
2188        for (i = 0; i < hw->mc_addrs_num; i++) {
2189                addr = &hw->mc_addrs[i];
2190                if (!rte_is_multicast_ether_addr(addr))
2191                        continue;
2192                if (del)
2193                        ret = hns3_remove_mc_addr(hw, addr);
2194                else
2195                        ret = hns3_add_mc_addr(hw, addr);
2196                if (ret) {
2197                        err = ret;
2198                        hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
2199                                              addr);
2200                        hns3_dbg(hw, "%s mc mac addr: %s failed for pf: ret = %d",
2201                                 del ? "Remove" : "Restore", mac_str, ret);
2202                }
2203        }
2204        return err;
2205}
2206
/*
 * Validate the configured Rx/Tx multi-queue modes.
 *
 * VMDQ is rejected outright. When DCB is requested on the Rx side, the
 * TC count must be within the device limit, be exactly 4 or 8, match
 * between Rx and Tx, have identical priority-to-TC maps in both
 * directions, and every mapped TC index must fall inside nb_tcs.
 * Returns 0 when valid, -EOPNOTSUPP or -EINVAL otherwise.
 */
static int
hns3_check_mq_mode(struct rte_eth_dev *dev)
{
	enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
	enum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode;
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct rte_eth_dcb_rx_conf *dcb_rx_conf;
	struct rte_eth_dcb_tx_conf *dcb_tx_conf;
	uint8_t num_tc;
	int max_tc = 0;
	int i;

	/* VMDQ is not supported by this hardware/driver. */
	if ((rx_mq_mode & ETH_MQ_RX_VMDQ_FLAG) ||
	    (tx_mq_mode == ETH_MQ_TX_VMDQ_DCB ||
	     tx_mq_mode == ETH_MQ_TX_VMDQ_ONLY)) {
		hns3_err(hw, "VMDQ is not supported, rx_mq_mode = %d, tx_mq_mode = %d.",
			 rx_mq_mode, tx_mq_mode);
		return -EOPNOTSUPP;
	}

	dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
	dcb_tx_conf = &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
	if (rx_mq_mode & ETH_MQ_RX_DCB_FLAG) {
		if (dcb_rx_conf->nb_tcs > pf->tc_max) {
			hns3_err(hw, "nb_tcs(%u) > max_tc(%u) driver supported.",
				 dcb_rx_conf->nb_tcs, pf->tc_max);
			return -EINVAL;
		}

		/* Hardware only supports exactly 4 or 8 TCs in DCB mode. */
		if (!(dcb_rx_conf->nb_tcs == HNS3_4_TCS ||
		      dcb_rx_conf->nb_tcs == HNS3_8_TCS)) {
			hns3_err(hw, "on ETH_MQ_RX_DCB_RSS mode, "
				 "nb_tcs(%d) != %d or %d in rx direction.",
				 dcb_rx_conf->nb_tcs, HNS3_4_TCS, HNS3_8_TCS);
			return -EINVAL;
		}

		if (dcb_rx_conf->nb_tcs != dcb_tx_conf->nb_tcs) {
			hns3_err(hw, "num_tcs(%d) of tx is not equal to rx(%d)",
				 dcb_tx_conf->nb_tcs, dcb_rx_conf->nb_tcs);
			return -EINVAL;
		}

		/*
		 * Rx and Tx must share one priority-to-TC map; also track
		 * the highest mapped TC for the range check below.
		 */
		for (i = 0; i < HNS3_MAX_USER_PRIO; i++) {
			if (dcb_rx_conf->dcb_tc[i] != dcb_tx_conf->dcb_tc[i]) {
				hns3_err(hw, "dcb_tc[%d] = %u in rx direction, "
					 "is not equal to one in tx direction.",
					 i, dcb_rx_conf->dcb_tc[i]);
				return -EINVAL;
			}
			if (dcb_rx_conf->dcb_tc[i] > max_tc)
				max_tc = dcb_rx_conf->dcb_tc[i];
		}

		/* Every mapped TC index must lie within nb_tcs. */
		num_tc = max_tc + 1;
		if (num_tc > dcb_rx_conf->nb_tcs) {
			hns3_err(hw, "max num_tc(%u) mapped > nb_tcs(%u)",
				 num_tc, dcb_rx_conf->nb_tcs);
			return -EINVAL;
		}
	}

	return 0;
}
2272
/*
 * Map (en == true) or unmap (en == false) one Tx or Rx ring to an
 * interrupt vector via a firmware command.
 *
 * The vector id is split into low/high register fields, and the ring is
 * described by a packed type/queue-id/GL-index halfword. A single
 * descriptor carrying one ring binding is sent. Returns 0 on success
 * or the negative errno from the command queue.
 */
static int
hns3_bind_ring_with_vector(struct hns3_hw *hw, uint16_t vector_id, bool en,
			   enum hns3_ring_type queue_type, uint16_t queue_id)
{
	struct hns3_cmd_desc desc;
	struct hns3_ctrl_vector_chain_cmd *req =
		(struct hns3_ctrl_vector_chain_cmd *)desc.data;
	enum hns3_opcode_type op;
	uint16_t tqp_type_and_id = 0;
	uint16_t type;
	uint16_t gl;
	int ret;

	op = en ? HNS3_OPC_ADD_RING_TO_VECTOR : HNS3_OPC_DEL_RING_TO_VECTOR;
	hns3_cmd_setup_basic_desc(&desc, op, false);
	/* The vector id is carried in two separate register fields. */
	req->int_vector_id = hns3_get_field(vector_id, HNS3_TQP_INT_ID_L_M,
					      HNS3_TQP_INT_ID_L_S);
	req->int_vector_id_h = hns3_get_field(vector_id, HNS3_TQP_INT_ID_H_M,
					      HNS3_TQP_INT_ID_H_S);

	/* Select the GL (gap limiter) index matching the ring direction. */
	if (queue_type == HNS3_RING_TYPE_RX)
		gl = HNS3_RING_GL_RX;
	else
		gl = HNS3_RING_GL_TX;

	type = queue_type;

	/* Pack ring type, queue id and GL index into one halfword. */
	hns3_set_field(tqp_type_and_id, HNS3_INT_TYPE_M, HNS3_INT_TYPE_S,
		       type);
	hns3_set_field(tqp_type_and_id, HNS3_TQP_ID_M, HNS3_TQP_ID_S, queue_id);
	hns3_set_field(tqp_type_and_id, HNS3_INT_GL_IDX_M, HNS3_INT_GL_IDX_S,
		       gl);
	req->tqp_type_and_id[0] = rte_cpu_to_le_16(tqp_type_and_id);
	req->int_cause_num = 1;
	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		hns3_err(hw, "%s TQP %u fail, vector_id = %u, ret = %d.",
			 en ? "Map" : "Unmap", queue_id, vector_id, ret);
		return ret;
	}

	return 0;
}
2316
/*
 * Initialize interrupt coalescing defaults for every queue vector and
 * clear any residual queue-to-vector mappings left in hardware.
 * Also computes hw->intr_tqps_num, the number of TQPs that can have a
 * dedicated interrupt vector. Returns 0 on success or the negative
 * errno from the unbind command.
 */
static int
hns3_init_ring_with_vector(struct hns3_hw *hw)
{
	uint16_t vec;
	int ret;
	int i;

	/*
	 * In hns3 network engine, vector 0 is always the misc interrupt of this
	 * function, vector 1~N can be used respectively for the queues of the
	 * function. Tx and Rx queues with the same number share the interrupt
	 * vector. In the initialization clearing the all hardware mapping
	 * relationship configurations between queues and interrupt vectors is
	 * needed, so some error caused by the residual configurations, such as
	 * the unexpected Tx interrupt, can be avoid.
	 */
	vec = hw->num_msi - 1; /* vector 0 for misc interrupt, not for queue */
	if (hw->intr.mapping_mode == HNS3_INTR_MAPPING_VEC_RSV_ONE)
		vec = vec - 1; /* the last interrupt is reserved */
	hw->intr_tqps_num = RTE_MIN(vec, hw->tqps_num);
	for (i = 0; i < hw->intr_tqps_num; i++) {
		/*
		 * Set gap limiter/rate limiter/quanity limiter algorithm
		 * configuration for interrupt coalesce of queue's interrupt.
		 */
		hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_RX,
				       HNS3_TQP_INTR_GL_DEFAULT);
		hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_TX,
				       HNS3_TQP_INTR_GL_DEFAULT);
		hns3_set_queue_intr_rl(hw, i, HNS3_TQP_INTR_RL_DEFAULT);
		/*
		 * QL(quantity limiter) is not used currently, just set 0 to
		 * close it.
		 */
		hns3_set_queue_intr_ql(hw, i, HNS3_TQP_INTR_QL_DEFAULT);

		/* Clear any stale Tx ring binding for this queue index. */
		ret = hns3_bind_ring_with_vector(hw, vec, false,
						 HNS3_RING_TYPE_TX, i);
		if (ret) {
			PMD_INIT_LOG(ERR, "PF fail to unbind TX ring(%d) with "
					  "vector: %u, ret=%d", i, vec, ret);
			return ret;
		}

		/* Clear any stale Rx ring binding for this queue index. */
		ret = hns3_bind_ring_with_vector(hw, vec, false,
						 HNS3_RING_TYPE_RX, i);
		if (ret) {
			PMD_INIT_LOG(ERR, "PF fail to unbind RX ring(%d) with "
					  "vector: %u, ret=%d", i, vec, ret);
			return ret;
		}
	}

	return 0;
}
2372
2373static int
2374hns3_refresh_mtu(struct rte_eth_dev *dev, struct rte_eth_conf *conf)
2375{
2376        struct hns3_adapter *hns = dev->data->dev_private;
2377        struct hns3_hw *hw = &hns->hw;
2378        uint32_t max_rx_pkt_len;
2379        uint16_t mtu;
2380        int ret;
2381
2382        if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME))
2383                return 0;
2384
2385        /*
2386         * If jumbo frames are enabled, MTU needs to be refreshed
2387         * according to the maximum RX packet length.
2388         */
2389        max_rx_pkt_len = conf->rxmode.max_rx_pkt_len;
2390        if (max_rx_pkt_len > HNS3_MAX_FRAME_LEN ||
2391            max_rx_pkt_len <= HNS3_DEFAULT_FRAME_LEN) {
2392                hns3_err(hw, "maximum Rx packet length must be greater than %u "
2393                         "and no more than %u when jumbo frame enabled.",
2394                         (uint16_t)HNS3_DEFAULT_FRAME_LEN,
2395                         (uint16_t)HNS3_MAX_FRAME_LEN);
2396                return -EINVAL;
2397        }
2398
2399        mtu = (uint16_t)HNS3_PKTLEN_TO_MTU(max_rx_pkt_len);
2400        ret = hns3_dev_mtu_set(dev, mtu);
2401        if (ret)
2402                return ret;
2403        dev->data->mtu = mtu;
2404
2405        return 0;
2406}
2407
2408static int
2409hns3_setup_dcb(struct rte_eth_dev *dev)
2410{
2411        struct hns3_adapter *hns = dev->data->dev_private;
2412        struct hns3_hw *hw = &hns->hw;
2413        int ret;
2414
2415        if (!hns3_dev_dcb_supported(hw)) {
2416                hns3_err(hw, "this port does not support dcb configurations.");
2417                return -EOPNOTSUPP;
2418        }
2419
2420        if (hw->current_fc_status == HNS3_FC_STATUS_MAC_PAUSE) {
2421                hns3_err(hw, "MAC pause enabled, cannot config dcb info.");
2422                return -EOPNOTSUPP;
2423        }
2424
2425        ret = hns3_dcb_configure(hns);
2426        if (ret)
2427                hns3_err(hw, "failed to config dcb: %d", ret);
2428
2429        return ret;
2430}
2431
2432static int
2433hns3_check_link_speed(struct hns3_hw *hw, uint32_t link_speeds)
2434{
2435        int ret;
2436
2437        /*
2438         * Some hardware doesn't support auto-negotiation, but users may not
2439         * configure link_speeds (default 0), which means auto-negotiation.
2440         * In this case, it should return success.
2441         */
2442        if (link_speeds == ETH_LINK_SPEED_AUTONEG &&
2443            hw->mac.support_autoneg == 0)
2444                return 0;
2445
2446        if (link_speeds != ETH_LINK_SPEED_AUTONEG) {
2447                ret = hns3_check_port_speed(hw, link_speeds);
2448                if (ret)
2449                        return ret;
2450        }
2451
2452        return 0;
2453}
2454
2455static int
2456hns3_check_dev_conf(struct rte_eth_dev *dev)
2457{
2458        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2459        struct rte_eth_conf *conf = &dev->data->dev_conf;
2460        int ret;
2461
2462        ret = hns3_check_mq_mode(dev);
2463        if (ret)
2464                return ret;
2465
2466        return hns3_check_link_speed(hw, conf->link_speeds);
2467}
2468
/*
 * .dev_configure ethdev callback: validate the application configuration
 * and program queue layout, DCB, RSS, MTU, Rx timestamping, VLAN and GRO.
 * On any failure, fake queues are torn down and the adapter state returns
 * to HNS3_NIC_INITIALIZED.
 */
static int
hns3_dev_configure(struct rte_eth_dev *dev)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct rte_eth_conf *conf = &dev->data->dev_conf;
	enum rte_eth_rx_mq_mode mq_mode = conf->rxmode.mq_mode;
	struct hns3_hw *hw = &hns->hw;
	uint16_t nb_rx_q = dev->data->nb_rx_queues;
	uint16_t nb_tx_q = dev->data->nb_tx_queues;
	struct rte_eth_rss_conf rss_conf;
	bool gro_en;
	int ret;

	hw->cfg_max_queues = RTE_MAX(nb_rx_q, nb_tx_q);

	/*
	 * Some versions of hardware network engine does not support
	 * individually enable/disable/reset the Tx or Rx queue. These devices
	 * must enable/disable/reset Tx and Rx queues at the same time. When the
	 * numbers of Tx queues allocated by upper applications are not equal to
	 * the numbers of Rx queues, driver needs to setup fake Tx or Rx queues
	 * to adjust numbers of Tx/Rx queues. otherwise, network engine can not
	 * work as usual. But these fake queues are imperceptible, and can not
	 * be used by upper applications.
	 */
	ret = hns3_set_fake_rx_or_tx_queues(dev, nb_rx_q, nb_tx_q);
	if (ret) {
		hns3_err(hw, "fail to set Rx/Tx fake queues, ret = %d.", ret);
		hw->cfg_max_queues = 0;
		return ret;
	}

	hw->adapter_state = HNS3_NIC_CONFIGURING;
	/* Reject invalid mq_mode / link_speeds before touching hardware. */
	ret = hns3_check_dev_conf(dev);
	if (ret)
		goto cfg_err;

	if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG) {
		ret = hns3_setup_dcb(dev);
		if (ret)
			goto cfg_err;
	}

	/* When RSS is not configured, redirect the packet queue 0 */
	if ((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG) {
		conf->rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
		rss_conf = conf->rx_adv_conf.rss_conf;
		hw->rss_dis_flag = false;
		ret = hns3_dev_rss_hash_update(dev, &rss_conf);
		if (ret)
			goto cfg_err;
	}

	/* Refresh MTU from max_rx_pkt_len when jumbo frames are enabled. */
	ret = hns3_refresh_mtu(dev, conf);
	if (ret)
		goto cfg_err;

	ret = hns3_mbuf_dyn_rx_timestamp_register(dev, conf);
	if (ret)
		goto cfg_err;

	ret = hns3_dev_configure_vlan(dev);
	if (ret)
		goto cfg_err;

	/* config hardware GRO */
	gro_en = conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO ? true : false;
	ret = hns3_config_gro(hw, gro_en);
	if (ret)
		goto cfg_err;

	hns3_init_rx_ptype_tble(dev);
	hw->adapter_state = HNS3_NIC_CONFIGURED;

	return 0;

cfg_err:
	/* Undo queue setup and fall back to the initialized state. */
	hw->cfg_max_queues = 0;
	(void)hns3_set_fake_rx_or_tx_queues(dev, 0, 0);
	hw->adapter_state = HNS3_NIC_INITIALIZED;

	return ret;
}
2552
/*
 * Program the MAC maximum frame size into hardware via the
 * HNS3_OPC_CONFIG_MAX_FRM_SIZE firmware command.
 *
 * @param hw      hardware information handle
 * @param new_mps new maximum packet (frame) size in bytes
 * @return 0 on success, otherwise the hns3_cmd_send() error code
 */
static int
hns3_set_mac_mtu(struct hns3_hw *hw, uint16_t new_mps)
{
	struct hns3_config_max_frm_size_cmd *req;
	struct hns3_cmd_desc desc;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_MAX_FRM_SIZE, false);

	req = (struct hns3_config_max_frm_size_cmd *)desc.data;
	req->max_frm_size = rte_cpu_to_le_16(new_mps);
	/*
	 * NOTE(review): min_frm_size is assigned without a byte swap, unlike
	 * max_frm_size — presumably it is a single-byte field; confirm
	 * against the command structure layout.
	 */
	req->min_frm_size = RTE_ETHER_MIN_LEN;

	return hns3_cmd_send(hw, &desc, 1);
}
2567
2568static int
2569hns3_config_mtu(struct hns3_hw *hw, uint16_t mps)
2570{
2571        struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
2572        uint16_t original_mps = hns->pf.mps;
2573        int err;
2574        int ret;
2575
2576        ret = hns3_set_mac_mtu(hw, mps);
2577        if (ret) {
2578                hns3_err(hw, "failed to set mtu, ret = %d", ret);
2579                return ret;
2580        }
2581
2582        hns->pf.mps = mps;
2583        ret = hns3_buffer_alloc(hw);
2584        if (ret) {
2585                hns3_err(hw, "failed to allocate buffer, ret = %d", ret);
2586                goto rollback;
2587        }
2588
2589        return 0;
2590
2591rollback:
2592        err = hns3_set_mac_mtu(hw, original_mps);
2593        if (err) {
2594                hns3_err(hw, "fail to rollback MTU, err = %d", err);
2595                return ret;
2596        }
2597        hns->pf.mps = original_mps;
2598
2599        return ret;
2600}
2601
/*
 * .mtu_set ethdev callback: configure a new MTU. The port must be stopped.
 * On success, the jumbo-frame offload flag and max_rx_pkt_len in the device
 * configuration are updated to stay consistent with the new frame size.
 */
static int
hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	uint32_t frame_size = mtu + HNS3_ETH_OVERHEAD;
	struct hns3_hw *hw = &hns->hw;
	bool is_jumbo_frame;
	int ret;

	if (dev->data->dev_started) {
		hns3_err(hw, "Failed to set mtu, port %u must be stopped "
			 "before configuration", dev->data->port_id);
		return -EBUSY;
	}

	rte_spinlock_lock(&hw->lock);
	is_jumbo_frame = frame_size > HNS3_DEFAULT_FRAME_LEN ? true : false;
	/* Never program a frame size below the default Ethernet frame. */
	frame_size = RTE_MAX(frame_size, HNS3_DEFAULT_FRAME_LEN);

	/*
	 * Maximum value of frame_size is HNS3_MAX_FRAME_LEN, so it can safely
	 * assign to "uint16_t" type variable.
	 */
	ret = hns3_config_mtu(hw, (uint16_t)frame_size);
	if (ret) {
		rte_spinlock_unlock(&hw->lock);
		hns3_err(hw, "Failed to set mtu, port %u mtu %u: %d",
			 dev->data->port_id, mtu, ret);
		return ret;
	}

	/* Keep the reported offload flags in sync with the new frame size. */
	if (is_jumbo_frame)
		dev->data->dev_conf.rxmode.offloads |=
						DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		dev->data->dev_conf.rxmode.offloads &=
						~DEV_RX_OFFLOAD_JUMBO_FRAME;
	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
	rte_spinlock_unlock(&hw->lock);

	return 0;
}
2644
2645static uint32_t
2646hns3_get_copper_port_speed_capa(uint32_t supported_speed)
2647{
2648        uint32_t speed_capa = 0;
2649
2650        if (supported_speed & HNS3_PHY_LINK_SPEED_10M_HD_BIT)
2651                speed_capa |= ETH_LINK_SPEED_10M_HD;
2652        if (supported_speed & HNS3_PHY_LINK_SPEED_10M_BIT)
2653                speed_capa |= ETH_LINK_SPEED_10M;
2654        if (supported_speed & HNS3_PHY_LINK_SPEED_100M_HD_BIT)
2655                speed_capa |= ETH_LINK_SPEED_100M_HD;
2656        if (supported_speed & HNS3_PHY_LINK_SPEED_100M_BIT)
2657                speed_capa |= ETH_LINK_SPEED_100M;
2658        if (supported_speed & HNS3_PHY_LINK_SPEED_1000M_BIT)
2659                speed_capa |= ETH_LINK_SPEED_1G;
2660
2661        return speed_capa;
2662}
2663
2664static uint32_t
2665hns3_get_firber_port_speed_capa(uint32_t supported_speed)
2666{
2667        uint32_t speed_capa = 0;
2668
2669        if (supported_speed & HNS3_FIBER_LINK_SPEED_1G_BIT)
2670                speed_capa |= ETH_LINK_SPEED_1G;
2671        if (supported_speed & HNS3_FIBER_LINK_SPEED_10G_BIT)
2672                speed_capa |= ETH_LINK_SPEED_10G;
2673        if (supported_speed & HNS3_FIBER_LINK_SPEED_25G_BIT)
2674                speed_capa |= ETH_LINK_SPEED_25G;
2675        if (supported_speed & HNS3_FIBER_LINK_SPEED_40G_BIT)
2676                speed_capa |= ETH_LINK_SPEED_40G;
2677        if (supported_speed & HNS3_FIBER_LINK_SPEED_50G_BIT)
2678                speed_capa |= ETH_LINK_SPEED_50G;
2679        if (supported_speed & HNS3_FIBER_LINK_SPEED_100G_BIT)
2680                speed_capa |= ETH_LINK_SPEED_100G;
2681        if (supported_speed & HNS3_FIBER_LINK_SPEED_200G_BIT)
2682                speed_capa |= ETH_LINK_SPEED_200G;
2683
2684        return speed_capa;
2685}
2686
2687static uint32_t
2688hns3_get_speed_capa(struct hns3_hw *hw)
2689{
2690        struct hns3_mac *mac = &hw->mac;
2691        uint32_t speed_capa;
2692
2693        if (mac->media_type == HNS3_MEDIA_TYPE_COPPER)
2694                speed_capa =
2695                        hns3_get_copper_port_speed_capa(mac->supported_speed);
2696        else
2697                speed_capa =
2698                        hns3_get_firber_port_speed_capa(mac->supported_speed);
2699
2700        if (mac->support_autoneg == 0)
2701                speed_capa |= ETH_LINK_SPEED_FIXED;
2702
2703        return speed_capa;
2704}
2705
/*
 * .dev_infos_get ethdev callback: report queue limits, offload
 * capabilities, descriptor limits, speed capabilities and default
 * port/queue configuration for this PF device.
 */
int
hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	uint16_t queue_num = hw->tqps_num;

	/*
	 * In interrupt mode, 'max_rx_queues' is set based on the number of
	 * MSI-X interrupt resources of the hardware.
	 */
	if (hw->data->dev_conf.intr_conf.rxq == 1)
		queue_num = hw->intr_tqps_num;

	info->max_rx_queues = queue_num;
	info->max_tx_queues = hw->tqps_num;
	info->max_rx_pktlen = HNS3_MAX_FRAME_LEN; /* CRC included */
	info->min_rx_bufsize = HNS3_MIN_BD_BUF_SIZE;
	info->max_mac_addrs = HNS3_UC_MACADDR_NUM;
	info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD;
	info->max_lro_pkt_size = HNS3_MAX_LRO_SIZE;
	info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
				 DEV_RX_OFFLOAD_TCP_CKSUM |
				 DEV_RX_OFFLOAD_UDP_CKSUM |
				 DEV_RX_OFFLOAD_SCTP_CKSUM |
				 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
				 DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
				 DEV_RX_OFFLOAD_KEEP_CRC |
				 DEV_RX_OFFLOAD_SCATTER |
				 DEV_RX_OFFLOAD_VLAN_STRIP |
				 DEV_RX_OFFLOAD_VLAN_FILTER |
				 DEV_RX_OFFLOAD_JUMBO_FRAME |
				 DEV_RX_OFFLOAD_RSS_HASH |
				 DEV_RX_OFFLOAD_TCP_LRO);
	info->tx_offload_capa = (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
				 DEV_TX_OFFLOAD_IPV4_CKSUM |
				 DEV_TX_OFFLOAD_TCP_CKSUM |
				 DEV_TX_OFFLOAD_UDP_CKSUM |
				 DEV_TX_OFFLOAD_SCTP_CKSUM |
				 DEV_TX_OFFLOAD_MULTI_SEGS |
				 DEV_TX_OFFLOAD_TCP_TSO |
				 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
				 DEV_TX_OFFLOAD_GRE_TNL_TSO |
				 DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
				 DEV_TX_OFFLOAD_MBUF_FAST_FREE |
				 hns3_txvlan_cap_get(hw));

	/* Some offloads depend on the specific hardware revision/features. */
	if (hns3_dev_outer_udp_cksum_supported(hw))
		info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;

	if (hns3_dev_indep_txrx_supported(hw))
		info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
				 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;

	if (hns3_dev_ptp_supported(hw))
		info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;

	info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = HNS3_MAX_RING_DESC,
		.nb_min = HNS3_MIN_RING_DESC,
		.nb_align = HNS3_ALIGN_RING_DESC,
	};

	info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = HNS3_MAX_RING_DESC,
		.nb_min = HNS3_MIN_RING_DESC,
		.nb_align = HNS3_ALIGN_RING_DESC,
		.nb_seg_max = HNS3_MAX_TSO_BD_PER_PKT,
		.nb_mtu_seg_max = hw->max_non_tso_bd_num,
	};

	info->speed_capa = hns3_get_speed_capa(hw);
	info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = HNS3_DEFAULT_RX_FREE_THRESH,
		/*
		 * If there are no available Rx buffer descriptors, incoming
		 * packets are always dropped by hardware based on hns3 network
		 * engine.
		 */
		.rx_drop_en = 1,
		.offloads = 0,
	};
	info->default_txconf = (struct rte_eth_txconf) {
		.tx_rs_thresh = HNS3_DEFAULT_TX_RS_THRESH,
		.offloads = 0,
	};

	info->reta_size = hw->rss_ind_tbl_size;
	info->hash_key_size = HNS3_RSS_KEY_SIZE;
	info->flow_type_rss_offloads = HNS3_ETH_RSS_SUPPORT;

	info->default_rxportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE;
	info->default_txportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE;
	info->default_rxportconf.nb_queues = HNS3_DEFAULT_PORT_CONF_QUEUES_NUM;
	info->default_txportconf.nb_queues = HNS3_DEFAULT_PORT_CONF_QUEUES_NUM;
	info->default_rxportconf.ring_size = HNS3_DEFAULT_RING_DESC;
	info->default_txportconf.ring_size = HNS3_DEFAULT_RING_DESC;

	return 0;
}
2806
2807static int
2808hns3_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version,
2809                    size_t fw_size)
2810{
2811        struct hns3_adapter *hns = eth_dev->data->dev_private;
2812        struct hns3_hw *hw = &hns->hw;
2813        uint32_t version = hw->fw_version;
2814        int ret;
2815
2816        ret = snprintf(fw_version, fw_size, "%lu.%lu.%lu.%lu",
2817                       hns3_get_field(version, HNS3_FW_VERSION_BYTE3_M,
2818                                      HNS3_FW_VERSION_BYTE3_S),
2819                       hns3_get_field(version, HNS3_FW_VERSION_BYTE2_M,
2820                                      HNS3_FW_VERSION_BYTE2_S),
2821                       hns3_get_field(version, HNS3_FW_VERSION_BYTE1_M,
2822                                      HNS3_FW_VERSION_BYTE1_S),
2823                       hns3_get_field(version, HNS3_FW_VERSION_BYTE0_M,
2824                                      HNS3_FW_VERSION_BYTE0_S));
2825        if (ret < 0)
2826                return -EINVAL;
2827
2828        ret += 1; /* add the size of '\0' */
2829        if (fw_size < (size_t)ret)
2830                return ret;
2831        else
2832                return 0;
2833}
2834
2835static int
2836hns3_update_port_link_info(struct rte_eth_dev *eth_dev)
2837{
2838        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
2839        int ret;
2840
2841        (void)hns3_update_link_status(hw);
2842
2843        ret = hns3_update_link_info(eth_dev);
2844        if (ret)
2845                hw->mac.link_status = ETH_LINK_DOWN;
2846
2847        return ret;
2848}
2849
2850static void
2851hns3_setup_linkstatus(struct rte_eth_dev *eth_dev,
2852                      struct rte_eth_link *new_link)
2853{
2854        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
2855        struct hns3_mac *mac = &hw->mac;
2856
2857        switch (mac->link_speed) {
2858        case ETH_SPEED_NUM_10M:
2859        case ETH_SPEED_NUM_100M:
2860        case ETH_SPEED_NUM_1G:
2861        case ETH_SPEED_NUM_10G:
2862        case ETH_SPEED_NUM_25G:
2863        case ETH_SPEED_NUM_40G:
2864        case ETH_SPEED_NUM_50G:
2865        case ETH_SPEED_NUM_100G:
2866        case ETH_SPEED_NUM_200G:
2867                if (mac->link_status)
2868                        new_link->link_speed = mac->link_speed;
2869                break;
2870        default:
2871                if (mac->link_status)
2872                        new_link->link_speed = ETH_SPEED_NUM_UNKNOWN;
2873                break;
2874        }
2875
2876        if (!mac->link_status)
2877                new_link->link_speed = ETH_SPEED_NUM_NONE;
2878
2879        new_link->link_duplex = mac->link_duplex;
2880        new_link->link_status = mac->link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
2881        new_link->link_autoneg = mac->link_autoneg;
2882}
2883
/*
 * .link_update ethdev callback: refresh and report the port link status.
 * When wait_to_complete is set, poll until the link comes up or the retry
 * budget is exhausted; otherwise report the current state immediately.
 * A stopped port is always reported as link down.
 */
static int
hns3_dev_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete)
{
#define HNS3_LINK_CHECK_INTERVAL 100  /* 100ms */
#define HNS3_MAX_LINK_CHECK_TIMES 20  /* 2s (20 * 100ms) in total */

	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	uint32_t retry_cnt = HNS3_MAX_LINK_CHECK_TIMES;
	struct hns3_mac *mac = &hw->mac;
	struct rte_eth_link new_link;
	int ret;

	/* When port is stopped, report link down. */
	if (eth_dev->data->dev_started == 0) {
		new_link.link_autoneg = mac->link_autoneg;
		new_link.link_duplex = mac->link_duplex;
		new_link.link_speed = ETH_SPEED_NUM_NONE;
		new_link.link_status = ETH_LINK_DOWN;
		goto out;
	}

	/*
	 * NOTE(review): the do-while with post-decrement polls up to
	 * HNS3_MAX_LINK_CHECK_TIMES + 1 times — presumably acceptable slack;
	 * confirm if the budget is meant to be exact.
	 */
	do {
		ret = hns3_update_port_link_info(eth_dev);
		if (ret) {
			hns3_err(hw, "failed to get port link info, ret = %d.",
				 ret);
			break;
		}

		if (!wait_to_complete || mac->link_status == ETH_LINK_UP)
			break;

		rte_delay_ms(HNS3_LINK_CHECK_INTERVAL);
	} while (retry_cnt--);

	memset(&new_link, 0, sizeof(new_link));
	hns3_setup_linkstatus(eth_dev, &new_link);

out:
	return rte_eth_linkstatus_set(eth_dev, &new_link);
}
2925
2926static int
2927hns3_parse_func_status(struct hns3_hw *hw, struct hns3_func_status_cmd *status)
2928{
2929        struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
2930        struct hns3_pf *pf = &hns->pf;
2931
2932        if (!(status->pf_state & HNS3_PF_STATE_DONE))
2933                return -EINVAL;
2934
2935        pf->is_main_pf = (status->pf_state & HNS3_PF_STATE_MAIN) ? true : false;
2936
2937        return 0;
2938}
2939
2940static int
2941hns3_query_function_status(struct hns3_hw *hw)
2942{
2943#define HNS3_QUERY_MAX_CNT              10
2944#define HNS3_QUERY_SLEEP_MSCOEND        1
2945        struct hns3_func_status_cmd *req;
2946        struct hns3_cmd_desc desc;
2947        int timeout = 0;
2948        int ret;
2949
2950        hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_FUNC_STATUS, true);
2951        req = (struct hns3_func_status_cmd *)desc.data;
2952
2953        do {
2954                ret = hns3_cmd_send(hw, &desc, 1);
2955                if (ret) {
2956                        PMD_INIT_LOG(ERR, "query function status failed %d",
2957                                     ret);
2958                        return ret;
2959                }
2960
2961                /* Check pf reset is done */
2962                if (req->pf_state)
2963                        break;
2964
2965                rte_delay_ms(HNS3_QUERY_SLEEP_MSCOEND);
2966        } while (timeout++ < HNS3_QUERY_MAX_CNT);
2967
2968        return hns3_parse_func_status(hw, req);
2969}
2970
/*
 * Determine the maximum number of task queue pairs (tqps) this PF may use,
 * based on the tqp configuration mode: either capped by the build-time
 * RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF value, or by the HIP08 hardware limit.
 * The result is stored in hw->tqps_num.
 */
static int
hns3_get_pf_max_tqp_num(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;

	if (pf->tqp_config_mode == HNS3_FLEX_MAX_TQP_NUM_MODE) {
		/*
		 * The total_tqps_num obtained from firmware is maximum tqp
		 * numbers of this port, which should be used for PF and VFs.
		 * There is no need for pf to have so many tqp numbers in
		 * most cases. RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF,
		 * coming from config file, is assigned to maximum queue number
		 * for the PF of this port by user. So users can modify the
		 * maximum queue number of PF according to their own application
		 * scenarios, which is more flexible to use. In addition, many
		 * memories can be saved due to allocating queue statistics
		 * room according to the actual number of queues required. The
		 * maximum queue number of PF for network engine with
		 * revision_id greater than 0x30 is assigned by config file.
		 */
		if (RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF <= 0) {
			hns3_err(hw, "RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF(%d) "
				 "must be greater than 0.",
				 RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF);
			return -EINVAL;
		}

		hw->tqps_num = RTE_MIN(RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF,
				       hw->total_tqps_num);
	} else {
		/*
		 * Due to the limitation on the number of PF interrupts
		 * available, the maximum queue number assigned to PF on
		 * the network engine with revision_id 0x21 is 64.
		 */
		hw->tqps_num = RTE_MIN(hw->total_tqps_num,
				       HNS3_MAX_TQP_NUM_HIP08_PF);
	}

	return 0;
}
3013
/*
 * Query PF resources (tqp counts, buffer sizes, MSI vector count) from
 * firmware via HNS3_OPC_QUERY_PF_RSRC and populate hw/pf fields.
 *
 * Returns 0 on success, otherwise the command-send or tqp-sizing error.
 */
static int
hns3_query_pf_resource(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	struct hns3_pf_res_cmd *req;
	struct hns3_cmd_desc desc;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_PF_RSRC, true);
	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		PMD_INIT_LOG(ERR, "query pf resource failed %d", ret);
		return ret;
	}

	/* Total tqps = base tqps + extended tqps reported by firmware. */
	req = (struct hns3_pf_res_cmd *)desc.data;
	hw->total_tqps_num = rte_le_to_cpu_16(req->tqp_num) +
			     rte_le_to_cpu_16(req->ext_tqp_num);
	ret = hns3_get_pf_max_tqp_num(hw);
	if (ret)
		return ret;

	pf->pkt_buf_size = rte_le_to_cpu_16(req->buf_size) << HNS3_BUF_UNIT_S;
	pf->func_num = rte_le_to_cpu_16(req->pf_own_fun_number);

	/* Fall back to the driver default when firmware reports no Tx buf. */
	if (req->tx_buf_size)
		pf->tx_buf_size =
		    rte_le_to_cpu_16(req->tx_buf_size) << HNS3_BUF_UNIT_S;
	else
		pf->tx_buf_size = HNS3_DEFAULT_TX_BUF;

	pf->tx_buf_size = roundup(pf->tx_buf_size, HNS3_BUF_SIZE_UNIT);

	/* Same fallback scheme for the DV (delay-variation) buffer. */
	if (req->dv_buf_size)
		pf->dv_buf_size =
		    rte_le_to_cpu_16(req->dv_buf_size) << HNS3_BUF_UNIT_S;
	else
		pf->dv_buf_size = HNS3_DEFAULT_DV;

	pf->dv_buf_size = roundup(pf->dv_buf_size, HNS3_BUF_SIZE_UNIT);

	/* Number of NIC MSI-X vectors available to this PF. */
	hw->num_msi =
		hns3_get_field(rte_le_to_cpu_16(req->nic_pf_intr_vector_number),
			       HNS3_PF_VEC_NUM_M, HNS3_PF_VEC_NUM_S);

	return 0;
}
3062
/*
 * Unpack the board configuration read from flash (two command descriptors)
 * into a struct hns3_cfg: TC count, descriptor count, PHY address, media
 * type, Rx buffer length, MAC address, default speed, RSS sizing, NUMA map,
 * speed ability and UMV table space.
 */
static void
hns3_parse_cfg(struct hns3_cfg *cfg, struct hns3_cmd_desc *desc)
{
	struct hns3_cfg_param_cmd *req;
	uint64_t mac_addr_tmp_high;
	uint8_t ext_rss_size_max;
	uint64_t mac_addr_tmp;
	uint32_t i;

	req = (struct hns3_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->tc_num = hns3_get_field(rte_le_to_cpu_32(req->param[0]),
				     HNS3_CFG_TC_NUM_M, HNS3_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hns3_get_field(rte_le_to_cpu_32(req->param[0]),
					   HNS3_CFG_TQP_DESC_N_M,
					   HNS3_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hns3_get_field(rte_le_to_cpu_32(req->param[1]),
				       HNS3_CFG_PHY_ADDR_M,
				       HNS3_CFG_PHY_ADDR_S);
	cfg->media_type = hns3_get_field(rte_le_to_cpu_32(req->param[1]),
					 HNS3_CFG_MEDIA_TP_M,
					 HNS3_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hns3_get_field(rte_le_to_cpu_32(req->param[1]),
					 HNS3_CFG_RX_BUF_LEN_M,
					 HNS3_CFG_RX_BUF_LEN_S);
	/* get mac address */
	mac_addr_tmp = rte_le_to_cpu_32(req->param[2]);
	mac_addr_tmp_high = hns3_get_field(rte_le_to_cpu_32(req->param[3]),
					   HNS3_CFG_MAC_ADDR_H_M,
					   HNS3_CFG_MAC_ADDR_H_S);

	/*
	 * Merge the high 16 MAC bits above the low 32; the shift is split
	 * as (<< 31 << 1) — presumably to sidestep shift-width warnings on
	 * some compilers rather than for a semantic reason.
	 */
	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hns3_get_field(rte_le_to_cpu_32(req->param[3]),
					    HNS3_CFG_DEFAULT_SPEED_M,
					    HNS3_CFG_DEFAULT_SPEED_S);
	cfg->rss_size_max = hns3_get_field(rte_le_to_cpu_32(req->param[3]),
					   HNS3_CFG_RSS_SIZE_M,
					   HNS3_CFG_RSS_SIZE_S);

	/* Store the MAC address byte by byte, least-significant byte first. */
	for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hns3_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = rte_le_to_cpu_32(req->param[0]);

	cfg->speed_ability = hns3_get_field(rte_le_to_cpu_32(req->param[1]),
					    HNS3_CFG_SPEED_ABILITY_M,
					    HNS3_CFG_SPEED_ABILITY_S);
	cfg->umv_space = hns3_get_field(rte_le_to_cpu_32(req->param[1]),
					HNS3_CFG_UMV_TBL_SPACE_M,
					HNS3_CFG_UMV_TBL_SPACE_S);
	if (!cfg->umv_space)
		cfg->umv_space = HNS3_DEFAULT_UMV_SPACE_PER_PF;

	ext_rss_size_max = hns3_get_field(rte_le_to_cpu_32(req->param[2]),
					       HNS3_CFG_EXT_RSS_SIZE_M,
					       HNS3_CFG_EXT_RSS_SIZE_S);
	/*
	 * Field ext_rss_size_max obtained from firmware will be more flexible
	 * for future changes and expansions, which is an exponent of 2, instead
	 * of reading out directly. If this field is not zero, hns3 PF PMD
	 * driver uses it as rss_size_max under one TC. Device, whose revision
	 * id is greater than or equal to PCI_REVISION_ID_HIP09_A, obtains the
	 * maximum number of queues supported under a TC through this field.
	 */
	if (ext_rss_size_max)
		cfg->rss_size_max = 1U << ext_rss_size_max;
}
3134
/* hns3_get_board_cfg: query the static parameter from NCL_config file in flash
 * @hw: pointer to struct hns3_hw
 * @hcfg: the config structure to be filled in
 *
 * Reads HNS3_PF_CFG_DESC_NUM descriptors' worth of configuration in one
 * command and parses them via hns3_parse_cfg().
 */
static int
hns3_get_board_cfg(struct hns3_hw *hw, struct hns3_cfg *hcfg)
{
	struct hns3_cmd_desc desc[HNS3_PF_CFG_DESC_NUM];
	struct hns3_cfg_param_cmd *req;
	uint32_t offset;
	uint32_t i;
	int ret;

	/* Each descriptor requests one HNS3_CFG_RD_LEN_BYTES-sized chunk. */
	for (i = 0; i < HNS3_PF_CFG_DESC_NUM; i++) {
		offset = 0;
		req = (struct hns3_cfg_param_cmd *)desc[i].data;
		hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_GET_CFG_PARAM,
					  true);
		hns3_set_field(offset, HNS3_CFG_OFFSET_M, HNS3_CFG_OFFSET_S,
			       i * HNS3_CFG_RD_LEN_BYTES);
		/* Len should be divided by 4 when send to hardware */
		hns3_set_field(offset, HNS3_CFG_RD_LEN_M, HNS3_CFG_RD_LEN_S,
			       HNS3_CFG_RD_LEN_BYTES / HNS3_CFG_RD_LEN_UNIT);
		req->offset = rte_cpu_to_le_32(offset);
	}

	ret = hns3_cmd_send(hw, desc, HNS3_PF_CFG_DESC_NUM);
	if (ret) {
		PMD_INIT_LOG(ERR, "get config failed %d.", ret);
		return ret;
	}

	hns3_parse_cfg(hcfg, desc);

	return 0;
}
3171
3172static int
3173hns3_parse_speed(int speed_cmd, uint32_t *speed)
3174{
3175        switch (speed_cmd) {
3176        case HNS3_CFG_SPEED_10M:
3177                *speed = ETH_SPEED_NUM_10M;
3178                break;
3179        case HNS3_CFG_SPEED_100M:
3180                *speed = ETH_SPEED_NUM_100M;
3181                break;
3182        case HNS3_CFG_SPEED_1G:
3183                *speed = ETH_SPEED_NUM_1G;
3184                break;
3185        case HNS3_CFG_SPEED_10G:
3186                *speed = ETH_SPEED_NUM_10G;
3187                break;
3188        case HNS3_CFG_SPEED_25G:
3189                *speed = ETH_SPEED_NUM_25G;
3190                break;
3191        case HNS3_CFG_SPEED_40G:
3192                *speed = ETH_SPEED_NUM_40G;
3193                break;
3194        case HNS3_CFG_SPEED_50G:
3195                *speed = ETH_SPEED_NUM_50G;
3196                break;
3197        case HNS3_CFG_SPEED_100G:
3198                *speed = ETH_SPEED_NUM_100G;
3199                break;
3200        case HNS3_CFG_SPEED_200G:
3201                *speed = ETH_SPEED_NUM_200G;
3202                break;
3203        default:
3204                return -EINVAL;
3205        }
3206
3207        return 0;
3208}
3209
/*
 * Fill in compile-time default device specifications; used for devices whose
 * firmware does not support the query-dev-specs command (revision earlier
 * than HIP09 — see the caller hns3_get_capability()).
 */
static void
hns3_set_default_dev_specifications(struct hns3_hw *hw)
{
	hw->max_non_tso_bd_num = HNS3_MAX_NON_TSO_BD_PER_PKT;
	hw->rss_ind_tbl_size = HNS3_RSS_IND_TBL_SIZE;
	hw->rss_key_size = HNS3_RSS_KEY_SIZE;
	hw->max_tm_rate = HNS3_ETHER_MAX_RATE;
	/* Presumably "no interrupt QL limit" sentinel — name suggests so. */
	hw->intr.int_ql_max = HNS3_INTR_QL_NONE;
}
3219
/*
 * Parse the device specifications returned by the query-dev-specs command
 * into @hw. Only the first descriptor (specs_0) is consumed here; multi-byte
 * fields are converted from little-endian wire order to host order.
 */
static void
hns3_parse_dev_specifications(struct hns3_hw *hw, struct hns3_cmd_desc *desc)
{
	struct hns3_dev_specs_0_cmd *req0;

	req0 = (struct hns3_dev_specs_0_cmd *)desc[0].data;

	/* Single-byte field, no byte swap needed. */
	hw->max_non_tso_bd_num = req0->max_non_tso_bd_num;
	hw->rss_ind_tbl_size = rte_le_to_cpu_16(req0->rss_ind_tbl_size);
	hw->rss_key_size = rte_le_to_cpu_16(req0->rss_key_size);
	hw->max_tm_rate = rte_le_to_cpu_32(req0->max_tm_rate);
	hw->intr.int_ql_max = rte_le_to_cpu_16(req0->intr_ql_max);
}
3233
3234static int
3235hns3_check_dev_specifications(struct hns3_hw *hw)
3236{
3237        if (hw->rss_ind_tbl_size == 0 ||
3238            hw->rss_ind_tbl_size > HNS3_RSS_IND_TBL_SIZE_MAX) {
3239                hns3_err(hw, "the size of hash lookup table configured (%u)"
3240                              " exceeds the maximum(%u)", hw->rss_ind_tbl_size,
3241                              HNS3_RSS_IND_TBL_SIZE_MAX);
3242                return -EINVAL;
3243        }
3244
3245        return 0;
3246}
3247
3248static int
3249hns3_query_dev_specifications(struct hns3_hw *hw)
3250{
3251        struct hns3_cmd_desc desc[HNS3_QUERY_DEV_SPECS_BD_NUM];
3252        int ret;
3253        int i;
3254
3255        for (i = 0; i < HNS3_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
3256                hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS,
3257                                          true);
3258                desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
3259        }
3260        hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS, true);
3261
3262        ret = hns3_cmd_send(hw, desc, HNS3_QUERY_DEV_SPECS_BD_NUM);
3263        if (ret)
3264                return ret;
3265
3266        hns3_parse_dev_specifications(hw, desc);
3267
3268        return hns3_check_dev_specifications(hw);
3269}
3270
/*
 * Detect device capabilities and configure per-revision operating modes.
 *
 * DCB support is keyed off the PCI device ID; everything else is keyed off
 * the PCI revision ID: pre-HIP09 devices get compile-time default specs and
 * software-assisted modes, HIP09+ devices query their specs from firmware and
 * use the hardware-assisted modes.
 *
 * Return: 0 on success; -EIO if the PCI revision ID cannot be read, or a
 * negative error code from the dev-specs query.
 */
static int
hns3_get_capability(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct rte_pci_device *pci_dev;
	struct hns3_pf *pf = &hns->pf;
	struct rte_eth_dev *eth_dev;
	uint16_t device_id;
	uint8_t revision;
	int ret;

	eth_dev = &rte_eth_devices[hw->data->port_id];
	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	device_id = pci_dev->id.device_id;

	/* Only the RDMA-capable variants advertise DCB support. */
	if (device_id == HNS3_DEV_ID_25GE_RDMA ||
	    device_id == HNS3_DEV_ID_50GE_RDMA ||
	    device_id == HNS3_DEV_ID_100G_RDMA_MACSEC ||
	    device_id == HNS3_DEV_ID_200G_RDMA)
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_DCB_B, 1);

	/* Get PCI revision id */
	ret = rte_pci_read_config(pci_dev, &revision, HNS3_PCI_REVISION_ID_LEN,
				  HNS3_PCI_REVISION_ID);
	if (ret != HNS3_PCI_REVISION_ID_LEN) {
		PMD_INIT_LOG(ERR, "failed to read pci revision id, ret = %d",
			     ret);
		return -EIO;
	}
	hw->revision = revision;

	/* Pre-HIP09 hardware: fixed defaults plus software-assisted modes. */
	if (revision < PCI_REVISION_ID_HIP09_A) {
		hns3_set_default_dev_specifications(hw);
		hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_RSV_ONE;
		hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_2US;
		hw->tso_mode = HNS3_TSO_SW_CAL_PSEUDO_H_CSUM;
		hw->vlan_mode = HNS3_SW_SHIFT_AND_DISCARD_MODE;
		hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE1;
		hw->min_tx_pkt_len = HNS3_HIP08_MIN_TX_PKT_LEN;
		pf->tqp_config_mode = HNS3_FIXED_MAX_TQP_NUM_MODE;
		hw->rss_info.ipv6_sctp_offload_supported = false;
		hw->udp_cksum_mode = HNS3_SPECIAL_PORT_SW_CKSUM_MODE;
		pf->support_multi_tc_pause = false;
		return 0;
	}

	/* HIP09 and later: specifications are reported by firmware. */
	ret = hns3_query_dev_specifications(hw);
	if (ret) {
		PMD_INIT_LOG(ERR,
			     "failed to query dev specifications, ret = %d",
			     ret);
		return ret;
	}

	hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_ALL;
	hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_1US;
	hw->tso_mode = HNS3_TSO_HW_CAL_PSEUDO_H_CSUM;
	hw->vlan_mode = HNS3_HW_SHIFT_AND_DISCARD_MODE;
	hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE2;
	hw->min_tx_pkt_len = HNS3_HIP09_MIN_TX_PKT_LEN;
	pf->tqp_config_mode = HNS3_FLEX_MAX_TQP_NUM_MODE;
	hw->rss_info.ipv6_sctp_offload_supported = true;
	hw->udp_cksum_mode = HNS3_SPECIAL_PORT_HW_CKSUM_MODE;
	pf->support_multi_tc_pause = true;

	return 0;
}
3338
3339static int
3340hns3_check_media_type(struct hns3_hw *hw, uint8_t media_type)
3341{
3342        int ret;
3343
3344        switch (media_type) {
3345        case HNS3_MEDIA_TYPE_COPPER:
3346                if (!hns3_dev_copper_supported(hw)) {
3347                        PMD_INIT_LOG(ERR,
3348                                     "Media type is copper, not supported.");
3349                        ret = -EOPNOTSUPP;
3350                } else {
3351                        ret = 0;
3352                }
3353                break;
3354        case HNS3_MEDIA_TYPE_FIBER:
3355                ret = 0;
3356                break;
3357        case HNS3_MEDIA_TYPE_BACKPLANE:
3358                PMD_INIT_LOG(ERR, "Media type is Backplane, not supported.");
3359                ret = -EOPNOTSUPP;
3360                break;
3361        default:
3362                PMD_INIT_LOG(ERR, "Unknown media type = %u!", media_type);
3363                ret = -EINVAL;
3364                break;
3365        }
3366
3367        return ret;
3368}
3369
/*
 * Read the board configuration from flash and apply it to the hw/pf state:
 * media type, RSS sizing, MAC address, descriptor counts, link speed, and
 * the TC/PFC limits (clamped when DCB is unsupported).
 *
 * Return: 0 on success, a negative error code on query/validation failure.
 */
static int
hns3_get_board_configuration(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	struct hns3_cfg cfg;
	int ret;

	ret = hns3_get_board_cfg(hw, &cfg);
	if (ret) {
		PMD_INIT_LOG(ERR, "get board config failed %d", ret);
		return ret;
	}

	ret = hns3_check_media_type(hw, cfg.media_type);
	if (ret)
		return ret;

	hw->mac.media_type = cfg.media_type;
	hw->rss_size_max = cfg.rss_size_max;
	hw->rss_dis_flag = false;
	memcpy(hw->mac.mac_addr, cfg.mac_addr, RTE_ETHER_ADDR_LEN);
	hw->mac.phy_addr = cfg.phy_addr;
	hw->mac.default_addr_setted = false;
	/* RX and TX rings share the per-TQP descriptor count from flash. */
	hw->num_tx_desc = cfg.tqp_desc_num;
	hw->num_rx_desc = cfg.tqp_desc_num;
	hw->dcb_info.num_pg = 1;
	hw->dcb_info.hw_pfc_map = 0;

	ret = hns3_parse_speed(cfg.default_speed, &hw->mac.link_speed);
	if (ret) {
		PMD_INIT_LOG(ERR, "Get wrong speed %u, ret = %d",
			     cfg.default_speed, ret);
		return ret;
	}

	/* Fall back to a single TC if flash reports an out-of-range count. */
	pf->tc_max = cfg.tc_num;
	if (pf->tc_max > HNS3_MAX_TC_NUM || pf->tc_max < 1) {
		PMD_INIT_LOG(WARNING,
			     "Get TC num(%u) from flash, set TC num to 1",
			     pf->tc_max);
		pf->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hns3_dev_dcb_supported(hw)) {
		pf->tc_max = 1;
		pf->pfc_max = 0;
	} else
		pf->pfc_max = pf->tc_max;

	/* Start with one active TC; DCB configuration may raise it later. */
	hw->dcb_info.num_tc = 1;
	hw->alloc_rss_size = RTE_MIN(hw->rss_size_max,
				     hw->tqps_num / hw->dcb_info.num_tc);
	hns3_set_bit(hw->hw_tc_map, 0, 1);
	pf->tx_sch_mode = HNS3_FLAG_TC_BASE_SCH_MODE;

	pf->wanted_umv_size = cfg.umv_space;

	return ret;
}
3431
/*
 * Top-level configuration query sequence used during init: function status,
 * device capabilities, PF resources, board configuration, then FEC info.
 *
 * Note: a FEC query failure is logged and its error code returned, but no
 * earlier state is rolled back here.
 *
 * Return: 0 on success, a negative error code from the first failing step.
 */
static int
hns3_get_configuration(struct hns3_hw *hw)
{
	int ret;

	ret = hns3_query_function_status(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to query function status: %d.", ret);
		return ret;
	}

	/* Get device capability */
	ret = hns3_get_capability(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "failed to get device capability: %d.", ret);
		return ret;
	}

	/* Get pf resource */
	ret = hns3_query_pf_resource(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to query pf resource: %d", ret);
		return ret;
	}

	ret = hns3_get_board_configuration(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "failed to get board configuration: %d", ret);
		return ret;
	}

	ret = hns3_query_dev_fec_info(hw);
	if (ret)
		PMD_INIT_LOG(ERR,
			     "failed to query FEC information, ret = %d", ret);

	return ret;
}
3470
3471static int
3472hns3_map_tqps_to_func(struct hns3_hw *hw, uint16_t func_id, uint16_t tqp_pid,
3473                      uint16_t tqp_vid, bool is_pf)
3474{
3475        struct hns3_tqp_map_cmd *req;
3476        struct hns3_cmd_desc desc;
3477        int ret;
3478
3479        hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_SET_TQP_MAP, false);
3480
3481        req = (struct hns3_tqp_map_cmd *)desc.data;
3482        req->tqp_id = rte_cpu_to_le_16(tqp_pid);
3483        req->tqp_vf = func_id;
3484        req->tqp_flag = 1 << HNS3_TQP_MAP_EN_B;
3485        if (!is_pf)
3486                req->tqp_flag |= (1 << HNS3_TQP_MAP_TYPE_B);
3487        req->tqp_vid = rte_cpu_to_le_16(tqp_vid);
3488
3489        ret = hns3_cmd_send(hw, &desc, 1);
3490        if (ret)
3491                PMD_INIT_LOG(ERR, "TQP map failed %d", ret);
3492
3493        return ret;
3494}
3495
3496static int
3497hns3_map_tqp(struct hns3_hw *hw)
3498{
3499        int ret;
3500        int i;
3501
3502        /*
3503         * In current version, VF is not supported when PF is driven by DPDK
3504         * driver, so we assign total tqps_num tqps allocated to this port
3505         * to PF.
3506         */
3507        for (i = 0; i < hw->total_tqps_num; i++) {
3508                ret = hns3_map_tqps_to_func(hw, HNS3_PF_FUNC_ID, i, i, true);
3509                if (ret)
3510                        return ret;
3511        }
3512
3513        return 0;
3514}
3515
3516static int
3517hns3_cfg_mac_speed_dup_hw(struct hns3_hw *hw, uint32_t speed, uint8_t duplex)
3518{
3519        struct hns3_config_mac_speed_dup_cmd *req;
3520        struct hns3_cmd_desc desc;
3521        int ret;
3522
3523        req = (struct hns3_config_mac_speed_dup_cmd *)desc.data;
3524
3525        hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_SPEED_DUP, false);
3526
3527        hns3_set_bit(req->speed_dup, HNS3_CFG_DUPLEX_B, !!duplex ? 1 : 0);
3528
3529        switch (speed) {
3530        case ETH_SPEED_NUM_10M:
3531                hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
3532                               HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_10M);
3533                break;
3534        case ETH_SPEED_NUM_100M:
3535                hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
3536                               HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_100M);
3537                break;
3538        case ETH_SPEED_NUM_1G:
3539                hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
3540                               HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_1G);
3541                break;
3542        case ETH_SPEED_NUM_10G:
3543                hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
3544                               HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_10G);
3545                break;
3546        case ETH_SPEED_NUM_25G:
3547                hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
3548                               HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_25G);
3549                break;
3550        case ETH_SPEED_NUM_40G:
3551                hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
3552                               HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_40G);
3553                break;
3554        case ETH_SPEED_NUM_50G:
3555                hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
3556                               HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_50G);
3557                break;
3558        case ETH_SPEED_NUM_100G:
3559                hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
3560                               HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_100G);
3561                break;
3562        case ETH_SPEED_NUM_200G:
3563                hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
3564                               HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_200G);
3565                break;
3566        default:
3567                PMD_INIT_LOG(ERR, "invalid speed (%u)", speed);
3568                return -EINVAL;
3569        }
3570
3571        hns3_set_bit(req->mac_change_fec_en, HNS3_CFG_MAC_SPEED_CHANGE_EN_B, 1);
3572
3573        ret = hns3_cmd_send(hw, &desc, 1);
3574        if (ret)
3575                PMD_INIT_LOG(ERR, "mac speed/duplex config cmd failed %d", ret);
3576
3577        return ret;
3578}
3579
3580static int
3581hns3_tx_buffer_calc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
3582{
3583        struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
3584        struct hns3_pf *pf = &hns->pf;
3585        struct hns3_priv_buf *priv;
3586        uint32_t i, total_size;
3587
3588        total_size = pf->pkt_buf_size;
3589
3590        /* alloc tx buffer for all enabled tc */
3591        for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
3592                priv = &buf_alloc->priv_buf[i];
3593
3594                if (hw->hw_tc_map & BIT(i)) {
3595                        if (total_size < pf->tx_buf_size)
3596                                return -ENOMEM;
3597
3598                        priv->tx_buf_size = pf->tx_buf_size;
3599                } else
3600                        priv->tx_buf_size = 0;
3601
3602                total_size -= priv->tx_buf_size;
3603        }
3604
3605        return 0;
3606}
3607
3608static int
3609hns3_tx_buffer_alloc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
3610{
3611/* TX buffer size is unit by 128 byte */
3612#define HNS3_BUF_SIZE_UNIT_SHIFT        7
3613#define HNS3_BUF_SIZE_UPDATE_EN_MSK     BIT(15)
3614        struct hns3_tx_buff_alloc_cmd *req;
3615        struct hns3_cmd_desc desc;
3616        uint32_t buf_size;
3617        uint32_t i;
3618        int ret;
3619
3620        req = (struct hns3_tx_buff_alloc_cmd *)desc.data;
3621
3622        hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TX_BUFF_ALLOC, 0);
3623        for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
3624                buf_size = buf_alloc->priv_buf[i].tx_buf_size;
3625
3626                buf_size = buf_size >> HNS3_BUF_SIZE_UNIT_SHIFT;
3627                req->tx_pkt_buff[i] = rte_cpu_to_le_16(buf_size |
3628                                                HNS3_BUF_SIZE_UPDATE_EN_MSK);
3629        }
3630
3631        ret = hns3_cmd_send(hw, &desc, 1);
3632        if (ret)
3633                PMD_INIT_LOG(ERR, "tx buffer alloc cmd failed %d", ret);
3634
3635        return ret;
3636}
3637
3638static int
3639hns3_get_tc_num(struct hns3_hw *hw)
3640{
3641        int cnt = 0;
3642        uint8_t i;
3643
3644        for (i = 0; i < HNS3_MAX_TC_NUM; i++)
3645                if (hw->hw_tc_map & BIT(i))
3646                        cnt++;
3647        return cnt;
3648}
3649
3650static uint32_t
3651hns3_get_rx_priv_buff_alloced(struct hns3_pkt_buf_alloc *buf_alloc)
3652{
3653        struct hns3_priv_buf *priv;
3654        uint32_t rx_priv = 0;
3655        int i;
3656
3657        for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
3658                priv = &buf_alloc->priv_buf[i];
3659                if (priv->enable)
3660                        rx_priv += priv->buf_size;
3661        }
3662        return rx_priv;
3663}
3664
3665static uint32_t
3666hns3_get_tx_buff_alloced(struct hns3_pkt_buf_alloc *buf_alloc)
3667{
3668        uint32_t total_tx_size = 0;
3669        uint32_t i;
3670
3671        for (i = 0; i < HNS3_MAX_TC_NUM; i++)
3672                total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
3673
3674        return total_tx_size;
3675}
3676
3677/* Get the number of pfc enabled TCs, which have private buffer */
3678static int
3679hns3_get_pfc_priv_num(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
3680{
3681        struct hns3_priv_buf *priv;
3682        int cnt = 0;
3683        uint8_t i;
3684
3685        for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
3686                priv = &buf_alloc->priv_buf[i];
3687                if ((hw->dcb_info.hw_pfc_map & BIT(i)) && priv->enable)
3688                        cnt++;
3689        }
3690
3691        return cnt;
3692}
3693
3694/* Get the number of pfc disabled TCs, which have private buffer */
3695static int
3696hns3_get_no_pfc_priv_num(struct hns3_hw *hw,
3697                         struct hns3_pkt_buf_alloc *buf_alloc)
3698{
3699        struct hns3_priv_buf *priv;
3700        int cnt = 0;
3701        uint8_t i;
3702
3703        for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
3704                priv = &buf_alloc->priv_buf[i];
3705                if (hw->hw_tc_map & BIT(i) &&
3706                    !(hw->dcb_info.hw_pfc_map & BIT(i)) && priv->enable)
3707                        cnt++;
3708        }
3709
3710        return cnt;
3711}
3712
/*
 * Check whether the remaining RX buffer (@rx_all) can hold the already
 * computed private buffers plus a minimum shared buffer; if so, size the
 * shared buffer and compute its self and per-TC high/low watermarks.
 *
 * Returns true and fills buf_alloc->s_buf on success; false when @rx_all is
 * too small (buf_alloc->s_buf is then left untouched).
 */
static bool
hns3_is_rx_buf_ok(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc,
		  uint32_t rx_all)
{
	uint32_t shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	uint32_t shared_buf, aligned_mps;
	uint32_t rx_priv;
	uint8_t tc_num;
	uint8_t i;

	tc_num = hns3_get_tc_num(hw);
	aligned_mps = roundup(pf->mps, HNS3_BUF_SIZE_UNIT);

	/* Minimum shared buffer differs between DCB and non-DCB devices. */
	if (hns3_dev_dcb_supported(hw))
		shared_buf_min = HNS3_BUF_MUL_BY * aligned_mps +
					pf->dv_buf_size;
	else
		shared_buf_min = aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF
					+ pf->dv_buf_size;

	/* One aligned MPS per enabled TC, plus one extra. */
	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
	shared_std = roundup(RTE_MAX(shared_buf_min, shared_buf_tc),
			     HNS3_BUF_SIZE_UNIT);

	rx_priv = hns3_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all < rx_priv + shared_std)
		return false;

	/* Everything left after the private buffers becomes shared buffer. */
	shared_buf = rounddown(rx_all - rx_priv, HNS3_BUF_SIZE_UNIT);
	buf_alloc->s_buf.buf_size = shared_buf;
	if (hns3_dev_dcb_supported(hw)) {
		buf_alloc->s_buf.self.high = shared_buf - pf->dv_buf_size;
		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
			- roundup(aligned_mps / HNS3_BUF_DIV_BY,
				  HNS3_BUF_SIZE_UNIT);
	} else {
		buf_alloc->s_buf.self.high =
			aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF;
		buf_alloc->s_buf.self.low = aligned_mps;
	}

	/* Per-TC thresholds: shared across all TCs on DCB devices, with a
	 * reserve percentage applied when few TCs are enabled.
	 */
	if (hns3_dev_dcb_supported(hw)) {
		hi_thrd = shared_buf - pf->dv_buf_size;

		if (tc_num <= NEED_RESERVE_TC_NUM)
			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT /
				  BUF_MAX_PERCENT;

		if (tc_num)
			hi_thrd = hi_thrd / tc_num;

		hi_thrd = RTE_MAX(hi_thrd, HNS3_BUF_MUL_BY * aligned_mps);
		hi_thrd = rounddown(hi_thrd, HNS3_BUF_SIZE_UNIT);
		lo_thrd = hi_thrd - aligned_mps / HNS3_BUF_DIV_BY;
	} else {
		hi_thrd = aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF;
		lo_thrd = aligned_mps;
	}

	for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
	}

	return true;
}
3781
/*
 * Compute an RX private-buffer plan for every TC and verify it fits.
 * @max selects the generous watermark profile (true) or the minimal one
 * (false); the caller tries @max first and retries with the smaller profile
 * if the buffer does not fit.
 *
 * Returns hns3_is_rx_buf_ok()'s verdict for the computed plan.
 */
static bool
hns3_rx_buf_calc_all(struct hns3_hw *hw, bool max,
		     struct hns3_pkt_buf_alloc *buf_alloc)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	struct hns3_priv_buf *priv;
	uint32_t aligned_mps;
	uint32_t rx_all;
	uint8_t i;

	/* RX gets whatever the TX allocation left over. */
	rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc);
	aligned_mps = roundup(pf->mps, HNS3_BUF_SIZE_UNIT);

	for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];

		/* Reset the slot before deciding whether to enable it. */
		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hw->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;
		/* PFC-enabled TCs need a non-zero low watermark. */
		if (hw->dcb_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = max ? aligned_mps : HNS3_BUF_SIZE_UNIT;
			priv->wl.high = roundup(priv->wl.low + aligned_mps,
						HNS3_BUF_SIZE_UNIT);
		} else {
			priv->wl.low = 0;
			priv->wl.high = max ? (aligned_mps * HNS3_BUF_MUL_BY) :
					aligned_mps;
		}

		priv->buf_size = priv->wl.high + pf->dv_buf_size;
	}

	return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all);
}
3823
/*
 * Shrink the RX plan by clearing the private buffers of non-PFC TCs, one at
 * a time from the highest TC downward, until the plan fits or no such TC
 * remains.
 *
 * Returns whether the resulting plan fits (hns3_is_rx_buf_ok()).
 */
static bool
hns3_drop_nopfc_buf_till_fit(struct hns3_hw *hw,
			     struct hns3_pkt_buf_alloc *buf_alloc)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	struct hns3_priv_buf *priv;
	int no_pfc_priv_num;
	uint32_t rx_all;
	uint8_t mask;
	int i;

	rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc);
	no_pfc_priv_num = hns3_get_no_pfc_priv_num(hw, buf_alloc);

	/* let the last to be cleared first */
	for (i = HNS3_MAX_TC_NUM - 1; i >= 0; i--) {
		priv = &buf_alloc->priv_buf[i];
		mask = BIT((uint8_t)i);
		if (hw->hw_tc_map & mask &&
		    !(hw->dcb_info.hw_pfc_map & mask)) {
			/* Clear the no pfc TC private buffer */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			no_pfc_priv_num--;
		}

		/* Stop early once the plan fits or candidates run out. */
		if (hns3_is_rx_buf_ok(hw, buf_alloc, rx_all) ||
		    no_pfc_priv_num == 0)
			break;
	}

	return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all);
}
3860
/*
 * Last-resort shrink of the RX plan: clear the private buffers of
 * PFC-enabled TCs, one at a time from the highest TC downward, until the
 * plan fits or no such TC remains.
 *
 * Returns whether the resulting plan fits (hns3_is_rx_buf_ok()).
 */
static bool
hns3_drop_pfc_buf_till_fit(struct hns3_hw *hw,
			   struct hns3_pkt_buf_alloc *buf_alloc)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	struct hns3_priv_buf *priv;
	uint32_t rx_all;
	int pfc_priv_num;
	uint8_t mask;
	int i;

	rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc);
	pfc_priv_num = hns3_get_pfc_priv_num(hw, buf_alloc);

	/* let the last to be cleared first */
	for (i = HNS3_MAX_TC_NUM - 1; i >= 0; i--) {
		priv = &buf_alloc->priv_buf[i];
		mask = BIT((uint8_t)i);
		if (hw->hw_tc_map & mask && hw->dcb_info.hw_pfc_map & mask) {
			/* Reduce the number of pfc TC with private buffer */
			priv->wl.low = 0;
			priv->enable = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			pfc_priv_num--;
		}
		/* Stop early once the plan fits or candidates run out. */
		if (hns3_is_rx_buf_ok(hw, buf_alloc, rx_all) ||
		    pfc_priv_num == 0)
			break;
	}

	return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all);
}
3895
/*
 * Try to satisfy the RX plan with private buffers only (no shared buffer):
 * split the remaining packet buffer evenly across enabled TCs, apply a
 * reserve percentage when few TCs are enabled, and accept the plan only if
 * each TC's share clears the minimum documented below.
 *
 * Returns true (and zeroes the shared buffer) when the plan is accepted;
 * false when a per-TC share would be below the minimum.
 */
static bool
hns3_only_alloc_priv_buff(struct hns3_hw *hw,
			  struct hns3_pkt_buf_alloc *buf_alloc)
{
#define COMPENSATE_BUFFER	0x3C00
#define COMPENSATE_HALF_MPS_NUM	5
#define PRIV_WL_GAP		0x1800
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	uint32_t tc_num = hns3_get_tc_num(hw);
	uint32_t half_mps = pf->mps >> 1;
	struct hns3_priv_buf *priv;
	uint32_t min_rx_priv;
	uint32_t rx_priv;
	uint8_t i;

	rx_priv = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc);
	if (tc_num)
		rx_priv = rx_priv / tc_num;

	if (tc_num <= NEED_RESERVE_TC_NUM)
		rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;

	/*
	 * Minimum value of private buffer in rx direction (min_rx_priv) is
	 * equal to "DV + 2.5 * MPS + 15KB". Driver only allocates rx private
	 * buffer if rx_priv is greater than min_rx_priv.
	 */
	min_rx_priv = pf->dv_buf_size + COMPENSATE_BUFFER +
			COMPENSATE_HALF_MPS_NUM * half_mps;
	min_rx_priv = roundup(min_rx_priv, HNS3_BUF_SIZE_UNIT);
	rx_priv = rounddown(rx_priv, HNS3_BUF_SIZE_UNIT);
	if (rx_priv < min_rx_priv)
		return false;

	for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hw->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;
		priv->buf_size = rx_priv;
		/* High watermark leaves DV headroom; low sits a fixed gap
		 * below high.
		 */
		priv->wl.high = rx_priv - pf->dv_buf_size;
		priv->wl.low = priv->wl.high - PRIV_WL_GAP;
	}

	buf_alloc->s_buf.buf_size = 0;

	return true;
}
3951
3952/*
3953 * hns3_rx_buffer_calc: calculate the rx private buffer size for all TCs
3954 * @hw: pointer to struct hns3_hw
3955 * @buf_alloc: pointer to buffer calculation data
3956 * @return: 0: calculate sucessful, negative: fail
3957 */
3958static int
3959hns3_rx_buffer_calc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
3960{
3961        /* When DCB is not supported, rx private buffer is not allocated. */
3962        if (!hns3_dev_dcb_supported(hw)) {
3963                struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
3964                struct hns3_pf *pf = &hns->pf;
3965                uint32_t rx_all = pf->pkt_buf_size;
3966
3967                rx_all -= hns3_get_tx_buff_alloced(buf_alloc);
3968                if (!hns3_is_rx_buf_ok(hw, buf_alloc, rx_all))
3969                        return -ENOMEM;
3970
3971                return 0;
3972        }
3973
3974        /*
3975         * Try to allocate privated packet buffer for all TCs without share
3976         * buffer.
3977         */
3978        if (hns3_only_alloc_priv_buff(hw, buf_alloc))
3979                return 0;
3980
3981        /*
3982         * Try to allocate privated packet buffer for all TCs with share
3983         * buffer.
3984         */
3985        if (hns3_rx_buf_calc_all(hw, true, buf_alloc))
3986                return 0;
3987
3988        /*
3989         * For different application scenes, the enabled port number, TC number
3990         * and no_drop TC number are different. In order to obtain the better
3991         * performance, software could allocate the buffer size and configure
3992         * the waterline by trying to decrease the private buffer size according
3993         * to the order, namely, waterline of valid tc, pfc disabled tc, pfc
3994         * enabled tc.
3995         */
3996        if (hns3_rx_buf_calc_all(hw, false, buf_alloc))
3997                return 0;
3998
3999        if (hns3_drop_nopfc_buf_till_fit(hw, buf_alloc))
4000                return 0;
4001
4002        if (hns3_drop_pfc_buf_till_fit(hw, buf_alloc))
4003                return 0;
4004
4005        return -ENOMEM;
4006}
4007
4008static int
4009hns3_rx_priv_buf_alloc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
4010{
4011        struct hns3_rx_priv_buff_cmd *req;
4012        struct hns3_cmd_desc desc;
4013        uint32_t buf_size;
4014        int ret;
4015        int i;
4016
4017        hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RX_PRIV_BUFF_ALLOC, false);
4018        req = (struct hns3_rx_priv_buff_cmd *)desc.data;
4019
4020        /* Alloc private buffer TCs */
4021        for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
4022                struct hns3_priv_buf *priv = &buf_alloc->priv_buf[i];
4023
4024                req->buf_num[i] =
4025                        rte_cpu_to_le_16(priv->buf_size >> HNS3_BUF_UNIT_S);
4026                req->buf_num[i] |= rte_cpu_to_le_16(1 << HNS3_TC0_PRI_BUF_EN_B);
4027        }
4028
4029        buf_size = buf_alloc->s_buf.buf_size;
4030        req->shared_buf = rte_cpu_to_le_16((buf_size >> HNS3_BUF_UNIT_S) |
4031                                           (1 << HNS3_TC0_PRI_BUF_EN_B));
4032
4033        ret = hns3_cmd_send(hw, &desc, 1);
4034        if (ret)
4035                PMD_INIT_LOG(ERR, "rx private buffer alloc cmd failed %d", ret);
4036
4037        return ret;
4038}
4039
4040static int
4041hns3_rx_priv_wl_config(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
4042{
4043#define HNS3_RX_PRIV_WL_ALLOC_DESC_NUM 2
4044        struct hns3_rx_priv_wl_buf *req;
4045        struct hns3_priv_buf *priv;
4046        struct hns3_cmd_desc desc[HNS3_RX_PRIV_WL_ALLOC_DESC_NUM];
4047        int i, j;
4048        int ret;
4049
4050        for (i = 0; i < HNS3_RX_PRIV_WL_ALLOC_DESC_NUM; i++) {
4051                hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_RX_PRIV_WL_ALLOC,
4052                                          false);
4053                req = (struct hns3_rx_priv_wl_buf *)desc[i].data;
4054
4055                /* The first descriptor set the NEXT bit to 1 */
4056                if (i == 0)
4057                        desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
4058                else
4059                        desc[i].flag &= ~rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
4060
4061                for (j = 0; j < HNS3_TC_NUM_ONE_DESC; j++) {
4062                        uint32_t idx = i * HNS3_TC_NUM_ONE_DESC + j;
4063
4064                        priv = &buf_alloc->priv_buf[idx];
4065                        req->tc_wl[j].high = rte_cpu_to_le_16(priv->wl.high >>
4066                                                        HNS3_BUF_UNIT_S);
4067                        req->tc_wl[j].high |=
4068                                rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
4069                        req->tc_wl[j].low = rte_cpu_to_le_16(priv->wl.low >>
4070                                                        HNS3_BUF_UNIT_S);
4071                        req->tc_wl[j].low |=
4072                                rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
4073                }
4074        }
4075
4076        /* Send 2 descriptor at one time */
4077        ret = hns3_cmd_send(hw, desc, HNS3_RX_PRIV_WL_ALLOC_DESC_NUM);
4078        if (ret)
4079                PMD_INIT_LOG(ERR, "rx private waterline config cmd failed %d",
4080                             ret);
4081        return ret;
4082}
4083
4084static int
4085hns3_common_thrd_config(struct hns3_hw *hw,
4086                        struct hns3_pkt_buf_alloc *buf_alloc)
4087{
4088#define HNS3_RX_COM_THRD_ALLOC_DESC_NUM 2
4089        struct hns3_shared_buf *s_buf = &buf_alloc->s_buf;
4090        struct hns3_rx_com_thrd *req;
4091        struct hns3_cmd_desc desc[HNS3_RX_COM_THRD_ALLOC_DESC_NUM];
4092        struct hns3_tc_thrd *tc;
4093        int tc_idx;
4094        int i, j;
4095        int ret;
4096
4097        for (i = 0; i < HNS3_RX_COM_THRD_ALLOC_DESC_NUM; i++) {
4098                hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_RX_COM_THRD_ALLOC,
4099                                          false);
4100                req = (struct hns3_rx_com_thrd *)&desc[i].data;
4101
4102                /* The first descriptor set the NEXT bit to 1 */
4103                if (i == 0)
4104                        desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
4105                else
4106                        desc[i].flag &= ~rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
4107
4108                for (j = 0; j < HNS3_TC_NUM_ONE_DESC; j++) {
4109                        tc_idx = i * HNS3_TC_NUM_ONE_DESC + j;
4110                        tc = &s_buf->tc_thrd[tc_idx];
4111
4112                        req->com_thrd[j].high =
4113                                rte_cpu_to_le_16(tc->high >> HNS3_BUF_UNIT_S);
4114                        req->com_thrd[j].high |=
4115                                 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
4116                        req->com_thrd[j].low =
4117                                rte_cpu_to_le_16(tc->low >> HNS3_BUF_UNIT_S);
4118                        req->com_thrd[j].low |=
4119                                 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
4120                }
4121        }
4122
4123        /* Send 2 descriptors at one time */
4124        ret = hns3_cmd_send(hw, desc, HNS3_RX_COM_THRD_ALLOC_DESC_NUM);
4125        if (ret)
4126                PMD_INIT_LOG(ERR, "common threshold config cmd failed %d", ret);
4127
4128        return ret;
4129}
4130
4131static int
4132hns3_common_wl_config(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
4133{
4134        struct hns3_shared_buf *buf = &buf_alloc->s_buf;
4135        struct hns3_rx_com_wl *req;
4136        struct hns3_cmd_desc desc;
4137        int ret;
4138
4139        hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RX_COM_WL_ALLOC, false);
4140
4141        req = (struct hns3_rx_com_wl *)desc.data;
4142        req->com_wl.high = rte_cpu_to_le_16(buf->self.high >> HNS3_BUF_UNIT_S);
4143        req->com_wl.high |= rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
4144
4145        req->com_wl.low = rte_cpu_to_le_16(buf->self.low >> HNS3_BUF_UNIT_S);
4146        req->com_wl.low |= rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
4147
4148        ret = hns3_cmd_send(hw, &desc, 1);
4149        if (ret)
4150                PMD_INIT_LOG(ERR, "common waterline config cmd failed %d", ret);
4151
4152        return ret;
4153}
4154
4155int
4156hns3_buffer_alloc(struct hns3_hw *hw)
4157{
4158        struct hns3_pkt_buf_alloc pkt_buf;
4159        int ret;
4160
4161        memset(&pkt_buf, 0, sizeof(pkt_buf));
4162        ret = hns3_tx_buffer_calc(hw, &pkt_buf);
4163        if (ret) {
4164                PMD_INIT_LOG(ERR,
4165                             "could not calc tx buffer size for all TCs %d",
4166                             ret);
4167                return ret;
4168        }
4169
4170        ret = hns3_tx_buffer_alloc(hw, &pkt_buf);
4171        if (ret) {
4172                PMD_INIT_LOG(ERR, "could not alloc tx buffers %d", ret);
4173                return ret;
4174        }
4175
4176        ret = hns3_rx_buffer_calc(hw, &pkt_buf);
4177        if (ret) {
4178                PMD_INIT_LOG(ERR,
4179                             "could not calc rx priv buffer size for all TCs %d",
4180                             ret);
4181                return ret;
4182        }
4183
4184        ret = hns3_rx_priv_buf_alloc(hw, &pkt_buf);
4185        if (ret) {
4186                PMD_INIT_LOG(ERR, "could not alloc rx priv buffer %d", ret);
4187                return ret;
4188        }
4189
4190        if (hns3_dev_dcb_supported(hw)) {
4191                ret = hns3_rx_priv_wl_config(hw, &pkt_buf);
4192                if (ret) {
4193                        PMD_INIT_LOG(ERR,
4194                                     "could not configure rx private waterline %d",
4195                                     ret);
4196                        return ret;
4197                }
4198
4199                ret = hns3_common_thrd_config(hw, &pkt_buf);
4200                if (ret) {
4201                        PMD_INIT_LOG(ERR,
4202                                     "could not configure common threshold %d",
4203                                     ret);
4204                        return ret;
4205                }
4206        }
4207
4208        ret = hns3_common_wl_config(hw, &pkt_buf);
4209        if (ret)
4210                PMD_INIT_LOG(ERR, "could not configure common waterline %d",
4211                             ret);
4212
4213        return ret;
4214}
4215
4216static int
4217hns3_mac_init(struct hns3_hw *hw)
4218{
4219        struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
4220        struct hns3_mac *mac = &hw->mac;
4221        struct hns3_pf *pf = &hns->pf;
4222        int ret;
4223
4224        pf->support_sfp_query = true;
4225        mac->link_duplex = ETH_LINK_FULL_DUPLEX;
4226        ret = hns3_cfg_mac_speed_dup_hw(hw, mac->link_speed, mac->link_duplex);
4227        if (ret) {
4228                PMD_INIT_LOG(ERR, "Config mac speed dup fail ret = %d", ret);
4229                return ret;
4230        }
4231
4232        mac->link_status = ETH_LINK_DOWN;
4233
4234        return hns3_config_mtu(hw, pf->mps);
4235}
4236
4237static int
4238hns3_get_mac_ethertype_cmd_status(uint16_t cmdq_resp, uint8_t resp_code)
4239{
4240#define HNS3_ETHERTYPE_SUCCESS_ADD              0
4241#define HNS3_ETHERTYPE_ALREADY_ADD              1
4242#define HNS3_ETHERTYPE_MGR_TBL_OVERFLOW         2
4243#define HNS3_ETHERTYPE_KEY_CONFLICT             3
4244        int return_status;
4245
4246        if (cmdq_resp) {
4247                PMD_INIT_LOG(ERR,
4248                             "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
4249                             cmdq_resp);
4250                return -EIO;
4251        }
4252
4253        switch (resp_code) {
4254        case HNS3_ETHERTYPE_SUCCESS_ADD:
4255        case HNS3_ETHERTYPE_ALREADY_ADD:
4256                return_status = 0;
4257                break;
4258        case HNS3_ETHERTYPE_MGR_TBL_OVERFLOW:
4259                PMD_INIT_LOG(ERR,
4260                             "add mac ethertype failed for manager table overflow.");
4261                return_status = -EIO;
4262                break;
4263        case HNS3_ETHERTYPE_KEY_CONFLICT:
4264                PMD_INIT_LOG(ERR, "add mac ethertype failed for key conflict.");
4265                return_status = -EIO;
4266                break;
4267        default:
4268                PMD_INIT_LOG(ERR,
4269                             "add mac ethertype failed for undefined, code=%u.",
4270                             resp_code);
4271                return_status = -EIO;
4272                break;
4273        }
4274
4275        return return_status;
4276}
4277
4278static int
4279hns3_add_mgr_tbl(struct hns3_hw *hw,
4280                 const struct hns3_mac_mgr_tbl_entry_cmd *req)
4281{
4282        struct hns3_cmd_desc desc;
4283        uint8_t resp_code;
4284        uint16_t retval;
4285        int ret;
4286
4287        hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_ETHTYPE_ADD, false);
4288        memcpy(desc.data, req, sizeof(struct hns3_mac_mgr_tbl_entry_cmd));
4289
4290        ret = hns3_cmd_send(hw, &desc, 1);
4291        if (ret) {
4292                PMD_INIT_LOG(ERR,
4293                             "add mac ethertype failed for cmd_send, ret =%d.",
4294                             ret);
4295                return ret;
4296        }
4297
4298        resp_code = (rte_le_to_cpu_32(desc.data[0]) >> 8) & 0xff;
4299        retval = rte_le_to_cpu_16(desc.retval);
4300
4301        return hns3_get_mac_ethertype_cmd_status(retval, resp_code);
4302}
4303
4304static void
4305hns3_prepare_mgr_tbl(struct hns3_mac_mgr_tbl_entry_cmd *mgr_table,
4306                     int *table_item_num)
4307{
4308        struct hns3_mac_mgr_tbl_entry_cmd *tbl;
4309
4310        /*
4311         * In current version, we add one item in management table as below:
4312         * 0x0180C200000E -- LLDP MC address
4313         */
4314        tbl = mgr_table;
4315        tbl->flags = HNS3_MAC_MGR_MASK_VLAN_B;
4316        tbl->ethter_type = rte_cpu_to_le_16(HNS3_MAC_ETHERTYPE_LLDP);
4317        tbl->mac_addr_hi32 = rte_cpu_to_le_32(htonl(0x0180C200));
4318        tbl->mac_addr_lo16 = rte_cpu_to_le_16(htons(0x000E));
4319        tbl->i_port_bitmap = 0x1;
4320        *table_item_num = 1;
4321}
4322
4323static int
4324hns3_init_mgr_tbl(struct hns3_hw *hw)
4325{
4326#define HNS_MAC_MGR_TBL_MAX_SIZE        16
4327        struct hns3_mac_mgr_tbl_entry_cmd mgr_table[HNS_MAC_MGR_TBL_MAX_SIZE];
4328        int table_item_num;
4329        int ret;
4330        int i;
4331
4332        memset(mgr_table, 0, sizeof(mgr_table));
4333        hns3_prepare_mgr_tbl(mgr_table, &table_item_num);
4334        for (i = 0; i < table_item_num; i++) {
4335                ret = hns3_add_mgr_tbl(hw, &mgr_table[i]);
4336                if (ret) {
4337                        PMD_INIT_LOG(ERR, "add mac ethertype failed, ret =%d",
4338                                     ret);
4339                        return ret;
4340                }
4341        }
4342
4343        return 0;
4344}
4345
4346static void
4347hns3_promisc_param_init(struct hns3_promisc_param *param, bool en_uc,
4348                        bool en_mc, bool en_bc, int vport_id)
4349{
4350        if (!param)
4351                return;
4352
4353        memset(param, 0, sizeof(struct hns3_promisc_param));
4354        if (en_uc)
4355                param->enable = HNS3_PROMISC_EN_UC;
4356        if (en_mc)
4357                param->enable |= HNS3_PROMISC_EN_MC;
4358        if (en_bc)
4359                param->enable |= HNS3_PROMISC_EN_BC;
4360        param->vf_id = vport_id;
4361}
4362
4363static int
4364hns3_cmd_set_promisc_mode(struct hns3_hw *hw, struct hns3_promisc_param *param)
4365{
4366        struct hns3_promisc_cfg_cmd *req;
4367        struct hns3_cmd_desc desc;
4368        int ret;
4369
4370        hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_PROMISC_MODE, false);
4371
4372        req = (struct hns3_promisc_cfg_cmd *)desc.data;
4373        req->vf_id = param->vf_id;
4374        req->flag = (param->enable << HNS3_PROMISC_EN_B) |
4375            HNS3_PROMISC_TX_EN_B | HNS3_PROMISC_RX_EN_B;
4376
4377        ret = hns3_cmd_send(hw, &desc, 1);
4378        if (ret)
4379                PMD_INIT_LOG(ERR, "Set promisc mode fail, ret = %d", ret);
4380
4381        return ret;
4382}
4383
4384static int
4385hns3_set_promisc_mode(struct hns3_hw *hw, bool en_uc_pmc, bool en_mc_pmc)
4386{
4387        struct hns3_promisc_param param;
4388        bool en_bc_pmc = true;
4389        uint8_t vf_id;
4390
4391        /*
4392         * In current version VF is not supported when PF is driven by DPDK
4393         * driver, just need to configure parameters for PF vport.
4394         */
4395        vf_id = HNS3_PF_FUNC_ID;
4396
4397        hns3_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc, vf_id);
4398        return hns3_cmd_set_promisc_mode(hw, &param);
4399}
4400
4401static int
4402hns3_promisc_init(struct hns3_hw *hw)
4403{
4404        struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
4405        struct hns3_pf *pf = &hns->pf;
4406        struct hns3_promisc_param param;
4407        uint16_t func_id;
4408        int ret;
4409
4410        ret = hns3_set_promisc_mode(hw, false, false);
4411        if (ret) {
4412                PMD_INIT_LOG(ERR, "failed to set promisc mode, ret = %d", ret);
4413                return ret;
4414        }
4415
4416        /*
4417         * In current version VFs are not supported when PF is driven by DPDK
4418         * driver. After PF has been taken over by DPDK, the original VF will
4419         * be invalid. So, there is a possibility of entry residues. It should
4420         * clear VFs's promisc mode to avoid unnecessary bandwidth usage
4421         * during init.
4422         */
4423        for (func_id = HNS3_1ST_VF_FUNC_ID; func_id < pf->func_num; func_id++) {
4424                hns3_promisc_param_init(&param, false, false, false, func_id);
4425                ret = hns3_cmd_set_promisc_mode(hw, &param);
4426                if (ret) {
4427                        PMD_INIT_LOG(ERR, "failed to clear vf:%u promisc mode,"
4428                                        " ret = %d", func_id, ret);
4429                        return ret;
4430                }
4431        }
4432
4433        return 0;
4434}
4435
4436static void
4437hns3_promisc_uninit(struct hns3_hw *hw)
4438{
4439        struct hns3_promisc_param param;
4440        uint16_t func_id;
4441        int ret;
4442
4443        func_id = HNS3_PF_FUNC_ID;
4444
4445        /*
4446         * In current version VFs are not supported when PF is driven by
4447         * DPDK driver, and VFs' promisc mode status has been cleared during
4448         * init and their status will not change. So just clear PF's promisc
4449         * mode status during uninit.
4450         */
4451        hns3_promisc_param_init(&param, false, false, false, func_id);
4452        ret = hns3_cmd_set_promisc_mode(hw, &param);
4453        if (ret)
4454                PMD_INIT_LOG(ERR, "failed to clear promisc status during"
4455                                " uninit, ret = %d", ret);
4456}
4457
4458static int
4459hns3_dev_promiscuous_enable(struct rte_eth_dev *dev)
4460{
4461        bool allmulti = dev->data->all_multicast ? true : false;
4462        struct hns3_adapter *hns = dev->data->dev_private;
4463        struct hns3_hw *hw = &hns->hw;
4464        uint64_t offloads;
4465        int err;
4466        int ret;
4467
4468        rte_spinlock_lock(&hw->lock);
4469        ret = hns3_set_promisc_mode(hw, true, true);
4470        if (ret) {
4471                rte_spinlock_unlock(&hw->lock);
4472                hns3_err(hw, "failed to enable promiscuous mode, ret = %d",
4473                         ret);
4474                return ret;
4475        }
4476
4477        /*
4478         * When promiscuous mode was enabled, disable the vlan filter to let
4479         * all packets coming in in the receiving direction.
4480         */
4481        offloads = dev->data->dev_conf.rxmode.offloads;
4482        if (offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
4483                ret = hns3_enable_vlan_filter(hns, false);
4484                if (ret) {
4485                        hns3_err(hw, "failed to enable promiscuous mode due to "
4486                                     "failure to disable vlan filter, ret = %d",
4487                                 ret);
4488                        err = hns3_set_promisc_mode(hw, false, allmulti);
4489                        if (err)
4490                                hns3_err(hw, "failed to restore promiscuous "
4491                                         "status after disable vlan filter "
4492                                         "failed during enabling promiscuous "
4493                                         "mode, ret = %d", ret);
4494                }
4495        }
4496
4497        rte_spinlock_unlock(&hw->lock);
4498
4499        return ret;
4500}
4501
4502static int
4503hns3_dev_promiscuous_disable(struct rte_eth_dev *dev)
4504{
4505        bool allmulti = dev->data->all_multicast ? true : false;
4506        struct hns3_adapter *hns = dev->data->dev_private;
4507        struct hns3_hw *hw = &hns->hw;
4508        uint64_t offloads;
4509        int err;
4510        int ret;
4511
4512        /* If now in all_multicast mode, must remain in all_multicast mode. */
4513        rte_spinlock_lock(&hw->lock);
4514        ret = hns3_set_promisc_mode(hw, false, allmulti);
4515        if (ret) {
4516                rte_spinlock_unlock(&hw->lock);
4517                hns3_err(hw, "failed to disable promiscuous mode, ret = %d",
4518                         ret);
4519                return ret;
4520        }
4521        /* when promiscuous mode was disabled, restore the vlan filter status */
4522        offloads = dev->data->dev_conf.rxmode.offloads;
4523        if (offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
4524                ret = hns3_enable_vlan_filter(hns, true);
4525                if (ret) {
4526                        hns3_err(hw, "failed to disable promiscuous mode due to"
4527                                 " failure to restore vlan filter, ret = %d",
4528                                 ret);
4529                        err = hns3_set_promisc_mode(hw, true, true);
4530                        if (err)
4531                                hns3_err(hw, "failed to restore promiscuous "
4532                                         "status after enabling vlan filter "
4533                                         "failed during disabling promiscuous "
4534                                         "mode, ret = %d", ret);
4535                }
4536        }
4537        rte_spinlock_unlock(&hw->lock);
4538
4539        return ret;
4540}
4541
4542static int
4543hns3_dev_allmulticast_enable(struct rte_eth_dev *dev)
4544{
4545        struct hns3_adapter *hns = dev->data->dev_private;
4546        struct hns3_hw *hw = &hns->hw;
4547        int ret;
4548
4549        if (dev->data->promiscuous)
4550                return 0;
4551
4552        rte_spinlock_lock(&hw->lock);
4553        ret = hns3_set_promisc_mode(hw, false, true);
4554        rte_spinlock_unlock(&hw->lock);
4555        if (ret)
4556                hns3_err(hw, "failed to enable allmulticast mode, ret = %d",
4557                         ret);
4558
4559        return ret;
4560}
4561
4562static int
4563hns3_dev_allmulticast_disable(struct rte_eth_dev *dev)
4564{
4565        struct hns3_adapter *hns = dev->data->dev_private;
4566        struct hns3_hw *hw = &hns->hw;
4567        int ret;
4568
4569        /* If now in promiscuous mode, must remain in all_multicast mode. */
4570        if (dev->data->promiscuous)
4571                return 0;
4572
4573        rte_spinlock_lock(&hw->lock);
4574        ret = hns3_set_promisc_mode(hw, false, false);
4575        rte_spinlock_unlock(&hw->lock);
4576        if (ret)
4577                hns3_err(hw, "failed to disable allmulticast mode, ret = %d",
4578                         ret);
4579
4580        return ret;
4581}
4582
4583static int
4584hns3_dev_promisc_restore(struct hns3_adapter *hns)
4585{
4586        struct hns3_hw *hw = &hns->hw;
4587        bool allmulti = hw->data->all_multicast ? true : false;
4588        int ret;
4589
4590        if (hw->data->promiscuous) {
4591                ret = hns3_set_promisc_mode(hw, true, true);
4592                if (ret)
4593                        hns3_err(hw, "failed to restore promiscuous mode, "
4594                                 "ret = %d", ret);
4595                return ret;
4596        }
4597
4598        ret = hns3_set_promisc_mode(hw, false, allmulti);
4599        if (ret)
4600                hns3_err(hw, "failed to restore allmulticast mode, ret = %d",
4601                         ret);
4602        return ret;
4603}
4604
4605static int
4606hns3_get_sfp_info(struct hns3_hw *hw, struct hns3_mac *mac_info)
4607{
4608        struct hns3_sfp_info_cmd *resp;
4609        struct hns3_cmd_desc desc;
4610        int ret;
4611
4612        hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GET_SFP_INFO, true);
4613        resp = (struct hns3_sfp_info_cmd *)desc.data;
4614        resp->query_type = HNS3_ACTIVE_QUERY;
4615
4616        ret = hns3_cmd_send(hw, &desc, 1);
4617        if (ret == -EOPNOTSUPP) {
4618                hns3_warn(hw, "firmware does not support get SFP info,"
4619                          " ret = %d.", ret);
4620                return ret;
4621        } else if (ret) {
4622                hns3_err(hw, "get sfp info failed, ret = %d.", ret);
4623                return ret;
4624        }
4625
4626        /*
4627         * In some case, the speed of MAC obtained from firmware may be 0, it
4628         * shouldn't be set to mac->speed.
4629         */
4630        if (!rte_le_to_cpu_32(resp->sfp_speed))
4631                return 0;
4632
4633        mac_info->link_speed = rte_le_to_cpu_32(resp->sfp_speed);
4634        /*
4635         * if resp->supported_speed is 0, it means it's an old version
4636         * firmware, do not update these params.
4637         */
4638        if (resp->supported_speed) {
4639                mac_info->query_type = HNS3_ACTIVE_QUERY;
4640                mac_info->supported_speed =
4641                                        rte_le_to_cpu_32(resp->supported_speed);
4642                mac_info->support_autoneg = resp->autoneg_ability;
4643                mac_info->link_autoneg = (resp->autoneg == 0) ? ETH_LINK_FIXED
4644                                        : ETH_LINK_AUTONEG;
4645        } else {
4646                mac_info->query_type = HNS3_DEFAULT_QUERY;
4647        }
4648
4649        return 0;
4650}
4651
4652static uint8_t
4653hns3_check_speed_dup(uint8_t duplex, uint32_t speed)
4654{
4655        if (!(speed == ETH_SPEED_NUM_10M || speed == ETH_SPEED_NUM_100M))
4656                duplex = ETH_LINK_FULL_DUPLEX;
4657
4658        return duplex;
4659}
4660
4661static int
4662hns3_cfg_mac_speed_dup(struct hns3_hw *hw, uint32_t speed, uint8_t duplex)
4663{
4664        struct hns3_mac *mac = &hw->mac;
4665        int ret;
4666
4667        duplex = hns3_check_speed_dup(duplex, speed);
4668        if (mac->link_speed == speed && mac->link_duplex == duplex)
4669                return 0;
4670
4671        ret = hns3_cfg_mac_speed_dup_hw(hw, speed, duplex);
4672        if (ret)
4673                return ret;
4674
4675        ret = hns3_port_shaper_update(hw, speed);
4676        if (ret)
4677                return ret;
4678
4679        mac->link_speed = speed;
4680        mac->link_duplex = duplex;
4681
4682        return 0;
4683}
4684
/*
 * Refresh MAC link parameters from the SFP/qSFP module.
 *
 * Queries the firmware for the module speed (and, on newer firmware, the
 * supported-speed/autoneg capabilities). Depending on the firmware's query
 * ability this either just mirrors the reported state into hw->mac, or also
 * reconfigures the MAC speed. Returns 0 on success or a negative errno;
 * -EOPNOTSUPP additionally disables further SFP queries.
 */
static int
hns3_update_fiber_link_info(struct hns3_hw *hw)
{
        struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
        struct hns3_mac *mac = &hw->mac;
        struct hns3_mac mac_info;
        int ret;

        /* If firmware do not support get SFP/qSFP speed, return directly */
        if (!pf->support_sfp_query)
                return 0;

        memset(&mac_info, 0, sizeof(struct hns3_mac));
        ret = hns3_get_sfp_info(hw, &mac_info);
        if (ret == -EOPNOTSUPP) {
                /* Remember the limitation so we never issue the query again. */
                pf->support_sfp_query = false;
                return ret;
        } else if (ret)
                return ret;

        /* Do nothing if no SFP */
        if (mac_info.link_speed == ETH_SPEED_NUM_NONE)
                return 0;

        /*
         * If query_type is HNS3_ACTIVE_QUERY, it is no need
         * to reconfigure the speed of MAC. Otherwise, it indicates
         * that the current firmware only supports to obtain the
         * speed of the SFP, and the speed of MAC needs to reconfigure.
         */
        mac->query_type = mac_info.query_type;
        if (mac->query_type == HNS3_ACTIVE_QUERY) {
                /* Keep the port shaper in step with a speed change. */
                if (mac_info.link_speed != mac->link_speed) {
                        ret = hns3_port_shaper_update(hw, mac_info.link_speed);
                        if (ret)
                                return ret;
                }

                mac->link_speed = mac_info.link_speed;
                mac->supported_speed = mac_info.supported_speed;
                mac->support_autoneg = mac_info.support_autoneg;
                mac->link_autoneg = mac_info.link_autoneg;

                return 0;
        }

        /* Config full duplex for SFP */
        return hns3_cfg_mac_speed_dup(hw, mac_info.link_speed,
                                      ETH_LINK_FULL_DUPLEX);
}
4735
4736static void
4737hns3_parse_copper_phy_params(struct hns3_cmd_desc *desc, struct hns3_mac *mac)
4738{
4739#define HNS3_PHY_SUPPORTED_SPEED_MASK   0x2f
4740
4741        struct hns3_phy_params_bd0_cmd *req;
4742        uint32_t supported;
4743
4744        req = (struct hns3_phy_params_bd0_cmd *)desc[0].data;
4745        mac->link_speed = rte_le_to_cpu_32(req->speed);
4746        mac->link_duplex = hns3_get_bit(req->duplex,
4747                                           HNS3_PHY_DUPLEX_CFG_B);
4748        mac->link_autoneg = hns3_get_bit(req->autoneg,
4749                                           HNS3_PHY_AUTONEG_CFG_B);
4750        mac->advertising = rte_le_to_cpu_32(req->advertising);
4751        mac->lp_advertising = rte_le_to_cpu_32(req->lp_advertising);
4752        supported = rte_le_to_cpu_32(req->supported);
4753        mac->supported_speed = supported & HNS3_PHY_SUPPORTED_SPEED_MASK;
4754        mac->support_autoneg = !!(supported & HNS3_PHY_LINK_MODE_AUTONEG_BIT);
4755}
4756
4757static int
4758hns3_get_copper_phy_params(struct hns3_hw *hw, struct hns3_mac *mac)
4759{
4760        struct hns3_cmd_desc desc[HNS3_PHY_PARAM_CFG_BD_NUM];
4761        uint16_t i;
4762        int ret;
4763
4764        for (i = 0; i < HNS3_PHY_PARAM_CFG_BD_NUM - 1; i++) {
4765                hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG,
4766                                          true);
4767                desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
4768        }
4769        hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG, true);
4770
4771        ret = hns3_cmd_send(hw, desc, HNS3_PHY_PARAM_CFG_BD_NUM);
4772        if (ret) {
4773                hns3_err(hw, "get phy parameters failed, ret = %d.", ret);
4774                return ret;
4775        }
4776
4777        hns3_parse_copper_phy_params(desc, mac);
4778
4779        return 0;
4780}
4781
4782static int
4783hns3_update_copper_link_info(struct hns3_hw *hw)
4784{
4785        struct hns3_mac *mac = &hw->mac;
4786        struct hns3_mac mac_info;
4787        int ret;
4788
4789        memset(&mac_info, 0, sizeof(struct hns3_mac));
4790        ret = hns3_get_copper_phy_params(hw, &mac_info);
4791        if (ret)
4792                return ret;
4793
4794        if (mac_info.link_speed != mac->link_speed) {
4795                ret = hns3_port_shaper_update(hw, mac_info.link_speed);
4796                if (ret)
4797                        return ret;
4798        }
4799
4800        mac->link_speed = mac_info.link_speed;
4801        mac->link_duplex = mac_info.link_duplex;
4802        mac->link_autoneg = mac_info.link_autoneg;
4803        mac->supported_speed = mac_info.supported_speed;
4804        mac->advertising = mac_info.advertising;
4805        mac->lp_advertising = mac_info.lp_advertising;
4806        mac->support_autoneg = mac_info.support_autoneg;
4807
4808        return 0;
4809}
4810
4811static int
4812hns3_update_link_info(struct rte_eth_dev *eth_dev)
4813{
4814        struct hns3_adapter *hns = eth_dev->data->dev_private;
4815        struct hns3_hw *hw = &hns->hw;
4816        int ret = 0;
4817
4818        if (hw->mac.media_type == HNS3_MEDIA_TYPE_COPPER)
4819                ret = hns3_update_copper_link_info(hw);
4820        else if (hw->mac.media_type == HNS3_MEDIA_TYPE_FIBER)
4821                ret = hns3_update_fiber_link_info(hw);
4822
4823        return ret;
4824}
4825
/*
 * Enable or disable the MAC datapath.
 *
 * Builds the MAC-mode word bit by bit: tx/rx enable, padding, FCS insertion
 * and checking, oversize truncation and undersize reporting all follow
 * 'enable'; 1588 timestamping and the loopback bits are always cleared.
 * CRC stripping tracks the DEV_RX_OFFLOAD_KEEP_CRC offload.
 */
static int
hns3_cfg_mac_mode(struct hns3_hw *hw, bool enable)
{
        struct hns3_config_mac_mode_cmd *req;
        struct hns3_cmd_desc desc;
        uint32_t loop_en = 0;
        uint8_t val = 0;
        int ret;

        req = (struct hns3_config_mac_mode_cmd *)desc.data;

        hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_MAC_MODE, false);
        if (enable)
                val = 1;
        hns3_set_bit(loop_en, HNS3_MAC_TX_EN_B, val);
        hns3_set_bit(loop_en, HNS3_MAC_RX_EN_B, val);
        hns3_set_bit(loop_en, HNS3_MAC_PAD_TX_B, val);
        hns3_set_bit(loop_en, HNS3_MAC_PAD_RX_B, val);
        /* 1588 timestamping and loopback modes are always left disabled. */
        hns3_set_bit(loop_en, HNS3_MAC_1588_TX_B, 0);
        hns3_set_bit(loop_en, HNS3_MAC_1588_RX_B, 0);
        hns3_set_bit(loop_en, HNS3_MAC_APP_LP_B, 0);
        hns3_set_bit(loop_en, HNS3_MAC_LINE_LP_B, 0);
        hns3_set_bit(loop_en, HNS3_MAC_FCS_TX_B, val);
        hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_B, val);

        /*
         * If DEV_RX_OFFLOAD_KEEP_CRC offload is set, MAC will not strip CRC
         * when receiving frames. Otherwise, CRC will be stripped.
         */
        if (hw->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
                hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_STRIP_B, 0);
        else
                hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_STRIP_B, val);
        hns3_set_bit(loop_en, HNS3_MAC_TX_OVERSIZE_TRUNCATE_B, val);
        hns3_set_bit(loop_en, HNS3_MAC_RX_OVERSIZE_TRUNCATE_B, val);
        hns3_set_bit(loop_en, HNS3_MAC_TX_UNDER_MIN_ERR_B, val);
        req->txrx_pad_fcs_loop_en = rte_cpu_to_le_32(loop_en);

        ret = hns3_cmd_send(hw, &desc, 1);
        if (ret)
                PMD_INIT_LOG(ERR, "mac enable fail, ret =%d.", ret);

        return ret;
}
4870
4871static int
4872hns3_get_mac_link_status(struct hns3_hw *hw)
4873{
4874        struct hns3_link_status_cmd *req;
4875        struct hns3_cmd_desc desc;
4876        int link_status;
4877        int ret;
4878
4879        hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_LINK_STATUS, true);
4880        ret = hns3_cmd_send(hw, &desc, 1);
4881        if (ret) {
4882                hns3_err(hw, "get link status cmd failed %d", ret);
4883                return ETH_LINK_DOWN;
4884        }
4885
4886        req = (struct hns3_link_status_cmd *)desc.data;
4887        link_status = req->status & HNS3_LINK_STATUS_UP_M;
4888
4889        return !!link_status;
4890}
4891
4892static bool
4893hns3_update_link_status(struct hns3_hw *hw)
4894{
4895        int state;
4896
4897        state = hns3_get_mac_link_status(hw);
4898        if (state != hw->mac.link_status) {
4899                hw->mac.link_status = state;
4900                hns3_warn(hw, "Link status change to %s!", state ? "up" : "down");
4901                return true;
4902        }
4903
4904        return false;
4905}
4906
4907void
4908hns3_update_linkstatus_and_event(struct hns3_hw *hw, bool query)
4909{
4910        struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
4911        struct rte_eth_link new_link;
4912        int ret;
4913
4914        if (query)
4915                hns3_update_port_link_info(dev);
4916
4917        memset(&new_link, 0, sizeof(new_link));
4918        hns3_setup_linkstatus(dev, &new_link);
4919
4920        ret = rte_eth_linkstatus_set(dev, &new_link);
4921        if (ret == 0 && dev->data->dev_conf.intr_conf.lsc != 0)
4922                hns3_start_report_lse(dev);
4923}
4924
4925static void
4926hns3_service_handler(void *param)
4927{
4928        struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
4929        struct hns3_adapter *hns = eth_dev->data->dev_private;
4930        struct hns3_hw *hw = &hns->hw;
4931
4932        if (!hns3_is_reset_pending(hns))
4933                hns3_update_linkstatus_and_event(hw, true);
4934        else
4935                hns3_warn(hw, "Cancel the query when reset is pending");
4936
4937        rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, eth_dev);
4938}
4939
/*
 * Perform the one-time hardware initialization sequence for the PF.
 * The steps are order-dependent; every failure after the UMV space has
 * been allocated unwinds through err_mac_init, which releases it.
 * Returns 0 on success or a negative errno-style code from the failing step.
 */
static int
hns3_init_hardware(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	/* Establish the TQP (queue pair) to function mapping first. */
	ret = hns3_map_tqp(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to map tqp: %d", ret);
		return ret;
	}

	/* Allocate the unicast MAC VLAN (UMV) table space. */
	ret = hns3_init_umv_space(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init umv space: %d", ret);
		return ret;
	}

	ret = hns3_mac_init(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init MAC: %d", ret);
		goto err_mac_init;
	}

	ret = hns3_init_mgr_tbl(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init manager table: %d", ret);
		goto err_mac_init;
	}

	ret = hns3_promisc_init(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init promisc: %d",
			     ret);
		goto err_mac_init;
	}

	ret = hns3_init_vlan_config(hns);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init vlan: %d", ret);
		goto err_mac_init;
	}

	ret = hns3_dcb_init(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init dcb: %d", ret);
		goto err_mac_init;
	}

	ret = hns3_init_fd_config(hns);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init flow director: %d", ret);
		goto err_mac_init;
	}

	/* Configure TSO MSS limits used for segmentation offload. */
	ret = hns3_config_tso(hw, HNS3_TSO_MSS_MIN, HNS3_TSO_MSS_MAX);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to config tso: %d", ret);
		goto err_mac_init;
	}

	/* GRO starts disabled; it is enabled later on demand. */
	ret = hns3_config_gro(hw, false);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to config gro: %d", ret);
		goto err_mac_init;
	}

	/*
	 * In the initialization clearing the all hardware mapping relationship
	 * configurations between queues and interrupt vectors is needed, so
	 * some error caused by the residual configurations, such as the
	 * unexpected interrupt, can be avoid.
	 */
	ret = hns3_init_ring_with_vector(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init ring intr vector: %d", ret);
		goto err_mac_init;
	}

	return 0;

err_mac_init:
	/* Release the UMV space allocated above; earlier steps need no undo. */
	hns3_uninit_umv_space(hw);
	return ret;
}
5025
5026static int
5027hns3_clear_hw(struct hns3_hw *hw)
5028{
5029        struct hns3_cmd_desc desc;
5030        int ret;
5031
5032        hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CLEAR_HW_STATE, false);
5033
5034        ret = hns3_cmd_send(hw, &desc, 1);
5035        if (ret && ret != -EOPNOTSUPP)
5036                return ret;
5037
5038        return 0;
5039}
5040
5041static void
5042hns3_config_all_msix_error(struct hns3_hw *hw, bool enable)
5043{
5044        uint32_t val;
5045
5046        /*
5047         * The new firmware support report more hardware error types by
5048         * msix mode. These errors are defined as RAS errors in hardware
5049         * and belong to a different type from the MSI-x errors processed
5050         * by the network driver.
5051         *
5052         * Network driver should open the new error report on initialization.
5053         */
5054        val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG);
5055        hns3_set_bit(val, HNS3_VECTOR0_ALL_MSIX_ERR_B, enable ? 1 : 0);
5056        hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, val);
5057}
5058
5059static uint32_t
5060hns3_set_firber_default_support_speed(struct hns3_hw *hw)
5061{
5062        struct hns3_mac *mac = &hw->mac;
5063
5064        switch (mac->link_speed) {
5065        case ETH_SPEED_NUM_1G:
5066                return HNS3_FIBER_LINK_SPEED_1G_BIT;
5067        case ETH_SPEED_NUM_10G:
5068                return HNS3_FIBER_LINK_SPEED_10G_BIT;
5069        case ETH_SPEED_NUM_25G:
5070                return HNS3_FIBER_LINK_SPEED_25G_BIT;
5071        case ETH_SPEED_NUM_40G:
5072                return HNS3_FIBER_LINK_SPEED_40G_BIT;
5073        case ETH_SPEED_NUM_50G:
5074                return HNS3_FIBER_LINK_SPEED_50G_BIT;
5075        case ETH_SPEED_NUM_100G:
5076                return HNS3_FIBER_LINK_SPEED_100G_BIT;
5077        case ETH_SPEED_NUM_200G:
5078                return HNS3_FIBER_LINK_SPEED_200G_BIT;
5079        default:
5080                hns3_warn(hw, "invalid speed %u Mbps.", mac->link_speed);
5081                return 0;
5082        }
5083}
5084
5085/*
5086 * Validity of supported_speed for firber and copper media type can be
5087 * guaranteed by the following policy:
5088 * Copper:
5089 *       Although the initialization of the phy in the firmware may not be
5090 *       completed, the firmware can guarantees that the supported_speed is
5091 *       an valid value.
5092 * Firber:
5093 *       If the version of firmware supports the acitive query way of the
5094 *       HNS3_OPC_GET_SFP_INFO opcode, the supported_speed can be obtained
5095 *       through it. If unsupported, use the SFP's speed as the value of the
5096 *       supported_speed.
5097 */
5098static int
5099hns3_get_port_supported_speed(struct rte_eth_dev *eth_dev)
5100{
5101        struct hns3_adapter *hns = eth_dev->data->dev_private;
5102        struct hns3_hw *hw = &hns->hw;
5103        struct hns3_mac *mac = &hw->mac;
5104        int ret;
5105
5106        ret = hns3_update_link_info(eth_dev);
5107        if (ret)
5108                return ret;
5109
5110        if (mac->media_type == HNS3_MEDIA_TYPE_FIBER) {
5111                /*
5112                 * Some firmware does not support the report of supported_speed,
5113                 * and only report the effective speed of SFP. In this case, it
5114                 * is necessary to use the SFP's speed as the supported_speed.
5115                 */
5116                if (mac->supported_speed == 0)
5117                        mac->supported_speed =
5118                                hns3_set_firber_default_support_speed(hw);
5119        }
5120
5121        return 0;
5122}
5123
5124static void
5125hns3_get_fc_autoneg_capability(struct hns3_adapter *hns)
5126{
5127        struct hns3_mac *mac = &hns->hw.mac;
5128
5129        if (mac->media_type == HNS3_MEDIA_TYPE_COPPER) {
5130                hns->pf.support_fc_autoneg = true;
5131                return;
5132        }
5133
5134        /*
5135         * Flow control auto-negotiation requires the cooperation of the driver
5136         * and firmware. Currently, the optical port does not support flow
5137         * control auto-negotiation.
5138         */
5139        hns->pf.support_fc_autoneg = false;
5140}
5141
/*
 * Full PF initialization: firmware command channel, hardware state reset,
 * interrupt registration, configuration fetch, hardware/feature init.
 * The steps are strictly ordered and each failure unwinds exactly the
 * resources acquired so far via the layered goto labels at the bottom.
 * Returns 0 on success or a negative error code from the failing step.
 */
static int
hns3_init_pf(struct rte_eth_dev *eth_dev)
{
	struct rte_device *dev = eth_dev->device;
	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev);
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Get hardware io base address from pcie BAR2 IO space */
	hw->io_base = pci_dev->mem_resource[2].addr;

	/* Firmware command queue initialize */
	ret = hns3_cmd_init_queue(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init cmd queue: %d", ret);
		goto err_cmd_init_queue;
	}

	/* Discard stale interrupt events left over from a previous run. */
	hns3_clear_all_event_cause(hw);

	/* Firmware command initialize */
	ret = hns3_cmd_init(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init cmd: %d", ret);
		goto err_cmd_init;
	}

	hns3_tx_push_init(eth_dev);

	/*
	 * To ensure that the hardware environment is clean during
	 * initialization, the driver actively clear the hardware environment
	 * during initialization, including PF and corresponding VFs' vlan, mac,
	 * flow table configurations, etc.
	 */
	ret = hns3_clear_hw(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "failed to clear hardware: %d", ret);
		goto err_cmd_init;
	}

	/* Hardware statistics of imissed registers cleared. */
	ret = hns3_update_imissed_stats(hw, true);
	if (ret) {
		hns3_err(hw, "clear imissed stats failed, ret = %d", ret);
		goto err_cmd_init;
	}

	/* Opt in to extended MSI-x (RAS) error reporting. */
	hns3_config_all_msix_error(hw, true);

	ret = rte_intr_callback_register(&pci_dev->intr_handle,
					 hns3_interrupt_handler,
					 eth_dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to register intr: %d", ret);
		goto err_intr_callback_register;
	}

	ret = hns3_ptp_init(hw);
	if (ret)
		goto err_get_config;

	/* Enable interrupt */
	rte_intr_enable(&pci_dev->intr_handle);
	hns3_pf_enable_irq0(hw);

	/* Get configuration */
	ret = hns3_get_configuration(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to fetch configuration: %d", ret);
		goto err_get_config;
	}

	ret = hns3_tqp_stats_init(hw);
	if (ret)
		goto err_get_config;

	ret = hns3_init_hardware(hns);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init hardware: %d", ret);
		goto err_init_hw;
	}

	/* Initialize flow director filter list & hash */
	ret = hns3_fdir_filter_init(hns);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to alloc hashmap for fdir: %d", ret);
		goto err_fdir;
	}

	hns3_rss_set_default_args(hw);

	ret = hns3_enable_hw_error_intr(hns, true);
	if (ret) {
		PMD_INIT_LOG(ERR, "fail to enable hw error interrupts: %d",
			     ret);
		goto err_enable_intr;
	}

	ret = hns3_get_port_supported_speed(eth_dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "failed to get speed capabilities supported "
			     "by device, ret = %d.", ret);
		goto err_supported_speed;
	}

	hns3_get_fc_autoneg_capability(hns);

	hns3_tm_conf_init(eth_dev);

	return 0;

	/* Unwind in strict reverse order of acquisition. */
err_supported_speed:
	(void)hns3_enable_hw_error_intr(hns, false);
err_enable_intr:
	hns3_fdir_filter_uninit(hns);
err_fdir:
	hns3_uninit_umv_space(hw);
err_init_hw:
	hns3_tqp_stats_uninit(hw);
err_get_config:
	hns3_pf_disable_irq0(hw);
	rte_intr_disable(&pci_dev->intr_handle);
	hns3_intr_unregister(&pci_dev->intr_handle, hns3_interrupt_handler,
			     eth_dev);
err_intr_callback_register:
err_cmd_init:
	hns3_cmd_uninit(hw);
	hns3_cmd_destroy_queue(hw);
err_cmd_init_queue:
	hw->io_base = NULL;

	return ret;
}
5279
/*
 * Tear down everything hns3_init_pf() set up, in (approximately) reverse
 * order of initialization: features first, then interrupts, then the
 * firmware command channel. The order is load-bearing; commands must
 * still work while feature state is being released.
 */
static void
hns3_uninit_pf(struct rte_eth_dev *eth_dev)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct rte_device *dev = eth_dev->device;
	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev);
	struct hns3_hw *hw = &hns->hw;

	PMD_INIT_FUNC_TRACE();

	hns3_tm_conf_uninit(eth_dev);
	hns3_enable_hw_error_intr(hns, false);
	hns3_rss_uninit(hns);
	(void)hns3_config_gro(hw, false);
	hns3_promisc_uninit(hw);
	hns3_flow_uninit(eth_dev);
	hns3_fdir_filter_uninit(hns);
	hns3_uninit_umv_space(hw);
	hns3_tqp_stats_uninit(hw);
	hns3_config_mac_tnl_int(hw, false);
	/* Quiesce and unregister the misc interrupt before killing cmdq. */
	hns3_pf_disable_irq0(hw);
	rte_intr_disable(&pci_dev->intr_handle);
	hns3_intr_unregister(&pci_dev->intr_handle, hns3_interrupt_handler,
			     eth_dev);
	hns3_config_all_msix_error(hw, false);
	hns3_cmd_uninit(hw);
	hns3_cmd_destroy_queue(hw);
	hw->io_base = NULL;
}
5309
5310static uint32_t
5311hns3_convert_link_speeds2bitmap_copper(uint32_t link_speeds)
5312{
5313        uint32_t speed_bit;
5314
5315        switch (link_speeds & ~ETH_LINK_SPEED_FIXED) {
5316        case ETH_LINK_SPEED_10M:
5317                speed_bit = HNS3_PHY_LINK_SPEED_10M_BIT;
5318                break;
5319        case ETH_LINK_SPEED_10M_HD:
5320                speed_bit = HNS3_PHY_LINK_SPEED_10M_HD_BIT;
5321                break;
5322        case ETH_LINK_SPEED_100M:
5323                speed_bit = HNS3_PHY_LINK_SPEED_100M_BIT;
5324                break;
5325        case ETH_LINK_SPEED_100M_HD:
5326                speed_bit = HNS3_PHY_LINK_SPEED_100M_HD_BIT;
5327                break;
5328        case ETH_LINK_SPEED_1G:
5329                speed_bit = HNS3_PHY_LINK_SPEED_1000M_BIT;
5330                break;
5331        default:
5332                speed_bit = 0;
5333                break;
5334        }
5335
5336        return speed_bit;
5337}
5338
5339static uint32_t
5340hns3_convert_link_speeds2bitmap_fiber(uint32_t link_speeds)
5341{
5342        uint32_t speed_bit;
5343
5344        switch (link_speeds & ~ETH_LINK_SPEED_FIXED) {
5345        case ETH_LINK_SPEED_1G:
5346                speed_bit = HNS3_FIBER_LINK_SPEED_1G_BIT;
5347                break;
5348        case ETH_LINK_SPEED_10G:
5349                speed_bit = HNS3_FIBER_LINK_SPEED_10G_BIT;
5350                break;
5351        case ETH_LINK_SPEED_25G:
5352                speed_bit = HNS3_FIBER_LINK_SPEED_25G_BIT;
5353                break;
5354        case ETH_LINK_SPEED_40G:
5355                speed_bit = HNS3_FIBER_LINK_SPEED_40G_BIT;
5356                break;
5357        case ETH_LINK_SPEED_50G:
5358                speed_bit = HNS3_FIBER_LINK_SPEED_50G_BIT;
5359                break;
5360        case ETH_LINK_SPEED_100G:
5361                speed_bit = HNS3_FIBER_LINK_SPEED_100G_BIT;
5362                break;
5363        case ETH_LINK_SPEED_200G:
5364                speed_bit = HNS3_FIBER_LINK_SPEED_200G_BIT;
5365                break;
5366        default:
5367                speed_bit = 0;
5368                break;
5369        }
5370
5371        return speed_bit;
5372}
5373
5374static int
5375hns3_check_port_speed(struct hns3_hw *hw, uint32_t link_speeds)
5376{
5377        struct hns3_mac *mac = &hw->mac;
5378        uint32_t supported_speed = mac->supported_speed;
5379        uint32_t speed_bit = 0;
5380
5381        if (mac->media_type == HNS3_MEDIA_TYPE_COPPER)
5382                speed_bit = hns3_convert_link_speeds2bitmap_copper(link_speeds);
5383        else if (mac->media_type == HNS3_MEDIA_TYPE_FIBER)
5384                speed_bit = hns3_convert_link_speeds2bitmap_fiber(link_speeds);
5385
5386        if (!(speed_bit & supported_speed)) {
5387                hns3_err(hw, "link_speeds(0x%x) exceeds the supported speed capability or is incorrect.",
5388                         link_speeds);
5389                return -EINVAL;
5390        }
5391
5392        return 0;
5393}
5394
5395static inline uint32_t
5396hns3_get_link_speed(uint32_t link_speeds)
5397{
5398        uint32_t speed = ETH_SPEED_NUM_NONE;
5399
5400        if (link_speeds & ETH_LINK_SPEED_10M ||
5401            link_speeds & ETH_LINK_SPEED_10M_HD)
5402                speed = ETH_SPEED_NUM_10M;
5403        if (link_speeds & ETH_LINK_SPEED_100M ||
5404            link_speeds & ETH_LINK_SPEED_100M_HD)
5405                speed = ETH_SPEED_NUM_100M;
5406        if (link_speeds & ETH_LINK_SPEED_1G)
5407                speed = ETH_SPEED_NUM_1G;
5408        if (link_speeds & ETH_LINK_SPEED_10G)
5409                speed = ETH_SPEED_NUM_10G;
5410        if (link_speeds & ETH_LINK_SPEED_25G)
5411                speed = ETH_SPEED_NUM_25G;
5412        if (link_speeds & ETH_LINK_SPEED_40G)
5413                speed = ETH_SPEED_NUM_40G;
5414        if (link_speeds & ETH_LINK_SPEED_50G)
5415                speed = ETH_SPEED_NUM_50G;
5416        if (link_speeds & ETH_LINK_SPEED_100G)
5417                speed = ETH_SPEED_NUM_100G;
5418        if (link_speeds & ETH_LINK_SPEED_200G)
5419                speed = ETH_SPEED_NUM_200G;
5420
5421        return speed;
5422}
5423
5424static uint8_t
5425hns3_get_link_duplex(uint32_t link_speeds)
5426{
5427        if ((link_speeds & ETH_LINK_SPEED_10M_HD) ||
5428            (link_speeds & ETH_LINK_SPEED_100M_HD))
5429                return ETH_LINK_HALF_DUPLEX;
5430        else
5431                return ETH_LINK_FULL_DUPLEX;
5432}
5433
5434static int
5435hns3_set_copper_port_link_speed(struct hns3_hw *hw,
5436                                struct hns3_set_link_speed_cfg *cfg)
5437{
5438        struct hns3_cmd_desc desc[HNS3_PHY_PARAM_CFG_BD_NUM];
5439        struct hns3_phy_params_bd0_cmd *req;
5440        uint16_t i;
5441
5442        for (i = 0; i < HNS3_PHY_PARAM_CFG_BD_NUM - 1; i++) {
5443                hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG,
5444                                          false);
5445                desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
5446        }
5447        hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG, false);
5448        req = (struct hns3_phy_params_bd0_cmd *)desc[0].data;
5449        req->autoneg = cfg->autoneg;
5450
5451        /*
5452         * The full speed capability is used to negotiate when
5453         * auto-negotiation is enabled.
5454         */
5455        if (cfg->autoneg) {
5456                req->advertising = HNS3_PHY_LINK_SPEED_10M_BIT |
5457                                    HNS3_PHY_LINK_SPEED_10M_HD_BIT |
5458                                    HNS3_PHY_LINK_SPEED_100M_BIT |
5459                                    HNS3_PHY_LINK_SPEED_100M_HD_BIT |
5460                                    HNS3_PHY_LINK_SPEED_1000M_BIT;
5461        } else {
5462                req->speed = cfg->speed;
5463                req->duplex = cfg->duplex;
5464        }
5465
5466        return hns3_cmd_send(hw, desc, HNS3_PHY_PARAM_CFG_BD_NUM);
5467}
5468
5469static int
5470hns3_set_autoneg(struct hns3_hw *hw, bool enable)
5471{
5472        struct hns3_config_auto_neg_cmd *req;
5473        struct hns3_cmd_desc desc;
5474        uint32_t flag = 0;
5475        int ret;
5476
5477        hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_AN_MODE, false);
5478
5479        req = (struct hns3_config_auto_neg_cmd *)desc.data;
5480        if (enable)
5481                hns3_set_bit(flag, HNS3_MAC_CFG_AN_EN_B, 1);
5482        req->cfg_an_cmd_flag = rte_cpu_to_le_32(flag);
5483
5484        ret = hns3_cmd_send(hw, &desc, 1);
5485        if (ret)
5486                hns3_err(hw, "autoneg set cmd failed, ret = %d.", ret);
5487
5488        return ret;
5489}
5490
5491static int
5492hns3_set_fiber_port_link_speed(struct hns3_hw *hw,
5493                               struct hns3_set_link_speed_cfg *cfg)
5494{
5495        int ret;
5496
5497        if (hw->mac.support_autoneg) {
5498                ret = hns3_set_autoneg(hw, cfg->autoneg);
5499                if (ret) {
5500                        hns3_err(hw, "failed to configure auto-negotiation.");
5501                        return ret;
5502                }
5503
5504                /*
5505                 * To enable auto-negotiation, we only need to open the switch
5506                 * of auto-negotiation, then firmware sets all speed
5507                 * capabilities.
5508                 */
5509                if (cfg->autoneg)
5510                        return 0;
5511        }
5512
5513        /*
5514         * Some hardware doesn't support auto-negotiation, but users may not
5515         * configure link_speeds (default 0), which means auto-negotiation.
5516         * In this case, a warning message need to be printed, instead of
5517         * an error.
5518         */
5519        if (cfg->autoneg) {
5520                hns3_warn(hw, "auto-negotiation is not supported, use default fixed speed!");
5521                return 0;
5522        }
5523
5524        return hns3_cfg_mac_speed_dup(hw, cfg->speed, cfg->duplex);
5525}
5526
5527static int
5528hns3_set_port_link_speed(struct hns3_hw *hw,
5529                         struct hns3_set_link_speed_cfg *cfg)
5530{
5531        int ret;
5532
5533        if (hw->mac.media_type == HNS3_MEDIA_TYPE_COPPER) {
5534#if defined(RTE_HNS3_ONLY_1630_FPGA)
5535                struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
5536                if (pf->is_tmp_phy)
5537                        return 0;
5538#endif
5539
5540                ret = hns3_set_copper_port_link_speed(hw, cfg);
5541                if (ret) {
5542                        hns3_err(hw, "failed to set copper port link speed,"
5543                                 "ret = %d.", ret);
5544                        return ret;
5545                }
5546        } else if (hw->mac.media_type == HNS3_MEDIA_TYPE_FIBER) {
5547                ret = hns3_set_fiber_port_link_speed(hw, cfg);
5548                if (ret) {
5549                        hns3_err(hw, "failed to set fiber port link speed,"
5550                                 "ret = %d.", ret);
5551                        return ret;
5552                }
5553        }
5554
5555        return 0;
5556}
5557
5558static int
5559hns3_apply_link_speed(struct hns3_hw *hw)
5560{
5561        struct rte_eth_conf *conf = &hw->data->dev_conf;
5562        struct hns3_set_link_speed_cfg cfg;
5563
5564        memset(&cfg, 0, sizeof(struct hns3_set_link_speed_cfg));
5565        cfg.autoneg = (conf->link_speeds == ETH_LINK_SPEED_AUTONEG) ?
5566                        ETH_LINK_AUTONEG : ETH_LINK_FIXED;
5567        if (cfg.autoneg != ETH_LINK_AUTONEG) {
5568                cfg.speed = hns3_get_link_speed(conf->link_speeds);
5569                cfg.duplex = hns3_get_link_duplex(conf->link_speeds);
5570        }
5571
5572        return hns3_set_port_link_speed(hw, &cfg);
5573}
5574
/*
 * Core start sequence: refresh queue mapping and TM configuration, bring
 * up queues, then enable the MAC and apply the configured link speed.
 * On MAC/speed failure the queues and mbufs brought up here are torn
 * down again before returning. The step order is load-bearing.
 */
static int
hns3_do_start(struct hns3_adapter *hns, bool reset_queue)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	ret = hns3_update_queue_map_configure(hns);
	if (ret) {
		hns3_err(hw, "failed to update queue mapping configuration, ret = %d",
			 ret);
		return ret;
	}

	/* Note: hns3_tm_conf_update must be called after configuring DCB. */
	ret = hns3_tm_conf_update(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "failed to update tm conf, ret = %d.", ret);
		return ret;
	}

	hns3_enable_rxd_adv_layout(hw);

	ret = hns3_init_queues(hns, reset_queue);
	if (ret) {
		PMD_INIT_LOG(ERR, "failed to init queues, ret = %d.", ret);
		return ret;
	}

	/* Enable MAC Tx/Rx only after the queues are ready. */
	ret = hns3_cfg_mac_mode(hw, true);
	if (ret) {
		PMD_INIT_LOG(ERR, "failed to enable MAC, ret = %d", ret);
		goto err_config_mac_mode;
	}

	ret = hns3_apply_link_speed(hw);
	if (ret)
		goto err_set_link_speed;

	return 0;

err_set_link_speed:
	/* Best-effort MAC disable while unwinding; original ret is kept. */
	(void)hns3_cfg_mac_mode(hw, false);

err_config_mac_mode:
	hns3_dev_release_mbufs(hns);
	/*
	 * Here is exception handling, hns3_reset_all_tqps will have the
	 * corresponding error message if it is handled incorrectly, so it is
	 * not necessary to check hns3_reset_all_tqps return value, here keep
	 * ret as the error code causing the exception.
	 */
	(void)hns3_reset_all_tqps(hns);
	return ret;
}
5629
/*
 * Bind each Rx queue to an interrupt vector for Rx-interrupt mode.
 * Allocates the eventfds and the per-queue vector table, then programs
 * the queue-to-vector mapping in hardware. Returns 0 when Rx interrupts
 * are not requested/possible or on success; a negative error code on
 * failure (with the eventfds and vector table released again).
 */
static int
hns3_map_rx_interrupt(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint16_t base = RTE_INTR_VEC_ZERO_OFFSET;
	uint16_t vec = RTE_INTR_VEC_ZERO_OFFSET;
	uint32_t intr_vector;
	uint16_t q_id;
	int ret;

	/*
	 * hns3 needs a separate interrupt to be used as event interrupt which
	 * could not be shared with task queue pair, so KERNEL drivers need
	 * support multiple interrupt vectors.
	 */
	if (dev->data->dev_conf.intr_conf.rxq == 0 ||
	    !rte_intr_cap_multiple(intr_handle))
		return 0;

	rte_intr_disable(intr_handle);
	intr_vector = hw->used_rx_queues;
	/* creates event fd for each intr vector when MSIX is used */
	if (rte_intr_efd_enable(intr_handle, intr_vector))
		return -EINVAL;

	/* Allocate the per-queue vector table if not already present. */
	if (intr_handle->intr_vec == NULL) {
		intr_handle->intr_vec =
			rte_zmalloc("intr_vec",
				    hw->used_rx_queues * sizeof(int), 0);
		if (intr_handle->intr_vec == NULL) {
			hns3_err(hw, "failed to allocate %u rx_queues intr_vec",
					hw->used_rx_queues);
			ret = -ENOMEM;
			goto alloc_intr_vec_error;
		}
	}

	/* Vector 0 is reserved for the misc interrupt when sharing is off. */
	if (rte_intr_allow_others(intr_handle)) {
		vec = RTE_INTR_VEC_RXTX_OFFSET;
		base = RTE_INTR_VEC_RXTX_OFFSET;
	}

	for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
		ret = hns3_bind_ring_with_vector(hw, vec, true,
						 HNS3_RING_TYPE_RX, q_id);
		if (ret)
			goto bind_vector_error;
		intr_handle->intr_vec[q_id] = vec;
		/*
		 * If there are not enough efds (e.g. not enough interrupt),
		 * remaining queues will be bond to the last interrupt.
		 */
		if (vec < base + intr_handle->nb_efd - 1)
			vec++;
	}
	rte_intr_enable(intr_handle);
	return 0;

bind_vector_error:
	rte_free(intr_handle->intr_vec);
	intr_handle->intr_vec = NULL;
alloc_intr_vec_error:
	rte_intr_efd_disable(intr_handle);
	return ret;
}
5697
5698static int
5699hns3_restore_rx_interrupt(struct hns3_hw *hw)
5700{
5701        struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
5702        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5703        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5704        uint16_t q_id;
5705        int ret;
5706
5707        if (dev->data->dev_conf.intr_conf.rxq == 0)
5708                return 0;
5709
5710        if (rte_intr_dp_is_en(intr_handle)) {
5711                for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
5712                        ret = hns3_bind_ring_with_vector(hw,
5713                                        intr_handle->intr_vec[q_id], true,
5714                                        HNS3_RING_TYPE_RX, q_id);
5715                        if (ret)
5716                                return ret;
5717                }
5718        }
5719
5720        return 0;
5721}
5722
/* Re-install filter state (currently only RSS flow rules) after start. */
static void
hns3_restore_filter(struct rte_eth_dev *dev)
{
	hns3_restore_rss_filter(dev);
}
5728
/*
 * .dev_start ops: bring the port fully up.
 *
 * Under hw->lock: start the MAC/queue level (hns3_do_start), map Rx queue
 * interrupts and enable all Tx/Rx queues. Outside the lock: switch in the
 * datapath burst functions, restore flow rules, enable Rx queue interrupts,
 * kick the TQP master switch and arm the periodic service alarm.
 * Returns 0 on success, -EBUSY while a reset is in progress, or a negative
 * errno from a failed start step (with the already-done steps rolled back).
 */
static int
hns3_dev_start(struct rte_eth_dev *dev)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_hw *hw = &hns->hw;
        int ret;

        PMD_INIT_FUNC_TRACE();
        /* Starting is not allowed while the reset service owns the device. */
        if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
                return -EBUSY;

        rte_spinlock_lock(&hw->lock);
        hw->adapter_state = HNS3_NIC_STARTING;

        ret = hns3_do_start(hns, true);
        if (ret) {
                hw->adapter_state = HNS3_NIC_CONFIGURED;
                rte_spinlock_unlock(&hw->lock);
                return ret;
        }
        ret = hns3_map_rx_interrupt(dev);
        if (ret)
                goto map_rx_inter_err;

        /*
         * There are three register used to control the status of a TQP
         * (contains a pair of Tx queue and Rx queue) in the new version network
         * engine. One is used to control the enabling of Tx queue, the other is
         * used to control the enabling of Rx queue, and the last is the master
         * switch used to control the enabling of the tqp. The Tx register and
         * TQP register must be enabled at the same time to enable a Tx queue.
         * The same applies to the Rx queue. For the older network engine, this
         * function only refresh the enabled flag, and it is used to update the
         * status of queue in the dpdk framework.
         */
        ret = hns3_start_all_txqs(dev);
        if (ret)
                goto map_rx_inter_err;

        ret = hns3_start_all_rxqs(dev);
        if (ret)
                goto start_all_rxqs_fail;

        hw->adapter_state = HNS3_NIC_STARTED;
        rte_spinlock_unlock(&hw->lock);

        hns3_rx_scattered_calc(dev);
        hns3_set_rxtx_function(dev);
        hns3_mp_req_start_rxtx(dev);

        hns3_restore_filter(dev);

        /* Enable interrupt of all rx queues before enabling queues */
        hns3_dev_all_rx_queue_intr_enable(hw, true);

        /*
         * After finished the initialization, enable tqps to receive/transmit
         * packets and refresh all queue status.
         */
        hns3_start_tqps(hw);

        hns3_tm_dev_start_proc(hw);

        if (dev->data->dev_conf.intr_conf.lsc != 0)
                hns3_dev_link_update(dev, 0);
        rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, dev);

        hns3_info(hw, "hns3 dev start successful!");

        return 0;

start_all_rxqs_fail:
        hns3_stop_all_txqs(dev);
map_rx_inter_err:
        (void)hns3_do_stop(hns);
        hw->adapter_state = HNS3_NIC_CONFIGURED;
        rte_spinlock_unlock(&hw->lock);

        return ret;
}
5809
/*
 * Stop the MAC and the hardware queues.
 *
 * Also invoked from .stop_service to prepare a reset: mbufs are kept while a
 * reset is in progress (hardware may still reference them), and the steps
 * needing the command queue (MAC address removal, TQP reset) are skipped
 * once hw->reset.disable_cmd is set. Returns 0 or a negative errno.
 */
static int
hns3_do_stop(struct hns3_adapter *hns)
{
        struct hns3_hw *hw = &hns->hw;
        int ret;

        /*
         * The "hns3_do_stop" function will also be called by .stop_service to
         * prepare reset. At the time of global or IMP reset, the command cannot
         * be sent to stop the tx/rx queues. The mbuf in Tx/Rx queues may be
         * accessed during the reset process. So the mbuf can not be released
         * during reset and is required to be released after the reset is
         * completed.
         */
        if (__atomic_load_n(&hw->reset.resetting,  __ATOMIC_RELAXED) == 0)
                hns3_dev_release_mbufs(hns);

        ret = hns3_cfg_mac_mode(hw, false);
        if (ret)
                return ret;
        hw->mac.link_status = ETH_LINK_DOWN;

        if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) {
                hns3_configure_all_mac_addr(hns, true);
                ret = hns3_reset_all_tqps(hns);
                if (ret) {
                        hns3_err(hw, "failed to reset all queues ret = %d.",
                                 ret);
                        return ret;
                }
        }
        hw->mac.default_addr_setted = false;
        return 0;
}
5844
5845static void
5846hns3_unmap_rx_interrupt(struct rte_eth_dev *dev)
5847{
5848        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5849        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5850        struct hns3_adapter *hns = dev->data->dev_private;
5851        struct hns3_hw *hw = &hns->hw;
5852        uint8_t base = RTE_INTR_VEC_ZERO_OFFSET;
5853        uint8_t vec = RTE_INTR_VEC_ZERO_OFFSET;
5854        uint16_t q_id;
5855
5856        if (dev->data->dev_conf.intr_conf.rxq == 0)
5857                return;
5858
5859        /* unmap the ring with vector */
5860        if (rte_intr_allow_others(intr_handle)) {
5861                vec = RTE_INTR_VEC_RXTX_OFFSET;
5862                base = RTE_INTR_VEC_RXTX_OFFSET;
5863        }
5864        if (rte_intr_dp_is_en(intr_handle)) {
5865                for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
5866                        (void)hns3_bind_ring_with_vector(hw, vec, false,
5867                                                         HNS3_RING_TYPE_RX,
5868                                                         q_id);
5869                        if (vec < base + intr_handle->nb_efd - 1)
5870                                vec++;
5871                }
5872        }
5873        /* Clean datapath event and queue/vec mapping */
5874        rte_intr_efd_disable(intr_handle);
5875        if (intr_handle->intr_vec) {
5876                rte_free(intr_handle->intr_vec);
5877                intr_handle->intr_vec = NULL;
5878        }
5879}
5880
/*
 * .dev_stop ops: stop the datapath without releasing resources.
 *
 * The burst functions are swapped out and secondary processes are told to
 * stop *before* the hardware queues are disabled; the delay lets in-flight
 * datapath calls drain. When a reset is in progress the hardware part is
 * skipped — the reset service owns the device then. Always returns 0.
 */
static int
hns3_dev_stop(struct rte_eth_dev *dev)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_hw *hw = &hns->hw;

        PMD_INIT_FUNC_TRACE();
        dev->data->dev_started = 0;

        hw->adapter_state = HNS3_NIC_STOPPING;
        hns3_set_rxtx_function(dev);
        rte_wmb();
        /* Disable datapath on secondary process. */
        hns3_mp_req_stop_rxtx(dev);
        /* Prevent crashes when queues are still in use. */
        rte_delay_ms(hw->cfg_max_queues);

        rte_spinlock_lock(&hw->lock);
        if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
                hns3_tm_dev_stop_proc(hw);
                hns3_config_mac_tnl_int(hw, false);
                hns3_stop_tqps(hw);
                hns3_do_stop(hns);
                hns3_unmap_rx_interrupt(dev);
                hw->adapter_state = HNS3_NIC_CONFIGURED;
        }
        hns3_rx_scattered_reset(dev);
        rte_eal_alarm_cancel(hns3_service_handler, dev);
        hns3_stop_report_lse(dev);
        rte_spinlock_unlock(&hw->lock);

        return 0;
}
5914
/*
 * .dev_close ops: stop the port if it is running, abort any in-flight reset
 * and release all driver resources. Only effective in the primary process.
 * Returns the result of hns3_dev_stop() (0 if the port was not started).
 */
static int
hns3_dev_close(struct rte_eth_dev *eth_dev)
{
        struct hns3_adapter *hns = eth_dev->data->dev_private;
        struct hns3_hw *hw = &hns->hw;
        int ret = 0;

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        if (hw->adapter_state == HNS3_NIC_STARTED)
                ret = hns3_dev_stop(eth_dev);

        hw->adapter_state = HNS3_NIC_CLOSING;
        /* Make sure the reset service is no longer touching the device. */
        hns3_reset_abort(hns);
        hw->adapter_state = HNS3_NIC_CLOSED;

        /* Remove hardware table entries, then free software resources. */
        hns3_configure_all_mc_mac_addr(hns, true);
        hns3_remove_all_vlan_table(hns);
        hns3_vlan_txvlan_cfg(hns, HNS3_PORT_BASE_VLAN_DISABLE, 0);
        hns3_uninit_pf(eth_dev);
        hns3_free_all_queues(eth_dev);
        rte_free(hw->reset.wait_data);
        hns3_mp_uninit_primary();
        hns3_warn(hw, "Close port %u finished", hw->data->port_id);

        return ret;
}
5943
5944static void
5945hns3_get_autoneg_rxtx_pause_copper(struct hns3_hw *hw, bool *rx_pause,
5946                                   bool *tx_pause)
5947{
5948        struct hns3_mac *mac = &hw->mac;
5949        uint32_t advertising = mac->advertising;
5950        uint32_t lp_advertising = mac->lp_advertising;
5951        *rx_pause = false;
5952        *tx_pause = false;
5953
5954        if (advertising & lp_advertising & HNS3_PHY_LINK_MODE_PAUSE_BIT) {
5955                *rx_pause = true;
5956                *tx_pause = true;
5957        } else if (advertising & lp_advertising &
5958                   HNS3_PHY_LINK_MODE_ASYM_PAUSE_BIT) {
5959                if (advertising & HNS3_PHY_LINK_MODE_PAUSE_BIT)
5960                        *rx_pause = true;
5961                else if (lp_advertising & HNS3_PHY_LINK_MODE_PAUSE_BIT)
5962                        *tx_pause = true;
5963        }
5964}
5965
5966static enum hns3_fc_mode
5967hns3_get_autoneg_fc_mode(struct hns3_hw *hw)
5968{
5969        enum hns3_fc_mode current_mode;
5970        bool rx_pause = false;
5971        bool tx_pause = false;
5972
5973        switch (hw->mac.media_type) {
5974        case HNS3_MEDIA_TYPE_COPPER:
5975                hns3_get_autoneg_rxtx_pause_copper(hw, &rx_pause, &tx_pause);
5976                break;
5977
5978        /*
5979         * Flow control auto-negotiation is not supported for fiber and
5980         * backpalne media type.
5981         */
5982        case HNS3_MEDIA_TYPE_FIBER:
5983        case HNS3_MEDIA_TYPE_BACKPLANE:
5984                hns3_err(hw, "autoneg FC mode can't be obtained, but flow control auto-negotiation is enabled.");
5985                current_mode = hw->requested_fc_mode;
5986                goto out;
5987        default:
5988                hns3_err(hw, "autoneg FC mode can't be obtained for unknown media type(%u).",
5989                         hw->mac.media_type);
5990                current_mode = HNS3_FC_NONE;
5991                goto out;
5992        }
5993
5994        if (rx_pause && tx_pause)
5995                current_mode = HNS3_FC_FULL;
5996        else if (rx_pause)
5997                current_mode = HNS3_FC_RX_PAUSE;
5998        else if (tx_pause)
5999                current_mode = HNS3_FC_TX_PAUSE;
6000        else
6001                current_mode = HNS3_FC_NONE;
6002
6003out:
6004        return current_mode;
6005}
6006
6007static enum hns3_fc_mode
6008hns3_get_current_fc_mode(struct rte_eth_dev *dev)
6009{
6010        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6011        struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6012        struct hns3_mac *mac = &hw->mac;
6013
6014        /*
6015         * When the flow control mode is obtained, the device may not complete
6016         * auto-negotiation. It is necessary to wait for link establishment.
6017         */
6018        (void)hns3_dev_link_update(dev, 1);
6019
6020        /*
6021         * If the link auto-negotiation of the nic is disabled, or the flow
6022         * control auto-negotiation is not supported, the forced flow control
6023         * mode is used.
6024         */
6025        if (mac->link_autoneg == 0 || !pf->support_fc_autoneg)
6026                return hw->requested_fc_mode;
6027
6028        return hns3_get_autoneg_fc_mode(hw);
6029}
6030
6031static int
6032hns3_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
6033{
6034        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6035        struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6036        enum hns3_fc_mode current_mode;
6037
6038        current_mode = hns3_get_current_fc_mode(dev);
6039        switch (current_mode) {
6040        case HNS3_FC_FULL:
6041                fc_conf->mode = RTE_FC_FULL;
6042                break;
6043        case HNS3_FC_TX_PAUSE:
6044                fc_conf->mode = RTE_FC_TX_PAUSE;
6045                break;
6046        case HNS3_FC_RX_PAUSE:
6047                fc_conf->mode = RTE_FC_RX_PAUSE;
6048                break;
6049        case HNS3_FC_NONE:
6050        default:
6051                fc_conf->mode = RTE_FC_NONE;
6052                break;
6053        }
6054
6055        fc_conf->pause_time = pf->pause_time;
6056        fc_conf->autoneg = pf->support_fc_autoneg ? hw->mac.link_autoneg : 0;
6057
6058        return 0;
6059}
6060
6061static int
6062hns3_check_fc_autoneg_valid(struct hns3_hw *hw, uint8_t autoneg)
6063{
6064        struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
6065
6066        if (!pf->support_fc_autoneg) {
6067                if (autoneg != 0) {
6068                        hns3_err(hw, "unsupported fc auto-negotiation setting.");
6069                        return -EOPNOTSUPP;
6070                }
6071
6072                /*
6073                 * Flow control auto-negotiation of the NIC is not supported,
6074                 * but other auto-negotiation features may be supported.
6075                 */
6076                if (autoneg != hw->mac.link_autoneg) {
6077                        hns3_err(hw, "please use 'link_speeds' in struct rte_eth_conf to disable autoneg!");
6078                        return -EOPNOTSUPP;
6079                }
6080
6081                return 0;
6082        }
6083
6084        /*
6085         * If flow control auto-negotiation of the NIC is supported, all
6086         * auto-negotiation features are supported.
6087         */
6088        if (autoneg != hw->mac.link_autoneg) {
6089                hns3_err(hw, "please use 'link_speeds' in struct rte_eth_conf to change autoneg!");
6090                return -EOPNOTSUPP;
6091        }
6092
6093        return 0;
6094}
6095
6096static int
6097hns3_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
6098{
6099        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6100        struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6101        int ret;
6102
6103        if (fc_conf->high_water || fc_conf->low_water ||
6104            fc_conf->send_xon || fc_conf->mac_ctrl_frame_fwd) {
6105                hns3_err(hw, "Unsupported flow control settings specified, "
6106                         "high_water(%u), low_water(%u), send_xon(%u) and "
6107                         "mac_ctrl_frame_fwd(%u) must be set to '0'",
6108                         fc_conf->high_water, fc_conf->low_water,
6109                         fc_conf->send_xon, fc_conf->mac_ctrl_frame_fwd);
6110                return -EINVAL;
6111        }
6112
6113        ret = hns3_check_fc_autoneg_valid(hw, fc_conf->autoneg);
6114        if (ret)
6115                return ret;
6116
6117        if (!fc_conf->pause_time) {
6118                hns3_err(hw, "Invalid pause time %u setting.",
6119                         fc_conf->pause_time);
6120                return -EINVAL;
6121        }
6122
6123        if (!(hw->current_fc_status == HNS3_FC_STATUS_NONE ||
6124            hw->current_fc_status == HNS3_FC_STATUS_MAC_PAUSE)) {
6125                hns3_err(hw, "PFC is enabled. Cannot set MAC pause. "
6126                         "current_fc_status = %d", hw->current_fc_status);
6127                return -EOPNOTSUPP;
6128        }
6129
6130        if (hw->num_tc > 1 && !pf->support_multi_tc_pause) {
6131                hns3_err(hw, "in multi-TC scenarios, MAC pause is not supported.");
6132                return -EOPNOTSUPP;
6133        }
6134
6135        rte_spinlock_lock(&hw->lock);
6136        ret = hns3_fc_enable(dev, fc_conf);
6137        rte_spinlock_unlock(&hw->lock);
6138
6139        return ret;
6140}
6141
6142static int
6143hns3_priority_flow_ctrl_set(struct rte_eth_dev *dev,
6144                            struct rte_eth_pfc_conf *pfc_conf)
6145{
6146        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6147        int ret;
6148
6149        if (!hns3_dev_dcb_supported(hw)) {
6150                hns3_err(hw, "This port does not support dcb configurations.");
6151                return -EOPNOTSUPP;
6152        }
6153
6154        if (pfc_conf->fc.high_water || pfc_conf->fc.low_water ||
6155            pfc_conf->fc.send_xon || pfc_conf->fc.mac_ctrl_frame_fwd) {
6156                hns3_err(hw, "Unsupported flow control settings specified, "
6157                         "high_water(%u), low_water(%u), send_xon(%u) and "
6158                         "mac_ctrl_frame_fwd(%u) must be set to '0'",
6159                         pfc_conf->fc.high_water, pfc_conf->fc.low_water,
6160                         pfc_conf->fc.send_xon,
6161                         pfc_conf->fc.mac_ctrl_frame_fwd);
6162                return -EINVAL;
6163        }
6164        if (pfc_conf->fc.autoneg) {
6165                hns3_err(hw, "Unsupported fc auto-negotiation setting.");
6166                return -EINVAL;
6167        }
6168        if (pfc_conf->fc.pause_time == 0) {
6169                hns3_err(hw, "Invalid pause time %u setting.",
6170                         pfc_conf->fc.pause_time);
6171                return -EINVAL;
6172        }
6173
6174        if (!(hw->current_fc_status == HNS3_FC_STATUS_NONE ||
6175            hw->current_fc_status == HNS3_FC_STATUS_PFC)) {
6176                hns3_err(hw, "MAC pause is enabled. Cannot set PFC."
6177                             "current_fc_status = %d", hw->current_fc_status);
6178                return -EOPNOTSUPP;
6179        }
6180
6181        rte_spinlock_lock(&hw->lock);
6182        ret = hns3_dcb_pfc_enable(dev, pfc_conf);
6183        rte_spinlock_unlock(&hw->lock);
6184
6185        return ret;
6186}
6187
/*
 * .get_dcb_info ops: report the current DCB configuration.
 *
 * nb_tcs reflects the configured TC count only when DCB is enabled in
 * rxmode.mq_mode; otherwise a single TC is reported. Queue ranges come
 * from the per-TC TQP layout. Always returns 0.
 */
static int
hns3_get_dcb_info(struct rte_eth_dev *dev, struct rte_eth_dcb_info *dcb_info)
{
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
        int i;

        rte_spinlock_lock(&hw->lock);
        if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG)
                dcb_info->nb_tcs = pf->local_max_tc;
        else
                dcb_info->nb_tcs = 1;

        for (i = 0; i < HNS3_MAX_USER_PRIO; i++)
                dcb_info->prio_tc[i] = hw->dcb_info.prio_tc[i];
        /* TC bandwidth weights come from the first (only) priority group. */
        for (i = 0; i < dcb_info->nb_tcs; i++)
                dcb_info->tc_bws[i] = hw->dcb_info.pg_info[0].tc_dwrr[i];

        /* Rx queues are laid out per TC in equal chunks of alloc_rss_size. */
        for (i = 0; i < hw->num_tc; i++) {
                dcb_info->tc_queue.tc_rxq[0][i].base = hw->alloc_rss_size * i;
                dcb_info->tc_queue.tc_txq[0][i].base =
                                                hw->tc_queue[i].tqp_offset;
                dcb_info->tc_queue.tc_rxq[0][i].nb_queue = hw->alloc_rss_size;
                dcb_info->tc_queue.tc_txq[0][i].nb_queue =
                                                hw->tc_queue[i].tqp_count;
        }
        rte_spinlock_unlock(&hw->lock);

        return 0;
}
6219
/*
 * Re-initialize the device after a reset: bring the command queue back,
 * reset all TQPs, redo the hardware initialization and re-enable hardware
 * error interrupts — strictly in that order, since every later step needs
 * the command queue. Returns 0 on success or a negative errno.
 */
static int
hns3_reinit_dev(struct hns3_adapter *hns)
{
        struct hns3_hw *hw = &hns->hw;
        int ret;

        ret = hns3_cmd_init(hw);
        if (ret) {
                hns3_err(hw, "Failed to init cmd: %d", ret);
                return ret;
        }

        ret = hns3_reset_all_tqps(hns);
        if (ret) {
                hns3_err(hw, "Failed to reset all queues: %d", ret);
                return ret;
        }

        ret = hns3_init_hardware(hns);
        if (ret) {
                hns3_err(hw, "Failed to init hardware: %d", ret);
                return ret;
        }

        ret = hns3_enable_hw_error_intr(hns, true);
        if (ret) {
                hns3_err(hw, "fail to enable hw error interrupts: %d",
                             ret);
                return ret;
        }
        hns3_info(hw, "Reset done, driver initialization finished.");

        return 0;
}
6254
6255static bool
6256is_pf_reset_done(struct hns3_hw *hw)
6257{
6258        uint32_t val, reg, reg_bit;
6259
6260        switch (hw->reset.level) {
6261        case HNS3_IMP_RESET:
6262                reg = HNS3_GLOBAL_RESET_REG;
6263                reg_bit = HNS3_IMP_RESET_BIT;
6264                break;
6265        case HNS3_GLOBAL_RESET:
6266                reg = HNS3_GLOBAL_RESET_REG;
6267                reg_bit = HNS3_GLOBAL_RESET_BIT;
6268                break;
6269        case HNS3_FUNC_RESET:
6270                reg = HNS3_FUN_RST_ING;
6271                reg_bit = HNS3_FUN_RST_ING_B;
6272                break;
6273        case HNS3_FLR_RESET:
6274        default:
6275                hns3_err(hw, "Wait for unsupported reset level: %d",
6276                         hw->reset.level);
6277                return true;
6278        }
6279        val = hns3_read_dev(hw, reg);
6280        if (hns3_get_bit(val, reg_bit))
6281                return false;
6282        else
6283                return true;
6284}
6285
6286bool
6287hns3_is_reset_pending(struct hns3_adapter *hns)
6288{
6289        struct hns3_hw *hw = &hns->hw;
6290        enum hns3_reset_level reset;
6291
6292        hns3_check_event_cause(hns, NULL);
6293        reset = hns3_get_reset_level(hns, &hw->reset.pending);
6294        if (reset != HNS3_NONE_RESET && hw->reset.level != HNS3_NONE_RESET &&
6295            hw->reset.level < reset) {
6296                hns3_warn(hw, "High level reset %d is pending", reset);
6297                return true;
6298        }
6299        reset = hns3_get_reset_level(hns, &hw->reset.request);
6300        if (reset != HNS3_NONE_RESET && hw->reset.level != HNS3_NONE_RESET &&
6301            hw->reset.level < reset) {
6302                hns3_warn(hw, "High level reset %d is request", reset);
6303                return true;
6304        }
6305        return false;
6306}
6307
/*
 * Reset step 4: wait (asynchronously, via the alarm service) for hardware
 * to report that the reset completed.
 *
 * Driven by the hw->reset.wait_data state machine: on the first call the
 * wait request is armed and hns3_wait_callback re-checks is_pf_reset_done()
 * every HNS3_RESET_WAIT_MS, up to HNS3_RESET_WAIT_CNT times. Returns 0 once
 * the hardware is ready, -EAGAIN while the wait is still running and -ETIME
 * on timeout.
 */
static int
hns3_wait_hardware_ready(struct hns3_adapter *hns)
{
        struct hns3_hw *hw = &hns->hw;
        struct hns3_wait_data *wait_data = hw->reset.wait_data;
        struct timeval tv;

        if (wait_data->result == HNS3_WAIT_SUCCESS)
                return 0;
        else if (wait_data->result == HNS3_WAIT_TIMEOUT) {
                hns3_clock_gettime(&tv);
                hns3_warn(hw, "Reset step4 hardware not ready after reset time=%ld.%.6ld",
                          tv.tv_sec, tv.tv_usec);
                return -ETIME;
        } else if (wait_data->result == HNS3_WAIT_REQUEST)
                return -EAGAIN;

        /* First call: arm the periodic completion check. */
        wait_data->hns = hns;
        wait_data->check_completion = is_pf_reset_done;
        wait_data->end_ms = (uint64_t)HNS3_RESET_WAIT_CNT *
                                HNS3_RESET_WAIT_MS + hns3_clock_gettime_ms();
        wait_data->interval = HNS3_RESET_WAIT_MS * USEC_PER_MSEC;
        wait_data->count = HNS3_RESET_WAIT_CNT;
        wait_data->result = HNS3_WAIT_REQUEST;
        rte_eal_alarm_set(wait_data->interval, hns3_wait_callback, wait_data);
        return -EAGAIN;
}
6335
6336static int
6337hns3_func_reset_cmd(struct hns3_hw *hw, int func_id)
6338{
6339        struct hns3_cmd_desc desc;
6340        struct hns3_reset_cmd *req = (struct hns3_reset_cmd *)desc.data;
6341
6342        hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_RST_TRIGGER, false);
6343        hns3_set_bit(req->mac_func_reset, HNS3_CFG_RESET_FUNC_B, 1);
6344        req->fun_reset_vfid = func_id;
6345
6346        return hns3_cmd_send(hw, &desc, 1);
6347}
6348
/*
 * Ask the IMP (firmware management processor) to reset itself.
 *
 * NOTE(review): opcode 0xFFFE and payload 0xeedd are undocumented magic
 * values understood by the firmware — presumably a private "IMP reset"
 * command; confirm against the firmware command reference before changing.
 */
static int
hns3_imp_reset_cmd(struct hns3_hw *hw)
{
        struct hns3_cmd_desc desc;

        hns3_cmd_setup_basic_desc(&desc, 0xFFFE, false);
        desc.data[0] = 0xeedd;

        return hns3_cmd_send(hw, &desc, 1);
}
6359
/*
 * Trigger the requested reset level in response to an MSI-X error event.
 *
 * Triggering is skipped while the reset status registers show a reset
 * already in progress. IMP and global resets are kicked directly through
 * firmware/hardware; a function reset is handed to the reset service via
 * hw->reset.pending. The request bit is cleared only for handled levels.
 */
static void
hns3_msix_process(struct hns3_adapter *hns, enum hns3_reset_level reset_level)
{
        struct hns3_hw *hw = &hns->hw;
        struct timeval tv;
        uint32_t val;

        hns3_clock_gettime(&tv);
        if (hns3_read_dev(hw, HNS3_GLOBAL_RESET_REG) ||
            hns3_read_dev(hw, HNS3_FUN_RST_ING)) {
                hns3_warn(hw, "Don't process msix during resetting time=%ld.%.6ld",
                          tv.tv_sec, tv.tv_usec);
                return;
        }

        switch (reset_level) {
        case HNS3_IMP_RESET:
                hns3_imp_reset_cmd(hw);
                hns3_warn(hw, "IMP Reset requested time=%ld.%.6ld",
                          tv.tv_sec, tv.tv_usec);
                break;
        case HNS3_GLOBAL_RESET:
                val = hns3_read_dev(hw, HNS3_GLOBAL_RESET_REG);
                hns3_set_bit(val, HNS3_GLOBAL_RESET_BIT, 1);
                hns3_write_dev(hw, HNS3_GLOBAL_RESET_REG, val);
                hns3_warn(hw, "Global Reset requested time=%ld.%.6ld",
                          tv.tv_sec, tv.tv_usec);
                break;
        case HNS3_FUNC_RESET:
                hns3_warn(hw, "PF Reset requested time=%ld.%.6ld",
                          tv.tv_sec, tv.tv_usec);
                /* schedule again to check later */
                hns3_atomic_set_bit(HNS3_FUNC_RESET, &hw->reset.pending);
                hns3_schedule_reset(hns);
                break;
        default:
                hns3_warn(hw, "Unsupported reset level: %d", reset_level);
                return;
        }
        hns3_atomic_clear_bit(reset_level, &hw->reset.request);
}
6401
6402static enum hns3_reset_level
6403hns3_get_reset_level(struct hns3_adapter *hns, uint64_t *levels)
6404{
6405        struct hns3_hw *hw = &hns->hw;
6406        enum hns3_reset_level reset_level = HNS3_NONE_RESET;
6407
6408        /* Return the highest priority reset level amongst all */
6409        if (hns3_atomic_test_bit(HNS3_IMP_RESET, levels))
6410                reset_level = HNS3_IMP_RESET;
6411        else if (hns3_atomic_test_bit(HNS3_GLOBAL_RESET, levels))
6412                reset_level = HNS3_GLOBAL_RESET;
6413        else if (hns3_atomic_test_bit(HNS3_FUNC_RESET, levels))
6414                reset_level = HNS3_FUNC_RESET;
6415        else if (hns3_atomic_test_bit(HNS3_FLR_RESET, levels))
6416                reset_level = HNS3_FLR_RESET;
6417
6418        if (hw->reset.level != HNS3_NONE_RESET && reset_level < hw->reset.level)
6419                return HNS3_NONE_RESET;
6420
6421        return reset_level;
6422}
6423
6424static void
6425hns3_record_imp_error(struct hns3_adapter *hns)
6426{
6427        struct hns3_hw *hw = &hns->hw;
6428        uint32_t reg_val;
6429
6430        reg_val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG);
6431        if (hns3_get_bit(reg_val, HNS3_VECTOR0_IMP_RD_POISON_B)) {
6432                hns3_warn(hw, "Detected IMP RD poison!");
6433                hns3_set_bit(reg_val, HNS3_VECTOR0_IMP_RD_POISON_B, 0);
6434                hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val);
6435        }
6436
6437        if (hns3_get_bit(reg_val, HNS3_VECTOR0_IMP_CMDQ_ERR_B)) {
6438                hns3_warn(hw, "Detected IMP CMDQ error!");
6439                hns3_set_bit(reg_val, HNS3_VECTOR0_IMP_CMDQ_ERR_B, 0);
6440                hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val);
6441        }
6442}
6443
/*
 * Reset step 2: request the reset from hardware/firmware for the current
 * reset level.
 *
 * For a function reset the command queue is disabled immediately after the
 * request, because no command or mailbox is valid until hns3_cmd_init()
 * runs again. For an IMP reset the latched IMP errors are recorded first
 * and the IMP reset interrupt is enabled so firmware can proceed.
 * Returns 0 or a negative errno from the reset command.
 */
static int
hns3_prepare_reset(struct hns3_adapter *hns)
{
        struct hns3_hw *hw = &hns->hw;
        uint32_t reg_val;
        int ret;

        switch (hw->reset.level) {
        case HNS3_FUNC_RESET:
                ret = hns3_func_reset_cmd(hw, HNS3_PF_FUNC_ID);
                if (ret)
                        return ret;

                /*
                 * After performing pf reset, it is not necessary to do the
                 * mailbox handling or send any command to firmware, because
                 * any mailbox handling or command to firmware is only valid
                 * after hns3_cmd_init is called.
                 */
                __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
                hw->reset.stats.request_cnt++;
                break;
        case HNS3_IMP_RESET:
                hns3_record_imp_error(hns);
                reg_val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG);
                hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val |
                               BIT(HNS3_VECTOR0_IMP_RESET_INT_B));
                break;
        default:
                break;
        }
        return 0;
}
6477
6478static int
6479hns3_set_rst_done(struct hns3_hw *hw)
6480{
6481        struct hns3_pf_rst_done_cmd *req;
6482        struct hns3_cmd_desc desc;
6483
6484        req = (struct hns3_pf_rst_done_cmd *)desc.data;
6485        hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_PF_RST_DONE, false);
6486        req->pf_rst_done |= HNS3_PF_RESET_DONE_BIT;
6487        return hns3_cmd_send(hw, &desc, 1);
6488}
6489
/*
 * Reset service callback: quiesce the device before a reset.
 *
 * Cancels the periodic service, reports link down, swaps out the datapath
 * functions and stops the queues. Freeing of in-flight mbufs is deferred
 * (hw->reset.mbuf_deferred_free) because hardware may still access them
 * until the reset completes. Always returns 0.
 */
static int
hns3_stop_service(struct hns3_adapter *hns)
{
        struct hns3_hw *hw = &hns->hw;
        struct rte_eth_dev *eth_dev;

        eth_dev = &rte_eth_devices[hw->data->port_id];
        hw->mac.link_status = ETH_LINK_DOWN;
        if (hw->adapter_state == HNS3_NIC_STARTED) {
                rte_eal_alarm_cancel(hns3_service_handler, eth_dev);
                hns3_update_linkstatus_and_event(hw, false);
        }

        hns3_set_rxtx_function(eth_dev);
        rte_wmb();
        /* Disable datapath on secondary process. */
        hns3_mp_req_stop_rxtx(eth_dev);
        /* Let in-flight burst calls drain before touching the queues. */
        rte_delay_ms(hw->cfg_max_queues);

        rte_spinlock_lock(&hw->lock);
        if (hns->hw.adapter_state == HNS3_NIC_STARTED ||
            hw->adapter_state == HNS3_NIC_STOPPING) {
                hns3_enable_all_queues(hw, false);
                hns3_do_stop(hns);
                hw->reset.mbuf_deferred_free = true;
        } else
                hw->reset.mbuf_deferred_free = false;

        /*
         * It is cumbersome for hardware to pick-and-choose entries for deletion
         * from table space. Hence, for function reset software intervention is
         * required to delete the entries
         */
        if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0)
                hns3_configure_all_mc_mac_addr(hns, true);
        rte_spinlock_unlock(&hw->lock);

        return 0;
}
6529
/*
 * Reset service callback: bring the datapath back after reset recovery.
 *
 * For IMP/global resets the firmware is first told that the PF side is done
 * (hns3_set_rst_done). If the port was started, Rx queue interrupts and the
 * per-queue enable state are restored before all TQPs are re-enabled, and
 * the periodic service is re-armed through a deferred alarm (see the
 * deadlock note below). Always returns 0.
 */
static int
hns3_start_service(struct hns3_adapter *hns)
{
        struct hns3_hw *hw = &hns->hw;
        struct rte_eth_dev *eth_dev;

        if (hw->reset.level == HNS3_IMP_RESET ||
            hw->reset.level == HNS3_GLOBAL_RESET)
                hns3_set_rst_done(hw);
        eth_dev = &rte_eth_devices[hw->data->port_id];
        hns3_set_rxtx_function(eth_dev);
        hns3_mp_req_start_rxtx(eth_dev);
        if (hw->adapter_state == HNS3_NIC_STARTED) {
                /*
                 * This API parent function already hold the hns3_hw.lock, the
                 * hns3_service_handler may report lse, in bonding application
                 * it will call driver's ops which may acquire the hns3_hw.lock
                 * again, thus lead to deadlock.
                 * We defer calls hns3_service_handler to avoid the deadlock.
                 */
                rte_eal_alarm_set(HNS3_SERVICE_QUICK_INTERVAL,
                                  hns3_service_handler, eth_dev);

                /* Enable interrupt of all rx queues before enabling queues */
                hns3_dev_all_rx_queue_intr_enable(hw, true);
                /*
                 * Enable state of each rxq and txq will be recovered after
                 * reset, so we need to restore them before enable all tqps;
                 */
                hns3_restore_tqp_enable_state(hw);
                /*
                 * When finished the initialization, enable queues to receive
                 * and transmit packets.
                 */
                hns3_enable_all_queues(hw, true);
        }

        return 0;
}
6569
/*
 * Reset-ops "restore_conf" callback: re-program configuration that the
 * reset wiped — unicast/multicast MAC tables, promiscuous state, VLAN
 * tables and config, flow director rules, PTP, Rx interrupt mapping, GRO
 * and FEC — then restart the port if it was running before the reset.
 *
 * On failure the MAC address tables already programmed are removed again
 * (reverse order via the error labels) and the error code is returned.
 */
static int
hns3_restore_conf(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	ret = hns3_configure_all_mac_addr(hns, false);
	if (ret)
		return ret;

	ret = hns3_configure_all_mc_mac_addr(hns, false);
	if (ret)
		goto err_mc_mac;

	ret = hns3_dev_promisc_restore(hns);
	if (ret)
		goto err_promisc;

	ret = hns3_restore_vlan_table(hns);
	if (ret)
		goto err_promisc;

	ret = hns3_restore_vlan_conf(hns);
	if (ret)
		goto err_promisc;

	ret = hns3_restore_all_fdir_filter(hns);
	if (ret)
		goto err_promisc;

	ret = hns3_restore_ptp(hns);
	if (ret)
		goto err_promisc;

	ret = hns3_restore_rx_interrupt(hw);
	if (ret)
		goto err_promisc;

	ret = hns3_restore_gro_conf(hw);
	if (ret)
		goto err_promisc;

	ret = hns3_restore_fec(hw);
	if (ret)
		goto err_promisc;

	/* If the port was running before the reset, start it again. */
	if (hns->hw.adapter_state == HNS3_NIC_STARTED) {
		ret = hns3_do_start(hns, false);
		if (ret)
			goto err_promisc;
		hns3_info(hw, "hns3 dev restart successful!");
	} else if (hw->adapter_state == HNS3_NIC_STOPPING)
		hw->adapter_state = HNS3_NIC_CONFIGURED;
	return 0;

err_promisc:
	hns3_configure_all_mc_mac_addr(hns, true);
err_mc_mac:
	hns3_configure_all_mac_addr(hns, true);
	return ret;
}
6631
/*
 * Reset-ops "reset_service" callback, run as a delayed task: recover a
 * possibly lost interrupt, process any pending hardware reset, and then
 * honor any newly requested reset level.
 */
static void
hns3_reset_service(void *param)
{
	struct hns3_adapter *hns = (struct hns3_adapter *)param;
	struct hns3_hw *hw = &hns->hw;
	enum hns3_reset_level reset_level;
	struct timeval tv_delta;
	struct timeval tv_start;
	struct timeval tv;
	uint64_t msec;
	int ret;

	/*
	 * The interrupt is not triggered within the delay time.
	 * The interrupt may have been lost. It is necessary to handle
	 * the interrupt to recover from the error.
	 */
	if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
			    SCHEDULE_DEFERRED) {
		__atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED,
				  __ATOMIC_RELAXED);
		hns3_err(hw, "Handling interrupts in delayed tasks");
		hns3_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
		reset_level = hns3_get_reset_level(hns, &hw->reset.pending);
		if (reset_level == HNS3_NONE_RESET) {
			/* Escalate: force the deepest reset as a last resort */
			hns3_err(hw, "No reset level is set, try IMP reset");
			hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
		}
	}
	__atomic_store_n(&hw->reset.schedule, SCHEDULE_NONE, __ATOMIC_RELAXED);

	/*
	 * Check if there is any ongoing reset in the hardware. This status can
	 * be checked from reset_pending. If there is then, we need to wait for
	 * hardware to complete reset.
	 *    a. If we are able to figure out in reasonable time that hardware
	 *       has fully resetted then, we can proceed with driver, client
	 *       reset.
	 *    b. else, we can come back later to check this status so re-sched
	 *       now.
	 */
	reset_level = hns3_get_reset_level(hns, &hw->reset.pending);
	if (reset_level != HNS3_NONE_RESET) {
		/* Time the reset and warn when it took unusually long. */
		hns3_clock_gettime(&tv_start);
		ret = hns3_reset_process(hns, reset_level);
		hns3_clock_gettime(&tv);
		timersub(&tv, &tv_start, &tv_delta);
		msec = hns3_clock_calctime_ms(&tv_delta);
		if (msec > HNS3_RESET_PROCESS_MS)
			hns3_err(hw, "%d handle long time delta %" PRIu64
				     " ms time=%ld.%.6ld",
				 hw->reset.level, msec,
				 tv.tv_sec, tv.tv_usec);
		/* -EAGAIN: reset not finished; the framework re-schedules us */
		if (ret == -EAGAIN)
			return;
	}

	/* Check if we got any *new* reset requests to be honored */
	reset_level = hns3_get_reset_level(hns, &hw->reset.request);
	if (reset_level != HNS3_NONE_RESET)
		hns3_msix_process(hns, reset_level);
}
6694
6695static unsigned int
6696hns3_get_speed_capa_num(uint16_t device_id)
6697{
6698        unsigned int num;
6699
6700        switch (device_id) {
6701        case HNS3_DEV_ID_25GE:
6702        case HNS3_DEV_ID_25GE_RDMA:
6703                num = 2;
6704                break;
6705        case HNS3_DEV_ID_100G_RDMA_MACSEC:
6706        case HNS3_DEV_ID_200G_RDMA:
6707                num = 1;
6708                break;
6709        default:
6710                num = 0;
6711                break;
6712        }
6713
6714        return num;
6715}
6716
6717static int
6718hns3_get_speed_fec_capa(struct rte_eth_fec_capa *speed_fec_capa,
6719                        uint16_t device_id)
6720{
6721        switch (device_id) {
6722        case HNS3_DEV_ID_25GE:
6723        /* fallthrough */
6724        case HNS3_DEV_ID_25GE_RDMA:
6725                speed_fec_capa[0].speed = speed_fec_capa_tbl[1].speed;
6726                speed_fec_capa[0].capa = speed_fec_capa_tbl[1].capa;
6727
6728                /* In HNS3 device, the 25G NIC is compatible with 10G rate */
6729                speed_fec_capa[1].speed = speed_fec_capa_tbl[0].speed;
6730                speed_fec_capa[1].capa = speed_fec_capa_tbl[0].capa;
6731                break;
6732        case HNS3_DEV_ID_100G_RDMA_MACSEC:
6733                speed_fec_capa[0].speed = speed_fec_capa_tbl[4].speed;
6734                speed_fec_capa[0].capa = speed_fec_capa_tbl[4].capa;
6735                break;
6736        case HNS3_DEV_ID_200G_RDMA:
6737                speed_fec_capa[0].speed = speed_fec_capa_tbl[5].speed;
6738                speed_fec_capa[0].capa = speed_fec_capa_tbl[5].capa;
6739                break;
6740        default:
6741                return -ENOTSUP;
6742        }
6743
6744        return 0;
6745}
6746
6747static int
6748hns3_fec_get_capability(struct rte_eth_dev *dev,
6749                        struct rte_eth_fec_capa *speed_fec_capa,
6750                        unsigned int num)
6751{
6752        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6753        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
6754        uint16_t device_id = pci_dev->id.device_id;
6755        unsigned int capa_num;
6756        int ret;
6757
6758        capa_num = hns3_get_speed_capa_num(device_id);
6759        if (capa_num == 0) {
6760                hns3_err(hw, "device(0x%x) is not supported by hns3 PMD",
6761                         device_id);
6762                return -ENOTSUP;
6763        }
6764
6765        if (speed_fec_capa == NULL || num < capa_num)
6766                return capa_num;
6767
6768        ret = hns3_get_speed_fec_capa(speed_fec_capa, device_id);
6769        if (ret)
6770                return -ENOTSUP;
6771
6772        return capa_num;
6773}
6774
6775static int
6776get_current_fec_auto_state(struct hns3_hw *hw, uint8_t *state)
6777{
6778        struct hns3_config_fec_cmd *req;
6779        struct hns3_cmd_desc desc;
6780        int ret;
6781
6782        /*
6783         * CMD(HNS3_OPC_CONFIG_FEC_MODE) read is not supported
6784         * in device of link speed
6785         * below 10 Gbps.
6786         */
6787        if (hw->mac.link_speed < ETH_SPEED_NUM_10G) {
6788                *state = 0;
6789                return 0;
6790        }
6791
6792        hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_FEC_MODE, true);
6793        req = (struct hns3_config_fec_cmd *)desc.data;
6794        ret = hns3_cmd_send(hw, &desc, 1);
6795        if (ret) {
6796                hns3_err(hw, "get current fec auto state failed, ret = %d",
6797                         ret);
6798                return ret;
6799        }
6800
6801        *state = req->fec_mode & (1U << HNS3_MAC_CFG_FEC_AUTO_EN_B);
6802        return 0;
6803}
6804
6805static int
6806hns3_fec_get_internal(struct hns3_hw *hw, uint32_t *fec_capa)
6807{
6808        struct hns3_sfp_info_cmd *resp;
6809        uint32_t tmp_fec_capa;
6810        uint8_t auto_state;
6811        struct hns3_cmd_desc desc;
6812        int ret;
6813
6814        /*
6815         * If link is down and AUTO is enabled, AUTO is returned, otherwise,
6816         * configured FEC mode is returned.
6817         * If link is up, current FEC mode is returned.
6818         */
6819        if (hw->mac.link_status == ETH_LINK_DOWN) {
6820                ret = get_current_fec_auto_state(hw, &auto_state);
6821                if (ret)
6822                        return ret;
6823
6824                if (auto_state == 0x1) {
6825                        *fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(AUTO);
6826                        return 0;
6827                }
6828        }
6829
6830        hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GET_SFP_INFO, true);
6831        resp = (struct hns3_sfp_info_cmd *)desc.data;
6832        resp->query_type = HNS3_ACTIVE_QUERY;
6833
6834        ret = hns3_cmd_send(hw, &desc, 1);
6835        if (ret == -EOPNOTSUPP) {
6836                hns3_err(hw, "IMP do not support get FEC, ret = %d", ret);
6837                return ret;
6838        } else if (ret) {
6839                hns3_err(hw, "get FEC failed, ret = %d", ret);
6840                return ret;
6841        }
6842
6843        /*
6844         * FEC mode order defined in hns3 hardware is inconsistend with
6845         * that defined in the ethdev library. So the sequence needs
6846         * to be converted.
6847         */
6848        switch (resp->active_fec) {
6849        case HNS3_HW_FEC_MODE_NOFEC:
6850                tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC);
6851                break;
6852        case HNS3_HW_FEC_MODE_BASER:
6853                tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(BASER);
6854                break;
6855        case HNS3_HW_FEC_MODE_RS:
6856                tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(RS);
6857                break;
6858        default:
6859                tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC);
6860                break;
6861        }
6862
6863        *fec_capa = tmp_fec_capa;
6864        return 0;
6865}
6866
6867static int
6868hns3_fec_get(struct rte_eth_dev *dev, uint32_t *fec_capa)
6869{
6870        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6871
6872        return hns3_fec_get_internal(hw, fec_capa);
6873}
6874
/*
 * Program the MAC FEC configuration with the given ethdev FEC mode
 * (exactly one RTE_ETH_FEC_MODE_CAPA_MASK bit).
 *
 * Note: an unrecognized mode returns 0 without touching the hardware;
 * callers (hns3_fec_set) validate the mode against the capability set
 * before calling. Returns the command-layer error code on failure.
 */
static int
hns3_set_fec_hw(struct hns3_hw *hw, uint32_t mode)
{
	struct hns3_config_fec_cmd *req;
	struct hns3_cmd_desc desc;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_FEC_MODE, false);

	req = (struct hns3_config_fec_cmd *)desc.data;
	switch (mode) {
	case RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC):
		hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M,
				HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_OFF);
		break;
	case RTE_ETH_FEC_MODE_CAPA_MASK(BASER):
		hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M,
				HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_BASER);
		break;
	case RTE_ETH_FEC_MODE_CAPA_MASK(RS):
		hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M,
				HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_RS);
		break;
	case RTE_ETH_FEC_MODE_CAPA_MASK(AUTO):
		/* AUTO is a separate enable bit, not a mode field value */
		hns3_set_bit(req->fec_mode, HNS3_MAC_CFG_FEC_AUTO_EN_B, 1);
		break;
	default:
		return 0;
	}
	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "set fec mode failed, ret = %d", ret);

	return ret;
}
6910
6911static uint32_t
6912get_current_speed_fec_cap(struct hns3_hw *hw, struct rte_eth_fec_capa *fec_capa)
6913{
6914        struct hns3_mac *mac = &hw->mac;
6915        uint32_t cur_capa;
6916
6917        switch (mac->link_speed) {
6918        case ETH_SPEED_NUM_10G:
6919                cur_capa = fec_capa[1].capa;
6920                break;
6921        case ETH_SPEED_NUM_25G:
6922        case ETH_SPEED_NUM_100G:
6923        case ETH_SPEED_NUM_200G:
6924                cur_capa = fec_capa[0].capa;
6925                break;
6926        default:
6927                cur_capa = 0;
6928                break;
6929        }
6930
6931        return cur_capa;
6932}
6933
/*
 * Check that exactly one FEC mode bit is set in @mode.
 *
 * The previous implementation looped over sizeof(mode) == 4 bit positions
 * (bytes mistaken for bits), so any mode bit above bit 3 was silently
 * ignored and such a mode was rejected as "zero bits set". Use the
 * standard power-of-two test, which covers the whole 32-bit range.
 */
static bool
is_fec_mode_one_bit_set(uint32_t mode)
{
	return mode != 0 && (mode & (mode - 1)) == 0;
}
6946
/*
 * Ethdev fec_set callback: validate the requested FEC mode (exactly one
 * bit set, and within the capability of the current link speed), program
 * it into hardware, and record it in the PF so it can be restored after
 * a reset (see hns3_restore_fec). hw->lock guards the hardware write and
 * the pf->fec_mode update.
 */
static int
hns3_fec_set(struct rte_eth_dev *dev, uint32_t mode)
{
#define FEC_CAPA_NUM 2
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns);
	struct hns3_pf *pf = &hns->pf;

	struct rte_eth_fec_capa fec_capa[FEC_CAPA_NUM];
	uint32_t cur_capa;
	uint32_t num = FEC_CAPA_NUM;
	int ret;

	ret = hns3_fec_get_capability(dev, fec_capa, num);
	if (ret < 0)
		return ret;

	/* HNS3 PMD driver only support one bit set mode, e.g. 0x1, 0x4 */
	if (!is_fec_mode_one_bit_set(mode)) {
		hns3_err(hw, "FEC mode(0x%x) not supported in HNS3 PMD, "
			     "FEC mode should be only one bit set", mode);
		return -EINVAL;
	}

	/*
	 * Check whether the configured mode is within the FEC capability.
	 * If not, the configured mode will not be supported.
	 */
	cur_capa = get_current_speed_fec_cap(hw, fec_capa);
	if (!(cur_capa & mode)) {
		hns3_err(hw, "unsupported FEC mode = 0x%x", mode);
		return -EINVAL;
	}

	rte_spinlock_lock(&hw->lock);
	ret = hns3_set_fec_hw(hw, mode);
	if (ret) {
		rte_spinlock_unlock(&hw->lock);
		return ret;
	}

	/* Remember the mode for post-reset restore. */
	pf->fec_mode = mode;
	rte_spinlock_unlock(&hw->lock);

	return 0;
}
6993
6994static int
6995hns3_restore_fec(struct hns3_hw *hw)
6996{
6997        struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
6998        struct hns3_pf *pf = &hns->pf;
6999        uint32_t mode = pf->fec_mode;
7000        int ret;
7001
7002        ret = hns3_set_fec_hw(hw, mode);
7003        if (ret)
7004                hns3_err(hw, "restore fec mode(0x%x) failed, ret = %d",
7005                         mode, ret);
7006
7007        return ret;
7008}
7009
7010static int
7011hns3_query_dev_fec_info(struct hns3_hw *hw)
7012{
7013        struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
7014        struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(hns);
7015        int ret;
7016
7017        ret = hns3_fec_get_internal(hw, &pf->fec_mode);
7018        if (ret)
7019                hns3_err(hw, "query device FEC info failed, ret = %d", ret);
7020
7021        return ret;
7022}
7023
7024static bool
7025hns3_optical_module_existed(struct hns3_hw *hw)
7026{
7027        struct hns3_cmd_desc desc;
7028        bool existed;
7029        int ret;
7030
7031        hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GET_SFP_EXIST, true);
7032        ret = hns3_cmd_send(hw, &desc, 1);
7033        if (ret) {
7034                hns3_err(hw,
7035                         "fail to get optical module exist state, ret = %d.\n",
7036                         ret);
7037                return false;
7038        }
7039        existed = !!desc.data[0];
7040
7041        return existed;
7042}
7043
7044static int
7045hns3_get_module_eeprom_data(struct hns3_hw *hw, uint32_t offset,
7046                                uint32_t len, uint8_t *data)
7047{
7048#define HNS3_SFP_INFO_CMD_NUM 6
7049#define HNS3_SFP_INFO_MAX_LEN \
7050        (HNS3_SFP_INFO_BD0_LEN + \
7051        (HNS3_SFP_INFO_CMD_NUM - 1) * HNS3_SFP_INFO_BDX_LEN)
7052        struct hns3_cmd_desc desc[HNS3_SFP_INFO_CMD_NUM];
7053        struct hns3_sfp_info_bd0_cmd *sfp_info_bd0;
7054        uint16_t read_len;
7055        uint16_t copy_len;
7056        int ret;
7057        int i;
7058
7059        for (i = 0; i < HNS3_SFP_INFO_CMD_NUM; i++) {
7060                hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_GET_SFP_EEPROM,
7061                                          true);
7062                if (i < HNS3_SFP_INFO_CMD_NUM - 1)
7063                        desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
7064        }
7065
7066        sfp_info_bd0 = (struct hns3_sfp_info_bd0_cmd *)desc[0].data;
7067        sfp_info_bd0->offset = rte_cpu_to_le_16((uint16_t)offset);
7068        read_len = RTE_MIN(len, HNS3_SFP_INFO_MAX_LEN);
7069        sfp_info_bd0->read_len = rte_cpu_to_le_16((uint16_t)read_len);
7070
7071        ret = hns3_cmd_send(hw, desc, HNS3_SFP_INFO_CMD_NUM);
7072        if (ret) {
7073                hns3_err(hw, "fail to get module EEPROM info, ret = %d.\n",
7074                                ret);
7075                return ret;
7076        }
7077
7078        /* The data format in BD0 is different with the others. */
7079        copy_len = RTE_MIN(len, HNS3_SFP_INFO_BD0_LEN);
7080        memcpy(data, sfp_info_bd0->data, copy_len);
7081        read_len = copy_len;
7082
7083        for (i = 1; i < HNS3_SFP_INFO_CMD_NUM; i++) {
7084                if (read_len >= len)
7085                        break;
7086
7087                copy_len = RTE_MIN(len - read_len, HNS3_SFP_INFO_BDX_LEN);
7088                memcpy(data + read_len, desc[i].data, copy_len);
7089                read_len += copy_len;
7090        }
7091
7092        return (int)read_len;
7093}
7094
7095static int
7096hns3_get_module_eeprom(struct rte_eth_dev *dev,
7097                       struct rte_dev_eeprom_info *info)
7098{
7099        struct hns3_adapter *hns = dev->data->dev_private;
7100        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns);
7101        uint32_t offset = info->offset;
7102        uint32_t len = info->length;
7103        uint8_t *data = info->data;
7104        uint32_t read_len = 0;
7105
7106        if (hw->mac.media_type != HNS3_MEDIA_TYPE_FIBER)
7107                return -ENOTSUP;
7108
7109        if (!hns3_optical_module_existed(hw)) {
7110                hns3_err(hw, "fail to read module EEPROM: no module is connected.\n");
7111                return -EIO;
7112        }
7113
7114        while (read_len < len) {
7115                int ret;
7116                ret = hns3_get_module_eeprom_data(hw, offset + read_len,
7117                                                  len - read_len,
7118                                                  data + read_len);
7119                if (ret < 0)
7120                        return -EIO;
7121                read_len += ret;
7122        }
7123
7124        return 0;
7125}
7126
7127static int
7128hns3_get_module_info(struct rte_eth_dev *dev,
7129                     struct rte_eth_dev_module_info *modinfo)
7130{
7131#define HNS3_SFF8024_ID_SFP             0x03
7132#define HNS3_SFF8024_ID_QSFP_8438       0x0c
7133#define HNS3_SFF8024_ID_QSFP_8436_8636  0x0d
7134#define HNS3_SFF8024_ID_QSFP28_8636     0x11
7135#define HNS3_SFF_8636_V1_3              0x03
7136        struct hns3_adapter *hns = dev->data->dev_private;
7137        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns);
7138        struct rte_dev_eeprom_info info;
7139        struct hns3_sfp_type sfp_type;
7140        int ret;
7141
7142        memset(&sfp_type, 0, sizeof(sfp_type));
7143        memset(&info, 0, sizeof(info));
7144        info.data = (uint8_t *)&sfp_type;
7145        info.length = sizeof(sfp_type);
7146        ret = hns3_get_module_eeprom(dev, &info);
7147        if (ret)
7148                return ret;
7149
7150        switch (sfp_type.type) {
7151        case HNS3_SFF8024_ID_SFP:
7152                modinfo->type = RTE_ETH_MODULE_SFF_8472;
7153                modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
7154                break;
7155        case HNS3_SFF8024_ID_QSFP_8438:
7156                modinfo->type = RTE_ETH_MODULE_SFF_8436;
7157                modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8436_MAX_LEN;
7158                break;
7159        case HNS3_SFF8024_ID_QSFP_8436_8636:
7160                if (sfp_type.ext_type < HNS3_SFF_8636_V1_3) {
7161                        modinfo->type = RTE_ETH_MODULE_SFF_8436;
7162                        modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8436_MAX_LEN;
7163                } else {
7164                        modinfo->type = RTE_ETH_MODULE_SFF_8636;
7165                        modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8636_MAX_LEN;
7166                }
7167                break;
7168        case HNS3_SFF8024_ID_QSFP28_8636:
7169                modinfo->type = RTE_ETH_MODULE_SFF_8636;
7170                modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8636_MAX_LEN;
7171                break;
7172        default:
7173                hns3_err(hw, "unknown module, type = %u, extra_type = %u.\n",
7174                         sfp_type.type, sfp_type.ext_type);
7175                return -EINVAL;
7176        }
7177
7178        return 0;
7179}
7180
/*
 * Fill @tv with the current monotonic time, preferring
 * CLOCK_MONOTONIC_RAW where glibc provides it.
 * NOTE(review): CLOCK_TYPE and NSEC_TO_USEC_DIV remain defined past this
 * function; later code in the file may rely on them — do not scope them.
 */
void
hns3_clock_gettime(struct timeval *tv)
{
#ifdef CLOCK_MONOTONIC_RAW /* Defined in glibc bits/time.h */
#define CLOCK_TYPE CLOCK_MONOTONIC_RAW
#else
#define CLOCK_TYPE CLOCK_MONOTONIC
#endif
#define NSEC_TO_USEC_DIV 1000

	struct timespec spec;
	/* Return value ignored: these clock IDs cannot fail once supported. */
	(void)clock_gettime(CLOCK_TYPE, &spec);

	tv->tv_sec = spec.tv_sec;
	tv->tv_usec = spec.tv_nsec / NSEC_TO_USEC_DIV;
}
7197
7198uint64_t
7199hns3_clock_calctime_ms(struct timeval *tv)
7200{
7201        return (uint64_t)tv->tv_sec * MSEC_PER_SEC +
7202                tv->tv_usec / USEC_PER_MSEC;
7203}
7204
/* Current monotonic time in milliseconds (see hns3_clock_gettime). */
uint64_t
hns3_clock_gettime_ms(void)
{
	struct timeval now;

	hns3_clock_gettime(&now);
	return hns3_clock_calctime_ms(&now);
}
7213
7214static int
7215hns3_parse_io_hint_func(const char *key, const char *value, void *extra_args)
7216{
7217        uint32_t hint = HNS3_IO_FUNC_HINT_NONE;
7218
7219        RTE_SET_USED(key);
7220
7221        if (strcmp(value, "vec") == 0)
7222                hint = HNS3_IO_FUNC_HINT_VEC;
7223        else if (strcmp(value, "sve") == 0)
7224                hint = HNS3_IO_FUNC_HINT_SVE;
7225        else if (strcmp(value, "simple") == 0)
7226                hint = HNS3_IO_FUNC_HINT_SIMPLE;
7227        else if (strcmp(value, "common") == 0)
7228                hint = HNS3_IO_FUNC_HINT_COMMON;
7229
7230        /* If the hint is valid then update output parameters */
7231        if (hint != HNS3_IO_FUNC_HINT_NONE)
7232                *(uint32_t *)extra_args = hint;
7233
7234        return 0;
7235}
7236
7237static const char *
7238hns3_get_io_hint_func_name(uint32_t hint)
7239{
7240        switch (hint) {
7241        case HNS3_IO_FUNC_HINT_VEC:
7242                return "vec";
7243        case HNS3_IO_FUNC_HINT_SVE:
7244                return "sve";
7245        case HNS3_IO_FUNC_HINT_SIMPLE:
7246                return "simple";
7247        case HNS3_IO_FUNC_HINT_COMMON:
7248                return "common";
7249        default:
7250                return "none";
7251        }
7252}
7253
/* rte_kvargs callback: parse the capability-mask devarg as hexadecimal. */
static int
hns3_parse_dev_caps_mask(const char *key, const char *value, void *extra_args)
{
	RTE_SET_USED(key);

	*(uint64_t *)extra_args = strtoull(value, NULL, 16);

	return 0;
}
7266
7267void
7268hns3_parse_devargs(struct rte_eth_dev *dev)
7269{
7270        struct hns3_adapter *hns = dev->data->dev_private;
7271        uint32_t rx_func_hint = HNS3_IO_FUNC_HINT_NONE;
7272        uint32_t tx_func_hint = HNS3_IO_FUNC_HINT_NONE;
7273        struct hns3_hw *hw = &hns->hw;
7274        uint64_t dev_caps_mask = 0;
7275        struct rte_kvargs *kvlist;
7276
7277        if (dev->device->devargs == NULL)
7278                return;
7279
7280        kvlist = rte_kvargs_parse(dev->device->devargs->args, NULL);
7281        if (!kvlist)
7282                return;
7283
7284        (void)rte_kvargs_process(kvlist, HNS3_DEVARG_RX_FUNC_HINT,
7285                           &hns3_parse_io_hint_func, &rx_func_hint);
7286        (void)rte_kvargs_process(kvlist, HNS3_DEVARG_TX_FUNC_HINT,
7287                           &hns3_parse_io_hint_func, &tx_func_hint);
7288        (void)rte_kvargs_process(kvlist, HNS3_DEVARG_DEV_CAPS_MASK,
7289                           &hns3_parse_dev_caps_mask, &dev_caps_mask);
7290        rte_kvargs_free(kvlist);
7291
7292        if (rx_func_hint != HNS3_IO_FUNC_HINT_NONE)
7293                hns3_warn(hw, "parsed %s = %s.", HNS3_DEVARG_RX_FUNC_HINT,
7294                          hns3_get_io_hint_func_name(rx_func_hint));
7295        hns->rx_func_hint = rx_func_hint;
7296        if (tx_func_hint != HNS3_IO_FUNC_HINT_NONE)
7297                hns3_warn(hw, "parsed %s = %s.", HNS3_DEVARG_TX_FUNC_HINT,
7298                          hns3_get_io_hint_func_name(tx_func_hint));
7299        hns->tx_func_hint = tx_func_hint;
7300
7301        if (dev_caps_mask != 0)
7302                hns3_warn(hw, "parsed %s = 0x%" PRIx64 ".",
7303                          HNS3_DEVARG_DEV_CAPS_MASK, dev_caps_mask);
7304        hns->dev_caps_mask = dev_caps_mask;
7305}
7306
/* Ethdev driver callbacks for hns3 PF devices (installed in hns3_dev_init). */
static const struct eth_dev_ops hns3_eth_dev_ops = {
	/* Device lifecycle */
	.dev_configure      = hns3_dev_configure,
	.dev_start          = hns3_dev_start,
	.dev_stop           = hns3_dev_stop,
	.dev_close          = hns3_dev_close,
	/* Rx mode */
	.promiscuous_enable = hns3_dev_promiscuous_enable,
	.promiscuous_disable = hns3_dev_promiscuous_disable,
	.allmulticast_enable  = hns3_dev_allmulticast_enable,
	.allmulticast_disable = hns3_dev_allmulticast_disable,
	.mtu_set            = hns3_dev_mtu_set,
	/* Statistics */
	.stats_get          = hns3_stats_get,
	.stats_reset        = hns3_stats_reset,
	.xstats_get         = hns3_dev_xstats_get,
	.xstats_get_names   = hns3_dev_xstats_get_names,
	.xstats_reset       = hns3_dev_xstats_reset,
	.xstats_get_by_id   = hns3_dev_xstats_get_by_id,
	.xstats_get_names_by_id = hns3_dev_xstats_get_names_by_id,
	.dev_infos_get          = hns3_dev_infos_get,
	.fw_version_get         = hns3_fw_version_get,
	/* Rx/Tx queue setup and control */
	.rx_queue_setup         = hns3_rx_queue_setup,
	.tx_queue_setup         = hns3_tx_queue_setup,
	.rx_queue_release       = hns3_dev_rx_queue_release,
	.tx_queue_release       = hns3_dev_tx_queue_release,
	.rx_queue_start         = hns3_dev_rx_queue_start,
	.rx_queue_stop          = hns3_dev_rx_queue_stop,
	.tx_queue_start         = hns3_dev_tx_queue_start,
	.tx_queue_stop          = hns3_dev_tx_queue_stop,
	.rx_queue_intr_enable   = hns3_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable  = hns3_dev_rx_queue_intr_disable,
	.rxq_info_get           = hns3_rxq_info_get,
	.txq_info_get           = hns3_txq_info_get,
	.rx_burst_mode_get      = hns3_rx_burst_mode_get,
	.tx_burst_mode_get      = hns3_tx_burst_mode_get,
	/* Flow control */
	.flow_ctrl_get          = hns3_flow_ctrl_get,
	.flow_ctrl_set          = hns3_flow_ctrl_set,
	.priority_flow_ctrl_set = hns3_priority_flow_ctrl_set,
	/* MAC addresses */
	.mac_addr_add           = hns3_add_mac_addr,
	.mac_addr_remove        = hns3_remove_mac_addr,
	.mac_addr_set           = hns3_set_default_mac_addr,
	.set_mc_addr_list       = hns3_set_mc_mac_addr_list,
	.link_update            = hns3_dev_link_update,
	/* RSS */
	.rss_hash_update        = hns3_dev_rss_hash_update,
	.rss_hash_conf_get      = hns3_dev_rss_hash_conf_get,
	.reta_update            = hns3_dev_rss_reta_update,
	.reta_query             = hns3_dev_rss_reta_query,
	.flow_ops_get           = hns3_dev_flow_ops_get,
	/* VLAN */
	.vlan_filter_set        = hns3_vlan_filter_set,
	.vlan_tpid_set          = hns3_vlan_tpid_set,
	.vlan_offload_set       = hns3_vlan_offload_set,
	.vlan_pvid_set          = hns3_vlan_pvid_set,
	/* Diagnostics, module EEPROM, DCB */
	.get_reg                = hns3_get_regs,
	.get_module_info        = hns3_get_module_info,
	.get_module_eeprom      = hns3_get_module_eeprom,
	.get_dcb_info           = hns3_get_dcb_info,
	.dev_supported_ptypes_get = hns3_dev_supported_ptypes_get,
	/* FEC */
	.fec_get_capability     = hns3_fec_get_capability,
	.fec_get                = hns3_fec_get,
	.fec_set                = hns3_fec_set,
	.tm_ops_get             = hns3_tm_ops_get,
	.tx_done_cleanup        = hns3_tx_done_cleanup,
	/* IEEE 1588 PTP timesync */
	.timesync_enable            = hns3_timesync_enable,
	.timesync_disable           = hns3_timesync_disable,
	.timesync_read_rx_timestamp = hns3_timesync_read_rx_timestamp,
	.timesync_read_tx_timestamp = hns3_timesync_read_tx_timestamp,
	.timesync_adjust_time       = hns3_timesync_adjust_time,
	.timesync_read_time         = hns3_timesync_read_time,
	.timesync_write_time        = hns3_timesync_write_time,
};
7375
/*
 * PF callbacks for the common reset framework; installed on hw->reset.ops
 * in hns3_dev_init and invoked to drive each stage of a reset cycle.
 */
static const struct hns3_reset_ops hns3_reset_ops = {
	.reset_service       = hns3_reset_service,
	.stop_service        = hns3_stop_service,
	.prepare_reset       = hns3_prepare_reset,
	.wait_hardware_ready = hns3_wait_hardware_ready,
	.reinit_dev          = hns3_reinit_dev,
	.restore_conf        = hns3_restore_conf,
	.start_service       = hns3_start_service,
};
7385
/*
 * ethdev init callback for the hns3 PF driver, invoked from
 * rte_eth_dev_pci_generic_probe() via eth_hns3_pci_probe().
 *
 * Primary process: initialises multi-process support, the reset
 * framework and the PF hardware, allocates the MAC address table and
 * installs a (possibly random) default MAC address.  Secondary
 * process: only wires up the datapath function pointers and the MP
 * channel, then returns early.
 *
 * Returns 0 on success, negative errno on failure.  On failure every
 * resource acquired so far is released via the goto-based unwind
 * labels at the bottom, in reverse acquisition order.
 */
static int
hns3_dev_init(struct rte_eth_dev *eth_dev)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	struct rte_ether_addr *eth_addr;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	hns3_flow_init(eth_dev);

	/* Select rx/tx burst functions and install the ops table. */
	hns3_set_rxtx_function(eth_dev);
	eth_dev->dev_ops = &hns3_eth_dev_ops;
	eth_dev->rx_queue_count = hns3_rx_queue_count;
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		/* Secondary processes share the primary's hardware state;
		 * they only need the MP channel and the tx push mapping.
		 */
		ret = hns3_mp_init_secondary();
		if (ret) {
			PMD_INIT_LOG(ERR, "Failed to init for secondary "
				     "process, ret = %d", ret);
			goto err_mp_init_secondary;
		}
		hw->secondary_cnt++;
		hns3_tx_push_init(eth_dev);
		return 0;
	}

	ret = hns3_mp_init_primary();
	if (ret) {
		PMD_INIT_LOG(ERR,
			     "Failed to init for primary process, ret = %d",
			     ret);
		goto err_mp_init_primary;
	}

	hw->adapter_state = HNS3_NIC_UNINITIALIZED;
	hns->is_vf = false;
	hw->data = eth_dev->data;
	hns3_parse_devargs(eth_dev);

	/*
	 * Set default max packet size according to the mtu
	 * default vale in DPDK frame.
	 */
	hns->pf.mps = hw->data->mtu + HNS3_ETH_OVERHEAD;

	/* Reset framework must exist before hns3_init_pf() can use it. */
	ret = hns3_reset_init(hw);
	if (ret)
		goto err_init_reset;
	hw->reset.ops = &hns3_reset_ops;

	ret = hns3_init_pf(eth_dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init pf: %d", ret);
		goto err_init_pf;
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("hns3-mac",
					       sizeof(struct rte_ether_addr) *
					       HNS3_UC_MACADDR_NUM, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate %zx bytes needed "
			     "to store MAC addresses",
			     sizeof(struct rte_ether_addr) *
			     HNS3_UC_MACADDR_NUM);
		ret = -ENOMEM;
		goto err_rte_zmalloc;
	}

	/* Fall back to a random MAC if firmware supplied an unusable one. */
	eth_addr = (struct rte_ether_addr *)hw->mac.mac_addr;
	if (!rte_is_valid_assigned_ether_addr(eth_addr)) {
		rte_eth_random_addr(hw->mac.mac_addr);
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				(struct rte_ether_addr *)hw->mac.mac_addr);
		hns3_warn(hw, "default mac_addr from firmware is an invalid "
			  "unicast address, using random MAC address %s",
			  mac_str);
	}
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.mac_addr,
			    &eth_dev->data->mac_addrs[0]);

	hw->adapter_state = HNS3_NIC_INITIALIZED;

	/* A reset requested while we were initialising was deferred;
	 * run it now that the device state can handle it.
	 */
	if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
			    SCHEDULE_PENDING) {
		hns3_err(hw, "Reschedule reset service after dev_init");
		hns3_schedule_reset(hns);
	} else {
		/* IMP will wait ready flag before reset */
		hns3_notify_reset_ready(hw, false);
	}

	hns3_info(hw, "hns3 dev initialization successful!");
	return 0;

	/* Unwind in reverse acquisition order. */
err_rte_zmalloc:
	hns3_uninit_pf(eth_dev);

err_init_pf:
	rte_free(hw->reset.wait_data);

err_init_reset:
	hns3_mp_uninit_primary();

err_mp_init_primary:
err_mp_init_secondary:
	/* Detach the ops/burst pointers installed above so the ethdev
	 * layer never calls into a half-initialised driver.
	 */
	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->rx_descriptor_status = NULL;
	eth_dev->tx_pkt_burst = NULL;
	eth_dev->tx_pkt_prepare = NULL;
	eth_dev->tx_descriptor_status = NULL;
	return ret;
}
7502
7503static int
7504hns3_dev_uninit(struct rte_eth_dev *eth_dev)
7505{
7506        struct hns3_adapter *hns = eth_dev->data->dev_private;
7507        struct hns3_hw *hw = &hns->hw;
7508
7509        PMD_INIT_FUNC_TRACE();
7510
7511        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
7512                return 0;
7513
7514        if (hw->adapter_state < HNS3_NIC_CLOSING)
7515                hns3_dev_close(eth_dev);
7516
7517        hw->adapter_state = HNS3_NIC_REMOVED;
7518        return 0;
7519}
7520
7521static int
7522eth_hns3_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
7523                   struct rte_pci_device *pci_dev)
7524{
7525        return rte_eth_dev_pci_generic_probe(pci_dev,
7526                                             sizeof(struct hns3_adapter),
7527                                             hns3_dev_init);
7528}
7529
7530static int
7531eth_hns3_pci_remove(struct rte_pci_device *pci_dev)
7532{
7533        return rte_eth_dev_pci_generic_remove(pci_dev, hns3_dev_uninit);
7534}
7535
/*
 * PCI vendor/device IDs handled by this PF driver.  The table is
 * terminated by a zeroed sentinel entry, as required by the PCI bus
 * matching code.
 */
static const struct rte_pci_id pci_id_hns3_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_GE) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_25GE) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_25GE_RDMA) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_50GE_RDMA) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_RDMA_MACSEC) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_200G_RDMA) },
	{ .vendor_id = 0, }, /* sentinel */
};
7545
/*
 * PCI driver descriptor for the hns3 PF PMD.  NEED_MAPPING asks the
 * PCI bus to map BARs before probe; INTR_LSC advertises link-status
 * interrupt support to the ethdev layer.
 */
static struct rte_pci_driver rte_hns3_pmd = {
	.id_table = pci_id_hns3_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_hns3_pci_probe,
	.remove = eth_hns3_pci_remove,
};
7552
/* Register the PMD with the PCI bus under the name "net_hns3" and
 * export its device-ID table, kernel-module dependencies, supported
 * devargs and log types.
 * NOTE(review): the devargs string lists only the three parameters
 * below — confirm it stays in sync with hns3_parse_devargs().
 */
RTE_PMD_REGISTER_PCI(net_hns3, rte_hns3_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_hns3, pci_id_hns3_map);
RTE_PMD_REGISTER_KMOD_DEP(net_hns3, "* igb_uio | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_hns3,
		HNS3_DEVARG_RX_FUNC_HINT "=vec|sve|simple|common "
		HNS3_DEVARG_TX_FUNC_HINT "=vec|sve|simple|common "
		HNS3_DEVARG_DEV_CAPS_MASK "=<1-65535> ");
RTE_LOG_REGISTER_SUFFIX(hns3_logtype_init, init, NOTICE);
RTE_LOG_REGISTER_SUFFIX(hns3_logtype_driver, driver, NOTICE);
7562