dpdk/drivers/net/hns3/hns3_cmd.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 HiSilicon Limited.
 */

#include <ethdev_pci.h>
#include <rte_io.h>

#include "hns3_ethdev.h"
#include "hns3_regs.h"
#include "hns3_intr.h"
#include "hns3_logs.h"

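/*
 * Number of free descriptors in the ring. One slot is always kept unused so
 * that a full ring can be distinguished from an empty one (head == tail).
 */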
static int
hns3_ring_space(struct hns3_cmq_ring *ring)
{
        int ntu = ring->next_to_use;
        int ntc = ring->next_to_clean;
        int used = (ntu - ntc + ring->desc_num) % ring->desc_num;

        return ring->desc_num - used - 1;
}

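/*
 * Check that the CSQ head reported by hardware lies between next_to_clean
 * and next_to_use, taking ring wraparound into account.
 */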
static bool
is_valid_csq_clean_head(struct hns3_cmq_ring *ring, int head)
{
        int ntu = ring->next_to_use;
        int ntc = ring->next_to_clean;

        if (ntu > ntc)
                return head >= ntc && head <= ntu;

        return head >= ntc || head <= ntu;
}

/*
 * hns3_allocate_dma_mem - Specific memory allocation for the command function.
 * Reserve a memzone, i.e. a named, physically contiguous region of memory.
 * @hw: pointer to the hw structure
 * @ring: pointer to the ring structure
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 */
static int
hns3_allocate_dma_mem(struct hns3_hw *hw, struct hns3_cmq_ring *ring,
                      uint64_t size, uint32_t alignment)
{
        static uint64_t hns3_dma_memzone_id;
        const struct rte_memzone *mz = NULL;
        char z_name[RTE_MEMZONE_NAMESIZE];

        snprintf(z_name, sizeof(z_name), "hns3_dma_%" PRIu64,
                __atomic_fetch_add(&hns3_dma_memzone_id, 1, __ATOMIC_RELAXED));
        mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
                                         RTE_MEMZONE_IOVA_CONTIG, alignment,
                                         RTE_PGSIZE_2M);
        if (mz == NULL)
                return -ENOMEM;

        ring->buf_size = size;
        ring->desc = mz->addr;
        ring->desc_dma_addr = mz->iova;
        ring->zone = (const void *)mz;
        hns3_dbg(hw, "memzone %s allocated with physical address: %" PRIu64,
                 mz->name, ring->desc_dma_addr);

        return 0;
}

static void
hns3_free_dma_mem(struct hns3_hw *hw, struct hns3_cmq_ring *ring)
{
        hns3_dbg(hw, "memzone %s to be freed with physical address: %" PRIu64,
                 ((const struct rte_memzone *)ring->zone)->name,
                 ring->desc_dma_addr);
        rte_memzone_free((const struct rte_memzone *)ring->zone);
        ring->buf_size = 0;
        ring->desc = NULL;
        ring->desc_dma_addr = 0;
        ring->zone = NULL;
}

static int
hns3_alloc_cmd_desc(struct hns3_hw *hw, struct hns3_cmq_ring *ring)
{
        int size  = ring->desc_num * sizeof(struct hns3_cmd_desc);

        if (hns3_allocate_dma_mem(hw, ring, size, HNS3_CMD_DESC_ALIGNMENT)) {
                hns3_err(hw, "allocate dma mem failed");
                return -ENOMEM;
        }

        return 0;
}

static void
hns3_free_cmd_desc(struct hns3_hw *hw, struct hns3_cmq_ring *ring)
{
        if (ring->desc)
                hns3_free_dma_mem(hw, ring);
}

static int
hns3_alloc_cmd_queue(struct hns3_hw *hw, int ring_type)
{
        struct hns3_cmq_ring *ring =
                (ring_type == HNS3_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq;
        int ret;

        ring->ring_type = ring_type;
        ring->hw = hw;

        ret = hns3_alloc_cmd_desc(hw, ring);
        if (ret)
                hns3_err(hw, "descriptor %s alloc error %d",
                            (ring_type == HNS3_TYPE_CSQ) ? "CSQ" : "CRQ", ret);

        return ret;
}

void
hns3_cmd_reuse_desc(struct hns3_cmd_desc *desc, bool is_read)
{
        desc->flag = rte_cpu_to_le_16(HNS3_CMD_FLAG_NO_INTR | HNS3_CMD_FLAG_IN);
        if (is_read)
                desc->flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_WR);
        else
                desc->flag &= rte_cpu_to_le_16(~HNS3_CMD_FLAG_WR);
}

void
hns3_cmd_setup_basic_desc(struct hns3_cmd_desc *desc,
                          enum hns3_opcode_type opcode, bool is_read)
{
        memset((void *)desc, 0, sizeof(struct hns3_cmd_desc));
        desc->opcode = rte_cpu_to_le_16(opcode);
        desc->flag = rte_cpu_to_le_16(HNS3_CMD_FLAG_NO_INTR | HNS3_CMD_FLAG_IN);

        if (is_read)
                desc->flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_WR);
}

static void
hns3_cmd_clear_regs(struct hns3_hw *hw)
{
        hns3_write_dev(hw, HNS3_CMDQ_TX_ADDR_L_REG, 0);
        hns3_write_dev(hw, HNS3_CMDQ_TX_ADDR_H_REG, 0);
        hns3_write_dev(hw, HNS3_CMDQ_TX_DEPTH_REG, 0);
        hns3_write_dev(hw, HNS3_CMDQ_TX_HEAD_REG, 0);
        hns3_write_dev(hw, HNS3_CMDQ_TX_TAIL_REG, 0);
        hns3_write_dev(hw, HNS3_CMDQ_RX_ADDR_L_REG, 0);
        hns3_write_dev(hw, HNS3_CMDQ_RX_ADDR_H_REG, 0);
        hns3_write_dev(hw, HNS3_CMDQ_RX_DEPTH_REG, 0);
        hns3_write_dev(hw, HNS3_CMDQ_RX_HEAD_REG, 0);
        hns3_write_dev(hw, HNS3_CMDQ_RX_TAIL_REG, 0);
}

static void
hns3_cmd_config_regs(struct hns3_cmq_ring *ring)
{
        uint64_t dma = ring->desc_dma_addr;

        if (ring->ring_type == HNS3_TYPE_CSQ) {
                hns3_write_dev(ring->hw, HNS3_CMDQ_TX_ADDR_L_REG,
                               lower_32_bits(dma));
                hns3_write_dev(ring->hw, HNS3_CMDQ_TX_ADDR_H_REG,
                               upper_32_bits(dma));
                hns3_write_dev(ring->hw, HNS3_CMDQ_TX_DEPTH_REG,
                               ring->desc_num >> HNS3_NIC_CMQ_DESC_NUM_S |
                               HNS3_NIC_SW_RST_RDY);
                hns3_write_dev(ring->hw, HNS3_CMDQ_TX_HEAD_REG, 0);
                hns3_write_dev(ring->hw, HNS3_CMDQ_TX_TAIL_REG, 0);
        } else {
                hns3_write_dev(ring->hw, HNS3_CMDQ_RX_ADDR_L_REG,
                               lower_32_bits(dma));
                hns3_write_dev(ring->hw, HNS3_CMDQ_RX_ADDR_H_REG,
                               upper_32_bits(dma));
                hns3_write_dev(ring->hw, HNS3_CMDQ_RX_DEPTH_REG,
                               ring->desc_num >> HNS3_NIC_CMQ_DESC_NUM_S);
                hns3_write_dev(ring->hw, HNS3_CMDQ_RX_HEAD_REG, 0);
                hns3_write_dev(ring->hw, HNS3_CMDQ_RX_TAIL_REG, 0);
        }
}

static void
hns3_cmd_init_regs(struct hns3_hw *hw)
{
        hns3_cmd_config_regs(&hw->cmq.csq);
        hns3_cmd_config_regs(&hw->cmq.crq);
}

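/*
 * Retire the CSQ descriptors already processed by the firmware: read the
 * hardware head pointer, validate it against the software indexes, advance
 * next_to_clean and return the number of descriptors cleaned. On an invalid
 * head, disable the command queue, schedule a delayed reset (in the primary
 * process only) and return -EIO.
 */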
static int
hns3_cmd_csq_clean(struct hns3_hw *hw)
{
        struct hns3_cmq_ring *csq = &hw->cmq.csq;
        uint32_t head;
        uint32_t addr;
        int clean;

        head = hns3_read_dev(hw, HNS3_CMDQ_TX_HEAD_REG);
        addr = hns3_read_dev(hw, HNS3_CMDQ_TX_ADDR_L_REG);
        if (!is_valid_csq_clean_head(csq, head) || addr == 0) {
                hns3_err(hw, "wrong cmd addr(%0x) head (%u, %u-%u)", addr, head,
                         csq->next_to_use, csq->next_to_clean);
                if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                        __atomic_store_n(&hw->reset.disable_cmd, 1,
                                         __ATOMIC_RELAXED);
                        hns3_schedule_delayed_reset(HNS3_DEV_HW_TO_ADAPTER(hw));
                }

                return -EIO;
        }

        clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
        csq->next_to_clean = head;
        return clean;
}

static int
hns3_cmd_csq_done(struct hns3_hw *hw)
{
        uint32_t head = hns3_read_dev(hw, HNS3_CMDQ_TX_HEAD_REG);

        return head == hw->cmq.csq.next_to_use;
}

static bool
hns3_is_special_opcode(uint16_t opcode)
{
        /*
         * These commands have several descriptors,
         * and use the first one to save opcode and return value.
         */
        uint16_t spec_opcode[] = {HNS3_OPC_STATS_64_BIT,
                                  HNS3_OPC_STATS_32_BIT,
                                  HNS3_OPC_STATS_MAC,
                                  HNS3_OPC_STATS_MAC_ALL,
                                  HNS3_OPC_QUERY_32_BIT_REG,
                                  HNS3_OPC_QUERY_64_BIT_REG,
                                  HNS3_OPC_QUERY_CLEAR_MPF_RAS_INT,
                                  HNS3_OPC_QUERY_CLEAR_PF_RAS_INT,
                                  HNS3_OPC_QUERY_CLEAR_ALL_MPF_MSIX_INT,
                                  HNS3_OPC_QUERY_CLEAR_ALL_PF_MSIX_INT,
                                  HNS3_OPC_QUERY_ALL_ERR_INFO,};
        uint32_t i;

        for (i = 0; i < RTE_DIM(spec_opcode); i++)
                if (spec_opcode[i] == opcode)
                        return true;

        return false;
}

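/*
 * Map a command status code returned by the firmware (IMP) to a negative
 * errno-style value used by the rest of the driver.
 */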
static int
hns3_cmd_convert_err_code(uint16_t desc_ret)
{
        static const struct {
                uint16_t imp_errcode;
                int linux_errcode;
        } hns3_cmdq_status[] = {
                {HNS3_CMD_EXEC_SUCCESS, 0},
                {HNS3_CMD_NO_AUTH, -EPERM},
                {HNS3_CMD_NOT_SUPPORTED, -EOPNOTSUPP},
                {HNS3_CMD_QUEUE_FULL, -EXFULL},
                {HNS3_CMD_NEXT_ERR, -ENOSR},
                {HNS3_CMD_UNEXE_ERR, -ENOTBLK},
                {HNS3_CMD_PARA_ERR, -EINVAL},
                {HNS3_CMD_RESULT_ERR, -ERANGE},
                {HNS3_CMD_TIMEOUT, -ETIME},
                {HNS3_CMD_HILINK_ERR, -ENOLINK},
                {HNS3_CMD_QUEUE_ILLEGAL, -ENXIO},
                {HNS3_CMD_INVALID, -EBADR},
                {HNS3_CMD_ROH_CHECK_FAIL, -EINVAL}
        };

        uint32_t i;

        for (i = 0; i < RTE_DIM(hns3_cmdq_status); i++)
                if (hns3_cmdq_status[i].imp_errcode == desc_ret)
                        return hns3_cmdq_status[i].linux_errcode;

        return -EREMOTEIO;
}

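/*
 * Copy the descriptors written back by hardware (starting at ring index ntc)
 * into the caller's buffer and convert the returned status into an errno
 * value.
 */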
static int
hns3_cmd_get_hardware_reply(struct hns3_hw *hw,
                            struct hns3_cmd_desc *desc, int num, int ntc)
{
        uint16_t opcode, desc_ret;
        int current_ntc = ntc;
        int handle;

        opcode = rte_le_to_cpu_16(desc[0].opcode);
        for (handle = 0; handle < num; handle++) {
                /* Get the result of hardware write back */
                desc[handle] = hw->cmq.csq.desc[current_ntc];

                current_ntc++;
                if (current_ntc == hw->cmq.csq.desc_num)
                        current_ntc = 0;
        }

        if (likely(!hns3_is_special_opcode(opcode)))
                desc_ret = rte_le_to_cpu_16(desc[num - 1].retval);
        else
                desc_ret = rte_le_to_cpu_16(desc[0].retval);

        hw->cmq.last_status = desc_ret;
        return hns3_cmd_convert_err_code(desc_ret);
}

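/*
 * Busy-wait (in 1 us steps, up to cmq.tx_timeout iterations) until the
 * hardware head pointer catches up with next_to_use, i.e. the firmware has
 * consumed all submitted descriptors. Abort early if the command queue has
 * been disabled or a reset is pending.
 */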
static int
hns3_cmd_poll_reply(struct hns3_hw *hw)
{
        struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
        uint32_t timeout = 0;

        do {
                if (hns3_cmd_csq_done(hw))
                        return 0;

                if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
                        hns3_err(hw,
                                 "Don't wait for reply because of disable_cmd");
                        return -EBUSY;
                }

                if (is_reset_pending(hns)) {
                        hns3_err(hw, "Don't wait for reply because of reset pending");
                        return -EIO;
                }

                rte_delay_us(1);
                timeout++;
        } while (timeout < hw->cmq.tx_timeout);
        hns3_err(hw, "Wait for reply timeout");
        return -ETIME;
}

/*
 * hns3_cmd_send - send command to the command queue
 *
 * @param hw
 *   pointer to the hw struct
 * @param desc
 *   prefilled descriptor(s) describing the command
 * @param num
 *   the number of descriptors to be sent
 * @return
 *   - -EBUSY if the device is resetting
 *   - -EIO   if the command CSQ is corrupted (due to reset) or
 *            a reset is pending
 *   - -ENOMEM/-ETIME/... (non-zero) on other errors
 *   - Zero   if the operation completed successfully
 *
 * Note: -EBUSY/-EIO are only returned in reset cases.
 *
 * Note: this is the main send routine for the command queue; it submits the
 * descriptors to the queue, cleans the queue, etc.
 */
int
hns3_cmd_send(struct hns3_hw *hw, struct hns3_cmd_desc *desc, int num)
{
        struct hns3_cmd_desc *desc_to_use;
        int handle = 0;
        int retval;
        uint32_t ntc;

        if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED))
                return -EBUSY;

        rte_spinlock_lock(&hw->cmq.csq.lock);

        /* Clean the command send queue */
        retval = hns3_cmd_csq_clean(hw);
        if (retval < 0) {
                rte_spinlock_unlock(&hw->cmq.csq.lock);
                return retval;
        }

        if (num > hns3_ring_space(&hw->cmq.csq)) {
                rte_spinlock_unlock(&hw->cmq.csq.lock);
                return -ENOMEM;
        }

        /*
         * Record the current location of the descriptors in the ring;
         * hardware will write its reply back at this position.
         */
        ntc = hw->cmq.csq.next_to_use;

        while (handle < num) {
                desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
                *desc_to_use = desc[handle];
                (hw->cmq.csq.next_to_use)++;
                if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num)
                        hw->cmq.csq.next_to_use = 0;
                handle++;
        }

        /* Write to hardware */
        hns3_write_dev(hw, HNS3_CMDQ_TX_TAIL_REG, hw->cmq.csq.next_to_use);

        /*
         * If the command is synchronous, wait for the firmware to write back;
         * if multiple descriptors are sent, use the first one to check.
         */
        if (HNS3_CMD_SEND_SYNC(rte_le_to_cpu_16(desc->flag))) {
                retval = hns3_cmd_poll_reply(hw);
                if (!retval)
                        retval = hns3_cmd_get_hardware_reply(hw, desc, num,
                                                             ntc);
        }

        rte_spinlock_unlock(&hw->cmq.csq.lock);
        return retval;
}
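
/*
 * Typical usage of hns3_cmd_send() (an illustrative sketch only, not code
 * from this file): build one descriptor with hns3_cmd_setup_basic_desc(),
 * send it synchronously and, on success, read the reply from desc.data, e.g.:
 *
 *     struct hns3_cmd_desc desc;
 *     int ret;
 *
 *     hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_FW_VER, true);
 *     ret = hns3_cmd_send(hw, &desc, 1);
 *     if (ret == 0)
 *             handle_reply(desc.data);  // handle_reply() is hypothetical
 */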

static const char *
hns3_get_caps_name(uint32_t caps_id)
{
        const struct {
                enum HNS3_CAPS_BITS caps;
                const char *name;
        } dev_caps[] = {
                { HNS3_CAPS_FD_QUEUE_REGION_B, "fd_queue_region" },
                { HNS3_CAPS_PTP_B,             "ptp"             },
                { HNS3_CAPS_PHY_IMP_B,         "phy_imp"         },
                { HNS3_CAPS_TQP_TXRX_INDEP_B,  "tqp_txrx_indep"  },
                { HNS3_CAPS_HW_PAD_B,          "hw_pad"          },
                { HNS3_CAPS_STASH_B,           "stash"           },
                { HNS3_CAPS_UDP_TUNNEL_CSUM_B, "udp_tunnel_csum" },
                { HNS3_CAPS_RAS_IMP_B,         "ras_imp"         },
                { HNS3_CAPS_RXD_ADV_LAYOUT_B,  "rxd_adv_layout"  },
                { HNS3_CAPS_TM_B,              "tm_capability"   }
        };
        uint32_t i;

        for (i = 0; i < RTE_DIM(dev_caps); i++) {
                if (dev_caps[i].caps == caps_id)
                        return dev_caps[i].name;
        }

        return "unknown";
}

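/*
 * Clear the capability bits listed in hns->dev_caps_mask from the
 * capabilities reported by the firmware, and log each capability that was
 * masked off.
 */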
static void
hns3_mask_capability(struct hns3_hw *hw,
                     struct hns3_query_version_cmd *cmd)
{
#define MAX_CAPS_BIT    64

        struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
        uint64_t caps_org, caps_new, caps_masked;
        uint32_t i;

        if (hns->dev_caps_mask == 0)
                return;

        memcpy(&caps_org, &cmd->caps[0], sizeof(caps_org));
        caps_org = rte_le_to_cpu_64(caps_org);
        caps_new = caps_org ^ (caps_org & hns->dev_caps_mask);
        caps_masked = caps_org ^ caps_new;
        caps_new = rte_cpu_to_le_64(caps_new);
        memcpy(&cmd->caps[0], &caps_new, sizeof(caps_new));

        for (i = 0; i < MAX_CAPS_BIT; i++) {
                if (!(caps_masked & BIT_ULL(i)))
                        continue;
                hns3_info(hw, "mask capability: id-%u, name-%s.",
                          i, hns3_get_caps_name(i));
        }
}

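/*
 * Translate the capability bits reported by the firmware into the driver's
 * HNS3_DEV_SUPPORT_* bits in hw->capability.
 */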
static void
hns3_parse_capability(struct hns3_hw *hw,
                      struct hns3_query_version_cmd *cmd)
{
        uint32_t caps = rte_le_to_cpu_32(cmd->caps[0]);

        if (hns3_get_bit(caps, HNS3_CAPS_FD_QUEUE_REGION_B))
                hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_FD_QUEUE_REGION_B,
                             1);
        if (hns3_get_bit(caps, HNS3_CAPS_PTP_B)) {
                /*
                 * PTP depends on a special packet type that hardware only
                 * reports when the RXD advanced layout is enabled, so if the
                 * hardware doesn't support the RXD advanced layout, the
                 * driver should ignore the PTP capability.
                 */
                if (hns3_get_bit(caps, HNS3_CAPS_RXD_ADV_LAYOUT_B))
                        hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_PTP_B, 1);
                else
                        hns3_warn(hw, "ignore PTP capability due to lack of "
                                  "rxd advanced layout capability.");
        }
        if (hns3_get_bit(caps, HNS3_CAPS_PHY_IMP_B))
                hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_COPPER_B, 1);
        if (hns3_get_bit(caps, HNS3_CAPS_TQP_TXRX_INDEP_B))
                hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_INDEP_TXRX_B, 1);
        if (hns3_get_bit(caps, HNS3_CAPS_STASH_B))
                hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_STASH_B, 1);
        if (hns3_get_bit(caps, HNS3_CAPS_RXD_ADV_LAYOUT_B))
                hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_RXD_ADV_LAYOUT_B,
                             1);
        if (hns3_get_bit(caps, HNS3_CAPS_UDP_TUNNEL_CSUM_B))
                hns3_set_bit(hw->capability,
                                HNS3_DEV_SUPPORT_OUTER_UDP_CKSUM_B, 1);
        if (hns3_get_bit(caps, HNS3_CAPS_RAS_IMP_B))
                hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_RAS_IMP_B, 1);
        if (hns3_get_bit(caps, HNS3_CAPS_TM_B))
                hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_TM_B, 1);
}

static uint32_t
hns3_build_api_caps(void)
{
        uint32_t api_caps = 0;

        hns3_set_bit(api_caps, HNS3_API_CAP_FLEX_RSS_TBL_B, 1);

        return rte_cpu_to_le_32(api_caps);
}

static int
hns3_cmd_query_firmware_version_and_capability(struct hns3_hw *hw)
{
        struct hns3_query_version_cmd *resp;
        struct hns3_cmd_desc desc;
        int ret;

        hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_FW_VER, 1);
        resp = (struct hns3_query_version_cmd *)desc.data;
        resp->api_caps = hns3_build_api_caps();

        /* Send the query firmware version and capability command */
        ret = hns3_cmd_send(hw, &desc, 1);
        if (ret)
                return ret;

        hw->fw_version = rte_le_to_cpu_32(resp->firmware);
        /*
         * Make sure to mask the capabilities before parsing them, because
         * masking may overwrite resp's data.
         */
        hns3_mask_capability(hw, resp);
        hns3_parse_capability(hw, resp);

        return 0;
}

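/*
 * Initialize the software state of the command queues (locks, ring depth and
 * timeout), clear any residual register values and allocate the CSQ and CRQ
 * descriptor rings; the rings are programmed into hardware later by
 * hns3_cmd_init().
 */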
int
hns3_cmd_init_queue(struct hns3_hw *hw)
{
        int ret;

        /* Setup the locks for the command queues */
        rte_spinlock_init(&hw->cmq.csq.lock);
        rte_spinlock_init(&hw->cmq.crq.lock);

        /*
         * Clear all command registers,
         * in case there are residual values
         */
        hns3_cmd_clear_regs(hw);

        /* Setup the number of entries used by each command queue */
        hw->cmq.csq.desc_num = HNS3_NIC_CMQ_DESC_NUM;
        hw->cmq.crq.desc_num = HNS3_NIC_CMQ_DESC_NUM;

        /* Setup the Tx write back timeout */
        hw->cmq.tx_timeout = HNS3_CMDQ_TX_TIMEOUT;

        /* Setup queue rings */
        ret = hns3_alloc_cmd_queue(hw, HNS3_TYPE_CSQ);
        if (ret) {
                PMD_INIT_LOG(ERR, "CSQ ring setup error %d", ret);
                return ret;
        }

        ret = hns3_alloc_cmd_queue(hw, HNS3_TYPE_CRQ);
        if (ret) {
                PMD_INIT_LOG(ERR, "CRQ ring setup error %d", ret);
                goto err_crq;
        }

        return 0;

err_crq:
        hns3_free_cmd_desc(hw, &hw->cmq.csq);

        return ret;
}

static void
hns3_update_dev_lsc_cap(struct hns3_hw *hw, int fw_compact_cmd_result)
{
        struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];

        if (hw->adapter_state != HNS3_NIC_UNINITIALIZED)
                return;

        if (fw_compact_cmd_result != 0) {
                /*
                 * If fw_compact_cmd_result is non-zero, the firmware doesn't
                 * support the link status change interrupt.
                 * The framework has already set the RTE_ETH_DEV_INTR_LSC bit
                 * because the driver declared RTE_PCI_DRV_INTR_LSC in
                 * drv_flags, so the RTE_ETH_DEV_INTR_LSC capability must be
                 * cleared when the firmware doesn't support the interrupt.
                 */
                dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
        }
}

static int
hns3_apply_fw_compat_cmd_result(struct hns3_hw *hw, int result)
{
        if (result != 0 && hns3_dev_copper_supported(hw)) {
                hns3_err(hw, "firmware fails to initialize the PHY, ret = %d.",
                         result);
                return result;
        }

        hns3_update_dev_lsc_cap(hw, result);

        return 0;
}

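/*
 * Report the driver's compatibility options to the firmware. When is_init is
 * true, enable link event reporting and, on copper ports, ask the firmware
 * to take over the PHY driver; when is_init is false, clear these options
 * again.
 */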
static int
hns3_firmware_compat_config(struct hns3_hw *hw, bool is_init)
{
        struct hns3_firmware_compat_cmd *req;
        struct hns3_cmd_desc desc;
        uint32_t compat = 0;

#if defined(RTE_HNS3_ONLY_1630_FPGA)
        /* If the PHY driver in the IMP is not configured (as indicated by the
         * reserved register), the driver will use a temporary PHY driver.
         */
        struct rte_pci_device *pci_dev;
        struct rte_eth_dev *eth_dev;
        uint8_t revision;
        int ret;

        eth_dev = &rte_eth_devices[hw->data->port_id];
        pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        /* Get PCI revision id */
        ret = rte_pci_read_config(pci_dev, &revision, HNS3_PCI_REVISION_ID_LEN,
                                  HNS3_PCI_REVISION_ID);
        if (ret != HNS3_PCI_REVISION_ID_LEN) {
                PMD_INIT_LOG(ERR, "failed to read pci revision id, ret = %d",
                             ret);
                return -EIO;
        }
        if (revision == PCI_REVISION_ID_HIP09_A) {
                struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
                if (hns3_dev_copper_supported(hw) == 0 || pf->is_tmp_phy) {
                        PMD_INIT_LOG(ERR, "***use temp phy driver in dpdk***");
                        pf->is_tmp_phy = true;
                        hns3_set_bit(hw->capability,
                                     HNS3_DEV_SUPPORT_COPPER_B, 1);
                        return 0;
                }

                PMD_INIT_LOG(ERR, "***use phy driver in imp***");
        }
#endif

        hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_FIRMWARE_COMPAT_CFG, false);
        req = (struct hns3_firmware_compat_cmd *)desc.data;

        if (is_init) {
                hns3_set_bit(compat, HNS3_LINK_EVENT_REPORT_EN_B, 1);
                hns3_set_bit(compat, HNS3_NCSI_ERROR_REPORT_EN_B, 0);
                if (hns3_dev_copper_supported(hw))
                        hns3_set_bit(compat, HNS3_FIRMWARE_PHY_DRIVER_EN_B, 1);
        }
        req->compat = rte_cpu_to_le_32(compat);

        return hns3_cmd_send(hw, &desc, 1);
}

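/*
 * (Re)initialize the command queue hardware: reset the software ring
 * indexes, program the ring base addresses and depths, query the firmware
 * version and capabilities and, on a PF, negotiate the firmware
 * compatibility options.
 */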
int
hns3_cmd_init(struct hns3_hw *hw)
{
        struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
        uint32_t version;
        int ret;

        rte_spinlock_lock(&hw->cmq.csq.lock);
        rte_spinlock_lock(&hw->cmq.crq.lock);

        hw->cmq.csq.next_to_clean = 0;
        hw->cmq.csq.next_to_use = 0;
        hw->cmq.crq.next_to_clean = 0;
        hw->cmq.crq.next_to_use = 0;
        hw->mbx_resp.head = 0;
        hw->mbx_resp.tail = 0;
        hw->mbx_resp.lost = 0;
        hns3_cmd_init_regs(hw);

        rte_spinlock_unlock(&hw->cmq.crq.lock);
        rte_spinlock_unlock(&hw->cmq.csq.lock);

        /*
         * Check whether a new reset is pending, because a higher level
         * reset may happen while a lower level reset is being processed.
         */
        if (is_reset_pending(HNS3_DEV_HW_TO_ADAPTER(hw))) {
                PMD_INIT_LOG(ERR, "New reset pending, keep disable cmd");
                ret = -EBUSY;
                goto err_cmd_init;
        }
        __atomic_store_n(&hw->reset.disable_cmd, 0, __ATOMIC_RELAXED);

        ret = hns3_cmd_query_firmware_version_and_capability(hw);
        if (ret) {
                PMD_INIT_LOG(ERR, "firmware version query failed %d", ret);
                goto err_cmd_init;
        }

        version = hw->fw_version;
        PMD_INIT_LOG(INFO, "The firmware version is %u.%u.%u.%u",
                     hns3_get_field(version, HNS3_FW_VERSION_BYTE3_M,
                                    HNS3_FW_VERSION_BYTE3_S),
                     hns3_get_field(version, HNS3_FW_VERSION_BYTE2_M,
                                    HNS3_FW_VERSION_BYTE2_S),
                     hns3_get_field(version, HNS3_FW_VERSION_BYTE1_M,
                                    HNS3_FW_VERSION_BYTE1_S),
                     hns3_get_field(version, HNS3_FW_VERSION_BYTE0_M,
                                    HNS3_FW_VERSION_BYTE0_S));

        if (hns->is_vf)
                return 0;

        /*
         * Ask the firmware to enable some features. A fiber port can still
         * work without them, but a copper port can't, because the firmware
         * would fail to take over the PHY.
         */
        ret = hns3_firmware_compat_config(hw, true);
        if (ret)
                PMD_INIT_LOG(WARNING, "firmware compatible features not "
                             "supported, ret = %d.", ret);

        /*
         * Perform some corresponding operations based on the firmware
         * compatibility configuration result.
         */
        ret = hns3_apply_fw_compat_cmd_result(hw, ret);
        if (ret)
                goto err_cmd_init;

        return 0;

err_cmd_init:
        __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
        return ret;
}

static void
hns3_destroy_queue(struct hns3_hw *hw, struct hns3_cmq_ring *ring)
{
        rte_spinlock_lock(&ring->lock);

        hns3_free_cmd_desc(hw, ring);

        rte_spinlock_unlock(&ring->lock);
}

void
hns3_cmd_destroy_queue(struct hns3_hw *hw)
{
        hns3_destroy_queue(hw, &hw->cmq.csq);
        hns3_destroy_queue(hw, &hw->cmq.crq);
}

void
hns3_cmd_uninit(struct hns3_hw *hw)
{
        struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);

        if (!hns->is_vf)
                (void)hns3_firmware_compat_config(hw, false);

        __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);

        /*
         * A delay is added to ensure that the register cleanup operations
         * are not performed concurrently with firmware commands and that all
         * outstanding commands have been executed.
         * Concurrency may occur in two scenarios: an asynchronous command or
         * a command that timed out. If a command fails to be executed because
         * the firmware is busy, it will be processed in the firmware's next
         * scheduling cycle.
         */
        rte_delay_ms(HNS3_CMDQ_CLEAR_WAIT_TIME);

        rte_spinlock_lock(&hw->cmq.csq.lock);
        rte_spinlock_lock(&hw->cmq.crq.lock);
        hns3_cmd_clear_regs(hw);
        rte_spinlock_unlock(&hw->cmq.crq.lock);
        rte_spinlock_unlock(&hw->cmq.csq.lock);
}