linux/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2021-2021 Hisilicon Limited.

#include "hnae3.h"
#include "hclge_comm_cmd.h"

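/* Program the base address, depth and head/tail pointers of a command
 * ring into the corresponding CSQ or CRQ registers. For the CSQ, the
 * firmware-ready bit in the depth register is preserved.
 */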
static void hclge_comm_cmd_config_regs(struct hclge_comm_hw *hw,
                                       struct hclge_comm_cmq_ring *ring)
{
        dma_addr_t dma = ring->desc_dma_addr;
        u32 reg_val;

        if (ring->ring_type == HCLGE_COMM_TYPE_CSQ) {
                hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG,
                                     lower_32_bits(dma));
                hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG,
                                     upper_32_bits(dma));
                reg_val = hclge_comm_read_dev(hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG);
                reg_val &= HCLGE_COMM_NIC_SW_RST_RDY;
                reg_val |= ring->desc_num >> HCLGE_COMM_NIC_CMQ_DESC_NUM_S;
                hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG, reg_val);
                hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG, 0);
                hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_TAIL_REG, 0);
        } else {
                hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG,
                                     lower_32_bits(dma));
                hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG,
                                     upper_32_bits(dma));
                reg_val = ring->desc_num >> HCLGE_COMM_NIC_CMQ_DESC_NUM_S;
                hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_DEPTH_REG, reg_val);
                hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_HEAD_REG, 0);
                hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG, 0);
        }
}

void hclge_comm_cmd_init_regs(struct hclge_comm_hw *hw)
{
        hclge_comm_cmd_config_regs(hw, &hw->cmq.csq);
        hclge_comm_cmd_config_regs(hw, &hw->cmq.crq);
}

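/* Reinitialize the flag field of a completed descriptor so it can be
 * submitted again: the IN and NO_INTR flags are restored and the WR
 * (read) flag is set or cleared according to @is_read.
 */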
void hclge_comm_cmd_reuse_desc(struct hclge_desc *desc, bool is_read)
{
        desc->flag = cpu_to_le16(HCLGE_COMM_CMD_FLAG_NO_INTR |
                                 HCLGE_COMM_CMD_FLAG_IN);
        if (is_read)
                desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_WR);
        else
                desc->flag &= cpu_to_le16(~HCLGE_COMM_CMD_FLAG_WR);
}

static void hclge_comm_set_default_capability(struct hnae3_ae_dev *ae_dev,
                                              bool is_pf)
{
        set_bit(HNAE3_DEV_SUPPORT_FD_B, ae_dev->caps);
        set_bit(HNAE3_DEV_SUPPORT_GRO_B, ae_dev->caps);
        if (is_pf && ae_dev->dev_version == HNAE3_DEVICE_VERSION_V2) {
                set_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps);
                set_bit(HNAE3_DEV_SUPPORT_PAUSE_B, ae_dev->caps);
        }
}

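/* Zero a descriptor and fill in the opcode and default flags. The WR
 * flag marks the command as a read (query) request to the firmware.
 */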
void hclge_comm_cmd_setup_basic_desc(struct hclge_desc *desc,
                                     enum hclge_opcode_type opcode,
                                     bool is_read)
{
        memset((void *)desc, 0, sizeof(struct hclge_desc));
        desc->opcode = cpu_to_le16(opcode);
        desc->flag = cpu_to_le16(HCLGE_COMM_CMD_FLAG_NO_INTR |
                                 HCLGE_COMM_CMD_FLAG_IN);

        if (is_read)
                desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_WR);
}

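/* Negotiate optional compatibility features with the IMP firmware.
 * When @en is true the feature bits supported by the driver are
 * reported; when false an all-zero configuration is sent, which
 * disables them.
 */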
int hclge_comm_firmware_compat_config(struct hnae3_ae_dev *ae_dev,
                                      struct hclge_comm_hw *hw, bool en)
{
        struct hclge_comm_firmware_compat_cmd *req;
        struct hclge_desc desc;
        u32 compat = 0;

        hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_IMP_COMPAT_CFG, false);

        if (en) {
                req = (struct hclge_comm_firmware_compat_cmd *)desc.data;

                hnae3_set_bit(compat, HCLGE_COMM_LINK_EVENT_REPORT_EN_B, 1);
                hnae3_set_bit(compat, HCLGE_COMM_NCSI_ERROR_REPORT_EN_B, 1);
                if (hclge_comm_dev_phy_imp_supported(ae_dev))
                        hnae3_set_bit(compat, HCLGE_COMM_PHY_IMP_EN_B, 1);
                hnae3_set_bit(compat, HCLGE_COMM_MAC_STATS_EXT_EN_B, 1);
                hnae3_set_bit(compat, HCLGE_COMM_SYNC_RX_RING_HEAD_EN_B, 1);

                req->compat = cpu_to_le32(compat);
        }

        return hclge_comm_cmd_send(hw, &desc, 1);
}

void hclge_comm_free_cmd_desc(struct hclge_comm_cmq_ring *ring)
{
        int size = ring->desc_num * sizeof(struct hclge_desc);

        if (!ring->desc)
                return;

        dma_free_coherent(&ring->pdev->dev, size,
                          ring->desc, ring->desc_dma_addr);
        ring->desc = NULL;
}

static int hclge_comm_alloc_cmd_desc(struct hclge_comm_cmq_ring *ring)
{
        int size = ring->desc_num * sizeof(struct hclge_desc);

        ring->desc = dma_alloc_coherent(&ring->pdev->dev,
                                        size, &ring->desc_dma_addr, GFP_KERNEL);
        if (!ring->desc)
                return -ENOMEM;

        return 0;
}

static __le32 hclge_comm_build_api_caps(void)
{
        u32 api_caps = 0;

        hnae3_set_bit(api_caps, HCLGE_COMM_API_CAP_FLEX_RSS_TBL_B, 1);

        return cpu_to_le32(api_caps);
}

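/* Map capability bits reported by the IMP firmware to the ae_dev
 * capability bits used by the driver. A single firmware bit may map to
 * more than one driver bit (e.g. port VLAN bypass below).
 */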
static const struct hclge_comm_caps_bit_map hclge_pf_cmd_caps[] = {
        {HCLGE_COMM_CAP_UDP_GSO_B, HNAE3_DEV_SUPPORT_UDP_GSO_B},
        {HCLGE_COMM_CAP_PTP_B, HNAE3_DEV_SUPPORT_PTP_B},
        {HCLGE_COMM_CAP_INT_QL_B, HNAE3_DEV_SUPPORT_INT_QL_B},
        {HCLGE_COMM_CAP_TQP_TXRX_INDEP_B, HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B},
        {HCLGE_COMM_CAP_HW_TX_CSUM_B, HNAE3_DEV_SUPPORT_HW_TX_CSUM_B},
        {HCLGE_COMM_CAP_UDP_TUNNEL_CSUM_B, HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B},
        {HCLGE_COMM_CAP_FD_FORWARD_TC_B, HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B},
        {HCLGE_COMM_CAP_FEC_B, HNAE3_DEV_SUPPORT_FEC_B},
        {HCLGE_COMM_CAP_PAUSE_B, HNAE3_DEV_SUPPORT_PAUSE_B},
        {HCLGE_COMM_CAP_PHY_IMP_B, HNAE3_DEV_SUPPORT_PHY_IMP_B},
        {HCLGE_COMM_CAP_QB_B, HNAE3_DEV_SUPPORT_QB_B},
        {HCLGE_COMM_CAP_TX_PUSH_B, HNAE3_DEV_SUPPORT_TX_PUSH_B},
        {HCLGE_COMM_CAP_RAS_IMP_B, HNAE3_DEV_SUPPORT_RAS_IMP_B},
        {HCLGE_COMM_CAP_RXD_ADV_LAYOUT_B, HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B},
        {HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B,
         HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B},
        {HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B, HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B},
};

static const struct hclge_comm_caps_bit_map hclge_vf_cmd_caps[] = {
        {HCLGE_COMM_CAP_UDP_GSO_B, HNAE3_DEV_SUPPORT_UDP_GSO_B},
        {HCLGE_COMM_CAP_INT_QL_B, HNAE3_DEV_SUPPORT_INT_QL_B},
        {HCLGE_COMM_CAP_TQP_TXRX_INDEP_B, HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B},
        {HCLGE_COMM_CAP_HW_TX_CSUM_B, HNAE3_DEV_SUPPORT_HW_TX_CSUM_B},
        {HCLGE_COMM_CAP_UDP_TUNNEL_CSUM_B, HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B},
        {HCLGE_COMM_CAP_QB_B, HNAE3_DEV_SUPPORT_QB_B},
        {HCLGE_COMM_CAP_TX_PUSH_B, HNAE3_DEV_SUPPORT_TX_PUSH_B},
        {HCLGE_COMM_CAP_RXD_ADV_LAYOUT_B, HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B},
};

static void
hclge_comm_parse_capability(struct hnae3_ae_dev *ae_dev, bool is_pf,
                            struct hclge_comm_query_version_cmd *cmd)
{
        const struct hclge_comm_caps_bit_map *caps_map =
                                is_pf ? hclge_pf_cmd_caps : hclge_vf_cmd_caps;
        u32 size = is_pf ? ARRAY_SIZE(hclge_pf_cmd_caps) :
                                ARRAY_SIZE(hclge_vf_cmd_caps);
        u32 caps, i;

        caps = __le32_to_cpu(cmd->caps[0]);
        for (i = 0; i < size; i++)
                if (hnae3_get_bit(caps, caps_map[i].imp_bit))
                        set_bit(caps_map[i].local_bit, ae_dev->caps);
}

int hclge_comm_alloc_cmd_queue(struct hclge_comm_hw *hw, int ring_type)
{
        struct hclge_comm_cmq_ring *ring =
                (ring_type == HCLGE_COMM_TYPE_CSQ) ? &hw->cmq.csq :
                                                     &hw->cmq.crq;
        int ret;

        ring->ring_type = ring_type;

        ret = hclge_comm_alloc_cmd_desc(ring);
        if (ret)
                dev_err(&ring->pdev->dev, "descriptor %s alloc error %d\n",
                        (ring_type == HCLGE_COMM_TYPE_CSQ) ? "CSQ" : "CRQ",
                        ret);

        return ret;
}

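/* Query the firmware version and capability bits from the IMP, and
 * derive the device version from the reported hardware version and the
 * PCI revision ID.
 */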
int hclge_comm_cmd_query_version_and_capability(struct hnae3_ae_dev *ae_dev,
                                                struct hclge_comm_hw *hw,
                                                u32 *fw_version, bool is_pf)
{
        struct hclge_comm_query_version_cmd *resp;
        struct hclge_desc desc;
        int ret;

        hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FW_VER, true);
        resp = (struct hclge_comm_query_version_cmd *)desc.data;
        resp->api_caps = hclge_comm_build_api_caps();

        ret = hclge_comm_cmd_send(hw, &desc, 1);
        if (ret)
                return ret;

        *fw_version = le32_to_cpu(resp->firmware);

        ae_dev->dev_version = le32_to_cpu(resp->hardware) <<
                                         HNAE3_PCI_REVISION_BIT_SIZE;
        ae_dev->dev_version |= ae_dev->pdev->revision;

        if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
                hclge_comm_set_default_capability(ae_dev, is_pf);

        hclge_comm_parse_capability(ae_dev, is_pf, resp);

        return ret;
}

static const u16 spec_opcode[] = { HCLGE_OPC_STATS_64_BIT,
                                   HCLGE_OPC_STATS_32_BIT,
                                   HCLGE_OPC_STATS_MAC,
                                   HCLGE_OPC_STATS_MAC_ALL,
                                   HCLGE_OPC_QUERY_32_BIT_REG,
                                   HCLGE_OPC_QUERY_64_BIT_REG,
                                   HCLGE_QUERY_CLEAR_MPF_RAS_INT,
                                   HCLGE_QUERY_CLEAR_PF_RAS_INT,
                                   HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT,
                                   HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT,
                                   HCLGE_QUERY_ALL_ERR_INFO };

static bool hclge_comm_is_special_opcode(u16 opcode)
{
        /* These commands span several descriptors and use the first one
         * to hold the opcode and the return value.
         */
        u32 i;

        for (i = 0; i < ARRAY_SIZE(spec_opcode); i++)
                if (spec_opcode[i] == opcode)
                        return true;

        return false;
}

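/* Return the number of free descriptors in the ring. One slot is kept
 * unused so that a full ring can be distinguished from an empty one.
 */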
static int hclge_comm_ring_space(struct hclge_comm_cmq_ring *ring)
{
        int ntc = ring->next_to_clean;
        int ntu = ring->next_to_use;
        int used = (ntu - ntc + ring->desc_num) % ring->desc_num;

        return ring->desc_num - used - 1;
}

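/* Copy @num descriptors into the CSQ starting at next_to_use, wrapping
 * around to the beginning of the ring when the end is reached.
 */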
static void hclge_comm_cmd_copy_desc(struct hclge_comm_hw *hw,
                                     struct hclge_desc *desc, int num)
{
        struct hclge_desc *desc_to_use;
        int handle = 0;

        while (handle < num) {
                desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
                *desc_to_use = desc[handle];
                (hw->cmq.csq.next_to_use)++;
                if (hw->cmq.csq.next_to_use >= hw->cmq.csq.desc_num)
                        hw->cmq.csq.next_to_use = 0;
                handle++;
        }
}

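/* A head pointer read back from hardware is only valid if it lies
 * between next_to_clean and next_to_use, taking ring wraparound into
 * account.
 */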
static int hclge_comm_is_valid_csq_clean_head(struct hclge_comm_cmq_ring *ring,
                                              int head)
{
        int ntc = ring->next_to_clean;
        int ntu = ring->next_to_use;

        if (ntu > ntc)
                return head >= ntc && head <= ntu;

        return head >= ntc || head <= ntu;
}

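/* Advance next_to_clean up to the hardware head pointer and return the
 * number of descriptors cleaned, or -EIO if the head is out of range.
 */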
static int hclge_comm_cmd_csq_clean(struct hclge_comm_hw *hw)
{
        struct hclge_comm_cmq_ring *csq = &hw->cmq.csq;
        int clean;
        u32 head;

        head = hclge_comm_read_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG);
        rmb(); /* Make sure head is ready before touching any data */

        if (!hclge_comm_is_valid_csq_clean_head(csq, head)) {
                dev_warn(&hw->cmq.csq.pdev->dev, "wrong cmd head (%u, %d-%d)\n",
                         head, csq->next_to_use, csq->next_to_clean);
                dev_warn(&hw->cmq.csq.pdev->dev,
                         "Disabling any further commands to IMP firmware\n");
                set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state);
                dev_warn(&hw->cmq.csq.pdev->dev,
                         "IMP firmware watchdog reset soon expected!\n");
                return -EIO;
        }

        clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
        csq->next_to_clean = head;
        return clean;
}

static int hclge_comm_cmd_csq_done(struct hclge_comm_hw *hw)
{
        u32 head = hclge_comm_read_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG);

        return head == hw->cmq.csq.next_to_use;
}

static void hclge_comm_wait_for_resp(struct hclge_comm_hw *hw,
                                     bool *is_completed)
{
        u32 timeout = 0;

        do {
                if (hclge_comm_cmd_csq_done(hw)) {
                        *is_completed = true;
                        break;
                }
                udelay(1);
                timeout++;
        } while (timeout < hw->cmq.tx_timeout);
}

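/* Translate an IMP firmware return code into a standard errno value;
 * unknown codes are reported as -EIO.
 */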
static int hclge_comm_cmd_convert_err_code(u16 desc_ret)
{
        struct hclge_comm_errcode hclge_comm_cmd_errcode[] = {
                { HCLGE_COMM_CMD_EXEC_SUCCESS, 0 },
                { HCLGE_COMM_CMD_NO_AUTH, -EPERM },
                { HCLGE_COMM_CMD_NOT_SUPPORTED, -EOPNOTSUPP },
                { HCLGE_COMM_CMD_QUEUE_FULL, -EXFULL },
                { HCLGE_COMM_CMD_NEXT_ERR, -ENOSR },
                { HCLGE_COMM_CMD_UNEXE_ERR, -ENOTBLK },
                { HCLGE_COMM_CMD_PARA_ERR, -EINVAL },
                { HCLGE_COMM_CMD_RESULT_ERR, -ERANGE },
                { HCLGE_COMM_CMD_TIMEOUT, -ETIME },
                { HCLGE_COMM_CMD_HILINK_ERR, -ENOLINK },
                { HCLGE_COMM_CMD_QUEUE_ILLEGAL, -ENXIO },
                { HCLGE_COMM_CMD_INVALID, -EBADR },
        };
        u32 errcode_count = ARRAY_SIZE(hclge_comm_cmd_errcode);
        u32 i;

        for (i = 0; i < errcode_count; i++)
                if (hclge_comm_cmd_errcode[i].imp_errcode == desc_ret)
                        return hclge_comm_cmd_errcode[i].common_errno;

        return -EIO;
}

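/* Copy the descriptors written back by the firmware into the caller's
 * buffer and convert the firmware return code. For special (multi
 * descriptor) opcodes the return value lives in the first descriptor,
 * otherwise in the last one.
 */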
static int hclge_comm_cmd_check_retval(struct hclge_comm_hw *hw,
                                       struct hclge_desc *desc, int num,
                                       int ntc)
{
        u16 opcode, desc_ret;
        int handle;

        opcode = le16_to_cpu(desc[0].opcode);
        for (handle = 0; handle < num; handle++) {
                desc[handle] = hw->cmq.csq.desc[ntc];
                ntc++;
                if (ntc >= hw->cmq.csq.desc_num)
                        ntc = 0;
        }
        if (likely(!hclge_comm_is_special_opcode(opcode)))
                desc_ret = le16_to_cpu(desc[num - 1].retval);
        else
                desc_ret = le16_to_cpu(desc[0].retval);

        hw->cmq.last_status = desc_ret;

        return hclge_comm_cmd_convert_err_code(desc_ret);
}

static int hclge_comm_cmd_check_result(struct hclge_comm_hw *hw,
                                       struct hclge_desc *desc,
                                       int num, int ntc)
{
        bool is_completed = false;
        int handle, ret;

        /* If the command is synchronous, wait for the firmware to write
         * back; if multiple descriptors were sent, use the first one to
         * check for completion.
         */
        if (HCLGE_COMM_SEND_SYNC(le16_to_cpu(desc->flag)))
                hclge_comm_wait_for_resp(hw, &is_completed);

        if (!is_completed)
                ret = -EBADE;
        else
                ret = hclge_comm_cmd_check_retval(hw, desc, num, ntc);

        /* Clean the command send queue */
        handle = hclge_comm_cmd_csq_clean(hw);
        if (handle < 0)
                ret = handle;
        else if (handle != num)
                dev_warn(&hw->cmq.csq.pdev->dev,
                         "cleaned %d, need to clean %d\n", handle, num);
        return ret;
}

/**
 * hclge_comm_cmd_send - send command to command queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor for describing the command
 * @num: the number of descriptors to be sent
 *
 * This is the main send routine for the command queue: it copies the
 * descriptors into the CSQ, rings the doorbell, waits for the firmware
 * write back and cleans the queue.
 **/
int hclge_comm_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc,
                        int num)
{
        struct hclge_comm_cmq_ring *csq = &hw->cmq.csq;
        int ret;
        int ntc;

        spin_lock_bh(&hw->cmq.csq.lock);

        if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state)) {
                spin_unlock_bh(&hw->cmq.csq.lock);
                return -EBUSY;
        }

        if (num > hclge_comm_ring_space(&hw->cmq.csq)) {
                /* If the CMDQ ring is full, the SW HEAD and HW HEAD may
                 * differ, so update the SW HEAD pointer
                 * csq->next_to_clean.
                 */
                csq->next_to_clean =
                        hclge_comm_read_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG);
                spin_unlock_bh(&hw->cmq.csq.lock);
                return -EBUSY;
        }

        /* Record the location of the descriptors in the ring for this
         * time, which will be used for the hardware write back.
         */
        ntc = hw->cmq.csq.next_to_use;

        hclge_comm_cmd_copy_desc(hw, desc, num);

        /* Write to hardware */
        hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_TAIL_REG,
                             hw->cmq.csq.next_to_use);

        ret = hclge_comm_cmd_check_result(hw, desc, num, ntc);

        spin_unlock_bh(&hw->cmq.csq.lock);

        return ret;
}


static void hclge_comm_cmd_uninit_regs(struct hclge_comm_hw *hw)
{
        hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG, 0);
        hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG, 0);
        hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG, 0);
        hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG, 0);
        hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_TAIL_REG, 0);
        hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG, 0);
        hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG, 0);
        hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_DEPTH_REG, 0);
        hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_HEAD_REG, 0);
        hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG, 0);
}

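/* Shut the command queue down: disable the firmware compatibility
 * features, block further commands, wait for in-flight commands to
 * drain, clear the queue registers and free the descriptor memory.
 */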
void hclge_comm_cmd_uninit(struct hnae3_ae_dev *ae_dev,
                           struct hclge_comm_hw *hw)
{
        struct hclge_comm_cmq *cmdq = &hw->cmq;

        hclge_comm_firmware_compat_config(ae_dev, hw, false);
        set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state);

        /* Wait to ensure that the firmware completes any commands that
         * may be left over.
         */
        msleep(HCLGE_COMM_CMDQ_CLEAR_WAIT_TIME);
        spin_lock_bh(&cmdq->csq.lock);
        spin_lock(&cmdq->crq.lock);
        hclge_comm_cmd_uninit_regs(hw);
        spin_unlock(&cmdq->crq.lock);
        spin_unlock_bh(&cmdq->csq.lock);

        hclge_comm_free_cmd_desc(&cmdq->csq);
        hclge_comm_free_cmd_desc(&cmdq->crq);
}

int hclge_comm_cmd_queue_init(struct pci_dev *pdev, struct hclge_comm_hw *hw)
{
        struct hclge_comm_cmq *cmdq = &hw->cmq;
        int ret;

        /* Set up the locks for the command queues */
        spin_lock_init(&cmdq->csq.lock);
        spin_lock_init(&cmdq->crq.lock);

        cmdq->csq.pdev = pdev;
        cmdq->crq.pdev = pdev;

        /* Set up the number of queue entries used by the command queues */
        cmdq->csq.desc_num = HCLGE_COMM_NIC_CMQ_DESC_NUM;
        cmdq->crq.desc_num = HCLGE_COMM_NIC_CMQ_DESC_NUM;

        /* Set up the Tx write back timeout */
        cmdq->tx_timeout = HCLGE_COMM_CMDQ_TX_TIMEOUT;

        /* Set up the queue rings */
        ret = hclge_comm_alloc_cmd_queue(hw, HCLGE_COMM_TYPE_CSQ);
        if (ret) {
                dev_err(&pdev->dev, "CSQ ring setup error %d\n", ret);
                return ret;
        }

        ret = hclge_comm_alloc_cmd_queue(hw, HCLGE_COMM_TYPE_CRQ);
        if (ret) {
                dev_err(&pdev->dev, "CRQ ring setup error %d\n", ret);
                goto err_csq;
        }

        return 0;
err_csq:
        hclge_comm_free_cmd_desc(&hw->cmq.csq);
        return ret;
}

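/* (Re)initialize the command queue after setup or reset: reset the
 * ring pointers, program the queue registers, re-enable command
 * sending and query the firmware version and device capabilities.
 */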
int hclge_comm_cmd_init(struct hnae3_ae_dev *ae_dev, struct hclge_comm_hw *hw,
                        u32 *fw_version, bool is_pf,
                        unsigned long reset_pending)
{
        struct hclge_comm_cmq *cmdq = &hw->cmq;
        int ret;

        spin_lock_bh(&cmdq->csq.lock);
        spin_lock(&cmdq->crq.lock);

        cmdq->csq.next_to_clean = 0;
        cmdq->csq.next_to_use = 0;
        cmdq->crq.next_to_clean = 0;
        cmdq->crq.next_to_use = 0;

        hclge_comm_cmd_init_regs(hw);

        spin_unlock(&cmdq->crq.lock);
        spin_unlock_bh(&cmdq->csq.lock);

        clear_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state);

        /* Check if there is a new reset pending, because a higher level
         * reset may happen while a lower level reset is being processed.
         */
        if (reset_pending) {
                ret = -EBUSY;
                goto err_cmd_init;
        }

        /* Get the firmware version and the device capabilities */
        ret = hclge_comm_cmd_query_version_and_capability(ae_dev, hw,
                                                          fw_version, is_pf);
        if (ret) {
                dev_err(&ae_dev->pdev->dev,
                        "failed to query version and capabilities, ret = %d\n",
                        ret);
                goto err_cmd_init;
        }

        dev_info(&ae_dev->pdev->dev,
                 "The firmware version is %lu.%lu.%lu.%lu\n",
                 hnae3_get_field(*fw_version, HNAE3_FW_VERSION_BYTE3_MASK,
                                 HNAE3_FW_VERSION_BYTE3_SHIFT),
                 hnae3_get_field(*fw_version, HNAE3_FW_VERSION_BYTE2_MASK,
                                 HNAE3_FW_VERSION_BYTE2_SHIFT),
                 hnae3_get_field(*fw_version, HNAE3_FW_VERSION_BYTE1_MASK,
                                 HNAE3_FW_VERSION_BYTE1_SHIFT),
                 hnae3_get_field(*fw_version, HNAE3_FW_VERSION_BYTE0_MASK,
                                 HNAE3_FW_VERSION_BYTE0_SHIFT));

        if (!is_pf && ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3)
                return 0;

        /* Ask the firmware to enable some features; the driver can work
         * without them.
         */
        ret = hclge_comm_firmware_compat_config(ae_dev, hw, true);
        if (ret)
                dev_warn(&ae_dev->pdev->dev,
                         "Firmware compatible features not enabled(%d).\n",
                         ret);
        return 0;

err_cmd_init:
        set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state);

        return ret;
}