linux/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-direction.h>
#include "hclge_cmd.h"
#include "hnae3.h"
#include "hclge_main.h"

#define hclge_is_csq(ring) ((ring)->flag & HCLGE_TYPE_CSQ)

#define cmq_ring_to_dev(ring)   (&(ring)->dev->pdev->dev)

static int hclge_ring_space(struct hclge_cmq_ring *ring)
{
        int ntu = ring->next_to_use;
        int ntc = ring->next_to_clean;
        int used = (ntu - ntc + ring->desc_num) % ring->desc_num;

        return ring->desc_num - used - 1;
}
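
/* Worked example (illustrative, not part of the driver): with
 * desc_num = 1024, next_to_use = 10 and next_to_clean = 1000,
 * used = (10 - 1000 + 1024) % 1024 = 34, so the ring still has
 * 1024 - 34 - 1 = 989 free descriptors. One slot is always kept
 * unused so that a full ring can be told apart from an empty one.
 */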

static int is_valid_csq_clean_head(struct hclge_cmq_ring *ring, int h)
{
        int u = ring->next_to_use;
        int c = ring->next_to_clean;

        if (unlikely(h >= ring->desc_num))
                return 0;

        return u > c ? (h > c && h <= u) : (h > c || h <= u);
}
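
/* Illustrative check (not part of the driver): with desc_num = 1024,
 * next_to_clean = 100 and next_to_use = 200, any head written back by
 * hardware must fall in (100, 200]. In the wrapped case (u <= c) the
 * valid range is (c, desc_num) plus [0, u]. Anything outside means the
 * hardware and driver views of the queue have diverged.
 */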

static int hclge_alloc_cmd_desc(struct hclge_cmq_ring *ring)
{
        int size = ring->desc_num * sizeof(struct hclge_desc);

        ring->desc = dma_zalloc_coherent(cmq_ring_to_dev(ring),
                                         size, &ring->desc_dma_addr,
                                         GFP_KERNEL);
        if (!ring->desc)
                return -ENOMEM;

        return 0;
}

static void hclge_free_cmd_desc(struct hclge_cmq_ring *ring)
{
        int size = ring->desc_num * sizeof(struct hclge_desc);

        if (ring->desc) {
                dma_free_coherent(cmq_ring_to_dev(ring), size,
                                  ring->desc, ring->desc_dma_addr);
                ring->desc = NULL;
        }
}

static int hclge_alloc_cmd_queue(struct hclge_dev *hdev, int ring_type)
{
        struct hclge_hw *hw = &hdev->hw;
        struct hclge_cmq_ring *ring =
                (ring_type == HCLGE_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq;
        int ret;

        ring->ring_type = ring_type;
        ring->dev = hdev;

        ret = hclge_alloc_cmd_desc(ring);
        if (ret) {
                dev_err(&hdev->pdev->dev, "descriptor %s alloc error %d\n",
                        (ring_type == HCLGE_TYPE_CSQ) ? "CSQ" : "CRQ", ret);
                return ret;
        }

        return 0;
}

void hclge_cmd_reuse_desc(struct hclge_desc *desc, bool is_read)
{
        desc->flag = cpu_to_le16(HCLGE_CMD_FLAG_NO_INTR | HCLGE_CMD_FLAG_IN);
        if (is_read)
                desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_WR);
        else
                desc->flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR);
}

void hclge_cmd_setup_basic_desc(struct hclge_desc *desc,
                                enum hclge_opcode_type opcode, bool is_read)
{
        memset((void *)desc, 0, sizeof(struct hclge_desc));
        desc->opcode = cpu_to_le16(opcode);
        desc->flag = cpu_to_le16(HCLGE_CMD_FLAG_NO_INTR | HCLGE_CMD_FLAG_IN);

        if (is_read)
                desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_WR);
}
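
/* A minimal usage sketch (illustrative only; `hw`, `ret` and `version`
 * are assumed to be declared by the caller, and the response layout
 * comes from hclge_cmd.h):
 *
 *      struct hclge_desc desc;
 *
 *      hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FW_VER, true);
 *      ret = hclge_cmd_send(hw, &desc, 1);
 *      if (!ret)
 *              version = le32_to_cpu(
 *                  ((struct hclge_query_version_cmd *)desc.data)->firmware);
 *
 * hclge_cmd_query_firmware_version() below follows exactly this pattern.
 */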

static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring)
{
        dma_addr_t dma = ring->desc_dma_addr;
        struct hclge_dev *hdev = ring->dev;
        struct hclge_hw *hw = &hdev->hw;

        if (ring->ring_type == HCLGE_TYPE_CSQ) {
                hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG,
                                lower_32_bits(dma));
                hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG,
                                upper_32_bits(dma));
                hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG,
                                (ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S) |
                                HCLGE_NIC_CMQ_ENABLE);
                hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0);
                hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0);
        } else {
                hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_L_REG,
                                lower_32_bits(dma));
                hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_H_REG,
                                upper_32_bits(dma));
                hclge_write_dev(hw, HCLGE_NIC_CRQ_DEPTH_REG,
                                (ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S) |
                                HCLGE_NIC_CMQ_ENABLE);
                hclge_write_dev(hw, HCLGE_NIC_CRQ_HEAD_REG, 0);
                hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0);
        }
}
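
/* Illustrative register value (assuming HCLGE_NIC_CMQ_DESC_NUM_S is 3
 * in hclge_cmd.h, i.e. the depth field counts descriptors in units of
 * eight): with desc_num = 1024 the driver writes
 * (1024 >> 3) | HCLGE_NIC_CMQ_ENABLE = 0x80 | HCLGE_NIC_CMQ_ENABLE
 * to the DEPTH register, then zeroes HEAD and TAIL so the ring starts
 * out empty.
 */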

static void hclge_cmd_init_regs(struct hclge_hw *hw)
{
        hclge_cmd_config_regs(&hw->cmq.csq);
        hclge_cmd_config_regs(&hw->cmq.crq);
}

static int hclge_cmd_csq_clean(struct hclge_hw *hw)
{
        struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
        struct hclge_cmq_ring *csq = &hw->cmq.csq;
        u32 head;
        int clean;

        head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);
        rmb(); /* Make sure head is ready before touching any data */

        if (!is_valid_csq_clean_head(csq, head)) {
                dev_warn(&hdev->pdev->dev, "wrong cmd head (%u, %d-%d)\n", head,
                         csq->next_to_use, csq->next_to_clean);
                dev_warn(&hdev->pdev->dev,
                         "Disabling any further commands to IMP firmware\n");
                set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
                dev_warn(&hdev->pdev->dev,
                         "IMP firmware watchdog reset soon expected!\n");
                return -EIO;
        }

        clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
        csq->next_to_clean = head;
        return clean;
}

static int hclge_cmd_csq_done(struct hclge_hw *hw)
{
        u32 head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);

        return head == hw->cmq.csq.next_to_use;
}

static bool hclge_is_special_opcode(u16 opcode)
{
        /* these commands have several descriptors,
         * and use the first one to hold the opcode and return value
         */
        static const u16 spec_opcode[] = {HCLGE_OPC_STATS_64_BIT,
                HCLGE_OPC_STATS_32_BIT, HCLGE_OPC_STATS_MAC};
        int i;

        for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) {
                if (spec_opcode[i] == opcode)
                        return true;
        }

        return false;
}

/**
 * hclge_cmd_send - send command to command queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor for describing the command
 * @num: the number of descriptors to be sent
 *
 * This is the main send command for the command queue. It posts the
 * descriptors to the CSQ, waits for the hardware write back if the
 * command is synchronous, and cleans the send queue afterwards.
 **/
int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
{
        struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
        struct hclge_desc *desc_to_use;
        bool complete = false;
        u32 timeout = 0;
        int handle = 0;
        int retval = 0;
        u16 opcode, desc_ret;
        int ntc;

        spin_lock_bh(&hw->cmq.csq.lock);

        if (num > hclge_ring_space(&hw->cmq.csq) ||
            test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) {
                spin_unlock_bh(&hw->cmq.csq.lock);
                return -EBUSY;
        }

        /* Record the location of desc in the ring for this time,
         * which will be used by hardware for the write back.
         */
        ntc = hw->cmq.csq.next_to_use;
        opcode = le16_to_cpu(desc[0].opcode);
        while (handle < num) {
                desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
                *desc_to_use = desc[handle];
                (hw->cmq.csq.next_to_use)++;
                if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num)
                        hw->cmq.csq.next_to_use = 0;
                handle++;
        }

        /* Write to hardware */
        hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, hw->cmq.csq.next_to_use);

        /* If the command is sync, wait for the firmware to write back;
         * if multiple descriptors are sent, use the first one to check.
         */
        if (HCLGE_SEND_SYNC(le16_to_cpu(desc->flag))) {
                do {
                        if (hclge_cmd_csq_done(hw)) {
                                complete = true;
                                break;
                        }
                        udelay(1);
                        timeout++;
                } while (timeout < hw->cmq.tx_timeout);
        }

        if (!complete) {
                retval = -EAGAIN;
        } else {
                handle = 0;
                while (handle < num) {
                        /* Get the result of the hardware write back */
                        desc_to_use = &hw->cmq.csq.desc[ntc];
                        desc[handle] = *desc_to_use;

                        if (likely(!hclge_is_special_opcode(opcode)))
                                desc_ret = le16_to_cpu(desc[handle].retval);
                        else
                                desc_ret = le16_to_cpu(desc[0].retval);

                        if (desc_ret == HCLGE_CMD_EXEC_SUCCESS)
                                retval = 0;
                        else
                                retval = -EIO;
                        hw->cmq.last_status = desc_ret;
                        ntc++;
                        handle++;
                        if (ntc == hw->cmq.csq.desc_num)
                                ntc = 0;
                }
        }

        /* Clean the command send queue */
        handle = hclge_cmd_csq_clean(hw);
        if (handle < 0)
                retval = handle;
        else if (handle != num)
                dev_warn(&hdev->pdev->dev,
                         "cleaned %d, need to clean %d\n", handle, num);

        spin_unlock_bh(&hw->cmq.csq.lock);

        return retval;
}
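
/* A minimal multi-descriptor sketch (illustrative; it mirrors how the
 * statistics commands are issued elsewhere in the driver, with a
 * hypothetical descriptor count of 4):
 *
 *      struct hclge_desc desc[4];
 *      int ret;
 *
 *      hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_64_BIT, true);
 *      ret = hclge_cmd_send(hw, desc, 4);
 *
 * Because HCLGE_OPC_STATS_64_BIT is matched by hclge_is_special_opcode(),
 * only desc[0].retval carries the command status; the remaining
 * descriptors carry payload only.
 */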

static int hclge_cmd_query_firmware_version(struct hclge_hw *hw,
                                            u32 *version)
{
        struct hclge_query_version_cmd *resp;
        struct hclge_desc desc;
        int ret;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FW_VER, true);
        resp = (struct hclge_query_version_cmd *)desc.data;

        ret = hclge_cmd_send(hw, &desc, 1);
        if (!ret)
                *version = le32_to_cpu(resp->firmware);

        return ret;
}

int hclge_cmd_queue_init(struct hclge_dev *hdev)
{
        int ret;

        /* Setup the queue entries for the command queue */
        hdev->hw.cmq.csq.desc_num = HCLGE_NIC_CMQ_DESC_NUM;
        hdev->hw.cmq.crq.desc_num = HCLGE_NIC_CMQ_DESC_NUM;

        /* Setup Tx write back timeout */
        hdev->hw.cmq.tx_timeout = HCLGE_CMDQ_TX_TIMEOUT;

        /* Setup queue rings */
        ret = hclge_alloc_cmd_queue(hdev, HCLGE_TYPE_CSQ);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "CSQ ring setup error %d\n", ret);
                return ret;
        }

        ret = hclge_alloc_cmd_queue(hdev, HCLGE_TYPE_CRQ);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "CRQ ring setup error %d\n", ret);
                goto err_csq;
        }

        return 0;

err_csq:
        hclge_free_cmd_desc(&hdev->hw.cmq.csq);
        return ret;
}

int hclge_cmd_init(struct hclge_dev *hdev)
{
        u32 version;
        int ret;

        hdev->hw.cmq.csq.next_to_clean = 0;
        hdev->hw.cmq.csq.next_to_use = 0;
        hdev->hw.cmq.crq.next_to_clean = 0;
        hdev->hw.cmq.crq.next_to_use = 0;

        /* Setup the lock for command queue */
        spin_lock_init(&hdev->hw.cmq.csq.lock);
        spin_lock_init(&hdev->hw.cmq.crq.lock);

        hclge_cmd_init_regs(&hdev->hw);
        clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);

        ret = hclge_cmd_query_firmware_version(&hdev->hw, &version);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "firmware version query failed %d\n", ret);
                return ret;
        }
        hdev->fw_version = version;

        dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version);

        return 0;
}
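
/* Expected ordering (as driven from hclge_main.c): hclge_cmd_queue_init()
 * allocates the rings once at probe time, hclge_cmd_init() programs the
 * hardware and may be called again after a reset, and
 * hclge_destroy_cmd_queue() below frees the rings at teardown.
 */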

static void hclge_destroy_queue(struct hclge_cmq_ring *ring)
{
        spin_lock(&ring->lock);
        hclge_free_cmd_desc(ring);
        spin_unlock(&ring->lock);
}

void hclge_destroy_cmd_queue(struct hclge_hw *hw)
{
        hclge_destroy_queue(&hw->cmq.csq);
        hclge_destroy_queue(&hw->cmq.crq);
}