linux/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Huawei HiNIC PCI Express Linux driver
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/spinlock.h>
#include <linux/sizes.h>
#include <linux/atomic.h>
#include <linux/log2.h>
#include <linux/io.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <asm/byteorder.h>
#include <asm/barrier.h>

#include "hinic_common.h"
#include "hinic_hw_if.h"
#include "hinic_hw_eqs.h"
#include "hinic_hw_mgmt.h"
#include "hinic_hw_wqe.h"
#include "hinic_hw_wq.h"
#include "hinic_hw_cmdq.h"
#include "hinic_hw_io.h"
#include "hinic_hw_dev.h"

#define CMDQ_CEQE_TYPE_SHIFT                    0

#define CMDQ_CEQE_TYPE_MASK                     0x7

#define CMDQ_CEQE_GET(val, member)              \
                        (((val) >> CMDQ_CEQE_##member##_SHIFT) \
                         & CMDQ_CEQE_##member##_MASK)

#define CMDQ_WQE_ERRCODE_VAL_SHIFT              20

#define CMDQ_WQE_ERRCODE_VAL_MASK               0xF

#define CMDQ_WQE_ERRCODE_GET(val, member)       \
                        (((val) >> CMDQ_WQE_ERRCODE_##member##_SHIFT) \
                         & CMDQ_WQE_ERRCODE_##member##_MASK)

#define CMDQ_DB_PI_OFF(pi)              (((u16)LOWER_8_BITS(pi)) << 3)

#define CMDQ_DB_ADDR(db_base, pi)       ((db_base) + CMDQ_DB_PI_OFF(pi))

#define CMDQ_WQE_HEADER(wqe)            ((struct hinic_cmdq_header *)(wqe))

#define CMDQ_WQE_COMPLETED(ctrl_info)   \
                        HINIC_CMDQ_CTRL_GET(ctrl_info, HW_BUSY_BIT)

#define FIRST_DATA_TO_WRITE_LAST        sizeof(u64)

#define CMDQ_DB_OFF                     SZ_2K

#define CMDQ_WQEBB_SIZE                 64
#define CMDQ_WQE_SIZE                   64
#define CMDQ_DEPTH                      SZ_4K

#define CMDQ_WQ_PAGE_SIZE               SZ_256K

#define WQE_LCMD_SIZE                   64
#define WQE_SCMD_SIZE                   64

#define COMPLETE_LEN                    3

#define CMDQ_TIMEOUT                    1000

#define CMDQ_PFN(addr, page_size)       ((addr) >> (ilog2(page_size)))

#define cmdq_to_cmdqs(cmdq)     container_of((cmdq) - (cmdq)->cmdq_type, \
                                             struct hinic_cmdqs, cmdq[0])

#define cmdqs_to_func_to_io(cmdqs)      container_of(cmdqs, \
                                                     struct hinic_func_to_io, \
                                                     cmdqs)

enum cmdq_wqe_type {
        WQE_LCMD_TYPE = 0,
        WQE_SCMD_TYPE = 1,
};

enum completion_format {
        COMPLETE_DIRECT = 0,
        COMPLETE_SGE    = 1,
};

enum data_format {
        DATA_SGE        = 0,
        DATA_DIRECT     = 1,
};

enum bufdesc_len {
        BUFDESC_LCMD_LEN = 2,   /* 16 bytes - 2(8 byte unit) */
        BUFDESC_SCMD_LEN = 3,   /* 24 bytes - 3(8 byte unit) */
};

enum ctrl_sect_len {
        CTRL_SECT_LEN        = 1, /* 4 bytes (ctrl) - 1(8 byte unit) */
        CTRL_DIRECT_SECT_LEN = 2, /* 12 bytes (ctrl + rsvd) - 2(8 byte unit) */
};

enum cmdq_scmd_type {
        CMDQ_SET_ARM_CMD = 2,
};

enum cmdq_cmd_type {
        CMDQ_CMD_SYNC_DIRECT_RESP = 0,
        CMDQ_CMD_SYNC_SGE_RESP    = 1,
};

enum completion_request {
        NO_CEQ  = 0,
        CEQ_SET = 1,
};

/**
 * hinic_alloc_cmdq_buf - alloc buffer for sending command
 * @cmdqs: the cmdqs
 * @cmdq_buf: the buffer returned in this struct
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_alloc_cmdq_buf(struct hinic_cmdqs *cmdqs,
                         struct hinic_cmdq_buf *cmdq_buf)
{
        struct hinic_hwif *hwif = cmdqs->hwif;
        struct pci_dev *pdev = hwif->pdev;

        cmdq_buf->buf = dma_pool_alloc(cmdqs->cmdq_buf_pool, GFP_KERNEL,
                                       &cmdq_buf->dma_addr);
        if (!cmdq_buf->buf) {
                dev_err(&pdev->dev, "Failed to allocate cmd from the pool\n");
                return -ENOMEM;
        }

        return 0;
}

/**
 * hinic_free_cmdq_buf - free buffer
 * @cmdqs: the cmdqs
 * @cmdq_buf: the buffer to free that is in this struct
 **/
void hinic_free_cmdq_buf(struct hinic_cmdqs *cmdqs,
                         struct hinic_cmdq_buf *cmdq_buf)
{
        dma_pool_free(cmdqs->cmdq_buf_pool, cmdq_buf->buf, cmdq_buf->dma_addr);
}

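/**
 * cmdq_wqe_size_from_bdlen - get the wqe size from the buffer descriptor len
 * @len: length of the buffer descriptor section, in 8 byte units
 *
 * Return the wqe size that matches the given bufdesc length
 **/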
static unsigned int cmdq_wqe_size_from_bdlen(enum bufdesc_len len)
{
        unsigned int wqe_size = 0;

        switch (len) {
        case BUFDESC_LCMD_LEN:
                wqe_size = WQE_LCMD_SIZE;
                break;
        case BUFDESC_SCMD_LEN:
                wqe_size = WQE_SCMD_SIZE;
                break;
        }

        return wqe_size;
}

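/**
 * cmdq_set_sge_completion - set an SGE response in the completion section
 * @completion: completion section of the wqe to set
 * @buf_out: the buffer that will receive the response data
 **/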
static void cmdq_set_sge_completion(struct hinic_cmdq_completion *completion,
                                    struct hinic_cmdq_buf *buf_out)
{
        struct hinic_sge_resp *sge_resp = &completion->sge_resp;

        hinic_set_sge(&sge_resp->sge, buf_out->dma_addr, buf_out->size);
}

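/**
 * cmdq_prepare_wqe_ctrl - prepare the ctrl and header sections of a wqe
 * @wqe: the wqe to prepare
 * @wrapped: the wrapped bit for the current pass over the wq
 * @ack_type: ack type for the command
 * @mod: module on the card that will handle the command
 * @cmd: the command
 * @prod_idx: producer index of the wqe
 * @complete_format: direct or SGE completion
 * @data_format: direct or SGE command data
 * @buf_len: length of the buffer descriptor section, in 8 byte units
 **/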
static void cmdq_prepare_wqe_ctrl(struct hinic_cmdq_wqe *wqe, int wrapped,
                                  enum hinic_cmd_ack_type ack_type,
                                  enum hinic_mod_type mod, u8 cmd, u16 prod_idx,
                                  enum completion_format complete_format,
                                  enum data_format data_format,
                                  enum bufdesc_len buf_len)
{
        struct hinic_cmdq_wqe_lcmd *wqe_lcmd;
        struct hinic_cmdq_wqe_scmd *wqe_scmd;
        enum ctrl_sect_len ctrl_len;
        struct hinic_ctrl *ctrl;
        u32 saved_data;

        if (data_format == DATA_SGE) {
                wqe_lcmd = &wqe->wqe_lcmd;

                wqe_lcmd->status.status_info = 0;
                ctrl = &wqe_lcmd->ctrl;
                ctrl_len = CTRL_SECT_LEN;
        } else {
                wqe_scmd = &wqe->direct_wqe.wqe_scmd;

                wqe_scmd->status.status_info = 0;
                ctrl = &wqe_scmd->ctrl;
                ctrl_len = CTRL_DIRECT_SECT_LEN;
        }

        ctrl->ctrl_info = HINIC_CMDQ_CTRL_SET(prod_idx, PI)             |
                          HINIC_CMDQ_CTRL_SET(cmd, CMD)                 |
                          HINIC_CMDQ_CTRL_SET(mod, MOD)                 |
                          HINIC_CMDQ_CTRL_SET(ack_type, ACK_TYPE);

        CMDQ_WQE_HEADER(wqe)->header_info =
                HINIC_CMDQ_WQE_HEADER_SET(buf_len, BUFDESC_LEN)            |
                HINIC_CMDQ_WQE_HEADER_SET(complete_format, COMPLETE_FMT)   |
                HINIC_CMDQ_WQE_HEADER_SET(data_format, DATA_FMT)           |
                HINIC_CMDQ_WQE_HEADER_SET(CEQ_SET, COMPLETE_REQ)           |
                HINIC_CMDQ_WQE_HEADER_SET(COMPLETE_LEN, COMPLETE_SECT_LEN) |
                HINIC_CMDQ_WQE_HEADER_SET(ctrl_len, CTRL_LEN)              |
                HINIC_CMDQ_WQE_HEADER_SET(wrapped, TOGGLED_WRAPPED);

        saved_data = CMDQ_WQE_HEADER(wqe)->saved_data;
        saved_data = HINIC_SAVED_DATA_CLEAR(saved_data, ARM);

        if ((cmd == CMDQ_SET_ARM_CMD) && (mod == HINIC_MOD_COMM))
                CMDQ_WQE_HEADER(wqe)->saved_data |=
                                                HINIC_SAVED_DATA_SET(1, ARM);
        else
                CMDQ_WQE_HEADER(wqe)->saved_data = saved_data;
}

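/**
 * cmdq_set_lcmd_bufdesc - set the buffer descriptor of a long command wqe
 * @wqe_lcmd: the long command wqe
 * @buf_in: the buffer that holds the command data
 **/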
static void cmdq_set_lcmd_bufdesc(struct hinic_cmdq_wqe_lcmd *wqe_lcmd,
                                  struct hinic_cmdq_buf *buf_in)
{
        hinic_set_sge(&wqe_lcmd->buf_desc.sge, buf_in->dma_addr, buf_in->size);
}

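/**
 * cmdq_set_direct_wqe_data - copy the command data into a direct wqe
 * @wqe: the direct wqe to fill
 * @buf_in: the command data
 * @in_size: size of the command data
 **/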
static void cmdq_set_direct_wqe_data(struct hinic_cmdq_direct_wqe *wqe,
                                     void *buf_in, u32 in_size)
{
        struct hinic_cmdq_wqe_scmd *wqe_scmd = &wqe->wqe_scmd;

        wqe_scmd->buf_desc.buf_len = in_size;
        memcpy(wqe_scmd->buf_desc.data, buf_in, in_size);
}

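/**
 * cmdq_set_lcmd_wqe - prepare a complete long command wqe
 * @wqe: the wqe to prepare
 * @cmd_type: direct or SGE response command
 * @buf_in: the buffer that holds the command data
 * @buf_out: the buffer for an SGE response, unused for a direct response
 * @wrapped: the wrapped bit for the current pass over the wq
 * @ack_type: ack type for the command
 * @mod: module on the card that will handle the command
 * @cmd: the command
 * @prod_idx: producer index of the wqe
 **/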
static void cmdq_set_lcmd_wqe(struct hinic_cmdq_wqe *wqe,
                              enum cmdq_cmd_type cmd_type,
                              struct hinic_cmdq_buf *buf_in,
                              struct hinic_cmdq_buf *buf_out, int wrapped,
                              enum hinic_cmd_ack_type ack_type,
                              enum hinic_mod_type mod, u8 cmd, u16 prod_idx)
{
        struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &wqe->wqe_lcmd;
        enum completion_format complete_format;

        switch (cmd_type) {
        case CMDQ_CMD_SYNC_SGE_RESP:
                complete_format = COMPLETE_SGE;
                cmdq_set_sge_completion(&wqe_lcmd->completion, buf_out);
                break;
        case CMDQ_CMD_SYNC_DIRECT_RESP:
                complete_format = COMPLETE_DIRECT;
                wqe_lcmd->completion.direct_resp = 0;
                break;
        }

        cmdq_prepare_wqe_ctrl(wqe, wrapped, ack_type, mod, cmd,
                              prod_idx, complete_format, DATA_SGE,
                              BUFDESC_LCMD_LEN);

        cmdq_set_lcmd_bufdesc(wqe_lcmd, buf_in);
}

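/**
 * cmdq_set_direct_wqe - prepare a complete direct (short command) wqe
 * @wqe: the wqe to prepare
 * @cmd_type: direct or SGE response command
 * @buf_in: the command data to copy into the wqe
 * @in_size: size of the command data
 * @buf_out: the buffer for an SGE response, unused for a direct response
 * @wrapped: the wrapped bit for the current pass over the wq
 * @ack_type: ack type for the command
 * @mod: module on the card that will handle the command
 * @cmd: the command
 * @prod_idx: producer index of the wqe
 **/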
static void cmdq_set_direct_wqe(struct hinic_cmdq_wqe *wqe,
                                enum cmdq_cmd_type cmd_type,
                                void *buf_in, u16 in_size,
                                struct hinic_cmdq_buf *buf_out, int wrapped,
                                enum hinic_cmd_ack_type ack_type,
                                enum hinic_mod_type mod, u8 cmd, u16 prod_idx)
{
        struct hinic_cmdq_direct_wqe *direct_wqe = &wqe->direct_wqe;
        enum completion_format complete_format;
        struct hinic_cmdq_wqe_scmd *wqe_scmd;

        wqe_scmd = &direct_wqe->wqe_scmd;

        switch (cmd_type) {
        case CMDQ_CMD_SYNC_SGE_RESP:
                complete_format = COMPLETE_SGE;
                cmdq_set_sge_completion(&wqe_scmd->completion, buf_out);
                break;
        case CMDQ_CMD_SYNC_DIRECT_RESP:
                complete_format = COMPLETE_DIRECT;
                wqe_scmd->completion.direct_resp = 0;
                break;
        }

        cmdq_prepare_wqe_ctrl(wqe, wrapped, ack_type, mod, cmd, prod_idx,
                              complete_format, DATA_DIRECT, BUFDESC_SCMD_LEN);

        cmdq_set_direct_wqe_data(direct_wqe, buf_in, in_size);
}

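/**
 * cmdq_wqe_fill - copy a prepared wqe into the wq, header written last
 * @dst: the wqe location in the wq
 * @src: the prepared wqe to copy
 *
 * The first 8 bytes (the wqe header) are written only after the rest of
 * the wqe, so the HW does not see a partially written wqe.
 **/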
static void cmdq_wqe_fill(void *dst, void *src)
{
        memcpy(dst + FIRST_DATA_TO_WRITE_LAST, src + FIRST_DATA_TO_WRITE_LAST,
               CMDQ_WQE_SIZE - FIRST_DATA_TO_WRITE_LAST);

        wmb();          /* The first 8 bytes should be written last */

        *(u64 *)dst = *(u64 *)src;
}

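/**
 * cmdq_fill_db - fill the doorbell info for a cmdq
 * @db_info: the doorbell info to fill
 * @cmdq_type: the type of the cmdq
 * @prod_idx: producer index to report in the doorbell
 **/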
static void cmdq_fill_db(u32 *db_info,
                         enum hinic_cmdq_type cmdq_type, u16 prod_idx)
{
        *db_info = HINIC_CMDQ_DB_INFO_SET(UPPER_8_BITS(prod_idx), HI_PROD_IDX) |
                   HINIC_CMDQ_DB_INFO_SET(HINIC_CTRL_PATH, PATH)               |
                   HINIC_CMDQ_DB_INFO_SET(cmdq_type, CMDQ_TYPE)                |
                   HINIC_CMDQ_DB_INFO_SET(HINIC_DB_CMDQ_TYPE, DB_TYPE);
}

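/**
 * cmdq_set_db - write a doorbell to notify the HW of new wqes
 * @cmdq: the cmdq to ring the doorbell for
 * @cmdq_type: the type of the cmdq
 * @prod_idx: producer index to report in the doorbell
 **/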
static void cmdq_set_db(struct hinic_cmdq *cmdq,
                        enum hinic_cmdq_type cmdq_type, u16 prod_idx)
{
        u32 db_info;

        cmdq_fill_db(&db_info, cmdq_type, prod_idx);

        /* The data that is written to HW should be in Big Endian Format */
        db_info = cpu_to_be32(db_info);

        wmb();  /* write all before the doorbell */

        writel(db_info, CMDQ_DB_ADDR(cmdq->db_base, prod_idx));
}

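/**
 * cmdq_sync_cmd_direct_resp - send a sync command and wait for its response
 * @cmdq: the cmdq to send the command on
 * @mod: module on the card that will handle the command
 * @cmd: the command
 * @buf_in: the buffer for the command
 * @resp: the direct response to return
 *
 * Return 0 - Success, negative - Failure
 **/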
static int cmdq_sync_cmd_direct_resp(struct hinic_cmdq *cmdq,
                                     enum hinic_mod_type mod, u8 cmd,
                                     struct hinic_cmdq_buf *buf_in,
                                     u64 *resp)
{
        struct hinic_cmdq_wqe *curr_cmdq_wqe, cmdq_wqe;
        u16 curr_prod_idx, next_prod_idx;
        int errcode, wrapped, num_wqebbs;
        struct hinic_wq *wq = cmdq->wq;
        struct hinic_hw_wqe *hw_wqe;
        struct completion done;

        /* Keep doorbell index correct. bh - for tasklet(ceq). */
        spin_lock_bh(&cmdq->cmdq_lock);

        /* WQE_SIZE = WQEBB_SIZE, we will get the wq element and not shadow */
        hw_wqe = hinic_get_wqe(wq, WQE_LCMD_SIZE, &curr_prod_idx);
        if (IS_ERR(hw_wqe)) {
                spin_unlock_bh(&cmdq->cmdq_lock);
                return -EBUSY;
        }

        curr_cmdq_wqe = &hw_wqe->cmdq_wqe;

        wrapped = cmdq->wrapped;

        num_wqebbs = ALIGN(WQE_LCMD_SIZE, wq->wqebb_size) / wq->wqebb_size;
        next_prod_idx = curr_prod_idx + num_wqebbs;
        if (next_prod_idx >= wq->q_depth) {
                cmdq->wrapped = !cmdq->wrapped;
                next_prod_idx -= wq->q_depth;
        }

        cmdq->errcode[curr_prod_idx] = &errcode;

        init_completion(&done);
        cmdq->done[curr_prod_idx] = &done;

        cmdq_set_lcmd_wqe(&cmdq_wqe, CMDQ_CMD_SYNC_DIRECT_RESP, buf_in, NULL,
                          wrapped, HINIC_CMD_ACK_TYPE_CMDQ, mod, cmd,
                          curr_prod_idx);

        /* The data that is written to HW should be in Big Endian Format */
        hinic_cpu_to_be32(&cmdq_wqe, WQE_LCMD_SIZE);

        /* CMDQ WQE is not shadow, therefore wqe will be written to wq */
        cmdq_wqe_fill(curr_cmdq_wqe, &cmdq_wqe);

        cmdq_set_db(cmdq, HINIC_CMDQ_SYNC, next_prod_idx);

        spin_unlock_bh(&cmdq->cmdq_lock);

        if (!wait_for_completion_timeout(&done,
                                         msecs_to_jiffies(CMDQ_TIMEOUT))) {
                spin_lock_bh(&cmdq->cmdq_lock);

                if (cmdq->errcode[curr_prod_idx] == &errcode)
                        cmdq->errcode[curr_prod_idx] = NULL;

                if (cmdq->done[curr_prod_idx] == &done)
                        cmdq->done[curr_prod_idx] = NULL;

                spin_unlock_bh(&cmdq->cmdq_lock);

                return -ETIMEDOUT;
        }

        smp_rmb();      /* read error code after completion */

        if (resp) {
                struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &curr_cmdq_wqe->wqe_lcmd;

                *resp = cpu_to_be64(wqe_lcmd->completion.direct_resp);
        }

        if (errcode != 0)
                return -EFAULT;

        return 0;
}

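/**
 * cmdq_set_arm_bit - send an arm command on the cmdq as a direct wqe
 * @cmdq: the cmdq to send the arm command on
 * @buf_in: the arm command data
 * @in_size: size of the arm command data
 *
 * Return 0 - Success, negative - Failure
 **/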
static int cmdq_set_arm_bit(struct hinic_cmdq *cmdq, void *buf_in,
                            u16 in_size)
{
        struct hinic_cmdq_wqe *curr_cmdq_wqe, cmdq_wqe;
        u16 curr_prod_idx, next_prod_idx;
        struct hinic_wq *wq = cmdq->wq;
        struct hinic_hw_wqe *hw_wqe;
        int wrapped, num_wqebbs;

        /* Keep doorbell index correct */
        spin_lock(&cmdq->cmdq_lock);

        /* WQE_SIZE = WQEBB_SIZE, we will get the wq element and not shadow */
        hw_wqe = hinic_get_wqe(wq, WQE_SCMD_SIZE, &curr_prod_idx);
        if (IS_ERR(hw_wqe)) {
                spin_unlock(&cmdq->cmdq_lock);
                return -EBUSY;
        }

        curr_cmdq_wqe = &hw_wqe->cmdq_wqe;

        wrapped = cmdq->wrapped;

        num_wqebbs = ALIGN(WQE_SCMD_SIZE, wq->wqebb_size) / wq->wqebb_size;
        next_prod_idx = curr_prod_idx + num_wqebbs;
        if (next_prod_idx >= wq->q_depth) {
                cmdq->wrapped = !cmdq->wrapped;
                next_prod_idx -= wq->q_depth;
        }

        cmdq_set_direct_wqe(&cmdq_wqe, CMDQ_CMD_SYNC_DIRECT_RESP, buf_in,
                            in_size, NULL, wrapped, HINIC_CMD_ACK_TYPE_CMDQ,
                            HINIC_MOD_COMM, CMDQ_SET_ARM_CMD, curr_prod_idx);

        /* The data that is written to HW should be in Big Endian Format */
        hinic_cpu_to_be32(&cmdq_wqe, WQE_SCMD_SIZE);

        /* cmdq wqe is not shadow, therefore wqe will be written to wq */
        cmdq_wqe_fill(curr_cmdq_wqe, &cmdq_wqe);

        cmdq_set_db(cmdq, HINIC_CMDQ_SYNC, next_prod_idx);

        spin_unlock(&cmdq->cmdq_lock);
        return 0;
}

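/**
 * cmdq_params_valid - check that the command data fits in a cmdq buffer
 * @buf_in: the buffer for the command
 *
 * Return 0 - Success, negative - Failure
 **/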
static int cmdq_params_valid(struct hinic_cmdq_buf *buf_in)
{
        if (buf_in->size > HINIC_CMDQ_MAX_DATA_SIZE)
                return -EINVAL;

        return 0;
}

/**
 * hinic_cmdq_direct_resp - send command with direct data as resp
 * @cmdqs: the cmdqs
 * @mod: module on the card that will handle the command
 * @cmd: the command
 * @buf_in: the buffer for the command
 * @resp: the response to return
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_cmdq_direct_resp(struct hinic_cmdqs *cmdqs,
                           enum hinic_mod_type mod, u8 cmd,
                           struct hinic_cmdq_buf *buf_in, u64 *resp)
{
        struct hinic_hwif *hwif = cmdqs->hwif;
        struct pci_dev *pdev = hwif->pdev;
        int err;

        err = cmdq_params_valid(buf_in);
        if (err) {
                dev_err(&pdev->dev, "Invalid CMDQ parameters\n");
                return err;
        }

        return cmdq_sync_cmd_direct_resp(&cmdqs->cmdq[HINIC_CMDQ_SYNC],
                                         mod, cmd, buf_in, resp);
}

/**
 * hinic_set_arm_bit - set arm bit to enable the interrupt again
 * @cmdqs: the cmdqs
 * @q_type: type of queue to set the arm bit for
 * @q_id: the queue number
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_set_arm_bit(struct hinic_cmdqs *cmdqs,
                      enum hinic_set_arm_qtype q_type, u32 q_id)
{
        struct hinic_cmdq *cmdq = &cmdqs->cmdq[HINIC_CMDQ_SYNC];
        struct hinic_hwif *hwif = cmdqs->hwif;
        struct pci_dev *pdev = hwif->pdev;
        struct hinic_cmdq_arm_bit arm_bit;
        int err;

        arm_bit.q_type = q_type;
        arm_bit.q_id   = q_id;

        err = cmdq_set_arm_bit(cmdq, &arm_bit, sizeof(arm_bit));
        if (err) {
                dev_err(&pdev->dev, "Failed to set arm for qid %d\n", q_id);
                return err;
        }

        return 0;
}

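/**
 * clear_wqe_complete_bit - clear the HW busy bit of a completed wqe
 * @cmdq: the cmdq of the wqe
 * @wqe: the completed wqe to clear
 **/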
static void clear_wqe_complete_bit(struct hinic_cmdq *cmdq,
                                   struct hinic_cmdq_wqe *wqe)
{
        u32 header_info = be32_to_cpu(CMDQ_WQE_HEADER(wqe)->header_info);
        unsigned int bufdesc_len, wqe_size;
        struct hinic_ctrl *ctrl;

        bufdesc_len = HINIC_CMDQ_WQE_HEADER_GET(header_info, BUFDESC_LEN);
        wqe_size = cmdq_wqe_size_from_bdlen(bufdesc_len);
        if (wqe_size == WQE_LCMD_SIZE) {
                struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &wqe->wqe_lcmd;

                ctrl = &wqe_lcmd->ctrl;
        } else {
                struct hinic_cmdq_direct_wqe *direct_wqe = &wqe->direct_wqe;
                struct hinic_cmdq_wqe_scmd *wqe_scmd;

                wqe_scmd = &direct_wqe->wqe_scmd;
                ctrl = &wqe_scmd->ctrl;
        }

        /* clear HW busy bit */
        ctrl->ctrl_info = 0;

        wmb();  /* verify wqe is clear */
}

/**
 * cmdq_arm_ceq_handler - cmdq completion event handler for arm command
 * @cmdq: the cmdq of the arm command
 * @wqe: the wqe of the arm command
 *
 * Return 0 - Success, negative - Failure
 **/
static int cmdq_arm_ceq_handler(struct hinic_cmdq *cmdq,
                                struct hinic_cmdq_wqe *wqe)
{
        struct hinic_cmdq_direct_wqe *direct_wqe = &wqe->direct_wqe;
        struct hinic_cmdq_wqe_scmd *wqe_scmd;
        struct hinic_ctrl *ctrl;
        u32 ctrl_info;

        wqe_scmd = &direct_wqe->wqe_scmd;
        ctrl = &wqe_scmd->ctrl;
        ctrl_info = be32_to_cpu(ctrl->ctrl_info);

        /* HW should toggle the HW BUSY BIT */
        if (!CMDQ_WQE_COMPLETED(ctrl_info))
                return -EBUSY;

        clear_wqe_complete_bit(cmdq, wqe);

        hinic_put_wqe(cmdq->wq, WQE_SCMD_SIZE);
        return 0;
}

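/**
 * cmdq_update_errcode - report the HW error code to the waiting caller
 * @cmdq: the cmdq of the command
 * @prod_idx: producer index of the command wqe
 * @errcode: the error code reported by the HW
 **/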
static void cmdq_update_errcode(struct hinic_cmdq *cmdq, u16 prod_idx,
                                int errcode)
{
        if (cmdq->errcode[prod_idx])
                *cmdq->errcode[prod_idx] = errcode;
}

/**
 * cmdq_sync_cmd_handler - cmdq completion event handler for sync command
 * @cmdq: the cmdq of the command
 * @cons_idx: the consumer index to update the error code for
 * @errcode: the error code
 **/
static void cmdq_sync_cmd_handler(struct hinic_cmdq *cmdq, u16 cons_idx,
                                  int errcode)
{
        u16 prod_idx = cons_idx;

        spin_lock(&cmdq->cmdq_lock);
        cmdq_update_errcode(cmdq, prod_idx, errcode);

        wmb();  /* write all before update for the command request */

        if (cmdq->done[prod_idx])
                complete(cmdq->done[prod_idx]);
        spin_unlock(&cmdq->cmdq_lock);
}

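/**
 * cmdq_cmd_ceq_handler - handle the completion of a long command wqe
 * @cmdq: the cmdq of the command
 * @ci: the consumer index of the completed wqe
 * @cmdq_wqe: the completed wqe
 *
 * Return 0 - Success, -EBUSY - the wqe is not completed by the HW yet
 **/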
static int cmdq_cmd_ceq_handler(struct hinic_cmdq *cmdq, u16 ci,
                                struct hinic_cmdq_wqe *cmdq_wqe)
{
        struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &cmdq_wqe->wqe_lcmd;
        struct hinic_status *status = &wqe_lcmd->status;
        struct hinic_ctrl *ctrl = &wqe_lcmd->ctrl;
        int errcode;

        if (!CMDQ_WQE_COMPLETED(be32_to_cpu(ctrl->ctrl_info)))
                return -EBUSY;

        dma_rmb();

        errcode = CMDQ_WQE_ERRCODE_GET(be32_to_cpu(status->status_info), VAL);

        cmdq_sync_cmd_handler(cmdq, ci, errcode);

        clear_wqe_complete_bit(cmdq, cmdq_wqe);
        hinic_put_wqe(cmdq->wq, WQE_LCMD_SIZE);
        return 0;
}

/**
 * cmdq_ceq_handler - cmdq completion event handler
 * @handle: private data for the handler (cmdqs)
 * @ceqe_data: ceq element data
 **/
static void cmdq_ceq_handler(void *handle, u32 ceqe_data)
{
        enum hinic_cmdq_type cmdq_type = CMDQ_CEQE_GET(ceqe_data, TYPE);
        struct hinic_cmdqs *cmdqs = (struct hinic_cmdqs *)handle;
        struct hinic_cmdq *cmdq = &cmdqs->cmdq[cmdq_type];
        struct hinic_cmdq_header *header;
        struct hinic_hw_wqe *hw_wqe;
        int err, set_arm = 0;
        u32 saved_data;
        u16 ci;

        /* Read with the smallest wqe size first to determine the actual wqe size */
        while ((hw_wqe = hinic_read_wqe(cmdq->wq, WQE_SCMD_SIZE, &ci))) {
                if (IS_ERR(hw_wqe))
                        break;

                header = CMDQ_WQE_HEADER(&hw_wqe->cmdq_wqe);
                saved_data = be32_to_cpu(header->saved_data);

                if (HINIC_SAVED_DATA_GET(saved_data, ARM)) {
                        /* arm_bit was set until here */
                        set_arm = 0;

                        if (cmdq_arm_ceq_handler(cmdq, &hw_wqe->cmdq_wqe))
                                break;
                } else {
                        set_arm = 1;

                        hw_wqe = hinic_read_wqe(cmdq->wq, WQE_LCMD_SIZE, &ci);
                        if (IS_ERR(hw_wqe))
                                break;

                        if (cmdq_cmd_ceq_handler(cmdq, ci, &hw_wqe->cmdq_wqe))
                                break;
                }
        }

        if (set_arm) {
                struct hinic_hwif *hwif = cmdqs->hwif;
                struct pci_dev *pdev = hwif->pdev;

                err = hinic_set_arm_bit(cmdqs, HINIC_SET_ARM_CMDQ, cmdq_type);
                if (err)
                        dev_err(&pdev->dev, "Failed to set arm for CMDQ\n");
        }
}

/**
 * cmdq_init_queue_ctxt - init the queue ctxt of a cmdq
 * @cmdq_ctxt: cmdq ctxt to initialize
 * @cmdq: the cmdq
 * @cmdq_pages: the memory of the queue
 **/
static void cmdq_init_queue_ctxt(struct hinic_cmdq_ctxt *cmdq_ctxt,
                                 struct hinic_cmdq *cmdq,
                                 struct hinic_cmdq_pages *cmdq_pages)
{
        struct hinic_cmdq_ctxt_info *ctxt_info = &cmdq_ctxt->ctxt_info;
        u64 wq_first_page_paddr, cmdq_first_block_paddr, pfn;
        struct hinic_cmdqs *cmdqs = cmdq_to_cmdqs(cmdq);
        struct hinic_wq *wq = cmdq->wq;

        /* The data in the HW is in Big Endian Format */
        wq_first_page_paddr = be64_to_cpu(*wq->block_vaddr);

        pfn = CMDQ_PFN(wq_first_page_paddr, SZ_4K);

        ctxt_info->curr_wqe_page_pfn =
                HINIC_CMDQ_CTXT_PAGE_INFO_SET(pfn, CURR_WQE_PAGE_PFN)   |
                HINIC_CMDQ_CTXT_PAGE_INFO_SET(HINIC_CEQ_ID_CMDQ, EQ_ID) |
                HINIC_CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_ARM)               |
                HINIC_CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_EN)                |
                HINIC_CMDQ_CTXT_PAGE_INFO_SET(cmdq->wrapped, WRAPPED);

        if (wq->num_q_pages != 1) {
                /* block PFN - Read Modify Write */
                cmdq_first_block_paddr = cmdq_pages->page_paddr;

                pfn = CMDQ_PFN(cmdq_first_block_paddr, wq->wq_page_size);
        }

        ctxt_info->wq_block_pfn =
                HINIC_CMDQ_CTXT_BLOCK_INFO_SET(pfn, WQ_BLOCK_PFN) |
                HINIC_CMDQ_CTXT_BLOCK_INFO_SET(atomic_read(&wq->cons_idx), CI);

        cmdq_ctxt->func_idx = HINIC_HWIF_FUNC_IDX(cmdqs->hwif);
        cmdq_ctxt->ppf_idx = HINIC_HWIF_PPF_IDX(cmdqs->hwif);
        cmdq_ctxt->cmdq_type  = cmdq->cmdq_type;
}

/**
 * init_cmdq - initialize cmdq
 * @cmdq: the cmdq
 * @wq: the wq attached to the cmdq
 * @q_type: the cmdq type of the cmdq
 * @db_area: doorbell area for the cmdq
 *
 * Return 0 - Success, negative - Failure
 **/
static int init_cmdq(struct hinic_cmdq *cmdq, struct hinic_wq *wq,
                     enum hinic_cmdq_type q_type, void __iomem *db_area)
{
        int err;

        cmdq->wq = wq;
        cmdq->cmdq_type = q_type;
        cmdq->wrapped = 1;

        spin_lock_init(&cmdq->cmdq_lock);

        cmdq->done = vzalloc(array_size(sizeof(*cmdq->done), wq->q_depth));
        if (!cmdq->done)
                return -ENOMEM;

        cmdq->errcode = vzalloc(array_size(sizeof(*cmdq->errcode),
                                           wq->q_depth));
        if (!cmdq->errcode) {
                err = -ENOMEM;
                goto err_errcode;
        }

        cmdq->db_base = db_area + CMDQ_DB_OFF;
        return 0;

err_errcode:
        vfree(cmdq->done);
        return err;
}

/**
 * free_cmdq - Free cmdq
 * @cmdq: the cmdq to free
 **/
static void free_cmdq(struct hinic_cmdq *cmdq)
{
        vfree(cmdq->errcode);
        vfree(cmdq->done);
}

/**
 * init_cmdqs_ctxt - write the cmdq ctxt to HW after init all cmdq
 * @hwdev: the NIC HW device
 * @cmdqs: cmdqs to write the ctxts for
 * @db_area: doorbell areas for all the cmdqs
 *
 * Return 0 - Success, negative - Failure
 **/
static int init_cmdqs_ctxt(struct hinic_hwdev *hwdev,
                           struct hinic_cmdqs *cmdqs, void __iomem **db_area)
{
        struct hinic_hwif *hwif = hwdev->hwif;
        enum hinic_cmdq_type type, cmdq_type;
        struct hinic_cmdq_ctxt *cmdq_ctxts;
        struct pci_dev *pdev = hwif->pdev;
        struct hinic_pfhwdev *pfhwdev;
        size_t cmdq_ctxts_size;
        int err;

        cmdq_ctxts_size = HINIC_MAX_CMDQ_TYPES * sizeof(*cmdq_ctxts);
        cmdq_ctxts = devm_kzalloc(&pdev->dev, cmdq_ctxts_size, GFP_KERNEL);
        if (!cmdq_ctxts)
                return -ENOMEM;

        pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);

        cmdq_type = HINIC_CMDQ_SYNC;
        for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) {
                err = init_cmdq(&cmdqs->cmdq[cmdq_type],
                                &cmdqs->saved_wqs[cmdq_type], cmdq_type,
                                db_area[cmdq_type]);
                if (err) {
                        dev_err(&pdev->dev, "Failed to initialize cmdq\n");
                        goto err_init_cmdq;
                }

                cmdq_init_queue_ctxt(&cmdq_ctxts[cmdq_type],
                                     &cmdqs->cmdq[cmdq_type],
                                     &cmdqs->cmdq_pages);
        }

        /* Write the CMDQ ctxts */
        cmdq_type = HINIC_CMDQ_SYNC;
        for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) {
                err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
                                        HINIC_COMM_CMD_CMDQ_CTXT_SET,
                                        &cmdq_ctxts[cmdq_type],
                                        sizeof(cmdq_ctxts[cmdq_type]),
                                        NULL, NULL, HINIC_MGMT_MSG_SYNC);
                if (err) {
                        dev_err(&pdev->dev, "Failed to set CMDQ CTXT type = %d\n",
                                cmdq_type);
                        goto err_write_cmdq_ctxt;
                }
        }

        devm_kfree(&pdev->dev, cmdq_ctxts);
        return 0;

err_write_cmdq_ctxt:
        cmdq_type = HINIC_MAX_CMDQ_TYPES;

err_init_cmdq:
        for (type = HINIC_CMDQ_SYNC; type < cmdq_type; type++)
                free_cmdq(&cmdqs->cmdq[type]);

        devm_kfree(&pdev->dev, cmdq_ctxts);
        return err;
}

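/**
 * hinic_set_cmdq_depth - set the cmdq depth in the HW I/O context
 * @hwdev: the NIC HW device
 * @cmdq_depth: the cmdq depth to set
 *
 * Return 0 - Success, negative - Failure
 **/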
static int hinic_set_cmdq_depth(struct hinic_hwdev *hwdev, u16 cmdq_depth)
{
        struct hinic_cmd_hw_ioctxt hw_ioctxt = { 0 };
        struct hinic_pfhwdev *pfhwdev;

        pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);

        hw_ioctxt.func_idx = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
        hw_ioctxt.ppf_idx = HINIC_HWIF_PPF_IDX(hwdev->hwif);

        hw_ioctxt.set_cmdq_depth = HW_IOCTXT_SET_CMDQ_DEPTH_ENABLE;
        hw_ioctxt.cmdq_depth = (u8)ilog2(cmdq_depth);

        return hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
                                 HINIC_COMM_CMD_HWCTXT_SET,
                                 &hw_ioctxt, sizeof(hw_ioctxt), NULL,
                                 NULL, HINIC_MGMT_MSG_SYNC);
}

/**
 * hinic_init_cmdqs - init all cmdqs
 * @cmdqs: cmdqs to init
 * @hwif: HW interface for accessing cmdqs
 * @db_area: doorbell areas for all the cmdqs
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif,
                     void __iomem **db_area)
{
        struct hinic_func_to_io *func_to_io = cmdqs_to_func_to_io(cmdqs);
        struct pci_dev *pdev = hwif->pdev;
        struct hinic_hwdev *hwdev;
        size_t saved_wqs_size;
        u16 max_wqe_size;
        int err;

        cmdqs->hwif = hwif;
        cmdqs->cmdq_buf_pool = dma_pool_create("hinic_cmdq", &pdev->dev,
                                               HINIC_CMDQ_BUF_SIZE,
                                               HINIC_CMDQ_BUF_SIZE, 0);
        if (!cmdqs->cmdq_buf_pool)
                return -ENOMEM;

        saved_wqs_size = HINIC_MAX_CMDQ_TYPES * sizeof(struct hinic_wq);
        cmdqs->saved_wqs = devm_kzalloc(&pdev->dev, saved_wqs_size, GFP_KERNEL);
        if (!cmdqs->saved_wqs) {
                err = -ENOMEM;
                goto err_saved_wqs;
        }

        max_wqe_size = WQE_LCMD_SIZE;
        err = hinic_wqs_cmdq_alloc(&cmdqs->cmdq_pages, cmdqs->saved_wqs, hwif,
                                   HINIC_MAX_CMDQ_TYPES, CMDQ_WQEBB_SIZE,
                                   CMDQ_WQ_PAGE_SIZE, CMDQ_DEPTH, max_wqe_size);
        if (err) {
                dev_err(&pdev->dev, "Failed to allocate CMDQ wqs\n");
                goto err_cmdq_wqs;
        }

        hwdev = container_of(func_to_io, struct hinic_hwdev, func_to_io);
        err = init_cmdqs_ctxt(hwdev, cmdqs, db_area);
        if (err) {
                dev_err(&pdev->dev, "Failed to write cmdq ctxt\n");
                goto err_cmdq_ctxt;
        }

        hinic_ceq_register_cb(&func_to_io->ceqs, HINIC_CEQ_CMDQ, cmdqs,
                              cmdq_ceq_handler);

        err = hinic_set_cmdq_depth(hwdev, CMDQ_DEPTH);
        if (err) {
                dev_err(&hwif->pdev->dev, "Failed to set cmdq depth\n");
                goto err_set_cmdq_depth;
        }

        return 0;

err_set_cmdq_depth:
        hinic_ceq_unregister_cb(&func_to_io->ceqs, HINIC_CEQ_CMDQ);

err_cmdq_ctxt:
        hinic_wqs_cmdq_free(&cmdqs->cmdq_pages, cmdqs->saved_wqs,
                            HINIC_MAX_CMDQ_TYPES);

err_cmdq_wqs:
        devm_kfree(&pdev->dev, cmdqs->saved_wqs);

err_saved_wqs:
        dma_pool_destroy(cmdqs->cmdq_buf_pool);
        return err;
}

/**
 * hinic_free_cmdqs - free all cmdqs
 * @cmdqs: cmdqs to free
 **/
void hinic_free_cmdqs(struct hinic_cmdqs *cmdqs)
{
        struct hinic_func_to_io *func_to_io = cmdqs_to_func_to_io(cmdqs);
        struct hinic_hwif *hwif = cmdqs->hwif;
        struct pci_dev *pdev = hwif->pdev;
        enum hinic_cmdq_type cmdq_type;

        hinic_ceq_unregister_cb(&func_to_io->ceqs, HINIC_CEQ_CMDQ);

        cmdq_type = HINIC_CMDQ_SYNC;
        for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++)
                free_cmdq(&cmdqs->cmdq[cmdq_type]);

        hinic_wqs_cmdq_free(&cmdqs->cmdq_pages, cmdqs->saved_wqs,
                            HINIC_MAX_CMDQ_TYPES);

        devm_kfree(&pdev->dev, cmdqs->saved_wqs);

        dma_pool_destroy(cmdqs->cmdq_buf_pool);
}