linux/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Huawei HiNIC PCI Express Linux driver
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/spinlock.h>
#include <linux/sizes.h>
#include <linux/atomic.h>
#include <linux/log2.h>
#include <linux/io.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <asm/byteorder.h>
#include <asm/barrier.h>

#include "hinic_common.h"
#include "hinic_hw_if.h"
#include "hinic_hw_eqs.h"
#include "hinic_hw_mgmt.h"
#include "hinic_hw_wqe.h"
#include "hinic_hw_wq.h"
#include "hinic_hw_cmdq.h"
#include "hinic_hw_io.h"
#include "hinic_hw_dev.h"

#define CMDQ_CEQE_TYPE_SHIFT                    0

#define CMDQ_CEQE_TYPE_MASK                     0x7

#define CMDQ_CEQE_GET(val, member)              \
                        (((val) >> CMDQ_CEQE_##member##_SHIFT) \
                         & CMDQ_CEQE_##member##_MASK)

#define CMDQ_WQE_ERRCODE_VAL_SHIFT              20

#define CMDQ_WQE_ERRCODE_VAL_MASK               0xF

#define CMDQ_WQE_ERRCODE_GET(val, member)       \
                        (((val) >> CMDQ_WQE_ERRCODE_##member##_SHIFT) \
                         & CMDQ_WQE_ERRCODE_##member##_MASK)

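/*
 * Doorbell layout helpers: each command queue owns a doorbell page and the
 * low 8 bits of the producer index select an 8-byte slot within it, so a
 * doorbell write lands at db_base + (pi & 0xff) * 8.
 */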
#define CMDQ_DB_PI_OFF(pi)              (((u16)LOWER_8_BITS(pi)) << 3)

#define CMDQ_DB_ADDR(db_base, pi)       ((db_base) + CMDQ_DB_PI_OFF(pi))

#define CMDQ_WQE_HEADER(wqe)            ((struct hinic_cmdq_header *)(wqe))

#define CMDQ_WQE_COMPLETED(ctrl_info)   \
                        HINIC_CMDQ_CTRL_GET(ctrl_info, HW_BUSY_BIT)

#define FIRST_DATA_TO_WRITE_LAST        sizeof(u64)

#define CMDQ_DB_OFF                     SZ_2K

#define CMDQ_WQEBB_SIZE                 64
#define CMDQ_WQE_SIZE                   64
#define CMDQ_DEPTH                      SZ_4K

#define CMDQ_WQ_PAGE_SIZE               SZ_256K

#define WQE_LCMD_SIZE                   64
#define WQE_SCMD_SIZE                   64

#define COMPLETE_LEN                    3

#define CMDQ_TIMEOUT                    1000

#define CMDQ_PFN(addr, page_size)       ((addr) >> (ilog2(page_size)))

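/*
 * cmdq_to_cmdqs() walks back from one cmdq to its owning hinic_cmdqs: the
 * cmdq_type of a queue equals its index in the cmdq[] array, so subtracting
 * it from the queue pointer yields cmdq[0], from which container_of() can
 * recover the parent structure.
 */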
#define cmdq_to_cmdqs(cmdq)     container_of((cmdq) - (cmdq)->cmdq_type, \
                                             struct hinic_cmdqs, cmdq[0])

#define cmdqs_to_func_to_io(cmdqs)      container_of(cmdqs, \
                                                     struct hinic_func_to_io, \
                                                     cmdqs)

enum cmdq_wqe_type {
        WQE_LCMD_TYPE = 0,
        WQE_SCMD_TYPE = 1,
};

enum completion_format {
        COMPLETE_DIRECT = 0,
        COMPLETE_SGE    = 1,
};

enum data_format {
        DATA_SGE        = 0,
        DATA_DIRECT     = 1,
};

enum bufdesc_len {
        BUFDESC_LCMD_LEN = 2,   /* 16 bytes - 2(8 byte unit) */
        BUFDESC_SCMD_LEN = 3,   /* 24 bytes - 3(8 byte unit) */
};

enum ctrl_sect_len {
        CTRL_SECT_LEN        = 1, /* 4 bytes (ctrl) - 1(8 byte unit) */
        CTRL_DIRECT_SECT_LEN = 2, /* 12 bytes (ctrl + rsvd) - 2(8 byte unit) */
};

enum cmdq_scmd_type {
        CMDQ_SET_ARM_CMD = 2,
};

enum cmdq_cmd_type {
        CMDQ_CMD_SYNC_DIRECT_RESP = 0,
        CMDQ_CMD_SYNC_SGE_RESP    = 1,
};

enum completion_request {
        NO_CEQ  = 0,
        CEQ_SET = 1,
};

/**
 * hinic_alloc_cmdq_buf - alloc buffer for sending command
 * @cmdqs: the cmdqs
 * @cmdq_buf: the buffer returned in this struct
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_alloc_cmdq_buf(struct hinic_cmdqs *cmdqs,
                         struct hinic_cmdq_buf *cmdq_buf)
{
        struct hinic_hwif *hwif = cmdqs->hwif;
        struct pci_dev *pdev = hwif->pdev;

        cmdq_buf->buf = dma_pool_alloc(cmdqs->cmdq_buf_pool, GFP_KERNEL,
                                       &cmdq_buf->dma_addr);
        if (!cmdq_buf->buf) {
                dev_err(&pdev->dev, "Failed to allocate cmd from the pool\n");
                return -ENOMEM;
        }

        return 0;
}

/**
 * hinic_free_cmdq_buf - free buffer
 * @cmdqs: the cmdqs
 * @cmdq_buf: the buffer to free that is in this struct
 **/
void hinic_free_cmdq_buf(struct hinic_cmdqs *cmdqs,
                         struct hinic_cmdq_buf *cmdq_buf)
{
        dma_pool_free(cmdqs->cmdq_buf_pool, cmdq_buf->buf, cmdq_buf->dma_addr);
}

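/*
 * The WQE header only records the buffer-descriptor length; map it back to
 * the full WQE size so completion handling knows whether it is looking at a
 * long-command or short-command WQE.
 */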
static unsigned int cmdq_wqe_size_from_bdlen(enum bufdesc_len len)
{
        unsigned int wqe_size = 0;

        switch (len) {
        case BUFDESC_LCMD_LEN:
                wqe_size = WQE_LCMD_SIZE;
                break;
        case BUFDESC_SCMD_LEN:
                wqe_size = WQE_SCMD_SIZE;
                break;
        }

        return wqe_size;
}

static void cmdq_set_sge_completion(struct hinic_cmdq_completion *completion,
                                    struct hinic_cmdq_buf *buf_out)
{
        struct hinic_sge_resp *sge_resp = &completion->sge_resp;

        hinic_set_sge(&sge_resp->sge, buf_out->dma_addr, buf_out->size);
}

static void cmdq_prepare_wqe_ctrl(struct hinic_cmdq_wqe *wqe, int wrapped,
                                  enum hinic_cmd_ack_type ack_type,
                                  enum hinic_mod_type mod, u8 cmd, u16 prod_idx,
                                  enum completion_format complete_format,
                                  enum data_format data_format,
                                  enum bufdesc_len buf_len)
{
        struct hinic_cmdq_wqe_lcmd *wqe_lcmd;
        struct hinic_cmdq_wqe_scmd *wqe_scmd;
        enum ctrl_sect_len ctrl_len;
        struct hinic_ctrl *ctrl;
        u32 saved_data;

        if (data_format == DATA_SGE) {
                wqe_lcmd = &wqe->wqe_lcmd;

                wqe_lcmd->status.status_info = 0;
                ctrl = &wqe_lcmd->ctrl;
                ctrl_len = CTRL_SECT_LEN;
        } else {
                wqe_scmd = &wqe->direct_wqe.wqe_scmd;

                wqe_scmd->status.status_info = 0;
                ctrl = &wqe_scmd->ctrl;
                ctrl_len = CTRL_DIRECT_SECT_LEN;
        }

        ctrl->ctrl_info = HINIC_CMDQ_CTRL_SET(prod_idx, PI)             |
                          HINIC_CMDQ_CTRL_SET(cmd, CMD)                 |
                          HINIC_CMDQ_CTRL_SET(mod, MOD)                 |
                          HINIC_CMDQ_CTRL_SET(ack_type, ACK_TYPE);

        CMDQ_WQE_HEADER(wqe)->header_info =
                HINIC_CMDQ_WQE_HEADER_SET(buf_len, BUFDESC_LEN)            |
                HINIC_CMDQ_WQE_HEADER_SET(complete_format, COMPLETE_FMT)   |
                HINIC_CMDQ_WQE_HEADER_SET(data_format, DATA_FMT)           |
                HINIC_CMDQ_WQE_HEADER_SET(CEQ_SET, COMPLETE_REQ)           |
                HINIC_CMDQ_WQE_HEADER_SET(COMPLETE_LEN, COMPLETE_SECT_LEN) |
                HINIC_CMDQ_WQE_HEADER_SET(ctrl_len, CTRL_LEN)              |
                HINIC_CMDQ_WQE_HEADER_SET(wrapped, TOGGLED_WRAPPED);

        saved_data = CMDQ_WQE_HEADER(wqe)->saved_data;
        saved_data = HINIC_SAVED_DATA_CLEAR(saved_data, ARM);

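        /*
         * Tag ARM WQEs in saved_data so the CEQ handler can tell an arm
         * command apart from a regular synchronous command when it walks
         * the completed WQEs.
         */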
        if (cmd == CMDQ_SET_ARM_CMD && mod == HINIC_MOD_COMM)
                CMDQ_WQE_HEADER(wqe)->saved_data |=
                                                HINIC_SAVED_DATA_SET(1, ARM);
        else
                CMDQ_WQE_HEADER(wqe)->saved_data = saved_data;
}

static void cmdq_set_lcmd_bufdesc(struct hinic_cmdq_wqe_lcmd *wqe_lcmd,
                                  struct hinic_cmdq_buf *buf_in)
{
        hinic_set_sge(&wqe_lcmd->buf_desc.sge, buf_in->dma_addr, buf_in->size);
}

static void cmdq_set_direct_wqe_data(struct hinic_cmdq_direct_wqe *wqe,
                                     void *buf_in, u32 in_size)
{
        struct hinic_cmdq_wqe_scmd *wqe_scmd = &wqe->wqe_scmd;

        wqe_scmd->buf_desc.buf_len = in_size;
        memcpy(wqe_scmd->buf_desc.data, buf_in, in_size);
}

static void cmdq_set_lcmd_wqe(struct hinic_cmdq_wqe *wqe,
                              enum cmdq_cmd_type cmd_type,
                              struct hinic_cmdq_buf *buf_in,
                              struct hinic_cmdq_buf *buf_out, int wrapped,
                              enum hinic_cmd_ack_type ack_type,
                              enum hinic_mod_type mod, u8 cmd, u16 prod_idx)
{
        struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &wqe->wqe_lcmd;
        enum completion_format complete_format;

        switch (cmd_type) {
        case CMDQ_CMD_SYNC_SGE_RESP:
                complete_format = COMPLETE_SGE;
                cmdq_set_sge_completion(&wqe_lcmd->completion, buf_out);
                break;
        case CMDQ_CMD_SYNC_DIRECT_RESP:
                complete_format = COMPLETE_DIRECT;
                wqe_lcmd->completion.direct_resp = 0;
                break;
        }

        cmdq_prepare_wqe_ctrl(wqe, wrapped, ack_type, mod, cmd,
                              prod_idx, complete_format, DATA_SGE,
                              BUFDESC_LCMD_LEN);

        cmdq_set_lcmd_bufdesc(wqe_lcmd, buf_in);
}

static void cmdq_set_direct_wqe(struct hinic_cmdq_wqe *wqe,
                                enum cmdq_cmd_type cmd_type,
                                void *buf_in, u16 in_size,
                                struct hinic_cmdq_buf *buf_out, int wrapped,
                                enum hinic_cmd_ack_type ack_type,
                                enum hinic_mod_type mod, u8 cmd, u16 prod_idx)
{
        struct hinic_cmdq_direct_wqe *direct_wqe = &wqe->direct_wqe;
        enum completion_format complete_format;
        struct hinic_cmdq_wqe_scmd *wqe_scmd;

        wqe_scmd = &direct_wqe->wqe_scmd;

        switch (cmd_type) {
        case CMDQ_CMD_SYNC_SGE_RESP:
                complete_format = COMPLETE_SGE;
                cmdq_set_sge_completion(&wqe_scmd->completion, buf_out);
                break;
        case CMDQ_CMD_SYNC_DIRECT_RESP:
                complete_format = COMPLETE_DIRECT;
                wqe_scmd->completion.direct_resp = 0;
                break;
        }

        cmdq_prepare_wqe_ctrl(wqe, wrapped, ack_type, mod, cmd, prod_idx,
                              complete_format, DATA_DIRECT, BUFDESC_SCMD_LEN);

        cmdq_set_direct_wqe_data(direct_wqe, buf_in, in_size);
}

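/*
 * Copy a fully built WQE into the ring. Everything after the first 8 bytes
 * is written first; the 8-byte header, which carries the wrapped
 * (ownership toggle) bit, goes in last behind a write barrier so the
 * hardware never sees a half-written WQE as valid.
 */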
static void cmdq_wqe_fill(void *dst, void *src)
{
        memcpy(dst + FIRST_DATA_TO_WRITE_LAST, src + FIRST_DATA_TO_WRITE_LAST,
               CMDQ_WQE_SIZE - FIRST_DATA_TO_WRITE_LAST);

        wmb();          /* The first 8 bytes should be written last */

        *(u64 *)dst = *(u64 *)src;
}

static void cmdq_fill_db(u32 *db_info,
                         enum hinic_cmdq_type cmdq_type, u16 prod_idx)
{
        *db_info = HINIC_CMDQ_DB_INFO_SET(UPPER_8_BITS(prod_idx), HI_PROD_IDX) |
                   HINIC_CMDQ_DB_INFO_SET(HINIC_CTRL_PATH, PATH)               |
                   HINIC_CMDQ_DB_INFO_SET(cmdq_type, CMDQ_TYPE)                |
                   HINIC_CMDQ_DB_INFO_SET(HINIC_DB_CMDQ_TYPE, DB_TYPE);
}

static void cmdq_set_db(struct hinic_cmdq *cmdq,
                        enum hinic_cmdq_type cmdq_type, u16 prod_idx)
{
        u32 db_info;

        cmdq_fill_db(&db_info, cmdq_type, prod_idx);

        /* The data that is written to HW should be in Big Endian Format */
        db_info = cpu_to_be32(db_info);

        wmb();  /* write all before the doorbell */

        writel(db_info, CMDQ_DB_ADDR(cmdq->db_base, prod_idx));
}

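/*
 * Synchronous command flow: reserve a WQE slot under cmdq_lock, build the
 * WQE in a local copy, byte-swap it, write it into the ring (header last),
 * ring the doorbell and then wait for the CEQ handler to complete us. The
 * per-producer-index done/errcode slots are how the handler reports back.
 */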
static int cmdq_sync_cmd_direct_resp(struct hinic_cmdq *cmdq,
                                     enum hinic_mod_type mod, u8 cmd,
                                     struct hinic_cmdq_buf *buf_in,
                                     u64 *resp)
{
        struct hinic_cmdq_wqe *curr_cmdq_wqe, cmdq_wqe;
        u16 curr_prod_idx, next_prod_idx;
        int errcode, wrapped, num_wqebbs;
        struct hinic_wq *wq = cmdq->wq;
        struct hinic_hw_wqe *hw_wqe;
        struct completion done;

        /* Keep doorbell index correct. bh - for tasklet(ceq). */
        spin_lock_bh(&cmdq->cmdq_lock);

        /* WQE_SIZE = WQEBB_SIZE, we will get the wq element and not shadow */
        hw_wqe = hinic_get_wqe(wq, WQE_LCMD_SIZE, &curr_prod_idx);
        if (IS_ERR(hw_wqe)) {
                spin_unlock_bh(&cmdq->cmdq_lock);
                return -EBUSY;
        }

        curr_cmdq_wqe = &hw_wqe->cmdq_wqe;

        wrapped = cmdq->wrapped;

        num_wqebbs = ALIGN(WQE_LCMD_SIZE, wq->wqebb_size) / wq->wqebb_size;
        next_prod_idx = curr_prod_idx + num_wqebbs;
        if (next_prod_idx >= wq->q_depth) {
                cmdq->wrapped = !cmdq->wrapped;
                next_prod_idx -= wq->q_depth;
        }

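        /*
         * Publish the on-stack error-code and completion slots under the
         * producer index; cmdq_sync_cmd_handler() fills them in from the
         * CEQ context when the command completes.
         */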
        cmdq->errcode[curr_prod_idx] = &errcode;

        init_completion(&done);
        cmdq->done[curr_prod_idx] = &done;

        cmdq_set_lcmd_wqe(&cmdq_wqe, CMDQ_CMD_SYNC_DIRECT_RESP, buf_in, NULL,
                          wrapped, HINIC_CMD_ACK_TYPE_CMDQ, mod, cmd,
                          curr_prod_idx);

        /* The data that is written to HW should be in Big Endian Format */
        hinic_cpu_to_be32(&cmdq_wqe, WQE_LCMD_SIZE);

        /* CMDQ WQE is not shadow, therefore wqe will be written to wq */
        cmdq_wqe_fill(curr_cmdq_wqe, &cmdq_wqe);

        cmdq_set_db(cmdq, HINIC_CMDQ_SYNC, next_prod_idx);

        spin_unlock_bh(&cmdq->cmdq_lock);

        if (!wait_for_completion_timeout(&done,
                                         msecs_to_jiffies(CMDQ_TIMEOUT))) {
                spin_lock_bh(&cmdq->cmdq_lock);

                if (cmdq->errcode[curr_prod_idx] == &errcode)
                        cmdq->errcode[curr_prod_idx] = NULL;

                if (cmdq->done[curr_prod_idx] == &done)
                        cmdq->done[curr_prod_idx] = NULL;

                spin_unlock_bh(&cmdq->cmdq_lock);

                hinic_dump_ceq_info(cmdq->hwdev);
                return -ETIMEDOUT;
        }

        smp_rmb();      /* read error code after completion */

        if (resp) {
                struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &curr_cmdq_wqe->wqe_lcmd;

                *resp = cpu_to_be64(wqe_lcmd->completion.direct_resp);
        }

        if (errcode != 0)
                return -EFAULT;

        return 0;
}

 421
 422static int cmdq_set_arm_bit(struct hinic_cmdq *cmdq, void *buf_in,
 423                            u16 in_size)
 424{
 425        struct hinic_cmdq_wqe *curr_cmdq_wqe, cmdq_wqe;
 426        u16 curr_prod_idx, next_prod_idx;
 427        struct hinic_wq *wq = cmdq->wq;
 428        struct hinic_hw_wqe *hw_wqe;
 429        int wrapped, num_wqebbs;
 430
 431        /* Keep doorbell index correct */
 432        spin_lock(&cmdq->cmdq_lock);
 433
 434        /* WQE_SIZE = WQEBB_SIZE, we will get the wq element and not shadow*/
 435        hw_wqe = hinic_get_wqe(wq, WQE_SCMD_SIZE, &curr_prod_idx);
 436        if (IS_ERR(hw_wqe)) {
 437                spin_unlock(&cmdq->cmdq_lock);
 438                return -EBUSY;
 439        }
 440
 441        curr_cmdq_wqe = &hw_wqe->cmdq_wqe;
 442
 443        wrapped = cmdq->wrapped;
 444
 445        num_wqebbs = ALIGN(WQE_SCMD_SIZE, wq->wqebb_size) / wq->wqebb_size;
 446        next_prod_idx = curr_prod_idx + num_wqebbs;
 447        if (next_prod_idx >= wq->q_depth) {
 448                cmdq->wrapped = !cmdq->wrapped;
 449                next_prod_idx -= wq->q_depth;
 450        }
 451
 452        cmdq_set_direct_wqe(&cmdq_wqe, CMDQ_CMD_SYNC_DIRECT_RESP, buf_in,
 453                            in_size, NULL, wrapped, HINIC_CMD_ACK_TYPE_CMDQ,
 454                            HINIC_MOD_COMM, CMDQ_SET_ARM_CMD, curr_prod_idx);
 455
 456        /* The data that is written to HW should be in Big Endian Format */
 457        hinic_cpu_to_be32(&cmdq_wqe, WQE_SCMD_SIZE);
 458
 459        /* cmdq wqe is not shadow, therefore wqe will be written to wq */
 460        cmdq_wqe_fill(curr_cmdq_wqe, &cmdq_wqe);
 461
 462        cmdq_set_db(cmdq, HINIC_CMDQ_SYNC, next_prod_idx);
 463
 464        spin_unlock(&cmdq->cmdq_lock);
 465        return 0;
 466}
 467
 468static int cmdq_params_valid(struct hinic_cmdq_buf *buf_in)
 469{
 470        if (buf_in->size > HINIC_CMDQ_MAX_DATA_SIZE)
 471                return -EINVAL;
 472
 473        return 0;
 474}
 475
 476/**
 477 * hinic_cmdq_direct_resp - send command with direct data as resp
 478 * @cmdqs: the cmdqs
 479 * @mod: module on the card that will handle the command
 480 * @cmd: the command
 481 * @buf_in: the buffer for the command
 482 * @resp: the response to return
 483 *
 484 * Return 0 - Success, negative - Failure
 485 **/
 486int hinic_cmdq_direct_resp(struct hinic_cmdqs *cmdqs,
 487                           enum hinic_mod_type mod, u8 cmd,
 488                           struct hinic_cmdq_buf *buf_in, u64 *resp)
 489{
 490        struct hinic_hwif *hwif = cmdqs->hwif;
 491        struct pci_dev *pdev = hwif->pdev;
 492        int err;
 493
 494        err = cmdq_params_valid(buf_in);
 495        if (err) {
 496                dev_err(&pdev->dev, "Invalid CMDQ parameters\n");
 497                return err;
 498        }
 499
 500        return cmdq_sync_cmd_direct_resp(&cmdqs->cmdq[HINIC_CMDQ_SYNC],
 501                                         mod, cmd, buf_in, resp);
 502}
 503
 504/**
 505 * hinic_set_arm_bit - set arm bit for enable interrupt again
 506 * @cmdqs: the cmdqs
 507 * @q_type: type of queue to set the arm bit for
 508 * @q_id: the queue number
 509 *
 510 * Return 0 - Success, negative - Failure
 511 **/
 512int hinic_set_arm_bit(struct hinic_cmdqs *cmdqs,
 513                      enum hinic_set_arm_qtype q_type, u32 q_id)
 514{
 515        struct hinic_cmdq *cmdq = &cmdqs->cmdq[HINIC_CMDQ_SYNC];
 516        struct hinic_hwif *hwif = cmdqs->hwif;
 517        struct pci_dev *pdev = hwif->pdev;
 518        struct hinic_cmdq_arm_bit arm_bit;
 519        int err;
 520
 521        arm_bit.q_type = q_type;
 522        arm_bit.q_id   = q_id;
 523
 524        err = cmdq_set_arm_bit(cmdq, &arm_bit, sizeof(arm_bit));
 525        if (err) {
 526                dev_err(&pdev->dev, "Failed to set arm for qid %d\n", q_id);
 527                return err;
 528        }
 529
 530        return 0;
 531}
 532
 533static void clear_wqe_complete_bit(struct hinic_cmdq *cmdq,
 534                                   struct hinic_cmdq_wqe *wqe)
 535{
 536        u32 header_info = be32_to_cpu(CMDQ_WQE_HEADER(wqe)->header_info);
 537        unsigned int bufdesc_len, wqe_size;
 538        struct hinic_ctrl *ctrl;
 539
 540        bufdesc_len = HINIC_CMDQ_WQE_HEADER_GET(header_info, BUFDESC_LEN);
 541        wqe_size = cmdq_wqe_size_from_bdlen(bufdesc_len);
 542        if (wqe_size == WQE_LCMD_SIZE) {
 543                struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &wqe->wqe_lcmd;
 544
 545                ctrl = &wqe_lcmd->ctrl;
 546        } else {
 547                struct hinic_cmdq_direct_wqe *direct_wqe = &wqe->direct_wqe;
 548                struct hinic_cmdq_wqe_scmd *wqe_scmd;
 549
 550                wqe_scmd = &direct_wqe->wqe_scmd;
 551                ctrl = &wqe_scmd->ctrl;
 552        }
 553
 554        /* clear HW busy bit */
 555        ctrl->ctrl_info = 0;
 556
 557        wmb();  /* verify wqe is clear */
 558}
 559
 560/**
 561 * cmdq_arm_ceq_handler - cmdq completion event handler for arm command
 562 * @cmdq: the cmdq of the arm command
 563 * @wqe: the wqe of the arm command
 564 *
 565 * Return 0 - Success, negative - Failure
 566 **/
 567static int cmdq_arm_ceq_handler(struct hinic_cmdq *cmdq,
 568                                struct hinic_cmdq_wqe *wqe)
 569{
 570        struct hinic_cmdq_direct_wqe *direct_wqe = &wqe->direct_wqe;
 571        struct hinic_cmdq_wqe_scmd *wqe_scmd;
 572        struct hinic_ctrl *ctrl;
 573        u32 ctrl_info;
 574
 575        wqe_scmd = &direct_wqe->wqe_scmd;
 576        ctrl = &wqe_scmd->ctrl;
 577        ctrl_info = be32_to_cpu(ctrl->ctrl_info);
 578
 579        /* HW should toggle the HW BUSY BIT */
 580        if (!CMDQ_WQE_COMPLETED(ctrl_info))
 581                return -EBUSY;
 582
 583        clear_wqe_complete_bit(cmdq, wqe);
 584
 585        hinic_put_wqe(cmdq->wq, WQE_SCMD_SIZE);
 586        return 0;
 587}
 588
 589static void cmdq_update_errcode(struct hinic_cmdq *cmdq, u16 prod_idx,
 590                                int errcode)
 591{
 592        if (cmdq->errcode[prod_idx])
 593                *cmdq->errcode[prod_idx] = errcode;
 594}

/**
 * cmdq_sync_cmd_handler - cmdq completion event handler for sync command
 * @cmdq: the cmdq of the command
 * @cons_idx: the consumer index to update the error code for
 * @errcode: the error code
 **/
static void cmdq_sync_cmd_handler(struct hinic_cmdq *cmdq, u16 cons_idx,
                                  int errcode)
{
        u16 prod_idx = cons_idx;

        spin_lock(&cmdq->cmdq_lock);
        cmdq_update_errcode(cmdq, prod_idx, errcode);

        wmb();  /* write all before update for the command request */

        if (cmdq->done[prod_idx])
                complete(cmdq->done[prod_idx]);
        spin_unlock(&cmdq->cmdq_lock);
}

static int cmdq_cmd_ceq_handler(struct hinic_cmdq *cmdq, u16 ci,
                                struct hinic_cmdq_wqe *cmdq_wqe)
{
        struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &cmdq_wqe->wqe_lcmd;
        struct hinic_status *status = &wqe_lcmd->status;
        struct hinic_ctrl *ctrl = &wqe_lcmd->ctrl;
        int errcode;

        if (!CMDQ_WQE_COMPLETED(be32_to_cpu(ctrl->ctrl_info)))
                return -EBUSY;

        dma_rmb();

        errcode = CMDQ_WQE_ERRCODE_GET(be32_to_cpu(status->status_info), VAL);

        cmdq_sync_cmd_handler(cmdq, ci, errcode);

        clear_wqe_complete_bit(cmdq, cmdq_wqe);
        hinic_put_wqe(cmdq->wq, WQE_LCMD_SIZE);
        return 0;
}

/**
 * cmdq_ceq_handler - cmdq completion event handler
 * @handle: private data for the handler(cmdqs)
 * @ceqe_data: ceq element data
 **/
static void cmdq_ceq_handler(void *handle, u32 ceqe_data)
{
        enum hinic_cmdq_type cmdq_type = CMDQ_CEQE_GET(ceqe_data, TYPE);
        struct hinic_cmdqs *cmdqs = (struct hinic_cmdqs *)handle;
        struct hinic_cmdq *cmdq = &cmdqs->cmdq[cmdq_type];
        struct hinic_cmdq_header *header;
        struct hinic_hw_wqe *hw_wqe;
        int err, set_arm = 0;
        u32 saved_data;
        u16 ci;

        /* Read with the smallest wqe size first; the header tells us the
         * actual wqe type and size.
         */
        while ((hw_wqe = hinic_read_wqe(cmdq->wq, WQE_SCMD_SIZE, &ci))) {
                if (IS_ERR(hw_wqe))
                        break;

                header = CMDQ_WQE_HEADER(&hw_wqe->cmdq_wqe);
                saved_data = be32_to_cpu(header->saved_data);

                if (HINIC_SAVED_DATA_GET(saved_data, ARM)) {
                        /* arm_bit was set until here */
                        set_arm = 0;

                        if (cmdq_arm_ceq_handler(cmdq, &hw_wqe->cmdq_wqe))
                                break;
                } else {
                        set_arm = 1;

                        hw_wqe = hinic_read_wqe(cmdq->wq, WQE_LCMD_SIZE, &ci);
                        if (IS_ERR(hw_wqe))
                                break;

                        if (cmdq_cmd_ceq_handler(cmdq, ci, &hw_wqe->cmdq_wqe))
                                break;
                }
        }

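        /*
         * The last completed WQE was a regular command, not an arm command;
         * post an arm command so further completions keep raising events
         * (see hinic_set_arm_bit()).
         */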
        if (set_arm) {
                struct hinic_hwif *hwif = cmdqs->hwif;
                struct pci_dev *pdev = hwif->pdev;

                err = hinic_set_arm_bit(cmdqs, HINIC_SET_ARM_CMDQ, cmdq_type);
                if (err)
                        dev_err(&pdev->dev, "Failed to set arm for CMDQ\n");
        }
}

/**
 * cmdq_init_queue_ctxt - init the queue ctxt of a cmdq
 * @cmdq_ctxt: cmdq ctxt to initialize
 * @cmdq: the cmdq
 * @cmdq_pages: the memory of the queue
 **/
static void cmdq_init_queue_ctxt(struct hinic_cmdq_ctxt *cmdq_ctxt,
                                 struct hinic_cmdq *cmdq,
                                 struct hinic_cmdq_pages *cmdq_pages)
{
        struct hinic_cmdq_ctxt_info *ctxt_info = &cmdq_ctxt->ctxt_info;
        u64 wq_first_page_paddr, cmdq_first_block_paddr, pfn;
        struct hinic_cmdqs *cmdqs = cmdq_to_cmdqs(cmdq);
        struct hinic_wq *wq = cmdq->wq;

        /* The data in the HW is in Big Endian Format */
        wq_first_page_paddr = be64_to_cpu(*wq->block_vaddr);

        pfn = CMDQ_PFN(wq_first_page_paddr, SZ_4K);

        ctxt_info->curr_wqe_page_pfn =
                HINIC_CMDQ_CTXT_PAGE_INFO_SET(pfn, CURR_WQE_PAGE_PFN)   |
                HINIC_CMDQ_CTXT_PAGE_INFO_SET(HINIC_CEQ_ID_CMDQ, EQ_ID) |
                HINIC_CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_ARM)               |
                HINIC_CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_EN)                |
                HINIC_CMDQ_CTXT_PAGE_INFO_SET(cmdq->wrapped, WRAPPED);

        if (wq->num_q_pages != 1) {
                /* block PFN - Read Modify Write */
                cmdq_first_block_paddr = cmdq_pages->page_paddr;

                pfn = CMDQ_PFN(cmdq_first_block_paddr, wq->wq_page_size);
        }

        ctxt_info->wq_block_pfn =
                HINIC_CMDQ_CTXT_BLOCK_INFO_SET(pfn, WQ_BLOCK_PFN) |
                HINIC_CMDQ_CTXT_BLOCK_INFO_SET(atomic_read(&wq->cons_idx), CI);

        cmdq_ctxt->func_idx = HINIC_HWIF_FUNC_IDX(cmdqs->hwif);
        cmdq_ctxt->ppf_idx = HINIC_HWIF_PPF_IDX(cmdqs->hwif);
        cmdq_ctxt->cmdq_type = cmdq->cmdq_type;
}

/**
 * init_cmdq - initialize cmdq
 * @cmdq: the cmdq
 * @wq: the wq attached to the cmdq
 * @q_type: the cmdq type of the cmdq
 * @db_area: doorbell area for the cmdq
 *
 * Return 0 - Success, negative - Failure
 **/
static int init_cmdq(struct hinic_cmdq *cmdq, struct hinic_wq *wq,
                     enum hinic_cmdq_type q_type, void __iomem *db_area)
{
        int err;

        cmdq->wq = wq;
        cmdq->cmdq_type = q_type;
        cmdq->wrapped = 1;

        spin_lock_init(&cmdq->cmdq_lock);

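        /*
         * Per-WQE completion and error-code slots, one per producer index;
         * submitters park pointers here and the CEQ handler fills them in.
         */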
        cmdq->done = vzalloc(array_size(sizeof(*cmdq->done), wq->q_depth));
        if (!cmdq->done)
                return -ENOMEM;

        cmdq->errcode = vzalloc(array_size(sizeof(*cmdq->errcode),
                                           wq->q_depth));
        if (!cmdq->errcode) {
                err = -ENOMEM;
                goto err_errcode;
        }

        cmdq->db_base = db_area + CMDQ_DB_OFF;
        return 0;

err_errcode:
        vfree(cmdq->done);
        return err;
}

/**
 * free_cmdq - Free cmdq
 * @cmdq: the cmdq to free
 **/
static void free_cmdq(struct hinic_cmdq *cmdq)
{
        vfree(cmdq->errcode);
        vfree(cmdq->done);
}

/**
 * init_cmdqs_ctxt - write the cmdq ctxts to HW after all cmdqs are initialized
 * @hwdev: the NIC HW device
 * @cmdqs: cmdqs to write the ctxts for
 * @db_area: db_area for all the cmdqs
 *
 * Return 0 - Success, negative - Failure
 **/
static int init_cmdqs_ctxt(struct hinic_hwdev *hwdev,
                           struct hinic_cmdqs *cmdqs, void __iomem **db_area)
{
        struct hinic_hwif *hwif = hwdev->hwif;
        enum hinic_cmdq_type type, cmdq_type;
        struct hinic_cmdq_ctxt *cmdq_ctxts;
        struct pci_dev *pdev = hwif->pdev;
        struct hinic_pfhwdev *pfhwdev;
        size_t cmdq_ctxts_size;
        int err;

        cmdq_ctxts_size = HINIC_MAX_CMDQ_TYPES * sizeof(*cmdq_ctxts);
        cmdq_ctxts = devm_kzalloc(&pdev->dev, cmdq_ctxts_size, GFP_KERNEL);
        if (!cmdq_ctxts)
                return -ENOMEM;

        pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);

        cmdq_type = HINIC_CMDQ_SYNC;
        for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) {
                cmdqs->cmdq[cmdq_type].hwdev = hwdev;
                err = init_cmdq(&cmdqs->cmdq[cmdq_type],
                                &cmdqs->saved_wqs[cmdq_type], cmdq_type,
                                db_area[cmdq_type]);
                if (err) {
                        dev_err(&pdev->dev, "Failed to initialize cmdq\n");
                        goto err_init_cmdq;
                }

                cmdq_init_queue_ctxt(&cmdq_ctxts[cmdq_type],
                                     &cmdqs->cmdq[cmdq_type],
                                     &cmdqs->cmdq_pages);
        }

        /* Write the CMDQ ctxts */
        cmdq_type = HINIC_CMDQ_SYNC;
        for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) {
                err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
                                        HINIC_COMM_CMD_CMDQ_CTXT_SET,
                                        &cmdq_ctxts[cmdq_type],
                                        sizeof(cmdq_ctxts[cmdq_type]),
                                        NULL, NULL, HINIC_MGMT_MSG_SYNC);
                if (err) {
                        dev_err(&pdev->dev, "Failed to set CMDQ CTXT type = %d\n",
                                cmdq_type);
                        goto err_write_cmdq_ctxt;
                }
        }

        devm_kfree(&pdev->dev, cmdq_ctxts);
        return 0;

err_write_cmdq_ctxt:
        cmdq_type = HINIC_MAX_CMDQ_TYPES;

err_init_cmdq:
        for (type = HINIC_CMDQ_SYNC; type < cmdq_type; type++)
                free_cmdq(&cmdqs->cmdq[type]);

        devm_kfree(&pdev->dev, cmdq_ctxts);
        return err;
}

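/*
 * Tell the management firmware how deep the command queues are; the depth
 * is carried as a log2 value in the HW ioctxt message.
 */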
static int hinic_set_cmdq_depth(struct hinic_hwdev *hwdev, u16 cmdq_depth)
{
        struct hinic_cmd_hw_ioctxt hw_ioctxt = { 0 };
        struct hinic_pfhwdev *pfhwdev;

        pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);

        hw_ioctxt.func_idx = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
        hw_ioctxt.ppf_idx = HINIC_HWIF_PPF_IDX(hwdev->hwif);

        hw_ioctxt.set_cmdq_depth = HW_IOCTXT_SET_CMDQ_DEPTH_ENABLE;
        hw_ioctxt.cmdq_depth = (u8)ilog2(cmdq_depth);

        return hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
                                 HINIC_COMM_CMD_HWCTXT_SET,
                                 &hw_ioctxt, sizeof(hw_ioctxt), NULL,
                                 NULL, HINIC_MGMT_MSG_SYNC);
}

/**
 * hinic_init_cmdqs - init all cmdqs
 * @cmdqs: cmdqs to init
 * @hwif: HW interface for accessing cmdqs
 * @db_area: doorbell areas for all the cmdqs
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif,
                     void __iomem **db_area)
{
        struct hinic_func_to_io *func_to_io = cmdqs_to_func_to_io(cmdqs);
        struct pci_dev *pdev = hwif->pdev;
        struct hinic_hwdev *hwdev;
        size_t saved_wqs_size;
        u16 max_wqe_size;
        int err;

        cmdqs->hwif = hwif;
        cmdqs->cmdq_buf_pool = dma_pool_create("hinic_cmdq", &pdev->dev,
                                               HINIC_CMDQ_BUF_SIZE,
                                               HINIC_CMDQ_BUF_SIZE, 0);
        if (!cmdqs->cmdq_buf_pool)
                return -ENOMEM;

        saved_wqs_size = HINIC_MAX_CMDQ_TYPES * sizeof(struct hinic_wq);
        cmdqs->saved_wqs = devm_kzalloc(&pdev->dev, saved_wqs_size, GFP_KERNEL);
        if (!cmdqs->saved_wqs) {
                err = -ENOMEM;
                goto err_saved_wqs;
        }

        max_wqe_size = WQE_LCMD_SIZE;
        err = hinic_wqs_cmdq_alloc(&cmdqs->cmdq_pages, cmdqs->saved_wqs, hwif,
                                   HINIC_MAX_CMDQ_TYPES, CMDQ_WQEBB_SIZE,
                                   CMDQ_WQ_PAGE_SIZE, CMDQ_DEPTH, max_wqe_size);
        if (err) {
                dev_err(&pdev->dev, "Failed to allocate CMDQ wqs\n");
                goto err_cmdq_wqs;
        }

        hwdev = container_of(func_to_io, struct hinic_hwdev, func_to_io);
        err = init_cmdqs_ctxt(hwdev, cmdqs, db_area);
        if (err) {
                dev_err(&pdev->dev, "Failed to write cmdq ctxt\n");
                goto err_cmdq_ctxt;
        }

        hinic_ceq_register_cb(&func_to_io->ceqs, HINIC_CEQ_CMDQ, cmdqs,
                              cmdq_ceq_handler);

        err = hinic_set_cmdq_depth(hwdev, CMDQ_DEPTH);
        if (err) {
                dev_err(&hwif->pdev->dev, "Failed to set cmdq depth\n");
                goto err_set_cmdq_depth;
        }

        return 0;

err_set_cmdq_depth:
        hinic_ceq_unregister_cb(&func_to_io->ceqs, HINIC_CEQ_CMDQ);

err_cmdq_ctxt:
        hinic_wqs_cmdq_free(&cmdqs->cmdq_pages, cmdqs->saved_wqs,
                            HINIC_MAX_CMDQ_TYPES);

err_cmdq_wqs:
        devm_kfree(&pdev->dev, cmdqs->saved_wqs);

err_saved_wqs:
        dma_pool_destroy(cmdqs->cmdq_buf_pool);
        return err;
}

/**
 * hinic_free_cmdqs - free all cmdqs
 * @cmdqs: cmdqs to free
 **/
void hinic_free_cmdqs(struct hinic_cmdqs *cmdqs)
{
        struct hinic_func_to_io *func_to_io = cmdqs_to_func_to_io(cmdqs);
        struct hinic_hwif *hwif = cmdqs->hwif;
        struct pci_dev *pdev = hwif->pdev;
        enum hinic_cmdq_type cmdq_type;

        hinic_ceq_unregister_cb(&func_to_io->ceqs, HINIC_CEQ_CMDQ);

        cmdq_type = HINIC_CMDQ_SYNC;
        for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++)
                free_cmdq(&cmdqs->cmdq[cmdq_type]);

        hinic_wqs_cmdq_free(&cmdqs->cmdq_pages, cmdqs->saved_wqs,
                            HINIC_MAX_CMDQ_TYPES);

        devm_kfree(&pdev->dev, cmdqs->saved_wqs);

        dma_pool_destroy(cmdqs->cmdq_buf_pool);
}