linux/drivers/infiniband/hw/bnxt_re/qplib_fp.c
/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: Fast Path Operators
 */

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/prefetch.h>

#include "roce_hsi.h"

#include "qplib_res.h"
#include "qplib_rcfw.h"
#include "qplib_sp.h"
#include "qplib_fp.h"

static void bnxt_qplib_arm_cq_enable(struct bnxt_qplib_cq *cq);

static void bnxt_qplib_free_qp_hdr_buf(struct bnxt_qplib_res *res,
                                       struct bnxt_qplib_qp *qp)
{
        struct bnxt_qplib_q *rq = &qp->rq;
        struct bnxt_qplib_q *sq = &qp->sq;

        if (qp->rq_hdr_buf)
                dma_free_coherent(&res->pdev->dev,
                                  rq->hwq.max_elements * qp->rq_hdr_buf_size,
                                  qp->rq_hdr_buf, qp->rq_hdr_buf_map);
        if (qp->sq_hdr_buf)
                dma_free_coherent(&res->pdev->dev,
                                  sq->hwq.max_elements * qp->sq_hdr_buf_size,
                                  qp->sq_hdr_buf, qp->sq_hdr_buf_map);
        qp->rq_hdr_buf = NULL;
        qp->sq_hdr_buf = NULL;
        qp->rq_hdr_buf_map = 0;
        qp->sq_hdr_buf_map = 0;
        qp->sq_hdr_buf_size = 0;
        qp->rq_hdr_buf_size = 0;
}

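/*
 * Allocate the per-WQE header staging buffers for the SQ and RQ: one
 * coherent DMA area per queue, sized max_elements * hdr_buf_size, so
 * each queue element owns one hdr_buf_size slot.  These appear to be
 * used only on the QP1/GSI paths (the slots are handed out by
 * bnxt_qplib_get_qp1_{sq,rq}_buf() below); the layout of what goes in
 * a slot is left to the caller.
 */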
static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res,
                                       struct bnxt_qplib_qp *qp)
{
        struct bnxt_qplib_q *rq = &qp->rq;
        struct bnxt_qplib_q *sq = &qp->sq;
        int rc = 0;

        if (qp->sq_hdr_buf_size && sq->hwq.max_elements) {
                qp->sq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
                                        sq->hwq.max_elements *
                                        qp->sq_hdr_buf_size,
                                        &qp->sq_hdr_buf_map, GFP_KERNEL);
                if (!qp->sq_hdr_buf) {
                        rc = -ENOMEM;
                        dev_err(&res->pdev->dev,
                                "QPLIB: Failed to create sq_hdr_buf");
                        goto fail;
                }
        }

        if (qp->rq_hdr_buf_size && rq->hwq.max_elements) {
                qp->rq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
                                                    rq->hwq.max_elements *
                                                    qp->rq_hdr_buf_size,
                                                    &qp->rq_hdr_buf_map,
                                                    GFP_KERNEL);
                if (!qp->rq_hdr_buf) {
                        rc = -ENOMEM;
                        dev_err(&res->pdev->dev,
                                "QPLIB: Failed to create rq_hdr_buf");
                        goto fail;
                }
        }
        return 0;

fail:
        bnxt_qplib_free_qp_hdr_buf(res, qp);
        return rc;
}

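/*
 * NQ (notification queue) service routine, run from tasklet context.
 * Consumes up to nq->budget entries per invocation; an entry is only
 * consumable while NQE_CMP_VALID() holds (a valid/phase check of the
 * entry against the raw consumer index).  For CQ notifications the
 * 64-bit CQ handle arrives split across two 32-bit words; the CQ is
 * re-armed and the ULP's cqn_handler is invoked.  The NQ doorbell is
 * re-armed once at the end if anything was consumed.
 */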
static void bnxt_qplib_service_nq(unsigned long data)
{
        struct bnxt_qplib_nq *nq = (struct bnxt_qplib_nq *)data;
        struct bnxt_qplib_hwq *hwq = &nq->hwq;
        struct nq_base *nqe, **nq_ptr;
        int num_cqne_processed = 0;
        u32 sw_cons, raw_cons;
        u16 type;
        int budget = nq->budget;
        u64 q_handle;

        /* Service the NQ until empty */
        raw_cons = hwq->cons;
        while (budget--) {
                sw_cons = HWQ_CMP(raw_cons, hwq);
                nq_ptr = (struct nq_base **)hwq->pbl_ptr;
                nqe = &nq_ptr[NQE_PG(sw_cons)][NQE_IDX(sw_cons)];
                if (!NQE_CMP_VALID(nqe, raw_cons, hwq->max_elements))
                        break;

                type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
                switch (type) {
                case NQ_BASE_TYPE_CQ_NOTIFICATION:
                {
                        struct nq_cn *nqcne = (struct nq_cn *)nqe;

                        q_handle = le32_to_cpu(nqcne->cq_handle_low);
                        q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
                                                     << 32;
                        bnxt_qplib_arm_cq_enable((struct bnxt_qplib_cq *)
                                                 ((unsigned long)q_handle));
                        if (!nq->cqn_handler(nq, (struct bnxt_qplib_cq *)
                                                 ((unsigned long)q_handle)))
                                num_cqne_processed++;
                        else
                                dev_warn(&nq->pdev->dev,
                                         "QPLIB: cqn - type 0x%x not handled",
                                         type);
                        break;
                }
                case NQ_BASE_TYPE_DBQ_EVENT:
                        break;
                default:
                        dev_warn(&nq->pdev->dev,
                                 "QPLIB: nqe with type = 0x%x not handled",
                                 type);
                        break;
                }
                raw_cons++;
        }
        if (hwq->cons != raw_cons) {
                hwq->cons = raw_cons;
                NQ_DB_REARM(nq->bar_reg_iomem, hwq->cons, hwq->max_elements);
        }
}

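/*
 * Hard IRQ handler for the NQ MSI-X vector.  Kept minimal: prefetch
 * the next NQ element so it is cache-warm by the time the tasklet
 * runs, then defer all processing to bnxt_qplib_service_nq().
 */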
static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
{
        struct bnxt_qplib_nq *nq = dev_instance;
        struct bnxt_qplib_hwq *hwq = &nq->hwq;
        struct nq_base **nq_ptr;
        u32 sw_cons;

        /* Prefetch the NQ element */
        sw_cons = HWQ_CMP(hwq->cons, hwq);
        nq_ptr = (struct nq_base **)nq->hwq.pbl_ptr;
        prefetch(&nq_ptr[NQE_PG(sw_cons)][NQE_IDX(sw_cons)]);

        /* Fan out to CPU affinitized kthreads? */
        tasklet_schedule(&nq->worker);

        return IRQ_HANDLED;
}

void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
{
        /* Make sure the HW is stopped! */
        synchronize_irq(nq->vector);
        tasklet_disable(&nq->worker);
        tasklet_kill(&nq->worker);

        if (nq->requested) {
                free_irq(nq->vector, nq);
                nq->requested = false;
        }
        if (nq->bar_reg_iomem)
                iounmap(nq->bar_reg_iomem);
        nq->bar_reg_iomem = NULL;

        nq->cqn_handler = NULL;
        nq->srqn_handler = NULL;
        nq->vector = 0;
}

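/*
 * Bring up one NQ: register the service tasklet, request the MSI-X
 * vector, map the 4-byte consumer doorbell window at bar_reg_offset
 * within the NQ BAR, and arm the NQ so the HW starts posting
 * notifications.  cqn_handler/srqn_handler are the ULP callbacks
 * invoked from tasklet context.  The teardown path,
 * bnxt_qplib_disable_nq(), is written to be safe on partial state, so
 * every failure here simply funnels into it.
 */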
int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
                         int msix_vector, int bar_reg_offset,
                         int (*cqn_handler)(struct bnxt_qplib_nq *nq,
                                            struct bnxt_qplib_cq *),
                         int (*srqn_handler)(struct bnxt_qplib_nq *nq,
                                             void *, u8 event))
{
        resource_size_t nq_base;
        int rc;

        nq->pdev = pdev;
        nq->vector = msix_vector;

        nq->cqn_handler = cqn_handler;

        nq->srqn_handler = srqn_handler;

        tasklet_init(&nq->worker, bnxt_qplib_service_nq, (unsigned long)nq);

        nq->requested = false;
        rc = request_irq(nq->vector, bnxt_qplib_nq_irq, 0, "bnxt_qplib_nq", nq);
        if (rc) {
                dev_err(&nq->pdev->dev,
                        "Failed to request IRQ for NQ: %#x", rc);
                bnxt_qplib_disable_nq(nq);
                goto fail;
        }
        nq->requested = true;
        nq->bar_reg = NQ_CONS_PCI_BAR_REGION;
        nq->bar_reg_off = bar_reg_offset;
        nq_base = pci_resource_start(pdev, nq->bar_reg);
        if (!nq_base) {
                rc = -ENOMEM;
                goto fail;
        }
        nq->bar_reg_iomem = ioremap_nocache(nq_base + nq->bar_reg_off, 4);
        if (!nq->bar_reg_iomem) {
                rc = -ENOMEM;
                goto fail;
        }
        NQ_DB_REARM(nq->bar_reg_iomem, nq->hwq.cons, nq->hwq.max_elements);

        return 0;
fail:
        bnxt_qplib_disable_nq(nq);
        return rc;
}

void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq)
{
        if (nq->hwq.max_elements)
                bnxt_qplib_free_hwq(nq->pdev, &nq->hwq);
}

int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq)
{
        nq->pdev = pdev;
        if (!nq->hwq.max_elements ||
            nq->hwq.max_elements > BNXT_QPLIB_NQE_MAX_CNT)
                nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;

        if (bnxt_qplib_alloc_init_hwq(nq->pdev, &nq->hwq, NULL, 0,
                                      &nq->hwq.max_elements,
                                      BNXT_QPLIB_MAX_NQE_ENTRY_SIZE, 0,
                                      PAGE_SIZE, HWQ_TYPE_L2_CMPL))
                return -ENOMEM;

        nq->budget = 8;
        return 0;
}

/* QP */
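/*
 * Create the special QP1 (GSI) QP via the CREATE_QP1 firmware command.
 * The SQ/RQ rings live in host memory and are described to the FW by
 * the level-0 PBL address plus a page-size/level encoding; the shadow
 * sq->swq/rq->swq arrays track per-WQE software state (wr_id etc.).
 * QP1 also gets the header buffers from bnxt_qplib_alloc_qp_hdr_buf(),
 * seemingly to stage the extra per-packet headers GSI traffic needs.
 */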
int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
        struct bnxt_qplib_rcfw *rcfw = res->rcfw;
        struct cmdq_create_qp1 req;
        struct creq_create_qp1_resp resp;
        struct bnxt_qplib_pbl *pbl;
        struct bnxt_qplib_q *sq = &qp->sq;
        struct bnxt_qplib_q *rq = &qp->rq;
        int rc;
        u16 cmd_flags = 0;
        u32 qp_flags = 0;

        RCFW_CMD_PREP(req, CREATE_QP1, cmd_flags);

        /* General */
        req.type = qp->type;
        req.dpi = cpu_to_le32(qp->dpi->dpi);
        req.qp_handle = cpu_to_le64(qp->qp_handle);

        /* SQ */
        sq->hwq.max_elements = sq->max_wqe;
        rc = bnxt_qplib_alloc_init_hwq(res->pdev, &sq->hwq, NULL, 0,
                                       &sq->hwq.max_elements,
                                       BNXT_QPLIB_MAX_SQE_ENTRY_SIZE, 0,
                                       PAGE_SIZE, HWQ_TYPE_QUEUE);
        if (rc)
                goto exit;

        sq->swq = kcalloc(sq->hwq.max_elements, sizeof(*sq->swq), GFP_KERNEL);
        if (!sq->swq) {
                rc = -ENOMEM;
                goto fail_sq;
        }
        pbl = &sq->hwq.pbl[PBL_LVL_0];
        req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
        req.sq_pg_size_sq_lvl =
                ((sq->hwq.level & CMDQ_CREATE_QP1_SQ_LVL_MASK)
                                <<  CMDQ_CREATE_QP1_SQ_LVL_SFT) |
                (pbl->pg_size == ROCE_PG_SIZE_4K ?
                                CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_4K :
                 pbl->pg_size == ROCE_PG_SIZE_8K ?
                                CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_8K :
                 pbl->pg_size == ROCE_PG_SIZE_64K ?
                                CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_64K :
                 pbl->pg_size == ROCE_PG_SIZE_2M ?
                                CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_2M :
                 pbl->pg_size == ROCE_PG_SIZE_8M ?
                                CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_8M :
                 pbl->pg_size == ROCE_PG_SIZE_1G ?
                                CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_1G :
                 CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_4K);

        if (qp->scq)
                req.scq_cid = cpu_to_le32(qp->scq->id);

        qp_flags |= CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE;

        /* RQ */
        if (rq->max_wqe) {
                rq->hwq.max_elements = qp->rq.max_wqe;
                rc = bnxt_qplib_alloc_init_hwq(res->pdev, &rq->hwq, NULL, 0,
                                               &rq->hwq.max_elements,
                                               BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0,
                                               PAGE_SIZE, HWQ_TYPE_QUEUE);
                if (rc)
                        goto fail_sq;

                rq->swq = kcalloc(rq->hwq.max_elements, sizeof(*rq->swq),
                                  GFP_KERNEL);
                if (!rq->swq) {
                        rc = -ENOMEM;
                        goto fail_rq;
                }
                pbl = &rq->hwq.pbl[PBL_LVL_0];
                req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
                req.rq_pg_size_rq_lvl =
                        ((rq->hwq.level & CMDQ_CREATE_QP1_RQ_LVL_MASK) <<
                         CMDQ_CREATE_QP1_RQ_LVL_SFT) |
                                (pbl->pg_size == ROCE_PG_SIZE_4K ?
                                        CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_4K :
                                 pbl->pg_size == ROCE_PG_SIZE_8K ?
                                        CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_8K :
                                 pbl->pg_size == ROCE_PG_SIZE_64K ?
                                        CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_64K :
                                 pbl->pg_size == ROCE_PG_SIZE_2M ?
                                        CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_2M :
                                 pbl->pg_size == ROCE_PG_SIZE_8M ?
                                        CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_8M :
                                 pbl->pg_size == ROCE_PG_SIZE_1G ?
                                        CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_1G :
                                 CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_4K);
                if (qp->rcq)
                        req.rcq_cid = cpu_to_le32(qp->rcq->id);
        }

        /* Header buffer - allow hdr_buf pass in */
        rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
        if (rc) {
                rc = -ENOMEM;
                goto fail;
        }
        req.qp_flags = cpu_to_le32(qp_flags);
        req.sq_size = cpu_to_le32(sq->hwq.max_elements);
        req.rq_size = cpu_to_le32(rq->hwq.max_elements);

        req.sq_fwo_sq_sge =
                cpu_to_le16((sq->max_sge & CMDQ_CREATE_QP1_SQ_SGE_MASK) <<
                            CMDQ_CREATE_QP1_SQ_SGE_SFT);
        req.rq_fwo_rq_sge =
                cpu_to_le16((rq->max_sge & CMDQ_CREATE_QP1_RQ_SGE_MASK) <<
                            CMDQ_CREATE_QP1_RQ_SGE_SFT);

        req.pd_id = cpu_to_le32(qp->pd->id);

        rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
                                          (void *)&resp, NULL, 0);
        if (rc)
                goto fail;

        qp->id = le32_to_cpu(resp.xid);
        qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
        sq->flush_in_progress = false;
        rq->flush_in_progress = false;

        return 0;

fail:
        bnxt_qplib_free_qp_hdr_buf(res, qp);
fail_rq:
        bnxt_qplib_free_hwq(res->pdev, &rq->hwq);
        kfree(rq->swq);
fail_sq:
        bnxt_qplib_free_hwq(res->pdev, &sq->hwq);
        kfree(sq->swq);
exit:
        return rc;
}

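/*
 * Create a regular (non-GSI) QP via CREATE_QP.  Beyond what the QP1
 * setup does, this also:
 *  - appends a PSN search area after the SQ WQEs for RC QPs (psn_sz)
 *    and points each swq entry at its PSN search slot, adjusting by
 *    poff when that area does not start on a page boundary,
 *  - pre-marks every SQ WQE as LOCAL_INVALID so the HW never fetches
 *    a stale descriptor,
 *  - allocates the ORRQ/IRRQ context rings for RC QPs, sized from
 *    max_rd_atomic / max_dest_rd_atomic and rounded up to PAGE_SIZE.
 */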
int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
        struct bnxt_qplib_rcfw *rcfw = res->rcfw;
        struct sq_send *hw_sq_send_hdr, **hw_sq_send_ptr;
        struct cmdq_create_qp req;
        struct creq_create_qp_resp resp;
        struct bnxt_qplib_pbl *pbl;
        struct sq_psn_search **psn_search_ptr;
        unsigned long int psn_search, poff = 0;
        struct bnxt_qplib_q *sq = &qp->sq;
        struct bnxt_qplib_q *rq = &qp->rq;
        struct bnxt_qplib_hwq *xrrq;
        int i, rc, req_size, psn_sz;
        u16 cmd_flags = 0, max_ssge;
        u32 sw_prod, qp_flags = 0;

        RCFW_CMD_PREP(req, CREATE_QP, cmd_flags);

        /* General */
        req.type = qp->type;
        req.dpi = cpu_to_le32(qp->dpi->dpi);
        req.qp_handle = cpu_to_le64(qp->qp_handle);

        /* SQ */
        psn_sz = (qp->type == CMDQ_CREATE_QP_TYPE_RC) ?
                 sizeof(struct sq_psn_search) : 0;
        sq->hwq.max_elements = sq->max_wqe;
        rc = bnxt_qplib_alloc_init_hwq(res->pdev, &sq->hwq, sq->sglist,
                                       sq->nmap, &sq->hwq.max_elements,
                                       BNXT_QPLIB_MAX_SQE_ENTRY_SIZE,
                                       psn_sz,
                                       PAGE_SIZE, HWQ_TYPE_QUEUE);
        if (rc)
                goto exit;

        sq->swq = kcalloc(sq->hwq.max_elements, sizeof(*sq->swq), GFP_KERNEL);
        if (!sq->swq) {
                rc = -ENOMEM;
                goto fail_sq;
        }
        hw_sq_send_ptr = (struct sq_send **)sq->hwq.pbl_ptr;
        if (psn_sz) {
                psn_search_ptr = (struct sq_psn_search **)
                                  &hw_sq_send_ptr[get_sqe_pg
                                        (sq->hwq.max_elements)];
                psn_search = (unsigned long int)
                              &hw_sq_send_ptr[get_sqe_pg(sq->hwq.max_elements)]
                              [get_sqe_idx(sq->hwq.max_elements)];
                if (psn_search & ~PAGE_MASK) {
                        /* If the psn_search does not start on a page boundary,
                         * then calculate the offset
                         */
                        poff = (psn_search & ~PAGE_MASK) /
                                BNXT_QPLIB_MAX_PSNE_ENTRY_SIZE;
                }
                for (i = 0; i < sq->hwq.max_elements; i++)
                        sq->swq[i].psn_search =
                                &psn_search_ptr[get_psne_pg(i + poff)]
                                               [get_psne_idx(i + poff)];
        }
        pbl = &sq->hwq.pbl[PBL_LVL_0];
        req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
        req.sq_pg_size_sq_lvl =
                ((sq->hwq.level & CMDQ_CREATE_QP_SQ_LVL_MASK)
                                 <<  CMDQ_CREATE_QP_SQ_LVL_SFT) |
                (pbl->pg_size == ROCE_PG_SIZE_4K ?
                                CMDQ_CREATE_QP_SQ_PG_SIZE_PG_4K :
                 pbl->pg_size == ROCE_PG_SIZE_8K ?
                                CMDQ_CREATE_QP_SQ_PG_SIZE_PG_8K :
                 pbl->pg_size == ROCE_PG_SIZE_64K ?
                                CMDQ_CREATE_QP_SQ_PG_SIZE_PG_64K :
                 pbl->pg_size == ROCE_PG_SIZE_2M ?
                                CMDQ_CREATE_QP_SQ_PG_SIZE_PG_2M :
                 pbl->pg_size == ROCE_PG_SIZE_8M ?
                                CMDQ_CREATE_QP_SQ_PG_SIZE_PG_8M :
                 pbl->pg_size == ROCE_PG_SIZE_1G ?
                                CMDQ_CREATE_QP_SQ_PG_SIZE_PG_1G :
                 CMDQ_CREATE_QP_SQ_PG_SIZE_PG_4K);

        /* initialize all SQ WQEs to LOCAL_INVALID (sq prep for hw fetch) */
        hw_sq_send_ptr = (struct sq_send **)sq->hwq.pbl_ptr;
        for (sw_prod = 0; sw_prod < sq->hwq.max_elements; sw_prod++) {
                hw_sq_send_hdr = &hw_sq_send_ptr[get_sqe_pg(sw_prod)]
                                                [get_sqe_idx(sw_prod)];
                hw_sq_send_hdr->wqe_type = SQ_BASE_WQE_TYPE_LOCAL_INVALID;
        }

        if (qp->scq)
                req.scq_cid = cpu_to_le32(qp->scq->id);

        qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE;
        qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED;
        if (qp->sig_type)
                qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;

        /* RQ */
        if (rq->max_wqe) {
                rq->hwq.max_elements = rq->max_wqe;
                rc = bnxt_qplib_alloc_init_hwq(res->pdev, &rq->hwq, rq->sglist,
                                               rq->nmap, &rq->hwq.max_elements,
                                               BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0,
                                               PAGE_SIZE, HWQ_TYPE_QUEUE);
                if (rc)
                        goto fail_sq;

                rq->swq = kcalloc(rq->hwq.max_elements, sizeof(*rq->swq),
                                  GFP_KERNEL);
                if (!rq->swq) {
                        rc = -ENOMEM;
                        goto fail_rq;
                }
                pbl = &rq->hwq.pbl[PBL_LVL_0];
                req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
                req.rq_pg_size_rq_lvl =
                        ((rq->hwq.level & CMDQ_CREATE_QP_RQ_LVL_MASK) <<
                         CMDQ_CREATE_QP_RQ_LVL_SFT) |
                                (pbl->pg_size == ROCE_PG_SIZE_4K ?
                                        CMDQ_CREATE_QP_RQ_PG_SIZE_PG_4K :
                                 pbl->pg_size == ROCE_PG_SIZE_8K ?
                                        CMDQ_CREATE_QP_RQ_PG_SIZE_PG_8K :
                                 pbl->pg_size == ROCE_PG_SIZE_64K ?
                                        CMDQ_CREATE_QP_RQ_PG_SIZE_PG_64K :
                                 pbl->pg_size == ROCE_PG_SIZE_2M ?
                                        CMDQ_CREATE_QP_RQ_PG_SIZE_PG_2M :
                                 pbl->pg_size == ROCE_PG_SIZE_8M ?
                                        CMDQ_CREATE_QP_RQ_PG_SIZE_PG_8M :
                                 pbl->pg_size == ROCE_PG_SIZE_1G ?
                                        CMDQ_CREATE_QP_RQ_PG_SIZE_PG_1G :
                                 CMDQ_CREATE_QP_RQ_PG_SIZE_PG_4K);
        }

        if (qp->rcq)
                req.rcq_cid = cpu_to_le32(qp->rcq->id);
        req.qp_flags = cpu_to_le32(qp_flags);
        req.sq_size = cpu_to_le32(sq->hwq.max_elements);
        req.rq_size = cpu_to_le32(rq->hwq.max_elements);
        qp->sq_hdr_buf = NULL;
        qp->rq_hdr_buf = NULL;

        rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
        if (rc)
                goto fail_rq;

        /* CTRL-22434: Irrespective of the requested SGE count on the SQ
         * always create the QP with max send sges possible if the requested
         * inline size is greater than 0.
         */
        max_ssge = qp->max_inline_data ? 6 : sq->max_sge;
        req.sq_fwo_sq_sge = cpu_to_le16(
                                ((max_ssge & CMDQ_CREATE_QP_SQ_SGE_MASK)
                                 << CMDQ_CREATE_QP_SQ_SGE_SFT) | 0);
        req.rq_fwo_rq_sge = cpu_to_le16(
                                ((rq->max_sge & CMDQ_CREATE_QP_RQ_SGE_MASK)
                                 << CMDQ_CREATE_QP_RQ_SGE_SFT) | 0);
        /* ORRQ and IRRQ */
        if (psn_sz) {
                xrrq = &qp->orrq;
                xrrq->max_elements =
                        ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
                req_size = xrrq->max_elements *
                           BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE + PAGE_SIZE - 1;
                req_size &= ~(PAGE_SIZE - 1);
                rc = bnxt_qplib_alloc_init_hwq(res->pdev, xrrq, NULL, 0,
                                               &xrrq->max_elements,
                                               BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE,
                                               0, req_size, HWQ_TYPE_CTX);
                if (rc)
                        goto fail_buf_free;
                pbl = &xrrq->pbl[PBL_LVL_0];
                req.orrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);

                xrrq = &qp->irrq;
                xrrq->max_elements = IRD_LIMIT_TO_IRRQ_SLOTS(
                                                qp->max_dest_rd_atomic);
                req_size = xrrq->max_elements *
                           BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE + PAGE_SIZE - 1;
                req_size &= ~(PAGE_SIZE - 1);

                rc = bnxt_qplib_alloc_init_hwq(res->pdev, xrrq, NULL, 0,
                                               &xrrq->max_elements,
                                               BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE,
                                               0, req_size, HWQ_TYPE_CTX);
                if (rc)
                        goto fail_orrq;

                pbl = &xrrq->pbl[PBL_LVL_0];
                req.irrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
        }
        req.pd_id = cpu_to_le32(qp->pd->id);

        rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
                                          (void *)&resp, NULL, 0);
        if (rc)
                goto fail;

        qp->id = le32_to_cpu(resp.xid);
        qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
        sq->flush_in_progress = false;
        rq->flush_in_progress = false;

        return 0;

fail:
        if (qp->irrq.max_elements)
                bnxt_qplib_free_hwq(res->pdev, &qp->irrq);
fail_orrq:
        if (qp->orrq.max_elements)
                bnxt_qplib_free_hwq(res->pdev, &qp->orrq);
fail_buf_free:
        bnxt_qplib_free_qp_hdr_buf(res, qp);
fail_rq:
        bnxt_qplib_free_hwq(res->pdev, &rq->hwq);
        kfree(rq->swq);
fail_sq:
        bnxt_qplib_free_hwq(res->pdev, &sq->hwq);
        kfree(sq->swq);
exit:
        return rc;
}

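/*
 * State-transition fixups applied before MODIFY_QP is sent to the FW.
 * Per the comments below, the Bono firmware is strict about which
 * attributes may accompany a given transition, so these helpers force
 * required attributes to sane defaults (path_mtu, sgid_index, >= 1
 * rd_atomic credits) and mask off attributes the FW refuses for that
 * transition.
 */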
static void __modify_flags_from_init_state(struct bnxt_qplib_qp *qp)
{
        switch (qp->state) {
        case CMDQ_MODIFY_QP_NEW_STATE_RTR:
                /* INIT->RTR, configure the path_mtu to the default
                 * 2048 if not being requested
                 */
                if (!(qp->modify_flags &
                    CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)) {
                        qp->modify_flags |=
                                CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
                        qp->path_mtu =
                                CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
                }
                qp->modify_flags &=
                        ~CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
                /* Bono FW requires the max_dest_rd_atomic to be >= 1 */
                if (qp->max_dest_rd_atomic < 1)
                        qp->max_dest_rd_atomic = 1;
                qp->modify_flags &= ~CMDQ_MODIFY_QP_MODIFY_MASK_SRC_MAC;
                /* Bono FW 20.6.5 requires SGID_INDEX configuration */
                if (!(qp->modify_flags &
                    CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)) {
                        qp->modify_flags |=
                                CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX;
                        qp->ah.sgid_index = 0;
                }
                break;
        default:
                break;
        }
}

static void __modify_flags_from_rtr_state(struct bnxt_qplib_qp *qp)
{
        switch (qp->state) {
        case CMDQ_MODIFY_QP_NEW_STATE_RTS:
                /* Bono FW requires the max_rd_atomic to be >= 1 */
                if (qp->max_rd_atomic < 1)
                        qp->max_rd_atomic = 1;
                /* Bono FW does not allow PKEY_INDEX,
                 * DGID, FLOW_LABEL, SGID_INDEX, HOP_LIMIT,
                 * TRAFFIC_CLASS, DEST_MAC, PATH_MTU, RQ_PSN,
                 * MIN_RNR_TIMER, MAX_DEST_RD_ATOMIC, DEST_QP_ID
                 * modification
                 */
                qp->modify_flags &=
                        ~(CMDQ_MODIFY_QP_MODIFY_MASK_PKEY |
                          CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
                          CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
                          CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
                          CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
                          CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
                          CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
                          CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU |
                          CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN |
                          CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER |
                          CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC |
                          CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID);
                break;
        default:
                break;
        }
}

static void __filter_modify_flags(struct bnxt_qplib_qp *qp)
{
        switch (qp->cur_qp_state) {
        case CMDQ_MODIFY_QP_NEW_STATE_RESET:
                break;
        case CMDQ_MODIFY_QP_NEW_STATE_INIT:
                __modify_flags_from_init_state(qp);
                break;
        case CMDQ_MODIFY_QP_NEW_STATE_RTR:
                __modify_flags_from_rtr_state(qp);
                break;
        case CMDQ_MODIFY_QP_NEW_STATE_RTS:
                break;
        case CMDQ_MODIFY_QP_NEW_STATE_SQD:
                break;
        case CMDQ_MODIFY_QP_NEW_STATE_SQE:
                break;
        case CMDQ_MODIFY_QP_NEW_STATE_ERR:
                break;
        default:
                break;
        }
}

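/*
 * Issue MODIFY_QP.  qp->modify_flags selects which attribute fields of
 * the request get populated; the mask is first filtered through
 * __filter_modify_flags() based on the current QP state.  On success
 * the cached cur_qp_state advances to the requested state.
 */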
int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
        struct bnxt_qplib_rcfw *rcfw = res->rcfw;
        struct cmdq_modify_qp req;
        struct creq_modify_qp_resp resp;
        u16 cmd_flags = 0, pkey;
        u32 temp32[4];
        u32 bmask;
        int rc;

        RCFW_CMD_PREP(req, MODIFY_QP, cmd_flags);

        /* Filter out the qp_attr_mask based on the state->new transition */
        __filter_modify_flags(qp);
        bmask = qp->modify_flags;
        req.modify_mask = cpu_to_le32(qp->modify_flags);
        req.qp_cid = cpu_to_le32(qp->id);
        if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_STATE) {
                req.network_type_en_sqd_async_notify_new_state =
                                (qp->state & CMDQ_MODIFY_QP_NEW_STATE_MASK) |
                                (qp->en_sqd_async_notify ?
                                        CMDQ_MODIFY_QP_EN_SQD_ASYNC_NOTIFY : 0);
        }
        req.network_type_en_sqd_async_notify_new_state |= qp->nw_type;

        if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS)
                req.access = qp->access;

        if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY) {
                if (!bnxt_qplib_get_pkey(res, &res->pkey_tbl,
                                         qp->pkey_index, &pkey))
                        req.pkey = cpu_to_le16(pkey);
        }
        if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_QKEY)
                req.qkey = cpu_to_le32(qp->qkey);

        if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DGID) {
                memcpy(temp32, qp->ah.dgid.data, sizeof(struct bnxt_qplib_gid));
                req.dgid[0] = cpu_to_le32(temp32[0]);
                req.dgid[1] = cpu_to_le32(temp32[1]);
                req.dgid[2] = cpu_to_le32(temp32[2]);
                req.dgid[3] = cpu_to_le32(temp32[3]);
        }
        if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL)
                req.flow_label = cpu_to_le32(qp->ah.flow_label);

        if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)
                req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id
                                             [qp->ah.sgid_index]);

        if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT)
                req.hop_limit = qp->ah.hop_limit;

        if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS)
                req.traffic_class = qp->ah.traffic_class;

        if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC)
                memcpy(req.dest_mac, qp->ah.dmac, 6);

        if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)
                req.path_mtu = qp->path_mtu;

        if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT)
                req.timeout = qp->timeout;

        if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT)
                req.retry_cnt = qp->retry_cnt;

        if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY)
                req.rnr_retry = qp->rnr_retry;

        if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER)
                req.min_rnr_timer = qp->min_rnr_timer;

        if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN)
                req.rq_psn = cpu_to_le32(qp->rq.psn);

        if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN)
                req.sq_psn = cpu_to_le32(qp->sq.psn);

        if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC)
                req.max_rd_atomic =
                        ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);

        if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC)
                req.max_dest_rd_atomic =
                        IRD_LIMIT_TO_IRRQ_SLOTS(qp->max_dest_rd_atomic);

        req.sq_size = cpu_to_le32(qp->sq.hwq.max_elements);
        req.rq_size = cpu_to_le32(qp->rq.hwq.max_elements);
        req.sq_sge = cpu_to_le16(qp->sq.max_sge);
        req.rq_sge = cpu_to_le16(qp->rq.max_sge);
        req.max_inline_data = cpu_to_le32(qp->max_inline_data);
        if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID)
                req.dest_qp_id = cpu_to_le32(qp->dest_qpn);

        req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id);

        rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
                                          (void *)&resp, NULL, 0);
        if (rc)
                return rc;
        qp->cur_qp_state = qp->state;
        return 0;
}

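/*
 * QUERY_QP returns its data in a DMA side buffer (sbuf) rather than in
 * the CREQ response itself; the side-buffer fields are unpacked back
 * into the qplib QP.  Note that the SGID index is translated from the
 * HW id in the response back to the driver's sgid_tbl index.
 */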
int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
        struct bnxt_qplib_rcfw *rcfw = res->rcfw;
        struct cmdq_query_qp req;
        struct creq_query_qp_resp resp;
        struct bnxt_qplib_rcfw_sbuf *sbuf;
        struct creq_query_qp_resp_sb *sb;
        u16 cmd_flags = 0;
        u32 temp32[4];
        int i, rc = 0;

        RCFW_CMD_PREP(req, QUERY_QP, cmd_flags);

        sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
        if (!sbuf)
                return -ENOMEM;
        sb = sbuf->sb;

        req.qp_cid = cpu_to_le32(qp->id);
        req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
        rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
                                          (void *)sbuf, 0);
        if (rc)
                goto bail;
        /* Extract the context from the side buffer */
        qp->state = sb->en_sqd_async_notify_state &
                        CREQ_QUERY_QP_RESP_SB_STATE_MASK;
        qp->en_sqd_async_notify = sb->en_sqd_async_notify_state &
                                  CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY ?
                                  true : false;
        qp->access = sb->access;
        qp->pkey_index = le16_to_cpu(sb->pkey);
        qp->qkey = le32_to_cpu(sb->qkey);

        temp32[0] = le32_to_cpu(sb->dgid[0]);
        temp32[1] = le32_to_cpu(sb->dgid[1]);
        temp32[2] = le32_to_cpu(sb->dgid[2]);
        temp32[3] = le32_to_cpu(sb->dgid[3]);
        memcpy(qp->ah.dgid.data, temp32, sizeof(qp->ah.dgid.data));

        qp->ah.flow_label = le32_to_cpu(sb->flow_label);

        qp->ah.sgid_index = 0;
        for (i = 0; i < res->sgid_tbl.max; i++) {
                if (res->sgid_tbl.hw_id[i] == le16_to_cpu(sb->sgid_index)) {
                        qp->ah.sgid_index = i;
                        break;
                }
        }
        if (i == res->sgid_tbl.max)
                dev_warn(&res->pdev->dev, "QPLIB: SGID not found??");

        qp->ah.hop_limit = sb->hop_limit;
        qp->ah.traffic_class = sb->traffic_class;
        memcpy(qp->ah.dmac, sb->dest_mac, 6);
        qp->ah.vlan_id = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
                                CREQ_QUERY_QP_RESP_SB_VLAN_ID_MASK) >>
                                CREQ_QUERY_QP_RESP_SB_VLAN_ID_SFT;
        qp->path_mtu = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
                                    CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) >>
                                    CREQ_QUERY_QP_RESP_SB_PATH_MTU_SFT;
        qp->timeout = sb->timeout;
        qp->retry_cnt = sb->retry_cnt;
        qp->rnr_retry = sb->rnr_retry;
        qp->min_rnr_timer = sb->min_rnr_timer;
        qp->rq.psn = le32_to_cpu(sb->rq_psn);
        qp->max_rd_atomic = ORRQ_SLOTS_TO_ORD_LIMIT(sb->max_rd_atomic);
        qp->sq.psn = le32_to_cpu(sb->sq_psn);
        qp->max_dest_rd_atomic =
                        IRRQ_SLOTS_TO_IRD_LIMIT(sb->max_dest_rd_atomic);
        qp->sq.max_wqe = qp->sq.hwq.max_elements;
        qp->rq.max_wqe = qp->rq.hwq.max_elements;
        qp->sq.max_sge = le16_to_cpu(sb->sq_sge);
        qp->rq.max_sge = le16_to_cpu(sb->rq_sge);
        qp->max_inline_data = le32_to_cpu(sb->max_inline_data);
        qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
        memcpy(qp->smac, sb->src_mac, 6);
        qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
bail:
        bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
        return rc;
}

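/*
 * Scrub a CQ of references to a QP that is being destroyed: any valid
 * CQE (REQ, TERMINAL or RES_*) whose qp_handle matches the given QP
 * has its handle zeroed, so later CQ polling cannot dereference the
 * freed QP.  Callers hold the CQ's hwq lock.
 */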
static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
{
        struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
        struct cq_base *hw_cqe, **hw_cqe_ptr;
        int i;

        for (i = 0; i < cq_hwq->max_elements; i++) {
                hw_cqe_ptr = (struct cq_base **)cq_hwq->pbl_ptr;
                hw_cqe = &hw_cqe_ptr[CQE_PG(i)][CQE_IDX(i)];
                if (!CQE_CMP_VALID(hw_cqe, i, cq_hwq->max_elements))
                        continue;
                switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
                case CQ_BASE_CQE_TYPE_REQ:
                case CQ_BASE_CQE_TYPE_TERMINAL:
                {
                        struct cq_req *cqe = (struct cq_req *)hw_cqe;

                        if (qp == le64_to_cpu(cqe->qp_handle))
                                cqe->qp_handle = 0;
                        break;
                }
                case CQ_BASE_CQE_TYPE_RES_RC:
                case CQ_BASE_CQE_TYPE_RES_UD:
                case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
                {
                        struct cq_res_rc *cqe = (struct cq_res_rc *)hw_cqe;

                        if (qp == le64_to_cpu(cqe->qp_handle))
                                cqe->qp_handle = 0;
                        break;
                }
                default:
                        break;
                }
        }
}

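/*
 * Tear down a QP: destroy it in FW first, then scrub both CQs (under
 * their hwq locks, taking scq before rcq when they differ) and release
 * the header buffers, SQ/RQ rings, shadow queues and ORRQ/IRRQ.
 */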
int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
                          struct bnxt_qplib_qp *qp)
{
        struct bnxt_qplib_rcfw *rcfw = res->rcfw;
        struct cmdq_destroy_qp req;
        struct creq_destroy_qp_resp resp;
        unsigned long flags;
        u16 cmd_flags = 0;
        int rc;

        RCFW_CMD_PREP(req, DESTROY_QP, cmd_flags);

        req.qp_cid = cpu_to_le32(qp->id);
        rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
                                          (void *)&resp, NULL, 0);
        if (rc)
                return rc;

        /* Must walk the associated CQs to nullify the QP ptr */
        spin_lock_irqsave(&qp->scq->hwq.lock, flags);

        __clean_cq(qp->scq, (u64)(unsigned long)qp);

        if (qp->rcq && qp->rcq != qp->scq) {
                spin_lock(&qp->rcq->hwq.lock);
                __clean_cq(qp->rcq, (u64)(unsigned long)qp);
                spin_unlock(&qp->rcq->hwq.lock);
        }

        spin_unlock_irqrestore(&qp->scq->hwq.lock, flags);

        bnxt_qplib_free_qp_hdr_buf(res, qp);
        bnxt_qplib_free_hwq(res->pdev, &qp->sq.hwq);
        kfree(qp->sq.swq);

        bnxt_qplib_free_hwq(res->pdev, &qp->rq.hwq);
        kfree(qp->rq.swq);

        if (qp->irrq.max_elements)
                bnxt_qplib_free_hwq(res->pdev, &qp->irrq);
        if (qp->orrq.max_elements)
                bnxt_qplib_free_hwq(res->pdev, &qp->orrq);

        return 0;
}

void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
                                struct bnxt_qplib_sge *sge)
{
        struct bnxt_qplib_q *sq = &qp->sq;
        u32 sw_prod;

        memset(sge, 0, sizeof(*sge));

        if (qp->sq_hdr_buf) {
                sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
                sge->addr = (dma_addr_t)(qp->sq_hdr_buf_map +
                                         sw_prod * qp->sq_hdr_buf_size);
                sge->lkey = 0xFFFFFFFF;
                sge->size = qp->sq_hdr_buf_size;
                return qp->sq_hdr_buf + sw_prod * sge->size;
        }
        return NULL;
}

u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp)
{
        struct bnxt_qplib_q *rq = &qp->rq;

        return HWQ_CMP(rq->hwq.prod, &rq->hwq);
}

dma_addr_t bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp, u32 index)
{
        return (qp->rq_hdr_buf_map + index * qp->rq_hdr_buf_size);
}

void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
                                struct bnxt_qplib_sge *sge)
{
        struct bnxt_qplib_q *rq = &qp->rq;
        u32 sw_prod;

        memset(sge, 0, sizeof(*sge));

        if (qp->rq_hdr_buf) {
                sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
                sge->addr = (dma_addr_t)(qp->rq_hdr_buf_map +
                                         sw_prod * qp->rq_hdr_buf_size);
                sge->lkey = 0xFFFFFFFF;
                sge->size = qp->rq_hdr_buf_size;
                return qp->rq_hdr_buf + sw_prod * sge->size;
        }
        return NULL;
}

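/*
 * Ring the SQ doorbell: a 64-bit message carrying the software
 * producer index plus the QP id and doorbell type.  The wmb() orders
 * the WQE stores ahead of the doorbell write so the HW never fetches
 * a half-written WQE.
 */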
void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp)
{
        struct bnxt_qplib_q *sq = &qp->sq;
        struct dbr_dbr db_msg = { 0 };
        u32 sw_prod;

        sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);

        db_msg.index = cpu_to_le32((sw_prod << DBR_DBR_INDEX_SFT) &
                                   DBR_DBR_INDEX_MASK);
        db_msg.type_xid =
                cpu_to_le32(((qp->id << DBR_DBR_XID_SFT) & DBR_DBR_XID_MASK) |
                            DBR_DBR_TYPE_SQ);
        /* Flush all the WQE writes to HW */
        wmb();
        __iowrite64_copy(qp->dpi->dbr, &db_msg, sizeof(db_msg) / sizeof(u64));
}

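/*
 * Build one send WQE at the current SQ producer slot.  The common
 * fields (inline data or SGL, wqe_size16) are filled first, then the
 * type-specific section (send/RDMA/atomic/local-inv/FRMR/bind).  The
 * SQ PSN advances by the number of MTU-sized packets the WQE will
 * generate, and the slot's PSN search entry, when present (RC QPs), is
 * updated with the opcode and start/next PSN.  Callers ring the
 * doorbell separately via bnxt_qplib_post_send_db().
 */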
int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
                         struct bnxt_qplib_swqe *wqe)
{
        struct bnxt_qplib_q *sq = &qp->sq;
        struct bnxt_qplib_swq *swq;
        struct sq_send *hw_sq_send_hdr, **hw_sq_send_ptr;
        struct sq_sge *hw_sge;
        u32 sw_prod;
        u8 wqe_size16;
        int i, rc = 0, data_len = 0, pkt_num = 0;
        __le32 temp32;

        if (qp->state != CMDQ_MODIFY_QP_NEW_STATE_RTS) {
                rc = -EINVAL;
                goto done;
        }

        if (bnxt_qplib_queue_full(sq)) {
                dev_err(&sq->hwq.pdev->dev,
                        "QPLIB: prod = %#x cons = %#x qdepth = %#x delta = %#x",
                        sq->hwq.prod, sq->hwq.cons, sq->hwq.max_elements,
                        sq->q_full_delta);
                rc = -ENOMEM;
                goto done;
        }
        sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
        swq = &sq->swq[sw_prod];
        swq->wr_id = wqe->wr_id;
        swq->type = wqe->type;
        swq->flags = wqe->flags;
        if (qp->sig_type)
                swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;
        swq->start_psn = sq->psn & BTH_PSN_MASK;

        hw_sq_send_ptr = (struct sq_send **)sq->hwq.pbl_ptr;
        hw_sq_send_hdr = &hw_sq_send_ptr[get_sqe_pg(sw_prod)]
                                        [get_sqe_idx(sw_prod)];

        memset(hw_sq_send_hdr, 0, BNXT_QPLIB_MAX_SQE_ENTRY_SIZE);

        if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE) {
                /* Copy the inline data */
                if (wqe->inline_len > BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
                        dev_warn(&sq->hwq.pdev->dev,
                                 "QPLIB: Inline data length > 96 detected");
                        data_len = BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH;
                } else {
                        data_len = wqe->inline_len;
                }
                memcpy(hw_sq_send_hdr->data, wqe->inline_data, data_len);
                wqe_size16 = (data_len + 15) >> 4;
        } else {
                for (i = 0, hw_sge = (struct sq_sge *)hw_sq_send_hdr->data;
                     i < wqe->num_sge; i++, hw_sge++) {
                        hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
                        hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
                        hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
                        data_len += wqe->sg_list[i].size;
                }
                /* Each SGE entry = 1 WQE size16 */
                wqe_size16 = wqe->num_sge;
                /* HW requires the wqe size to have room for at least
                 * one SGE even if none was supplied by the ULP
                 */
1134                if (!wqe->num_sge)
1135                        wqe_size16++;
1136        }
1137
1138        /* Specifics */
1139        switch (wqe->type) {
1140        case BNXT_QPLIB_SWQE_TYPE_SEND:
1141                if (qp->type == CMDQ_CREATE_QP1_TYPE_GSI) {
1142                        /* Assemble info for Raw Ethertype QPs */
1143                        struct sq_send_raweth_qp1 *sqe =
1144                                (struct sq_send_raweth_qp1 *)hw_sq_send_hdr;
1145
1146                        sqe->wqe_type = wqe->type;
1147                        sqe->flags = wqe->flags;
1148                        sqe->wqe_size = wqe_size16 +
1149                                ((offsetof(typeof(*sqe), data) + 15) >> 4);
1150                        sqe->cfa_action = cpu_to_le16(wqe->rawqp1.cfa_action);
1151                        sqe->lflags = cpu_to_le16(wqe->rawqp1.lflags);
1152                        sqe->length = cpu_to_le32(data_len);
1153                        sqe->cfa_meta = cpu_to_le32((wqe->rawqp1.cfa_meta &
1154                                SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_MASK) <<
1155                                SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_SFT);
1156
1157                        break;
1158                }
1159                /* else, just fall thru */
1160        case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
1161        case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
1162        {
1163                struct sq_send *sqe = (struct sq_send *)hw_sq_send_hdr;
1164
1165                sqe->wqe_type = wqe->type;
1166                sqe->flags = wqe->flags;
1167                sqe->wqe_size = wqe_size16 +
1168                                ((offsetof(typeof(*sqe), data) + 15) >> 4);
1169                sqe->inv_key_or_imm_data = cpu_to_le32(
1170                                                wqe->send.inv_key);
1171                if (qp->type == CMDQ_CREATE_QP_TYPE_UD) {
1172                        sqe->q_key = cpu_to_le32(wqe->send.q_key);
1173                        sqe->dst_qp = cpu_to_le32(
1174                                        wqe->send.dst_qp & SQ_SEND_DST_QP_MASK);
1175                        sqe->length = cpu_to_le32(data_len);
1176                        sqe->avid = cpu_to_le32(wqe->send.avid &
1177                                                SQ_SEND_AVID_MASK);
1178                        sq->psn = (sq->psn + 1) & BTH_PSN_MASK;
1179                } else {
1180                        sqe->length = cpu_to_le32(data_len);
1181                        sqe->dst_qp = 0;
1182                        sqe->avid = 0;
1183                        if (qp->mtu)
1184                                pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1185                        if (!pkt_num)
1186                                pkt_num = 1;
1187                        sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1188                }
1189                break;
1190        }
1191        case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
1192        case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
1193        case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
1194        {
1195                struct sq_rdma *sqe = (struct sq_rdma *)hw_sq_send_hdr;
1196
1197                sqe->wqe_type = wqe->type;
1198                sqe->flags = wqe->flags;
1199                sqe->wqe_size = wqe_size16 +
1200                                ((offsetof(typeof(*sqe), data) + 15) >> 4);
1201                sqe->imm_data = cpu_to_le32(wqe->rdma.inv_key);
1202                sqe->length = cpu_to_le32((u32)data_len);
1203                sqe->remote_va = cpu_to_le64(wqe->rdma.remote_va);
1204                sqe->remote_key = cpu_to_le32(wqe->rdma.r_key);
1205                if (qp->mtu)
1206                        pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1207                if (!pkt_num)
1208                        pkt_num = 1;
1209                sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1210                break;
1211        }
1212        case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
1213        case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
1214        {
1215                struct sq_atomic *sqe = (struct sq_atomic *)hw_sq_send_hdr;
1216
1217                sqe->wqe_type = wqe->type;
1218                sqe->flags = wqe->flags;
1219                sqe->remote_key = cpu_to_le32(wqe->atomic.r_key);
1220                sqe->remote_va = cpu_to_le64(wqe->atomic.remote_va);
1221                sqe->swap_data = cpu_to_le64(wqe->atomic.swap_data);
1222                sqe->cmp_data = cpu_to_le64(wqe->atomic.cmp_data);
1223                if (qp->mtu)
1224                        pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1225                if (!pkt_num)
1226                        pkt_num = 1;
1227                sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1228                break;
1229        }
1230        case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
1231        {
1232                struct sq_localinvalidate *sqe =
1233                                (struct sq_localinvalidate *)hw_sq_send_hdr;
1234
1235                sqe->wqe_type = wqe->type;
1236                sqe->flags = wqe->flags;
1237                sqe->inv_l_key = cpu_to_le32(wqe->local_inv.inv_l_key);
1238
1239                break;
1240        }
1241        case BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR:
1242        {
1243                struct sq_fr_pmr *sqe = (struct sq_fr_pmr *)hw_sq_send_hdr;
1244
1245                sqe->wqe_type = wqe->type;
1246                sqe->flags = wqe->flags;
1247                sqe->access_cntl = wqe->frmr.access_cntl |
1248                                   SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
1249                sqe->zero_based_page_size_log =
1250                        (wqe->frmr.pg_sz_log & SQ_FR_PMR_PAGE_SIZE_LOG_MASK) <<
1251                        SQ_FR_PMR_PAGE_SIZE_LOG_SFT |
1252                        (wqe->frmr.zero_based ? SQ_FR_PMR_ZERO_BASED : 0);
1253                sqe->l_key = cpu_to_le32(wqe->frmr.l_key);
1254                temp32 = cpu_to_le32(wqe->frmr.length);
1255                memcpy(sqe->length, &temp32, sizeof(wqe->frmr.length));
1256                sqe->numlevels_pbl_page_size_log =
1257                        ((wqe->frmr.pbl_pg_sz_log <<
1258                                        SQ_FR_PMR_PBL_PAGE_SIZE_LOG_SFT) &
1259                                        SQ_FR_PMR_PBL_PAGE_SIZE_LOG_MASK) |
1260                        ((wqe->frmr.levels << SQ_FR_PMR_NUMLEVELS_SFT) &
1261                                        SQ_FR_PMR_NUMLEVELS_MASK);
1262
1263                for (i = 0; i < wqe->frmr.page_list_len; i++)
1264                        wqe->frmr.pbl_ptr[i] = cpu_to_le64(
1265                                                wqe->frmr.page_list[i] |
1266                                                PTU_PTE_VALID);
1267                sqe->pblptr = cpu_to_le64(wqe->frmr.pbl_dma_ptr);
1268                sqe->va = cpu_to_le64(wqe->frmr.va);
1269
1270                break;
1271        }
1272        case BNXT_QPLIB_SWQE_TYPE_BIND_MW:
1273        {
1274                struct sq_bind *sqe = (struct sq_bind *)hw_sq_send_hdr;
1275
1276                sqe->wqe_type = wqe->type;
1277                sqe->flags = wqe->flags;
1278                sqe->access_cntl = wqe->bind.access_cntl;
1279                sqe->mw_type_zero_based = wqe->bind.mw_type |
1280                        (wqe->bind.zero_based ? SQ_BIND_ZERO_BASED : 0);
1281                sqe->parent_l_key = cpu_to_le32(wqe->bind.parent_l_key);
1282                sqe->l_key = cpu_to_le32(wqe->bind.r_key);
1283                sqe->va = cpu_to_le64(wqe->bind.va);
1284                temp32 = cpu_to_le32(wqe->bind.length);
1285                memcpy(&sqe->length, &temp32, sizeof(wqe->bind.length));
1286                break;
1287        }
1288        default:
1289                /* Bad wqe, return error */
1290                rc = -EINVAL;
1291                goto done;
1292        }
1293        swq->next_psn = sq->psn & BTH_PSN_MASK;
1294        if (swq->psn_search) {
1295                swq->psn_search->opcode_start_psn = cpu_to_le32(
1296                        ((swq->start_psn << SQ_PSN_SEARCH_START_PSN_SFT) &
1297                         SQ_PSN_SEARCH_START_PSN_MASK) |
1298                        ((wqe->type << SQ_PSN_SEARCH_OPCODE_SFT) &
1299                         SQ_PSN_SEARCH_OPCODE_MASK));
1300                swq->psn_search->flags_next_psn = cpu_to_le32(
1301                        ((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
1302                         SQ_PSN_SEARCH_NEXT_PSN_MASK));
1303        }
1304
1305        sq->hwq.prod++;
1306
1307        qp->wqe_cnt++;
1308
1309done:
1310        return rc;
1311}
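
/*
 * Worked example for the PSN accounting above (illustrative numbers): with
 * qp->mtu = 1024 and data_len = 3000, pkt_num = (3000 + 1023) / 1024 = 3,
 * so sq->psn advances by 3. A zero-length WQE still consumes one PSN
 * because pkt_num is forced to 1. The PSN space is 24 bits wide, so the
 * advance wraps via BTH_PSN_MASK, e.g. (0xfffffe + 3) & BTH_PSN_MASK ==
 * 0x000001.
 */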
1312
1313void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp)
1314{
1315        struct bnxt_qplib_q *rq = &qp->rq;
1316        struct dbr_dbr db_msg = { 0 };
1317        u32 sw_prod;
1318
1319        sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
1320        db_msg.index = cpu_to_le32((sw_prod << DBR_DBR_INDEX_SFT) &
1321                                   DBR_DBR_INDEX_MASK);
1322        db_msg.type_xid =
1323                cpu_to_le32(((qp->id << DBR_DBR_XID_SFT) & DBR_DBR_XID_MASK) |
1324                            DBR_DBR_TYPE_RQ);
1325
1326        /* Flush the writes to the HW Rx WQE before ringing the Rx DB */
1327        wmb();
1328        __iowrite64_copy(qp->dpi->dbr, &db_msg, sizeof(db_msg) / sizeof(u64));
1329}
1330
1331int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
1332                         struct bnxt_qplib_swqe *wqe)
1333{
1334        struct bnxt_qplib_q *rq = &qp->rq;
1335        struct rq_wqe *rqe, **rqe_ptr;
1336        struct sq_sge *hw_sge;
1337        u32 sw_prod;
1338        int i, rc = 0;
1339
1340        if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1341                dev_err(&rq->hwq.pdev->dev,
1342                        "QPLIB: FP: QP (0x%x) is in state 0x%x",
1343                        qp->id, qp->state);
1344                rc = -EINVAL;
1345                goto done;
1346        }
1347        if (bnxt_qplib_queue_full(rq)) {
1348                dev_err(&rq->hwq.pdev->dev,
1349                        "QPLIB: FP: QP (0x%x) RQ is full!", qp->id);
1350                rc = -EINVAL;
1351                goto done;
1352        }
1353        sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
1354        rq->swq[sw_prod].wr_id = wqe->wr_id;
1355
1356        rqe_ptr = (struct rq_wqe **)rq->hwq.pbl_ptr;
1357        rqe = &rqe_ptr[RQE_PG(sw_prod)][RQE_IDX(sw_prod)];
1358
1359        memset(rqe, 0, BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
1360
1361        /* Calculate wqe_size16 and data_len */
1362        for (i = 0, hw_sge = (struct sq_sge *)rqe->data;
1363             i < wqe->num_sge; i++, hw_sge++) {
1364                hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
1365                hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
1366                hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
1367        }
1368        rqe->wqe_type = wqe->type;
1369        rqe->flags = wqe->flags;
1370        rqe->wqe_size = wqe->num_sge +
1371                        ((offsetof(typeof(*rqe), data) + 15) >> 4);
1372        /* HW requires the wqe size to have room for at least one SGE even if
1373         * none was supplied by the ULP
1374         */
1375        if (!wqe->num_sge)
1376                rqe->wqe_size++;
1377
1378        /* Store the sw_prod index in the RQE for wr_id lookup at completion */
1379        rqe->wr_id[0] = cpu_to_le32(sw_prod);
1380
1381        rq->hwq.prod++;
1382done:
1383        return rc;
1384}
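
/*
 * Minimal caller sketch (assumed usage, mirroring how a verbs layer would
 * drive this API; 'wqes' and 'n' are hypothetical): post each RQE, then
 * ring the doorbell once so the MMIO write is amortized over the batch.
 *
 *	int post_recv_batch(struct bnxt_qplib_qp *qp,
 *			    struct bnxt_qplib_swqe *wqes, int n)
 *	{
 *		int i, rc = 0;
 *
 *		for (i = 0; i < n; i++) {
 *			rc = bnxt_qplib_post_recv(qp, &wqes[i]);
 *			if (rc)
 *				break;
 *		}
 *		bnxt_qplib_post_recv_db(qp);
 *		return rc;
 *	}
 */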
1385
1386/* CQ */
1387
1388/* Spinlock must be held */
1389static void bnxt_qplib_arm_cq_enable(struct bnxt_qplib_cq *cq)
1390{
1391        struct dbr_dbr db_msg = { 0 };
1392
1393        db_msg.type_xid =
1394                cpu_to_le32(((cq->id << DBR_DBR_XID_SFT) & DBR_DBR_XID_MASK) |
1395                            DBR_DBR_TYPE_CQ_ARMENA);
1396        /* Flush memory writes before enabling the CQ */
1397        wmb();
1398        __iowrite64_copy(cq->dbr_base, &db_msg, sizeof(db_msg) / sizeof(u64));
1399}
1400
1401static void bnxt_qplib_arm_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
1402{
1403        struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
1404        struct dbr_dbr db_msg = { 0 };
1405        u32 sw_cons;
1406
1407        /* Ring DB */
1408        sw_cons = HWQ_CMP(cq_hwq->cons, cq_hwq);
1409        db_msg.index = cpu_to_le32((sw_cons << DBR_DBR_INDEX_SFT) &
1410                                    DBR_DBR_INDEX_MASK);
1411        db_msg.type_xid =
1412                cpu_to_le32(((cq->id << DBR_DBR_XID_SFT) & DBR_DBR_XID_MASK) |
1413                            arm_type);
1414        /* flush memory writes before arming the CQ */
1415        wmb();
1416        __iowrite64_copy(cq->dpi->dbr, &db_msg, sizeof(db_msg) / sizeof(u64));
1417}
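
/*
 * Doorbell layout example (illustrative values): for cq->id = 7 and a
 * consumer index of 10, the 64-bit doorbell message built above is
 *
 *	db_msg.index    = cpu_to_le32(10 << DBR_DBR_INDEX_SFT);
 *	db_msg.type_xid = cpu_to_le32((7 << DBR_DBR_XID_SFT) |
 *				      DBR_DBR_TYPE_CQ_ARMALL);
 *
 * The wmb() ensures the CQE ring updates are visible to the device before
 * the doorbell write arms the CQ.
 */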
1418
1419int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
1420{
1421        struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1422        struct cmdq_create_cq req;
1423        struct creq_create_cq_resp resp;
1424        struct bnxt_qplib_pbl *pbl;
1425        u16 cmd_flags = 0;
1426        int rc;
1427
1428        cq->hwq.max_elements = cq->max_wqe;
1429        rc = bnxt_qplib_alloc_init_hwq(res->pdev, &cq->hwq, cq->sghead,
1430                                       cq->nmap, &cq->hwq.max_elements,
1431                                       BNXT_QPLIB_MAX_CQE_ENTRY_SIZE, 0,
1432                                       PAGE_SIZE, HWQ_TYPE_QUEUE);
1433        if (rc)
1434                goto exit;
1435
1436        RCFW_CMD_PREP(req, CREATE_CQ, cmd_flags);
1437
1438        if (!cq->dpi) {
1439                dev_err(&rcfw->pdev->dev,
1440                        "QPLIB: FP: CREATE_CQ failed due to NULL DPI");
1441                rc = -EINVAL;
                goto fail;
1442        }
1443        req.dpi = cpu_to_le32(cq->dpi->dpi);
1444        req.cq_handle = cpu_to_le64(cq->cq_handle);
1445
1446        req.cq_size = cpu_to_le32(cq->hwq.max_elements);
1447        pbl = &cq->hwq.pbl[PBL_LVL_0];
1448        req.pg_size_lvl = cpu_to_le32(
1449            ((cq->hwq.level & CMDQ_CREATE_CQ_LVL_MASK) <<
1450                                                CMDQ_CREATE_CQ_LVL_SFT) |
1451            (pbl->pg_size == ROCE_PG_SIZE_4K ? CMDQ_CREATE_CQ_PG_SIZE_PG_4K :
1452             pbl->pg_size == ROCE_PG_SIZE_8K ? CMDQ_CREATE_CQ_PG_SIZE_PG_8K :
1453             pbl->pg_size == ROCE_PG_SIZE_64K ? CMDQ_CREATE_CQ_PG_SIZE_PG_64K :
1454             pbl->pg_size == ROCE_PG_SIZE_2M ? CMDQ_CREATE_CQ_PG_SIZE_PG_2M :
1455             pbl->pg_size == ROCE_PG_SIZE_8M ? CMDQ_CREATE_CQ_PG_SIZE_PG_8M :
1456             pbl->pg_size == ROCE_PG_SIZE_1G ? CMDQ_CREATE_CQ_PG_SIZE_PG_1G :
1457             CMDQ_CREATE_CQ_PG_SIZE_PG_4K));
1458
1459        req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
1460
1461        req.cq_fco_cnq_id = cpu_to_le32(
1462                        (cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) <<
1463                         CMDQ_CREATE_CQ_CNQ_ID_SFT);
1464
1465        rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
1466                                          (void *)&resp, NULL, 0);
1467        if (rc)
1468                goto fail;
1469
1470        cq->id = le32_to_cpu(resp.xid);
1471        cq->dbr_base = res->dpi_tbl.dbr_bar_reg_iomem;
1472        cq->period = BNXT_QPLIB_QUEUE_START_PERIOD;
1473        init_waitqueue_head(&cq->waitq);
1474
1475        bnxt_qplib_arm_cq_enable(cq);
1476        return 0;
1477
1478fail:
1479        bnxt_qplib_free_hwq(res->pdev, &cq->hwq);
1480exit:
1481        return rc;
1482}
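
/*
 * Sketch of the setup a caller is assumed to perform before calling
 * bnxt_qplib_create_cq() (field names are from the code above; the values
 * are hypothetical):
 *
 *	cq->max_wqe = 1024;		// requested CQ depth
 *	cq->dpi = dpi;			// doorbell page, must not be NULL
 *	cq->cnq_hw_ring_id = nq_id;	// completion notification ring
 *	cq->sghead = NULL;		// kernel CQ: no user sg list
 *	cq->nmap = 0;
 *	rc = bnxt_qplib_create_cq(res, cq);
 */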
1483
1484int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
1485{
1486        struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1487        struct cmdq_destroy_cq req;
1488        struct creq_destroy_cq_resp resp;
1489        u16 cmd_flags = 0;
1490        int rc;
1491
1492        RCFW_CMD_PREP(req, DESTROY_CQ, cmd_flags);
1493
1494        req.cq_cid = cpu_to_le32(cq->id);
1495        rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
1496                                          (void *)&resp, NULL, 0);
1497        if (rc)
1498                return rc;
1499        bnxt_qplib_free_hwq(res->pdev, &cq->hwq);
1500        return 0;
1501}
1502
1503static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp,
1504                      struct bnxt_qplib_cqe **pcqe, int *budget)
1505{
1506        u32 sw_prod, sw_cons;
1507        struct bnxt_qplib_cqe *cqe;
1508        int rc = 0;
1509
1510        /* Now complete all outstanding SQEs with FLUSHED_ERR */
1511        sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
1512        cqe = *pcqe;
1513        while (*budget) {
1514                sw_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
1515                if (sw_cons == sw_prod) {
1516                        sq->flush_in_progress = false;
1517                        break;
1518                }
1519                memset(cqe, 0, sizeof(*cqe));
1520                cqe->status = CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR;
1521                cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
1522                cqe->qp_handle = (u64)(unsigned long)qp;
1523                cqe->wr_id = sq->swq[sw_cons].wr_id;
1524                cqe->src_qp = qp->id;
1525                cqe->type = sq->swq[sw_cons].type;
1526                cqe++;
1527                (*budget)--;
1528                sq->hwq.cons++;
1529        }
1530        *pcqe = cqe;
1531        if (!(*budget) && HWQ_CMP(sq->hwq.cons, &sq->hwq) != sw_prod)
1532                /* Out of budget */
1533                rc = -EAGAIN;
1534
1535        return rc;
1536}
1537
1538static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
1539                      int opcode, struct bnxt_qplib_cqe **pcqe, int *budget)
1540{
1541        struct bnxt_qplib_cqe *cqe;
1542        u32 sw_prod, sw_cons;
1543        int rc = 0;
1544
1545        /* Flush the rest of the RQ */
1546        sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
1547        cqe = *pcqe;
1548        while (*budget) {
1549                sw_cons = HWQ_CMP(rq->hwq.cons, &rq->hwq);
1550                if (sw_cons == sw_prod)
1551                        break;
1552                memset(cqe, 0, sizeof(*cqe));
1553                cqe->status =
1554                    CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR;
1555                cqe->opcode = opcode;
1556                cqe->qp_handle = (unsigned long)qp;
1557                cqe->wr_id = rq->swq[sw_cons].wr_id;
1558                cqe++;
1559                (*budget)--;
1560                rq->hwq.cons++;
1561        }
1562        *pcqe = cqe;
1563        if (!*budget && HWQ_CMP(rq->hwq.cons, &rq->hwq) != sw_prod)
1564                /* Out of budget */
1565                rc = -EAGAIN;
1566
1567        return rc;
1568}
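
/*
 * Both flush helpers consume *budget and return -EAGAIN when the caller's
 * CQE array fills before the queue drains. Worked example: with 5
 * outstanding RQEs and *budget = 3, __flush_rq() fabricates 3 FLUSHED_ERR
 * CQEs, leaves rq->hwq.cons two entries short of rq->hwq.prod, and returns
 * -EAGAIN so the next poll picks up the remaining two.
 */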
1569
1570/* Note: SQEs are valid from sw_sq_cons up to cqe_sq_cons (exclusive)
1571 *       CQEs are tracked from sw_cq_cons to max_elements but valid only if VALID=1
1572 */
1573static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
1574                     u32 cq_cons, u32 sw_sq_cons, u32 cqe_sq_cons)
1575{
1576        struct bnxt_qplib_q *sq = &qp->sq;
1577        struct bnxt_qplib_swq *swq;
1578        u32 peek_sw_cq_cons, peek_raw_cq_cons, peek_sq_cons_idx;
1579        struct cq_base *peek_hwcqe, **peek_hw_cqe_ptr;
1580        struct cq_req *peek_req_hwcqe;
1581        struct bnxt_qplib_qp *peek_qp;
1582        struct bnxt_qplib_q *peek_sq;
1583        int i, rc = 0;
1584
1585        /* Normal mode */
1586        /* Check for the psn_search marking before completing */
1587        swq = &sq->swq[sw_sq_cons];
1588        if (swq->psn_search &&
1589            le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) {
1590                /* Unmark */
1591                swq->psn_search->flags_next_psn = cpu_to_le32(
1592                        le32_to_cpu(swq->psn_search->flags_next_psn) &
1593                        ~0x80000000);
1594                dev_dbg(&cq->hwq.pdev->dev,
1595                        "FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n",
1596                        cq_cons, qp->id, sw_sq_cons, cqe_sq_cons);
1597                sq->condition = true;
1598                sq->send_phantom = true;
1599
1600                /* TODO: Only ARM if the previous SQE is ARMALL */
1601                bnxt_qplib_arm_cq(cq, DBR_DBR_TYPE_CQ_ARMALL);
1602
1603                rc = -EAGAIN;
1604                goto out;
1605        }
1606        if (sq->condition) {
1607                /* Peek at the completions */
1608                peek_raw_cq_cons = cq->hwq.cons;
1609                peek_sw_cq_cons = cq_cons;
1610                i = cq->hwq.max_elements;
1611                while (i--) {
1612                        peek_sw_cq_cons = HWQ_CMP((peek_sw_cq_cons), &cq->hwq);
1613                        peek_hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
1614                        peek_hwcqe = &peek_hw_cqe_ptr[CQE_PG(peek_sw_cq_cons)]
1615                                                     [CQE_IDX(peek_sw_cq_cons)];
1616                        /* If the next hwcqe is VALID */
1617                        if (CQE_CMP_VALID(peek_hwcqe, peek_raw_cq_cons,
1618                                          cq->hwq.max_elements)) {
1619                                /* If the next hwcqe is a REQ */
1620                                if ((peek_hwcqe->cqe_type_toggle &
1621                                    CQ_BASE_CQE_TYPE_MASK) ==
1622                                    CQ_BASE_CQE_TYPE_REQ) {
1623                                        peek_req_hwcqe = (struct cq_req *)
1624                                                         peek_hwcqe;
1625                                        peek_qp = (struct bnxt_qplib_qp *)
1626                                                ((unsigned long)
1627                                                 le64_to_cpu
1628                                                 (peek_req_hwcqe->qp_handle));
1629                                        peek_sq = &peek_qp->sq;
1630                                        peek_sq_cons_idx = HWQ_CMP(le16_to_cpu(
1631                                                peek_req_hwcqe->sq_cons_idx) - 1,
1632                                                &sq->hwq);
1633                                        /* If the hwcqe's sq's wr_id matches */
1634                                        if (peek_sq == sq &&
1635                                            sq->swq[peek_sq_cons_idx].wr_id ==
1636                                            BNXT_QPLIB_FENCE_WRID) {
1637                                                /*
1638                                                 *  Unbreak only if the phantom
1639                                                 *  comes back
1640                                                 */
1641                                                dev_dbg(&cq->hwq.pdev->dev,
1642                                                        "FP:Got Phantom CQE");
1643                                                sq->condition = false;
1644                                                sq->single = true;
1645                                                rc = 0;
1646                                                goto out;
1647                                        }
1648                                }
1649                                /* Valid but not the phantom, so keep looping */
1650                        } else {
1651                                /* Not valid yet, just exit and wait */
1652                                rc = -EINVAL;
1653                                goto out;
1654                        }
1655                        peek_sw_cq_cons++;
1656                        peek_raw_cq_cons++;
1657                }
1658                dev_err(&cq->hwq.pdev->dev,
1659                        "Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x",
1660                        cq_cons, qp->id, sw_sq_cons, cqe_sq_cons);
1661                rc = -EINVAL;
1662        }
1663out:
1664        return rc;
1665}
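
/*
 * Summary of the sequence above (inferred from this file only): bit 31 of
 * flags_next_psn acts as a software marker on the psn_search entry, which
 * do_wa9060() clears once seen. A marked SWQE puts the SQ into 'condition'
 * mode, holding completions back with -EAGAIN until a phantom fence CQE
 * (an SWQE whose wr_id is BNXT_QPLIB_FENCE_WRID) is peeked in the CQ;
 * sq->single then limits the release to one completion before returning to
 * normal mode.
 */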
1666
1667static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
1668                                     struct cq_req *hwcqe,
1669                                     struct bnxt_qplib_cqe **pcqe, int *budget,
1670                                     u32 cq_cons, struct bnxt_qplib_qp **lib_qp)
1671{
1672        struct bnxt_qplib_qp *qp;
1673        struct bnxt_qplib_q *sq;
1674        struct bnxt_qplib_cqe *cqe;
1675        u32 sw_sq_cons, cqe_sq_cons;
1676        struct bnxt_qplib_swq *swq;
1677        int rc = 0;
1678
1679        qp = (struct bnxt_qplib_qp *)((unsigned long)
1680                                      le64_to_cpu(hwcqe->qp_handle));
1681        if (!qp) {
1682                dev_err(&cq->hwq.pdev->dev,
1683                        "QPLIB: FP: Process Req qp is NULL");
1684                return -EINVAL;
1685        }
1686        sq = &qp->sq;
1687
1688        cqe_sq_cons = HWQ_CMP(le16_to_cpu(hwcqe->sq_cons_idx), &sq->hwq);
1689        if (cqe_sq_cons > sq->hwq.max_elements) {
1690                dev_err(&cq->hwq.pdev->dev,
1691                        "QPLIB: FP: CQ Process req reported ");
1692                dev_err(&cq->hwq.pdev->dev,
1693                        "QPLIB: sq_cons_idx 0x%x which exceeded max 0x%x",
1694                        cqe_sq_cons, sq->hwq.max_elements);
1695                return -EINVAL;
1696        }
1697        /* If we were in the middle of flushing the SQ, continue */
1698        if (sq->flush_in_progress)
1699                goto flush;
1700
1701        /* We need to walk the sq's swq and fabricate CQEs for all previously
1702         * signaled SWQEs, because the HW aggregates CQEs, from the current
1703         * sq cons up to cqe_sq_cons
1704         */
1705        cqe = *pcqe;
1706        while (*budget) {
1707                sw_sq_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
1708                if (sw_sq_cons == cqe_sq_cons)
1709                        /* Done */
1710                        break;
1711
1712                swq = &sq->swq[sw_sq_cons];
1713                memset(cqe, 0, sizeof(*cqe));
1714                cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
1715                cqe->qp_handle = (u64)(unsigned long)qp;
1716                cqe->src_qp = qp->id;
1717                cqe->wr_id = swq->wr_id;
1718                if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID)
1719                        goto skip;
1720                cqe->type = swq->type;
1721
1722                /* For the last CQE, check the status.  On error, the request
1723                 * must complete with the hwcqe's error status regardless of
1724                 * whether it was signaled
1725                 */
1726                if (HWQ_CMP((sw_sq_cons + 1), &sq->hwq) == cqe_sq_cons &&
1727                    hwcqe->status != CQ_REQ_STATUS_OK) {
1728                        cqe->status = hwcqe->status;
1729                        dev_err(&cq->hwq.pdev->dev,
1730                                "QPLIB: FP: CQ Processed Req ");
1731                        dev_err(&cq->hwq.pdev->dev,
1732                                "QPLIB: wr_id[%d] = 0x%llx with status 0x%x",
1733                                sw_sq_cons, cqe->wr_id, cqe->status);
1734                        cqe++;
1735                        (*budget)--;
1736                        sq->flush_in_progress = true;
1737                        /* Must block new posting of SQ and RQ */
1738                        qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
1739                        sq->condition = false;
1740                        sq->single = false;
1741                } else {
1742                        if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
1743                                /* Before we complete, do WA 9060 */
1744                                if (do_wa9060(qp, cq, cq_cons, sw_sq_cons,
1745                                              cqe_sq_cons)) {
1746                                        *lib_qp = qp;
1747                                        goto out;
1748                                }
1749                                cqe->status = CQ_REQ_STATUS_OK;
1750                                cqe++;
1751                                (*budget)--;
1752                        }
1753                }
1754skip:
1755                sq->hwq.cons++;
1756                if (sq->single)
1757                        break;
1758        }
1759out:
1760        *pcqe = cqe;
1761        if (HWQ_CMP(sq->hwq.cons, &sq->hwq) != cqe_sq_cons) {
1762                /* Out of budget */
1763                rc = -EAGAIN;
1764                goto done;
1765        }
1766        /*
1767         * Return to normal completion mode only after all of the WCs for
1768         * this CQE have been completed
1769         */
1770        sq->single = false;
1771        if (!sq->flush_in_progress)
1772                goto done;
1773flush:
1774        /* We need to walk the sq's swq and fabricate flush CQEs for all
1775         * previously posted SWQEs, because an error CQE was received
1776         */
1777        rc = __flush_sq(sq, qp, pcqe, budget);
1778        if (!rc)
1779                sq->flush_in_progress = false;
1780done:
1781        return rc;
1782}
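
/*
 * Worked example of the CQE aggregation handled above: if sq->hwq.cons
 * maps to index 2 and the hardware reports sq_cons_idx = 6, the single REQ
 * CQE acknowledges SWQEs 2..5. The loop fabricates a completion for each
 * signaled SWQE in that range (unsignaled and fence SWQEs are retired
 * silently), and only the final one may carry an error status.
 */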
1783
1784static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
1785                                        struct cq_res_rc *hwcqe,
1786                                        struct bnxt_qplib_cqe **pcqe,
1787                                        int *budget)
1788{
1789        struct bnxt_qplib_qp *qp;
1790        struct bnxt_qplib_q *rq;
1791        struct bnxt_qplib_cqe *cqe;
1792        u32 wr_id_idx;
1793        int rc = 0;
1794
1795        qp = (struct bnxt_qplib_qp *)((unsigned long)
1796                                      le64_to_cpu(hwcqe->qp_handle));
1797        if (!qp) {
1798                dev_err(&cq->hwq.pdev->dev, "QPLIB: process_cq RC qp is NULL");
1799                return -EINVAL;
1800        }
1801        cqe = *pcqe;
1802        cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
1803        cqe->length = le32_to_cpu(hwcqe->length);
1804        cqe->invrkey = le32_to_cpu(hwcqe->imm_data_or_inv_r_key);
1805        cqe->mr_handle = le64_to_cpu(hwcqe->mr_handle);
1806        cqe->flags = le16_to_cpu(hwcqe->flags);
1807        cqe->status = hwcqe->status;
1808        cqe->qp_handle = (u64)(unsigned long)qp;
1809
1810        wr_id_idx = le32_to_cpu(hwcqe->srq_or_rq_wr_id) &
1811                                CQ_RES_RC_SRQ_OR_RQ_WR_ID_MASK;
1812        rq = &qp->rq;
1813        if (wr_id_idx >= rq->hwq.max_elements) {
1814                dev_err(&cq->hwq.pdev->dev, "QPLIB: FP: CQ Process RC ");
1815                dev_err(&cq->hwq.pdev->dev,
1816                        "QPLIB: wr_id idx 0x%x exceeded RQ max 0x%x",
1817                        wr_id_idx, rq->hwq.max_elements);
1818                return -EINVAL;
1819        }
1820        if (rq->flush_in_progress)
1821                goto flush_rq;
1822
1823        cqe->wr_id = rq->swq[wr_id_idx].wr_id;
1824        cqe++;
1825        (*budget)--;
1826        rq->hwq.cons++;
1827        *pcqe = cqe;
1828
1829        if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
1830                rq->flush_in_progress = true;
1831flush_rq:
1832                rc = __flush_rq(rq, qp, CQ_BASE_CQE_TYPE_RES_RC, pcqe, budget);
1833                if (!rc)
1834                        rq->flush_in_progress = false;
1835        }
1836        return rc;
1837}
1838
1839static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
1840                                        struct cq_res_ud *hwcqe,
1841                                        struct bnxt_qplib_cqe **pcqe,
1842                                        int *budget)
1843{
1844        struct bnxt_qplib_qp *qp;
1845        struct bnxt_qplib_q *rq;
1846        struct bnxt_qplib_cqe *cqe;
1847        u32 wr_id_idx;
1848        int rc = 0;
1849
1850        qp = (struct bnxt_qplib_qp *)((unsigned long)
1851                                      le64_to_cpu(hwcqe->qp_handle));
1852        if (!qp) {
1853                dev_err(&cq->hwq.pdev->dev, "QPLIB: process_cq UD qp is NULL");
1854                return -EINVAL;
1855        }
1856        cqe = *pcqe;
1857        cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
1858        cqe->length = le32_to_cpu(hwcqe->length);
1859        cqe->invrkey = le32_to_cpu(hwcqe->imm_data);
1860        cqe->flags = le16_to_cpu(hwcqe->flags);
1861        cqe->status = hwcqe->status;
1862        cqe->qp_handle = (u64)(unsigned long)qp;
1863        memcpy(cqe->smac, hwcqe->src_mac, 6);
1864        wr_id_idx = le32_to_cpu(hwcqe->src_qp_high_srq_or_rq_wr_id)
1865                                & CQ_RES_UD_SRQ_OR_RQ_WR_ID_MASK;
1866        cqe->src_qp = le16_to_cpu(hwcqe->src_qp_low) |
1867                                  ((le32_to_cpu(
1868                                  hwcqe->src_qp_high_srq_or_rq_wr_id) &
1869                                 CQ_RES_UD_SRC_QP_HIGH_MASK) >> 8);
1870
1871        rq = &qp->rq;
1872        if (wr_id_idx >= rq->hwq.max_elements) {
1873                dev_err(&cq->hwq.pdev->dev, "QPLIB: FP: CQ Process UD ");
1874                dev_err(&cq->hwq.pdev->dev,
1875                        "QPLIB: wr_id idx %#x exceeded RQ max %#x",
1876                        wr_id_idx, rq->hwq.max_elements);
1877                return -EINVAL;
1878        }
1879        if (rq->flush_in_progress)
1880                goto flush_rq;
1881
1882        cqe->wr_id = rq->swq[wr_id_idx].wr_id;
1883        cqe++;
1884        (*budget)--;
1885        rq->hwq.cons++;
1886        *pcqe = cqe;
1887
1888        if (hwcqe->status != CQ_RES_UD_STATUS_OK) {
1889                rq->flush_in_progress = true;
1890flush_rq:
1891                rc = __flush_rq(rq, qp, CQ_BASE_CQE_TYPE_RES_UD, pcqe, budget);
1892                if (!rc)
1893                        rq->flush_in_progress = false;
1894        }
1895        return rc;
1896}
1897
1898bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
1899{
1900        struct cq_base *hw_cqe, **hw_cqe_ptr;
1901        unsigned long flags;
1902        u32 sw_cons, raw_cons;
1903        bool rc = true;
1904
1905        spin_lock_irqsave(&cq->hwq.lock, flags);
1906        raw_cons = cq->hwq.cons;
1907        sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
1908        hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
1909        hw_cqe = &hw_cqe_ptr[CQE_PG(sw_cons)][CQE_IDX(sw_cons)];
1910
1911        /* Check for Valid bit. If the CQE is valid, return false */
1912        rc = !CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements);
1913        spin_unlock_irqrestore(&cq->hwq.lock, flags);
1914        return rc;
1915}
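
/*
 * The emptiness test relies on a phase (toggle) scheme rather than a
 * cons == prod comparison: CQE_CMP_VALID() matches the toggle bit in
 * cqe_type_toggle against the phase bit derived from raw_cons and the ring
 * size. Illustration for a 64-entry CQ (assuming the usual power-of-two
 * ring): on the first pass valid CQEs carry toggle = 1; once raw_cons
 * crosses 64 the expected toggle flips to 0, so stale first-pass entries
 * no longer match.
 */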
1916
1917static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
1918                                                struct cq_res_raweth_qp1 *hwcqe,
1919                                                struct bnxt_qplib_cqe **pcqe,
1920                                                int *budget)
1921{
1922        struct bnxt_qplib_qp *qp;
1923        struct bnxt_qplib_q *rq;
1924        struct bnxt_qplib_cqe *cqe;
1925        u32 wr_id_idx;
1926        int rc = 0;
1927
1928        qp = (struct bnxt_qplib_qp *)((unsigned long)
1929                                      le64_to_cpu(hwcqe->qp_handle));
1930        if (!qp) {
1931                dev_err(&cq->hwq.pdev->dev,
1932                        "QPLIB: process_cq Raw/QP1 qp is NULL");
1933                return -EINVAL;
1934        }
1935        cqe = *pcqe;
1936        cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
1937        cqe->flags = le16_to_cpu(hwcqe->flags);
1938        cqe->qp_handle = (u64)(unsigned long)qp;
1939
1940        wr_id_idx =
1941                le32_to_cpu(hwcqe->raweth_qp1_payload_offset_srq_or_rq_wr_id)
1942                                & CQ_RES_RAWETH_QP1_SRQ_OR_RQ_WR_ID_MASK;
1943        cqe->src_qp = qp->id;
1944        if (qp->id == 1 && !le16_to_cpu(hwcqe->length)) {
1945                /* Workaround for a HW length misdetection on QP1 */
1946                cqe->length = 296;
1947        } else {
1948                cqe->length = le16_to_cpu(hwcqe->length);
1949        }
1950        cqe->pkey_index = qp->pkey_index;
1951        memcpy(cqe->smac, qp->smac, 6);
1952
1953        cqe->raweth_qp1_flags = le16_to_cpu(hwcqe->raweth_qp1_flags);
1954        cqe->raweth_qp1_flags2 = le32_to_cpu(hwcqe->raweth_qp1_flags2);
1955
1956        rq = &qp->rq;
1957        if (wr_id_idx >= rq->hwq.max_elements) {
1958                dev_err(&cq->hwq.pdev->dev, "QPLIB: FP: CQ Process Raw/QP1 RQ wr_id ");
1959                dev_err(&cq->hwq.pdev->dev, "QPLIB: idx 0x%x exceeded RQ max 0x%x",
1960                        wr_id_idx, rq->hwq.max_elements);
1961                return -EINVAL;
1962        }
1963        if (rq->flush_in_progress)
1964                goto flush_rq;
1965
1966        cqe->wr_id = rq->swq[wr_id_idx].wr_id;
1967        cqe++;
1968        (*budget)--;
1969        rq->hwq.cons++;
1970        *pcqe = cqe;
1971
1972        if (hwcqe->status != CQ_RES_RAWETH_QP1_STATUS_OK) {
1973                rq->flush_in_progress = true;
1974flush_rq:
1975                rc = __flush_rq(rq, qp, CQ_BASE_CQE_TYPE_RES_RAWETH_QP1, pcqe,
1976                                budget);
1977                if (!rc)
1978                        rq->flush_in_progress = false;
1979        }
1980        return rc;
1981}
1982
1983static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
1984                                          struct cq_terminal *hwcqe,
1985                                          struct bnxt_qplib_cqe **pcqe,
1986                                          int *budget)
1987{
1988        struct bnxt_qplib_qp *qp;
1989        struct bnxt_qplib_q *sq, *rq;
1990        struct bnxt_qplib_cqe *cqe;
1991        u32 sw_cons = 0, cqe_cons;
1992        int rc = 0;
1993        u8 opcode = 0;
1994
1995        /* Check the Status */
1996        if (hwcqe->status != CQ_TERMINAL_STATUS_OK)
1997                dev_warn(&cq->hwq.pdev->dev,
1998                         "QPLIB: FP: CQ Process Terminal Error status = 0x%x",
1999                         hwcqe->status);
2000
2001        qp = (struct bnxt_qplib_qp *)((unsigned long)
2002                                      le64_to_cpu(hwcqe->qp_handle));
2003        if (!qp) {
2004                dev_err(&cq->hwq.pdev->dev,
2005                        "QPLIB: FP: CQ Process terminal qp is NULL");
2006                return -EINVAL;
2007        }
2008        /* Must block new posting of SQ and RQ */
2009        qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2010
2011        sq = &qp->sq;
2012        rq = &qp->rq;
2013
2014        cqe_cons = le16_to_cpu(hwcqe->sq_cons_idx);
        /* 0xFFFF is a sentinel: no SQ consumer index to process; move to the RQ */
2015        if (cqe_cons == 0xFFFF)
2016                goto do_rq;
2017
2018        if (cqe_cons >= sq->hwq.max_elements) {
2019                dev_err(&cq->hwq.pdev->dev,
2020                        "QPLIB: FP: CQ Process terminal reported ");
2021                dev_err(&cq->hwq.pdev->dev,
2022                        "QPLIB: sq_cons_idx 0x%x which exceeded max 0x%x",
2023                        cqe_cons, sq->hwq.max_elements);
2024                goto do_rq;
2025        }
2026        /* If we were in the middle of flushing, continue */
2027        if (sq->flush_in_progress)
2028                goto flush_sq;
2029
2030        /* A terminal CQE can also aggregate previously successful CQEs, so
2031         * we must complete all SWQEs from the current sq cons up to cqe_cons
2032         * with status OK
2033         */
2034        cqe = *pcqe;
2035        while (*budget) {
2036                sw_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
2037                if (sw_cons == cqe_cons)
2038                        break;
2039                if (sq->swq[sw_cons].flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
2040                        memset(cqe, 0, sizeof(*cqe));
2041                        cqe->status = CQ_REQ_STATUS_OK;
2042                        cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2043                        cqe->qp_handle = (u64)(unsigned long)qp;
2044                        cqe->src_qp = qp->id;
2045                        cqe->wr_id = sq->swq[sw_cons].wr_id;
2046                        cqe->type = sq->swq[sw_cons].type;
2047                        cqe++;
2048                        (*budget)--;
2049                }
2050                sq->hwq.cons++;
2051        }
2052        *pcqe = cqe;
2053        if (!(*budget) && sw_cons != cqe_cons) {
2054                /* Out of budget */
2055                rc = -EAGAIN;
2056                goto sq_done;
2057        }
2058        sq->flush_in_progress = true;
2059flush_sq:
2060        rc = __flush_sq(sq, qp, pcqe, budget);
2061        if (!rc)
2062                sq->flush_in_progress = false;
2063sq_done:
2064        if (rc)
2065                return rc;
2066do_rq:
2067        cqe_cons = le16_to_cpu(hwcqe->rq_cons_idx);
2068        if (cqe_cons == 0xFFFF) {
2069                goto done;
2070        } else if (cqe_cons >= rq->hwq.max_elements) {
2071                dev_err(&cq->hwq.pdev->dev,
2072                        "QPLIB: FP: CQ Processed terminal ");
2073                dev_err(&cq->hwq.pdev->dev,
2074                        "QPLIB: reported rq_cons_idx 0x%x exceeds max 0x%x",
2075                        cqe_cons, rq->hwq.max_elements);
2076                goto done;
2077        }
2078        /* A terminal CQE requires all posted RQEs, from the current rq->cons
2079         * to the rq->prod, to complete with FLUSHED_ERR regardless of the
2080         * rq_cons_idx the terminal CQE indicates
2081         */
2082        rq->flush_in_progress = true;
2083        switch (qp->type) {
2084        case CMDQ_CREATE_QP1_TYPE_GSI:
2085                opcode = CQ_BASE_CQE_TYPE_RES_RAWETH_QP1;
2086                break;
2087        case CMDQ_CREATE_QP_TYPE_RC:
2088                opcode = CQ_BASE_CQE_TYPE_RES_RC;
2089                break;
2090        case CMDQ_CREATE_QP_TYPE_UD:
2091                opcode = CQ_BASE_CQE_TYPE_RES_UD;
2092                break;
2093        }
2094
2095        rc = __flush_rq(rq, qp, opcode, pcqe, budget);
2096        if (!rc)
2097                rq->flush_in_progress = false;
2098done:
2099        return rc;
2100}
2101
2102static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
2103                                        struct cq_cutoff *hwcqe)
2104{
2105        /* Check the Status */
2106        if (hwcqe->status != CQ_CUTOFF_STATUS_OK) {
2107                dev_err(&cq->hwq.pdev->dev,
2108                        "QPLIB: FP: CQ Process Cutoff Error status = 0x%x",
2109                        hwcqe->status);
2110                return -EINVAL;
2111        }
2112        clear_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags);
2113        wake_up_interruptible(&cq->waitq);
2114
2115        return 0;
2116}
2117
2118int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
2119                       int num_cqes, struct bnxt_qplib_qp **lib_qp)
2120{
2121        struct cq_base *hw_cqe, **hw_cqe_ptr;
2122        unsigned long flags;
2123        u32 sw_cons, raw_cons;
2124        int budget, rc = 0;
2125
2126        spin_lock_irqsave(&cq->hwq.lock, flags);
2127        raw_cons = cq->hwq.cons;
2128        budget = num_cqes;
2129
2130        while (budget) {
2131                sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
2132                hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
2133                hw_cqe = &hw_cqe_ptr[CQE_PG(sw_cons)][CQE_IDX(sw_cons)];
2134
2135                /* Check for Valid bit */
2136                if (!CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements))
2137                        break;
2138
2139                /* Translate from the device's CQE format to the qplib_wc format */
2140                switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
2141                case CQ_BASE_CQE_TYPE_REQ:
2142                        rc = bnxt_qplib_cq_process_req(cq,
2143                                                       (struct cq_req *)hw_cqe,
2144                                                       &cqe, &budget,
2145                                                       sw_cons, lib_qp);
2146                        break;
2147                case CQ_BASE_CQE_TYPE_RES_RC:
2148                        rc = bnxt_qplib_cq_process_res_rc(cq,
2149                                                          (struct cq_res_rc *)
2150                                                          hw_cqe, &cqe,
2151                                                          &budget);
2152                        break;
2153                case CQ_BASE_CQE_TYPE_RES_UD:
2154                        rc = bnxt_qplib_cq_process_res_ud
2155                                        (cq, (struct cq_res_ud *)hw_cqe, &cqe,
2156                                         &budget);
2157                        break;
2158                case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
2159                        rc = bnxt_qplib_cq_process_res_raweth_qp1
2160                                        (cq, (struct cq_res_raweth_qp1 *)
2161                                         hw_cqe, &cqe, &budget);
2162                        break;
2163                case CQ_BASE_CQE_TYPE_TERMINAL:
2164                        rc = bnxt_qplib_cq_process_terminal
2165                                        (cq, (struct cq_terminal *)hw_cqe,
2166                                         &cqe, &budget);
2167                        break;
2168                case CQ_BASE_CQE_TYPE_CUT_OFF:
2169                        bnxt_qplib_cq_process_cutoff
2170                                        (cq, (struct cq_cutoff *)hw_cqe);
2171                        /* Done processing this CQ */
2172                        goto exit;
2173                default:
2174                        dev_err(&cq->hwq.pdev->dev,
2175                                "QPLIB: process_cq unknown type 0x%lx",
2176                                hw_cqe->cqe_type_toggle &
2177                                CQ_BASE_CQE_TYPE_MASK);
2178                        rc = -EINVAL;
2179                        break;
2180                }
2181                if (rc < 0) {
2182                        if (rc == -EAGAIN)
2183                                break;
2184                        /* Error while processing the CQE, just skip to the
2185                         * next one
2186                         */
2187                        dev_err(&cq->hwq.pdev->dev,
2188                                "QPLIB: process_cqe error rc = 0x%x", rc);
2189                }
2190                raw_cons++;
2191        }
2192        if (cq->hwq.cons != raw_cons) {
2193                cq->hwq.cons = raw_cons;
2194                bnxt_qplib_arm_cq(cq, DBR_DBR_TYPE_CQ);
2195        }
2196exit:
2197        spin_unlock_irqrestore(&cq->hwq.lock, flags);
2198        return num_cqes - budget;
2199}
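
/*
 * Assumed polling pattern on the consumer side (a sketch, not code from
 * this file; 'wc', 'budget' and 'i' are hypothetical): the return value is
 * the number of CQEs written to the caller's array, and *lib_qp is set
 * when a QP hit the WA-9060 hold-back so the caller can post the phantom
 * send.
 *
 *	struct bnxt_qplib_qp *lib_qp = NULL;
 *	int i, n = bnxt_qplib_poll_cq(cq, wc, budget, &lib_qp);
 *
 *	if (lib_qp)
 *		post_phantom_wqe(lib_qp);	// hypothetical helper
 *	for (i = 0; i < n; i++)
 *		convert_to_ib_wc(&wc[i]);	// hypothetical helper
 */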
2200
2201void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
2202{
2203        unsigned long flags;
2204
2205        spin_lock_irqsave(&cq->hwq.lock, flags);
2206        if (arm_type)
2207                bnxt_qplib_arm_cq(cq, arm_type);
2208
2209        spin_unlock_irqrestore(&cq->hwq.lock, flags);
2210}
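
/*
 * Expected mapping from an IB verbs notify request (a sketch of the
 * assumed caller, not code from this file):
 *
 *	u32 type = (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
 *		   DBR_DBR_TYPE_CQ_ARMSE : DBR_DBR_TYPE_CQ_ARMALL;
 *	bnxt_qplib_req_notify_cq(cq, type);
 */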
2211