linux/drivers/infiniband/hw/cxgb4/cq.c
/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "iw_cxgb4.h"

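/*
 * Note on the create/destroy mechanics below: both operations post a
 * FW_RI_RES_WR work request on the control txq and sleep on a
 * c4iw_wr_wait object until the firmware replies; the wait object
 * pointer rides in the WR cookie.
 */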
static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
                      struct c4iw_dev_ucontext *uctx, struct sk_buff *skb,
                      struct c4iw_wr_wait *wr_waitp)
{
        struct fw_ri_res_wr *res_wr;
        struct fw_ri_res *res;
        int wr_len;
        int ret;

        wr_len = sizeof *res_wr + sizeof *res;
        set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

        res_wr = __skb_put_zero(skb, wr_len);
        res_wr->op_nres = cpu_to_be32(
                        FW_WR_OP_V(FW_RI_RES_WR) |
                        FW_RI_RES_WR_NRES_V(1) |
                        FW_WR_COMPL_F);
        res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
        res_wr->cookie = (uintptr_t)wr_waitp;
        res = res_wr->res;
        res->u.cq.restype = FW_RI_RES_TYPE_CQ;
        res->u.cq.op = FW_RI_RES_OP_RESET;
        res->u.cq.iqid = cpu_to_be32(cq->cqid);

        c4iw_init_wr_wait(wr_waitp);
        ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0, __func__);

        kfree(cq->sw_queue);
        dma_free_coherent(&(rdev->lldi.pdev->dev),
                          cq->memsize, cq->queue,
                          dma_unmap_addr(cq, mapping));
        c4iw_put_cqid(rdev, cq->cqid, uctx);
        return ret;
}

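/*
 * create_cq allocates a hardware cqid, a software shadow queue for
 * kernel-mode CQs, and the DMA-coherent queue memory, then issues a
 * FW_RI_RES_WR with op WRITE so the firmware instantiates the ingress
 * queue.  32B CQEs are only used for user CQs that negotiated them
 * (ucontext->is_32b_cqe).
 */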
static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
                     struct c4iw_dev_ucontext *uctx,
                     struct c4iw_wr_wait *wr_waitp)
{
        struct fw_ri_res_wr *res_wr;
        struct fw_ri_res *res;
        int wr_len;
        int user = (uctx != &rdev->uctx);
        int ret;
        struct sk_buff *skb;
        struct c4iw_ucontext *ucontext = NULL;

        if (user)
                ucontext = container_of(uctx, struct c4iw_ucontext, uctx);

        cq->cqid = c4iw_get_cqid(rdev, uctx);
        if (!cq->cqid) {
                ret = -ENOMEM;
                goto err1;
        }

        if (!user) {
                cq->sw_queue = kzalloc(cq->memsize, GFP_KERNEL);
                if (!cq->sw_queue) {
                        ret = -ENOMEM;
                        goto err2;
                }
        }
        cq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, cq->memsize,
                                       &cq->dma_addr, GFP_KERNEL);
        if (!cq->queue) {
                ret = -ENOMEM;
                goto err3;
        }
        dma_unmap_addr_set(cq, mapping, cq->dma_addr);
        memset(cq->queue, 0, cq->memsize);

        if (user && ucontext->is_32b_cqe) {
                cq->qp_errp = &((struct t4_status_page *)
                ((u8 *)cq->queue + (cq->size - 1) *
                 (sizeof(*cq->queue) / 2)))->qp_err;
        } else {
                cq->qp_errp = &((struct t4_status_page *)
                ((u8 *)cq->queue + (cq->size - 1) *
                 sizeof(*cq->queue)))->qp_err;
        }

        /* build fw_ri_res_wr */
        wr_len = sizeof *res_wr + sizeof *res;

        skb = alloc_skb(wr_len, GFP_KERNEL);
        if (!skb) {
                ret = -ENOMEM;
                goto err4;
        }
        set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

        res_wr = __skb_put_zero(skb, wr_len);
        res_wr->op_nres = cpu_to_be32(
                        FW_WR_OP_V(FW_RI_RES_WR) |
                        FW_RI_RES_WR_NRES_V(1) |
                        FW_WR_COMPL_F);
        res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
        res_wr->cookie = (uintptr_t)wr_waitp;
        res = res_wr->res;
        res->u.cq.restype = FW_RI_RES_TYPE_CQ;
        res->u.cq.op = FW_RI_RES_OP_WRITE;
        res->u.cq.iqid = cpu_to_be32(cq->cqid);
        res->u.cq.iqandst_to_iqandstindex = cpu_to_be32(
                        FW_RI_RES_WR_IQANUS_V(0) |
                        FW_RI_RES_WR_IQANUD_V(1) |
                        FW_RI_RES_WR_IQANDST_F |
                        FW_RI_RES_WR_IQANDSTINDEX_V(
                                rdev->lldi.ciq_ids[cq->vector]));
        res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(
                        FW_RI_RES_WR_IQDROPRSS_F |
                        FW_RI_RES_WR_IQPCIECH_V(2) |
                        FW_RI_RES_WR_IQINTCNTTHRESH_V(0) |
                        FW_RI_RES_WR_IQO_F |
                        ((user && ucontext->is_32b_cqe) ?
                         FW_RI_RES_WR_IQESIZE_V(1) :
                         FW_RI_RES_WR_IQESIZE_V(2)));
        res->u.cq.iqsize = cpu_to_be16(cq->size);
        res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);

        c4iw_init_wr_wait(wr_waitp);
        ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0, __func__);
        if (ret)
                goto err4;

        cq->gen = 1;
        cq->gts = rdev->lldi.gts_reg;
        cq->rdev = rdev;

        cq->bar2_va = c4iw_bar2_addrs(rdev, cq->cqid, CXGB4_BAR2_QTYPE_INGRESS,
                                      &cq->bar2_qid,
                                      user ? &cq->bar2_pa : NULL);
        if (user && !cq->bar2_pa) {
                pr_warn("%s: cqid %u not in BAR2 range\n",
                        pci_name(rdev->lldi.pdev), cq->cqid);
                ret = -EINVAL;
                goto err4;
        }
        return 0;
err4:
        dma_free_coherent(&rdev->lldi.pdev->dev, cq->memsize, cq->queue,
                          dma_unmap_addr(cq, mapping));
err3:
        kfree(cq->sw_queue);
err2:
        c4iw_put_cqid(rdev, cq->cqid, uctx);
err1:
        return ret;
}

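/*
 * Flush support: when a QP moves to error, outstanding WRs are
 * completed in software.  insert_recv_cqe()/insert_sq_cqe() fabricate
 * CQEs with status T4_ERR_SWFLUSH and push them onto the CQ's
 * software queue, where poll_cq() finds them via the SW_CQE bit.
 */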
static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq, u32 srqidx)
{
        struct t4_cqe cqe;

        pr_debug("wq %p cq %p sw_cidx %u sw_pidx %u\n",
                 wq, cq, cq->sw_cidx, cq->sw_pidx);
        memset(&cqe, 0, sizeof(cqe));
        cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
                                 CQE_OPCODE_V(FW_RI_SEND) |
                                 CQE_TYPE_V(0) |
                                 CQE_SWCQE_V(1) |
                                 CQE_QPID_V(wq->sq.qid));
        cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
        if (srqidx)
                cqe.u.srcqe.abs_rqe_idx = cpu_to_be32(srqidx);
        cq->sw_queue[cq->sw_pidx] = cqe;
        t4_swcq_produce(cq);
}

int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count)
{
        int flushed = 0;
        int in_use = wq->rq.in_use - count;

        pr_debug("wq %p cq %p rq.in_use %u skip count %u\n",
                 wq, cq, wq->rq.in_use, count);
        while (in_use--) {
                insert_recv_cqe(wq, cq, 0);
                flushed++;
        }
        return flushed;
}

static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
                          struct t4_swsqe *swcqe)
{
        struct t4_cqe cqe;

        pr_debug("wq %p cq %p sw_cidx %u sw_pidx %u\n",
                 wq, cq, cq->sw_cidx, cq->sw_pidx);
        memset(&cqe, 0, sizeof(cqe));
        cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
                                 CQE_OPCODE_V(swcqe->opcode) |
                                 CQE_TYPE_V(1) |
                                 CQE_SWCQE_V(1) |
                                 CQE_QPID_V(wq->sq.qid));
        CQE_WRID_SQ_IDX(&cqe) = swcqe->idx;
        cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
        cq->sw_queue[cq->sw_pidx] = cqe;
        t4_swcq_produce(cq);
}

static void advance_oldest_read(struct t4_wq *wq);

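/*
 * Walk the SQ from flush_cidx to pidx, inserting a software flush CQE
 * for each pending WR and advancing the oldest-read tracker when a
 * flushed WR is the oldest outstanding read.
 */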
int c4iw_flush_sq(struct c4iw_qp *qhp)
{
        int flushed = 0;
        struct t4_wq *wq = &qhp->wq;
        struct c4iw_cq *chp = to_c4iw_cq(qhp->ibqp.send_cq);
        struct t4_cq *cq = &chp->cq;
        int idx;
        struct t4_swsqe *swsqe;

        if (wq->sq.flush_cidx == -1)
                wq->sq.flush_cidx = wq->sq.cidx;
        idx = wq->sq.flush_cidx;
        while (idx != wq->sq.pidx) {
                swsqe = &wq->sq.sw_sq[idx];
                swsqe->flushed = 1;
                insert_sq_cqe(wq, cq, swsqe);
                if (wq->sq.oldest_read == swsqe)
                        advance_oldest_read(wq);
                flushed++;
                if (++idx == wq->sq.size)
                        idx = 0;
        }
        wq->sq.flush_cidx += flushed;
        if (wq->sq.flush_cidx >= wq->sq.size)
                wq->sq.flush_cidx -= wq->sq.size;
        return flushed;
}

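/*
 * Move signaled SQ WRs that have already completed (but were held
 * back behind earlier unsignaled WRs) from the software SQ into the
 * software CQ, now that they are in order.
 */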
static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
{
        struct t4_swsqe *swsqe;
        int cidx;

        if (wq->sq.flush_cidx == -1)
                wq->sq.flush_cidx = wq->sq.cidx;
        cidx = wq->sq.flush_cidx;

        while (cidx != wq->sq.pidx) {
                swsqe = &wq->sq.sw_sq[cidx];
                if (!swsqe->signaled) {
                        if (++cidx == wq->sq.size)
                                cidx = 0;
                } else if (swsqe->complete) {

                        /*
                         * Insert this completed cqe into the swcq.
                         */
                        pr_debug("moving cqe into swcq sq idx %u cq idx %u\n",
                                 cidx, cq->sw_pidx);
                        swsqe->cqe.header |= htonl(CQE_SWCQE_V(1));
                        cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
                        t4_swcq_produce(cq);
                        swsqe->flushed = 1;
                        if (++cidx == wq->sq.size)
                                cidx = 0;
                        wq->sq.flush_cidx = cidx;
                } else
                        break;
        }
}

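/*
 * Build a synthetic SQ-type READ_REQ CQE from an inbound read
 * response CQE, pulling the SQ index and length from the oldest
 * outstanding read WR (the hardware CQE does not carry them).
 */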
static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe,
                struct t4_cqe *read_cqe)
{
        read_cqe->u.scqe.cidx = wq->sq.oldest_read->idx;
        read_cqe->len = htonl(wq->sq.oldest_read->read_len);
        read_cqe->header = htonl(CQE_QPID_V(CQE_QPID(hw_cqe)) |
                        CQE_SWCQE_V(SW_CQE(hw_cqe)) |
                        CQE_OPCODE_V(FW_RI_READ_REQ) |
                        CQE_TYPE_V(1));
        read_cqe->bits_type_ts = hw_cqe->bits_type_ts;
}

static void advance_oldest_read(struct t4_wq *wq)
{
        u32 rptr = wq->sq.oldest_read - wq->sq.sw_sq + 1;

        if (rptr == wq->sq.size)
                rptr = 0;
        while (rptr != wq->sq.pidx) {
                wq->sq.oldest_read = &wq->sq.sw_sq[rptr];

                if (wq->sq.oldest_read->opcode == FW_RI_READ_REQ)
                        return;
                if (++rptr == wq->sq.size)
                        rptr = 0;
        }
        wq->sq.oldest_read = NULL;
}

/*
 * Move all CQEs from the HWCQ into the SWCQ.
 * Deal with out-of-order completions and with completions that
 * retire prior unsignaled WRs.
 */
void c4iw_flush_hw_cq(struct c4iw_cq *chp, struct c4iw_qp *flush_qhp)
{
        struct t4_cqe *hw_cqe, *swcqe, read_cqe;
        struct c4iw_qp *qhp;
        struct t4_swsqe *swsqe;
        int ret;

        pr_debug("cqid 0x%x\n", chp->cq.cqid);
        ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);

        /*
         * This logic is similar to poll_cq(), but not quite the same
         * unfortunately.  Need to move pertinent HW CQEs to the SW CQ but
         * also do any translation magic that poll_cq() normally does.
         */
        while (!ret) {
                qhp = get_qhp(chp->rhp, CQE_QPID(hw_cqe));

                /*
                 * drop CQEs with no associated QP
                 */
                if (qhp == NULL)
                        goto next_cqe;

                if (flush_qhp != qhp) {
                        spin_lock(&qhp->lock);

                        if (qhp->wq.flushed == 1)
                                goto next_cqe;
                }

                if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE)
                        goto next_cqe;

                if (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP) {

                        /* If we have reached here because of async
                         * event or other error, and have egress error
                         * then drop
                         */
                        if (CQE_TYPE(hw_cqe) == 1)
                                goto next_cqe;

                        /* drop peer2peer RTR reads.
                         */
                        if (CQE_WRID_STAG(hw_cqe) == 1)
                                goto next_cqe;

                        /*
                         * Eat completions for unsignaled read WRs.
                         */
                        if (!qhp->wq.sq.oldest_read->signaled) {
                                advance_oldest_read(&qhp->wq);
                                goto next_cqe;
                        }

                        /*
                         * Don't write to the HWCQ, create a new read req CQE
                         * in local memory and move it into the swcq.
                         */
                        create_read_req_cqe(&qhp->wq, hw_cqe, &read_cqe);
                        hw_cqe = &read_cqe;
                        advance_oldest_read(&qhp->wq);
                }

                /* If it's an SQ completion, then do the magic to move all the
                 * unsignaled and now in-order completions into the swcq.
                 */
                if (SQ_TYPE(hw_cqe)) {
                        swsqe = &qhp->wq.sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
                        swsqe->cqe = *hw_cqe;
                        swsqe->complete = 1;
                        flush_completed_wrs(&qhp->wq, &chp->cq);
                } else {
                        swcqe = &chp->cq.sw_queue[chp->cq.sw_pidx];
                        *swcqe = *hw_cqe;
                        swcqe->header |= cpu_to_be32(CQE_SWCQE_V(1));
                        t4_swcq_produce(&chp->cq);
                }
next_cqe:
                t4_hwcq_consume(&chp->cq);
                ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);
                if (qhp && flush_qhp != qhp)
                        spin_unlock(&qhp->lock);
        }
}

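/*
 * Returns 1 if this CQE retires a user-visible RQ WR; CQEs consumed
 * internally (terminates, write/read-response artifacts, send
 * completions arriving on an already-empty RQ) do not count.
 */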
static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)
{
        if (DRAIN_CQE(cqe)) {
                WARN_ONCE(1, "Unexpected DRAIN CQE qp id %u!\n", wq->sq.qid);
                return 0;
        }

        if (CQE_OPCODE(cqe) == FW_RI_TERMINATE)
                return 0;

        if ((CQE_OPCODE(cqe) == FW_RI_RDMA_WRITE) && RQ_TYPE(cqe))
                return 0;

        if ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) && SQ_TYPE(cqe))
                return 0;

        if (CQE_SEND_OPCODE(cqe) && RQ_TYPE(cqe) && t4_rq_empty(wq))
                return 0;
        return 1;
}

void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
{
        struct t4_cqe *cqe;
        u32 ptr;

        *count = 0;
        pr_debug("count zero %d\n", *count);
        ptr = cq->sw_cidx;
        while (ptr != cq->sw_pidx) {
                cqe = &cq->sw_queue[ptr];
                if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) &&
                    (CQE_QPID(cqe) == wq->sq.qid) && cqe_completes_wr(cqe, wq))
                        (*count)++;
                if (++ptr == cq->size)
                        ptr = 0;
        }
        pr_debug("cq %p count %d\n", cq, *count);
}

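/*
 * Re-post SRQ WRs that were queued while out-of-order completions
 * were outstanding, then ring the SRQ doorbell once for the batch.
 */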
static void post_pending_srq_wrs(struct t4_srq *srq)
{
        struct t4_srq_pending_wr *pwr;
        u16 idx = 0;

        while (srq->pending_in_use) {
                pwr = &srq->pending_wrs[srq->pending_cidx];
                srq->sw_rq[srq->pidx].wr_id = pwr->wr_id;
                srq->sw_rq[srq->pidx].valid = 1;

                pr_debug("%s posting pending cidx %u pidx %u wq_pidx %u in_use %u rq_size %u wr_id %llx\n",
                         __func__,
                         srq->cidx, srq->pidx, srq->wq_pidx,
                         srq->in_use, srq->size,
                         (unsigned long long)pwr->wr_id);

                c4iw_copy_wr_to_srq(srq, &pwr->wqe, pwr->len16);
                t4_srq_consume_pending_wr(srq);
                t4_srq_produce(srq, pwr->len16);
                idx += DIV_ROUND_UP(pwr->len16 * 16, T4_EQ_ENTRY_SIZE);
        }

        if (idx) {
                t4_ring_srq_db(srq, idx, pwr->len16, &pwr->wqe);
                srq->queue[srq->size].status.host_wq_pidx =
                        srq->wq_pidx;
        }
}

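/*
 * SRQ completions may arrive out of order.  An in-order CQE consumes
 * its slot and then sweeps up any previously completed out-of-order
 * neighbors; an out-of-order CQE just marks its slot and bumps
 * ooo_count.
 */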
static u64 reap_srq_cqe(struct t4_cqe *hw_cqe, struct t4_srq *srq)
{
        int rel_idx = CQE_ABS_RQE_IDX(hw_cqe) - srq->rqt_abs_idx;
        u64 wr_id;

        srq->sw_rq[rel_idx].valid = 0;
        wr_id = srq->sw_rq[rel_idx].wr_id;

        if (rel_idx == srq->cidx) {
                pr_debug("%s in order cqe rel_idx %u cidx %u pidx %u wq_pidx %u in_use %u rq_size %u wr_id %llx\n",
                         __func__, rel_idx, srq->cidx, srq->pidx,
                         srq->wq_pidx, srq->in_use, srq->size,
                         (unsigned long long)srq->sw_rq[rel_idx].wr_id);
                t4_srq_consume(srq);
                while (srq->ooo_count && !srq->sw_rq[srq->cidx].valid) {
                        pr_debug("%s eat ooo cidx %u pidx %u wq_pidx %u in_use %u rq_size %u ooo_count %u wr_id %llx\n",
                                 __func__, srq->cidx, srq->pidx,
                                 srq->wq_pidx, srq->in_use,
                                 srq->size, srq->ooo_count,
                                 (unsigned long long)
                                 srq->sw_rq[srq->cidx].wr_id);
                        t4_srq_consume_ooo(srq);
                }
                if (srq->ooo_count == 0 && srq->pending_in_use)
                        post_pending_srq_wrs(srq);
        } else {
                pr_debug("%s ooo cqe rel_idx %u cidx %u pidx %u wq_pidx %u in_use %u rq_size %u ooo_count %u wr_id %llx\n",
                         __func__, rel_idx, srq->cidx,
                         srq->pidx, srq->wq_pidx,
                         srq->in_use, srq->size,
                         srq->ooo_count,
                         (unsigned long long)srq->sw_rq[rel_idx].wr_id);
                t4_srq_produce_ooo(srq);
        }
        return wr_id;
}

/*
 * poll_cq
 *
 * Caller must:
 *     check the validity of the first CQE,
 *     supply the wq associated with the qpid.
 *
 * credit: cq credit to return to sge.
 * cqe_flushed: 1 iff the CQE is flushed.
 * cqe: copy of the polled CQE.
 *
 * return value:
 *    0             CQE returned ok.
 *    -EAGAIN       CQE skipped, try again.
 *    -EOVERFLOW    CQ overflow detected.
 */
static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
                   u8 *cqe_flushed, u64 *cookie, u32 *credit,
                   struct t4_srq *srq)
{
        int ret = 0;
        struct t4_cqe *hw_cqe, read_cqe;

        *cqe_flushed = 0;
        *credit = 0;
        ret = t4_next_cqe(cq, &hw_cqe);
        if (ret)
                return ret;

        pr_debug("CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
                 CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe),
                 CQE_GENBIT(hw_cqe), CQE_TYPE(hw_cqe), CQE_STATUS(hw_cqe),
                 CQE_OPCODE(hw_cqe), CQE_LEN(hw_cqe), CQE_WRID_HI(hw_cqe),
                 CQE_WRID_LOW(hw_cqe));

        /*
         * skip cqe's not affiliated with a QP.
         */
        if (wq == NULL) {
                ret = -EAGAIN;
                goto skip_cqe;
        }

        /*
         * skip hw cqe's if the wq is flushed.
         */
        if (wq->flushed && !SW_CQE(hw_cqe)) {
                ret = -EAGAIN;
                goto skip_cqe;
        }

        /*
         * skip TERMINATE cqes...
         */
        if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE) {
                ret = -EAGAIN;
                goto skip_cqe;
        }

        /*
         * Special cqe for drain WR completions...
         */
        if (DRAIN_CQE(hw_cqe)) {
                *cookie = CQE_DRAIN_COOKIE(hw_cqe);
                *cqe = *hw_cqe;
                goto skip_cqe;
        }

        /*
         * Gotta tweak READ completions:
         *      1) the cqe doesn't contain the sq_wptr from the wr.
         *      2) opcode not reflected from the wr.
         *      3) read_len not reflected from the wr.
         *      4) cq_type is RQ_TYPE not SQ_TYPE.
         */
        if (RQ_TYPE(hw_cqe) && (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP)) {

                /* If we have reached here because of async
                 * event or other error, and have egress error
                 * then drop
                 */
                if (CQE_TYPE(hw_cqe) == 1) {
                        if (CQE_STATUS(hw_cqe))
                                t4_set_wq_in_error(wq, 0);
                        ret = -EAGAIN;
                        goto skip_cqe;
                }

                /* If this is an unsolicited read response, then the read
                 * was generated by the kernel driver as part of peer-2-peer
                 * connection setup.  So ignore the completion.
                 */
                if (CQE_WRID_STAG(hw_cqe) == 1) {
                        if (CQE_STATUS(hw_cqe))
                                t4_set_wq_in_error(wq, 0);
                        ret = -EAGAIN;
                        goto skip_cqe;
                }

                /*
                 * Eat completions for unsignaled read WRs.
                 */
                if (!wq->sq.oldest_read->signaled) {
                        advance_oldest_read(wq);
                        ret = -EAGAIN;
                        goto skip_cqe;
                }

                /*
                 * Don't write to the HWCQ, so create a new read req CQE
                 * in local memory.
                 */
                create_read_req_cqe(wq, hw_cqe, &read_cqe);
                hw_cqe = &read_cqe;
                advance_oldest_read(wq);
        }

        if (CQE_STATUS(hw_cqe) || t4_wq_in_error(wq)) {
                *cqe_flushed = (CQE_STATUS(hw_cqe) == T4_ERR_SWFLUSH);
                t4_set_wq_in_error(wq, 0);
        }

        /*
         * RECV completion.
         */
        if (RQ_TYPE(hw_cqe)) {

                /*
                 * HW only validates 4 bits of MSN.  So we must validate that
                 * the MSN in the SEND is the next expected MSN.  If it's not,
                 * then we complete this with T4_ERR_MSN and mark the wq in
                 * error.
                 */
                if (unlikely(!CQE_STATUS(hw_cqe) &&
                             CQE_WRID_MSN(hw_cqe) != wq->rq.msn)) {
                        t4_set_wq_in_error(wq, 0);
                        hw_cqe->header |= cpu_to_be32(CQE_STATUS_V(T4_ERR_MSN));
                }
                goto proc_cqe;
        }

        /*
         * If we get here it's a send completion.
         *
         * Handle out of order completion. These get stuffed
         * in the SW SQ. Then the SW SQ is walked to move any
         * now in-order completions into the SW CQ.  This handles
         * 2 cases:
         *      1) reaping unsignaled WRs when the first subsequent
         *         signaled WR is completed.
         *      2) out of order read completions.
         */
        if (!SW_CQE(hw_cqe) && (CQE_WRID_SQ_IDX(hw_cqe) != wq->sq.cidx)) {
                struct t4_swsqe *swsqe;

                pr_debug("out of order completion going in sw_sq at idx %u\n",
                         CQE_WRID_SQ_IDX(hw_cqe));
                swsqe = &wq->sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
                swsqe->cqe = *hw_cqe;
                swsqe->complete = 1;
                ret = -EAGAIN;
                goto flush_wq;
        }

proc_cqe:
        *cqe = *hw_cqe;

        /*
         * Reap the associated WR(s) that are freed up with this
         * completion.
         */
        if (SQ_TYPE(hw_cqe)) {
                int idx = CQE_WRID_SQ_IDX(hw_cqe);

                /*
                 * Account for any unsignaled completions completed by
                 * this signaled completion.  In this case, cidx points
                 * to the first unsignaled one, and idx points to the
                 * signaled one.  So adjust in_use based on this delta.
                 * If this is not completing any unsignaled WRs, then the
                 * delta will be 0.  Handle wrapping also!
                 */
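                /*
                 * Worked example (hypothetical numbers): with sq.size = 16,
                 * cidx = 14 and a completion at idx = 2, the wrap branch
                 * below retires 16 + 2 - 14 = 4 entries in one step.
                 */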
                if (idx < wq->sq.cidx)
                        wq->sq.in_use -= wq->sq.size + idx - wq->sq.cidx;
                else
                        wq->sq.in_use -= idx - wq->sq.cidx;

                wq->sq.cidx = (uint16_t)idx;
                pr_debug("completing sq idx %u\n", wq->sq.cidx);
                *cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
                if (c4iw_wr_log)
                        c4iw_log_wr_stats(wq, hw_cqe);
                t4_sq_consume(wq);
        } else {
                if (!srq) {
                        pr_debug("completing rq idx %u\n", wq->rq.cidx);
                        *cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
                        if (c4iw_wr_log)
                                c4iw_log_wr_stats(wq, hw_cqe);
                        t4_rq_consume(wq);
                } else {
                        *cookie = reap_srq_cqe(hw_cqe, srq);
                }
                wq->rq.msn++;
                goto skip_cqe;
        }

flush_wq:
        /*
         * Flush any completed cqes that are now in-order.
         */
        flush_completed_wrs(wq, cq);

skip_cqe:
        if (SW_CQE(hw_cqe)) {
                pr_debug("cq %p cqid 0x%x skip sw cqe cidx %u\n",
                         cq, cq->cqid, cq->sw_cidx);
                t4_swcq_consume(cq);
        } else {
                pr_debug("cq %p cqid 0x%x skip hw cqe cidx %u\n",
                         cq, cq->cqid, cq->cidx);
                t4_hwcq_consume(cq);
        }
        return ret;
}

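/*
 * Translate one polled t4_cqe into an ib_wc: wr_id comes from the
 * cookie reaped by poll_cq(); opcode and status are mapped from the
 * Chelsio encodings to their ib_verbs equivalents below.
 */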
static int __c4iw_poll_cq_one(struct c4iw_cq *chp, struct c4iw_qp *qhp,
                              struct ib_wc *wc, struct c4iw_srq *srq)
{
        struct t4_cqe uninitialized_var(cqe);
        struct t4_wq *wq = qhp ? &qhp->wq : NULL;
        u32 credit = 0;
        u8 cqe_flushed;
        u64 cookie = 0;
        int ret;

        ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit,
                      srq ? &srq->wq : NULL);
        if (ret)
                goto out;

        wc->wr_id = cookie;
        wc->qp = qhp ? &qhp->ibqp : NULL;
        wc->vendor_err = CQE_STATUS(&cqe);
        wc->wc_flags = 0;

        /*
         * Simulate a SRQ_LIMIT_REACHED HW notification if required.
         */
        if (srq && !(srq->flags & T4_SRQ_LIMIT_SUPPORT) && srq->armed &&
            srq->wq.in_use < srq->srq_limit)
                c4iw_dispatch_srq_limit_reached_event(srq);

        pr_debug("qpid 0x%x type %d opcode %d status 0x%x len %u wrid hi 0x%x lo 0x%x cookie 0x%llx\n",
                 CQE_QPID(&cqe),
                 CQE_TYPE(&cqe), CQE_OPCODE(&cqe),
                 CQE_STATUS(&cqe), CQE_LEN(&cqe),
                 CQE_WRID_HI(&cqe), CQE_WRID_LOW(&cqe),
                 (unsigned long long)cookie);

        if (CQE_TYPE(&cqe) == 0) {
                if (!CQE_STATUS(&cqe))
                        wc->byte_len = CQE_LEN(&cqe);
                else
                        wc->byte_len = 0;

                switch (CQE_OPCODE(&cqe)) {
                case FW_RI_SEND:
                        wc->opcode = IB_WC_RECV;
                        break;
                case FW_RI_SEND_WITH_INV:
                case FW_RI_SEND_WITH_SE_INV:
                        wc->opcode = IB_WC_RECV;
                        wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe);
                        wc->wc_flags |= IB_WC_WITH_INVALIDATE;
                        c4iw_invalidate_mr(qhp->rhp, wc->ex.invalidate_rkey);
                        break;
                case FW_RI_WRITE_IMMEDIATE:
                        wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
                        wc->ex.imm_data = CQE_IMM_DATA(&cqe);
                        wc->wc_flags |= IB_WC_WITH_IMM;
                        break;
                default:
                        pr_err("Unexpected opcode %d in the CQE received for QPID=0x%0x\n",
                               CQE_OPCODE(&cqe), CQE_QPID(&cqe));
                        ret = -EINVAL;
                        goto out;
                }
        } else {
                switch (CQE_OPCODE(&cqe)) {
                case FW_RI_WRITE_IMMEDIATE:
                case FW_RI_RDMA_WRITE:
                        wc->opcode = IB_WC_RDMA_WRITE;
                        break;
                case FW_RI_READ_REQ:
                        wc->opcode = IB_WC_RDMA_READ;
                        wc->byte_len = CQE_LEN(&cqe);
                        break;
                case FW_RI_SEND_WITH_INV:
                case FW_RI_SEND_WITH_SE_INV:
                        wc->opcode = IB_WC_SEND;
                        wc->wc_flags |= IB_WC_WITH_INVALIDATE;
                        break;
                case FW_RI_SEND:
                case FW_RI_SEND_WITH_SE:
                        wc->opcode = IB_WC_SEND;
                        break;
                case FW_RI_LOCAL_INV:
                        wc->opcode = IB_WC_LOCAL_INV;
                        break;
                case FW_RI_FAST_REGISTER:
                        wc->opcode = IB_WC_REG_MR;

                        /* Invalidate the MR if the fastreg failed */
                        if (CQE_STATUS(&cqe) != T4_ERR_SUCCESS)
                                c4iw_invalidate_mr(qhp->rhp,
                                                   CQE_WRID_FR_STAG(&cqe));
                        break;
                default:
                        pr_err("Unexpected opcode %d in the CQE received for QPID=0x%0x\n",
                               CQE_OPCODE(&cqe), CQE_QPID(&cqe));
                        ret = -EINVAL;
                        goto out;
                }
        }

        if (cqe_flushed)
                wc->status = IB_WC_WR_FLUSH_ERR;
        else {
                switch (CQE_STATUS(&cqe)) {
                case T4_ERR_SUCCESS:
                        wc->status = IB_WC_SUCCESS;
                        break;
                case T4_ERR_STAG:
                        wc->status = IB_WC_LOC_ACCESS_ERR;
                        break;
                case T4_ERR_PDID:
                        wc->status = IB_WC_LOC_PROT_ERR;
                        break;
                case T4_ERR_QPID:
                case T4_ERR_ACCESS:
                        wc->status = IB_WC_LOC_ACCESS_ERR;
                        break;
                case T4_ERR_WRAP:
                        wc->status = IB_WC_GENERAL_ERR;
                        break;
                case T4_ERR_BOUND:
                        wc->status = IB_WC_LOC_LEN_ERR;
                        break;
                case T4_ERR_INVALIDATE_SHARED_MR:
                case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
                        wc->status = IB_WC_MW_BIND_ERR;
                        break;
                case T4_ERR_CRC:
                case T4_ERR_MARKER:
                case T4_ERR_PDU_LEN_ERR:
                case T4_ERR_OUT_OF_RQE:
                case T4_ERR_DDP_VERSION:
                case T4_ERR_RDMA_VERSION:
                case T4_ERR_DDP_QUEUE_NUM:
                case T4_ERR_MSN:
                case T4_ERR_TBIT:
                case T4_ERR_MO:
                case T4_ERR_MSN_RANGE:
                case T4_ERR_IRD_OVERFLOW:
                case T4_ERR_OPCODE:
                case T4_ERR_INTERNAL_ERR:
                        wc->status = IB_WC_FATAL_ERR;
                        break;
                case T4_ERR_SWFLUSH:
                        wc->status = IB_WC_WR_FLUSH_ERR;
                        break;
                default:
                        pr_err("Unexpected cqe_status 0x%x for QPID=0x%0x\n",
                               CQE_STATUS(&cqe), CQE_QPID(&cqe));
                        wc->status = IB_WC_FATAL_ERR;
                }
        }
out:
        return ret;
}

/*
 * Get one cq entry from c4iw and map it to openib.
 *
 * Returns:
 *      0                       cqe returned
 *      -ENODATA                CQ empty, nothing to return
 *      -EAGAIN                 caller must try again
 *      any other -errno        fatal error
 */
static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
{
        struct c4iw_srq *srq = NULL;
        struct c4iw_qp *qhp = NULL;
        struct t4_cqe *rd_cqe;
        int ret;

        ret = t4_next_cqe(&chp->cq, &rd_cqe);

        if (ret)
                return ret;

        qhp = get_qhp(chp->rhp, CQE_QPID(rd_cqe));
        if (qhp) {
                spin_lock(&qhp->lock);
                srq = qhp->srq;
                if (srq)
                        spin_lock(&srq->lock);
                ret = __c4iw_poll_cq_one(chp, qhp, wc, srq);
                spin_unlock(&qhp->lock);
                if (srq)
                        spin_unlock(&srq->lock);
        } else {
                ret = __c4iw_poll_cq_one(chp, NULL, wc, NULL);
        }
        return ret;
}

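/*
 * Verbs poll entry point: retries c4iw_poll_cq_one() while it skips
 * CQEs (-EAGAIN) and stops at the first real error or an empty CQ
 * (-ENODATA), which is reported to the caller as npolled.
 */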
int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
        struct c4iw_cq *chp;
        unsigned long flags;
        int npolled;
        int err = 0;

        chp = to_c4iw_cq(ibcq);

        spin_lock_irqsave(&chp->lock, flags);
        for (npolled = 0; npolled < num_entries; ++npolled) {
                do {
                        err = c4iw_poll_cq_one(chp, wc + npolled);
                } while (err == -EAGAIN);
                if (err)
                        break;
        }
        spin_unlock_irqrestore(&chp->lock, flags);
        return !err || err == -ENODATA ? npolled : err;
}

int c4iw_destroy_cq(struct ib_cq *ib_cq)
{
        struct c4iw_cq *chp;
        struct c4iw_ucontext *ucontext;

        pr_debug("ib_cq %p\n", ib_cq);
        chp = to_c4iw_cq(ib_cq);

        remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
        atomic_dec(&chp->refcnt);
        wait_event(chp->wait, !atomic_read(&chp->refcnt));

        ucontext = ib_cq->uobject ? to_c4iw_ucontext(ib_cq->uobject->context)
                                  : NULL;
        destroy_cq(&chp->rhp->rdev, &chp->cq,
                   ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx,
                   chp->destroy_skb, chp->wr_waitp);
        c4iw_put_wr_wait(chp->wr_waitp);
        kfree(chp);
        return 0;
}

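/*
 * CQ sizing, as computed below: the requested depth gains one slot
 * for the status page and one so the IQ can distinguish full from
 * empty, is rounded up to a multiple of 16, then doubled (capped at
 * t4_max_iq_size, floored at 64) for the actual HW queue.  E.g., a
 * request for 62 CQEs becomes 64 after padding and a 128-entry HW
 * queue.  User CQ memory is also rounded up to a whole page.
 */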
struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
                             const struct ib_cq_init_attr *attr,
                             struct ib_ucontext *ib_context,
                             struct ib_udata *udata)
{
        int entries = attr->cqe;
        int vector = attr->comp_vector;
        struct c4iw_dev *rhp;
        struct c4iw_cq *chp;
        struct c4iw_create_cq ucmd;
        struct c4iw_create_cq_resp uresp;
        struct c4iw_ucontext *ucontext = NULL;
        int ret, wr_len;
        size_t memsize, hwentries;
        struct c4iw_mm_entry *mm, *mm2;

        pr_debug("ib_dev %p entries %d\n", ibdev, entries);
        if (attr->flags)
                return ERR_PTR(-EINVAL);

        rhp = to_c4iw_dev(ibdev);

        if (vector >= rhp->rdev.lldi.nciq)
                return ERR_PTR(-EINVAL);

        if (ib_context) {
                ucontext = to_c4iw_ucontext(ib_context);
                if (udata->inlen < sizeof(ucmd))
                        ucontext->is_32b_cqe = 1;
        }

        chp = kzalloc(sizeof(*chp), GFP_KERNEL);
        if (!chp)
                return ERR_PTR(-ENOMEM);

        chp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
        if (!chp->wr_waitp) {
                ret = -ENOMEM;
                goto err_free_chp;
        }
        c4iw_init_wr_wait(chp->wr_waitp);

        wr_len = sizeof(struct fw_ri_res_wr) + sizeof(struct fw_ri_res);
        chp->destroy_skb = alloc_skb(wr_len, GFP_KERNEL);
        if (!chp->destroy_skb) {
                ret = -ENOMEM;
                goto err_free_wr_wait;
        }

        /* account for the status page. */
        entries++;

        /* IQ needs one extra entry to differentiate full vs empty. */
        entries++;

        /*
         * entries must be multiple of 16 for HW.
         */
        entries = roundup(entries, 16);

        /*
         * Make actual HW queue 2x to avoid cidx_inc overflows.
         */
        hwentries = min(entries * 2, rhp->rdev.hw_queue.t4_max_iq_size);

        /*
         * Make HW queue at least 64 entries so GTS updates aren't too
         * frequent.
         */
        if (hwentries < 64)
                hwentries = 64;

        memsize = hwentries * ((ucontext && ucontext->is_32b_cqe) ?
                        (sizeof(*chp->cq.queue) / 2) : sizeof(*chp->cq.queue));

        /*
         * memsize must be a multiple of the page size if it's a user cq.
         */
        if (ucontext)
                memsize = roundup(memsize, PAGE_SIZE);

        chp->cq.size = hwentries;
        chp->cq.memsize = memsize;
        chp->cq.vector = vector;

        ret = create_cq(&rhp->rdev, &chp->cq,
                        ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
                        chp->wr_waitp);
        if (ret)
                goto err_free_skb;

        chp->rhp = rhp;
        chp->cq.size--;                         /* status page */
        chp->ibcq.cqe = entries - 2;
        spin_lock_init(&chp->lock);
        spin_lock_init(&chp->comp_handler_lock);
        atomic_set(&chp->refcnt, 1);
        init_waitqueue_head(&chp->wait);
        ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
        if (ret)
                goto err_destroy_cq;

        if (ucontext) {
                ret = -ENOMEM;
                mm = kmalloc(sizeof *mm, GFP_KERNEL);
                if (!mm)
                        goto err_remove_handle;
                mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
                if (!mm2)
                        goto err_free_mm;

                memset(&uresp, 0, sizeof(uresp));
                uresp.qid_mask = rhp->rdev.cqmask;
                uresp.cqid = chp->cq.cqid;
                uresp.size = chp->cq.size;
                uresp.memsize = chp->cq.memsize;
                spin_lock(&ucontext->mmap_lock);
                uresp.key = ucontext->key;
                ucontext->key += PAGE_SIZE;
                uresp.gts_key = ucontext->key;
                ucontext->key += PAGE_SIZE;
                /* communicate to the userspace that
                 * kernel driver supports 64B CQE
                 */
                uresp.flags |= C4IW_64B_CQE;

                spin_unlock(&ucontext->mmap_lock);
                ret = ib_copy_to_udata(udata, &uresp,
                                       ucontext->is_32b_cqe ?
                                       sizeof(uresp) - sizeof(uresp.flags) :
                                       sizeof(uresp));
                if (ret)
                        goto err_free_mm2;

                mm->key = uresp.key;
                mm->addr = virt_to_phys(chp->cq.queue);
                mm->len = chp->cq.memsize;
                insert_mmap(ucontext, mm);

                mm2->key = uresp.gts_key;
                mm2->addr = chp->cq.bar2_pa;
                mm2->len = PAGE_SIZE;
                insert_mmap(ucontext, mm2);
        }
        pr_debug("cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
                 chp->cq.cqid, chp, chp->cq.size,
                 chp->cq.memsize, (unsigned long long)chp->cq.dma_addr);
        return &chp->ibcq;
err_free_mm2:
        kfree(mm2);
err_free_mm:
        kfree(mm);
err_remove_handle:
        remove_handle(rhp, &rhp->cqidr, chp->cq.cqid);
err_destroy_cq:
        destroy_cq(&chp->rhp->rdev, &chp->cq,
                   ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
                   chp->destroy_skb, chp->wr_waitp);
err_free_skb:
        kfree_skb(chp->destroy_skb);
err_free_wr_wait:
        c4iw_put_wr_wait(chp->wr_waitp);
err_free_chp:
        kfree(chp);
        return ERR_PTR(ret);
}

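/*
 * Arm the CQ for the next notification; with
 * IB_CQ_REPORT_MISSED_EVENTS the return value indicates whether CQEs
 * were already pending when the CQ was armed.
 */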
int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
        struct c4iw_cq *chp;
        int ret = 0;
        unsigned long flag;

        chp = to_c4iw_cq(ibcq);
        spin_lock_irqsave(&chp->lock, flag);
        t4_arm_cq(&chp->cq,
                  (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
        if (flags & IB_CQ_REPORT_MISSED_EVENTS)
                ret = t4_cq_notempty(&chp->cq);
        spin_unlock_irqrestore(&chp->lock, flag);
        return ret;
}

void c4iw_flush_srqidx(struct c4iw_qp *qhp, u32 srqidx)
{
        struct c4iw_cq *rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
        unsigned long flag;

        /* locking hierarchy: cq lock first, then qp lock. */
        spin_lock_irqsave(&rchp->lock, flag);
        spin_lock(&qhp->lock);

        /* create a SRQ RECV CQE for srqidx */
        insert_recv_cqe(&qhp->wq, &rchp->cq, srqidx);

        spin_unlock(&qhp->lock);
        spin_unlock_irqrestore(&rchp->lock, flag);
}