linux/drivers/infiniband/hw/cxgb4/cq.c
/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "iw_cxgb4.h"

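/*
 * Tear down a CQ: post an FW_RI_RES_WR with op RESET for this cqid, wait
 * for the firmware reply, then free the software queue, the DMA-coherent
 * queue memory and the cqid itself.
 */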
static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
                      struct c4iw_dev_ucontext *uctx, struct sk_buff *skb)
{
        struct fw_ri_res_wr *res_wr;
        struct fw_ri_res *res;
        int wr_len;
        struct c4iw_wr_wait wr_wait;
        int ret;

        wr_len = sizeof *res_wr + sizeof *res;
        set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

        res_wr = __skb_put_zero(skb, wr_len);
        res_wr->op_nres = cpu_to_be32(
                        FW_WR_OP_V(FW_RI_RES_WR) |
                        FW_RI_RES_WR_NRES_V(1) |
                        FW_WR_COMPL_F);
        res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
        res_wr->cookie = (uintptr_t)&wr_wait;
        res = res_wr->res;
        res->u.cq.restype = FW_RI_RES_TYPE_CQ;
        res->u.cq.op = FW_RI_RES_OP_RESET;
        res->u.cq.iqid = cpu_to_be32(cq->cqid);

        c4iw_init_wr_wait(&wr_wait);
        ret = c4iw_ofld_send(rdev, skb);
        if (!ret)
                ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);

        kfree(cq->sw_queue);
        dma_free_coherent(&(rdev->lldi.pdev->dev),
                          cq->memsize, cq->queue,
                          dma_unmap_addr(cq, mapping));
        c4iw_put_cqid(rdev, cq->cqid, uctx);
        return ret;
}

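/*
 * Create a CQ: allocate a cqid, the software queue (kernel CQs only) and
 * the DMA-coherent queue memory, then post an FW_RI_RES_WR with op WRITE
 * and map the BAR2 doorbell/GTS address for this queue.
 */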
static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
                     struct c4iw_dev_ucontext *uctx)
{
        struct fw_ri_res_wr *res_wr;
        struct fw_ri_res *res;
        int wr_len;
        int user = (uctx != &rdev->uctx);
        struct c4iw_wr_wait wr_wait;
        int ret;
        struct sk_buff *skb;

        cq->cqid = c4iw_get_cqid(rdev, uctx);
        if (!cq->cqid) {
                ret = -ENOMEM;
                goto err1;
        }

        if (!user) {
                cq->sw_queue = kzalloc(cq->memsize, GFP_KERNEL);
                if (!cq->sw_queue) {
                        ret = -ENOMEM;
                        goto err2;
                }
        }
        cq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, cq->memsize,
                                       &cq->dma_addr, GFP_KERNEL);
        if (!cq->queue) {
                ret = -ENOMEM;
                goto err3;
        }
        dma_unmap_addr_set(cq, mapping, cq->dma_addr);
        memset(cq->queue, 0, cq->memsize);

        /* build fw_ri_res_wr */
        wr_len = sizeof *res_wr + sizeof *res;

        skb = alloc_skb(wr_len, GFP_KERNEL);
        if (!skb) {
                ret = -ENOMEM;
                goto err4;
        }
        set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

        res_wr = __skb_put_zero(skb, wr_len);
        res_wr->op_nres = cpu_to_be32(
                        FW_WR_OP_V(FW_RI_RES_WR) |
                        FW_RI_RES_WR_NRES_V(1) |
                        FW_WR_COMPL_F);
        res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
        res_wr->cookie = (uintptr_t)&wr_wait;
        res = res_wr->res;
        res->u.cq.restype = FW_RI_RES_TYPE_CQ;
        res->u.cq.op = FW_RI_RES_OP_WRITE;
        res->u.cq.iqid = cpu_to_be32(cq->cqid);
        res->u.cq.iqandst_to_iqandstindex = cpu_to_be32(
                        FW_RI_RES_WR_IQANUS_V(0) |
                        FW_RI_RES_WR_IQANUD_V(1) |
                        FW_RI_RES_WR_IQANDST_F |
                        FW_RI_RES_WR_IQANDSTINDEX_V(
                                rdev->lldi.ciq_ids[cq->vector]));
        res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(
                        FW_RI_RES_WR_IQDROPRSS_F |
                        FW_RI_RES_WR_IQPCIECH_V(2) |
                        FW_RI_RES_WR_IQINTCNTTHRESH_V(0) |
                        FW_RI_RES_WR_IQO_F |
                        FW_RI_RES_WR_IQESIZE_V(1));
        res->u.cq.iqsize = cpu_to_be16(cq->size);
        res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);

        c4iw_init_wr_wait(&wr_wait);

        ret = c4iw_ofld_send(rdev, skb);
        if (ret)
                goto err4;
        pr_debug("%s wait_event wr_wait %p\n", __func__, &wr_wait);
        ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
        if (ret)
                goto err4;

        cq->gen = 1;
        cq->gts = rdev->lldi.gts_reg;
        cq->rdev = rdev;

        cq->bar2_va = c4iw_bar2_addrs(rdev, cq->cqid, T4_BAR2_QTYPE_INGRESS,
                                      &cq->bar2_qid,
                                      user ? &cq->bar2_pa : NULL);
        if (user && !cq->bar2_pa) {
                pr_warn("%s: cqid %u not in BAR2 range\n",
                        pci_name(rdev->lldi.pdev), cq->cqid);
                ret = -EINVAL;
                goto err4;
        }
        return 0;
err4:
        dma_free_coherent(&rdev->lldi.pdev->dev, cq->memsize, cq->queue,
                          dma_unmap_addr(cq, mapping));
err3:
        kfree(cq->sw_queue);
err2:
        c4iw_put_cqid(rdev, cq->cqid, uctx);
err1:
        return ret;
}

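/*
 * Insert a software-generated flush CQE (T4_ERR_SWFLUSH) for one RQ WR
 * into the software CQ.
 */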
static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)
{
        struct t4_cqe cqe;

        pr_debug("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
                 wq, cq, cq->sw_cidx, cq->sw_pidx);
        memset(&cqe, 0, sizeof(cqe));
        cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
                                 CQE_OPCODE_V(FW_RI_SEND) |
                                 CQE_TYPE_V(0) |
                                 CQE_SWCQE_V(1) |
                                 CQE_QPID_V(wq->sq.qid));
        cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
        cq->sw_queue[cq->sw_pidx] = cqe;
        t4_swcq_produce(cq);
}

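/*
 * Flush the RQ: generate flush CQEs for every RQ WR still in use, minus
 * 'count' entries already accounted for by the caller.  Returns the number
 * of WRs flushed.
 */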
int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count)
{
        int flushed = 0;
        int in_use = wq->rq.in_use - count;

        BUG_ON(in_use < 0);
        pr_debug("%s wq %p cq %p rq.in_use %u skip count %u\n", __func__,
                 wq, cq, wq->rq.in_use, count);
        while (in_use--) {
                insert_recv_cqe(wq, cq);
                flushed++;
        }
        return flushed;
}

static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
                          struct t4_swsqe *swcqe)
{
        struct t4_cqe cqe;

        pr_debug("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
                 wq, cq, cq->sw_cidx, cq->sw_pidx);
        memset(&cqe, 0, sizeof(cqe));
        cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
                                 CQE_OPCODE_V(swcqe->opcode) |
                                 CQE_TYPE_V(1) |
                                 CQE_SWCQE_V(1) |
                                 CQE_QPID_V(wq->sq.qid));
        CQE_WRID_SQ_IDX(&cqe) = swcqe->idx;
        cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
        cq->sw_queue[cq->sw_pidx] = cqe;
        t4_swcq_produce(cq);
}

static void advance_oldest_read(struct t4_wq *wq);

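/*
 * Flush the SQ: walk the SQ from flush_cidx to pidx and insert a flush CQE
 * for every WR not yet flushed, advancing oldest_read past any flushed read
 * requests.  Returns the number of WRs flushed.
 */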
int c4iw_flush_sq(struct c4iw_qp *qhp)
{
        int flushed = 0;
        struct t4_wq *wq = &qhp->wq;
        struct c4iw_cq *chp = to_c4iw_cq(qhp->ibqp.send_cq);
        struct t4_cq *cq = &chp->cq;
        int idx;
        struct t4_swsqe *swsqe;

        if (wq->sq.flush_cidx == -1)
                wq->sq.flush_cidx = wq->sq.cidx;
        idx = wq->sq.flush_cidx;
        BUG_ON(idx >= wq->sq.size);
        while (idx != wq->sq.pidx) {
                swsqe = &wq->sq.sw_sq[idx];
                BUG_ON(swsqe->flushed);
                swsqe->flushed = 1;
                insert_sq_cqe(wq, cq, swsqe);
                if (wq->sq.oldest_read == swsqe) {
                        BUG_ON(swsqe->opcode != FW_RI_READ_REQ);
                        advance_oldest_read(wq);
                }
                flushed++;
                if (++idx == wq->sq.size)
                        idx = 0;
        }
        wq->sq.flush_cidx += flushed;
        if (wq->sq.flush_cidx >= wq->sq.size)
                wq->sq.flush_cidx -= wq->sq.size;
        return flushed;
}

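/*
 * Move any signaled SQ WRs that have completed and are now in order from
 * the software SQ into the software CQ, stopping at the first incomplete
 * signaled WR.
 */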
static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
{
        struct t4_swsqe *swsqe;
        int cidx;

        if (wq->sq.flush_cidx == -1)
                wq->sq.flush_cidx = wq->sq.cidx;
        cidx = wq->sq.flush_cidx;
        BUG_ON(cidx > wq->sq.size);

        while (cidx != wq->sq.pidx) {
                swsqe = &wq->sq.sw_sq[cidx];
                if (!swsqe->signaled) {
                        if (++cidx == wq->sq.size)
                                cidx = 0;
                } else if (swsqe->complete) {

                        BUG_ON(swsqe->flushed);

                        /*
                         * Insert this completed cqe into the swcq.
                         */
                        pr_debug("%s moving cqe into swcq sq idx %u cq idx %u\n",
                                 __func__, cidx, cq->sw_pidx);
                        swsqe->cqe.header |= htonl(CQE_SWCQE_V(1));
                        cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
                        t4_swcq_produce(cq);
                        swsqe->flushed = 1;
                        if (++cidx == wq->sq.size)
                                cidx = 0;
                        wq->sq.flush_cidx = cidx;
                } else
                        break;
        }
}

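/*
 * Build a local CQE for the oldest outstanding read request, since the
 * hardware READ_RESP CQE does not carry the SQ index, opcode or read length
 * of the originating WR.
 */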
static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe,
                struct t4_cqe *read_cqe)
{
        read_cqe->u.scqe.cidx = wq->sq.oldest_read->idx;
        read_cqe->len = htonl(wq->sq.oldest_read->read_len);
        read_cqe->header = htonl(CQE_QPID_V(CQE_QPID(hw_cqe)) |
                        CQE_SWCQE_V(SW_CQE(hw_cqe)) |
                        CQE_OPCODE_V(FW_RI_READ_REQ) |
                        CQE_TYPE_V(1));
        read_cqe->bits_type_ts = hw_cqe->bits_type_ts;
}

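/*
 * Advance sq.oldest_read to the next outstanding read request in the SQ,
 * or set it to NULL if there is none.
 */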
static void advance_oldest_read(struct t4_wq *wq)
{

        u32 rptr = wq->sq.oldest_read - wq->sq.sw_sq + 1;

        if (rptr == wq->sq.size)
                rptr = 0;
        while (rptr != wq->sq.pidx) {
                wq->sq.oldest_read = &wq->sq.sw_sq[rptr];

                if (wq->sq.oldest_read->opcode == FW_RI_READ_REQ)
                        return;
                if (++rptr == wq->sq.size)
                        rptr = 0;
        }
        wq->sq.oldest_read = NULL;
}

/*
 * Move all CQEs from the HWCQ into the SWCQ.
 * Deal with out-of-order completions and/or completions that complete
 * prior unsignalled WRs.
 */
void c4iw_flush_hw_cq(struct c4iw_cq *chp)
{
        struct t4_cqe *hw_cqe, *swcqe, read_cqe;
        struct c4iw_qp *qhp;
        struct t4_swsqe *swsqe;
        int ret;

        pr_debug("%s  cqid 0x%x\n", __func__, chp->cq.cqid);
        ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);

        /*
         * This logic is similar to poll_cq(), but not quite the same
         * unfortunately.  Need to move pertinent HW CQEs to the SW CQ but
         * also do any translation magic that poll_cq() normally does.
         */
        while (!ret) {
                qhp = get_qhp(chp->rhp, CQE_QPID(hw_cqe));

                /*
                 * drop CQEs with no associated QP
                 */
                if (qhp == NULL)
                        goto next_cqe;

                if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE)
                        goto next_cqe;

                if (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP) {

                        /* If we have reached here because of async
                         * event or other error, and have egress error
                         * then drop
                         */
                        if (CQE_TYPE(hw_cqe) == 1)
                                goto next_cqe;

                        /* drop peer2peer RTR reads.
                         */
                        if (CQE_WRID_STAG(hw_cqe) == 1)
                                goto next_cqe;

                        /*
                         * Eat completions for unsignaled read WRs.
                         */
                        if (!qhp->wq.sq.oldest_read->signaled) {
                                advance_oldest_read(&qhp->wq);
                                goto next_cqe;
                        }

                        /*
                         * Don't write to the HWCQ, create a new read req CQE
                         * in local memory and move it into the swcq.
                         */
                        create_read_req_cqe(&qhp->wq, hw_cqe, &read_cqe);
                        hw_cqe = &read_cqe;
                        advance_oldest_read(&qhp->wq);
                }

                /* if it's an SQ completion, then do the magic to move all the
                 * unsignaled and now in-order completions into the swcq.
                 */
                if (SQ_TYPE(hw_cqe)) {
                        swsqe = &qhp->wq.sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
                        swsqe->cqe = *hw_cqe;
                        swsqe->complete = 1;
                        flush_completed_wrs(&qhp->wq, &chp->cq);
                } else {
                        swcqe = &chp->cq.sw_queue[chp->cq.sw_pidx];
                        *swcqe = *hw_cqe;
                        swcqe->header |= cpu_to_be32(CQE_SWCQE_V(1));
                        t4_swcq_produce(&chp->cq);
                }
next_cqe:
                t4_hwcq_consume(&chp->cq);
                ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);
        }
}

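/*
 * Return 1 if this CQE completes a WR posted by the host, or 0 for CQEs
 * that only reflect protocol events (TERMINATE, incoming RDMA writes, read
 * responses, or sends with no RQE to consume).
 */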
static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)
{
        if (CQE_OPCODE(cqe) == FW_RI_TERMINATE)
                return 0;

        if ((CQE_OPCODE(cqe) == FW_RI_RDMA_WRITE) && RQ_TYPE(cqe))
                return 0;

        if ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) && SQ_TYPE(cqe))
                return 0;

        if (CQE_SEND_OPCODE(cqe) && RQ_TYPE(cqe) && t4_rq_empty(wq))
                return 0;
        return 1;
}

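/*
 * Count the CQEs sitting in the software CQ that complete RQ WRs of the
 * given WQ.
 */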
void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
{
        struct t4_cqe *cqe;
        u32 ptr;

        *count = 0;
        pr_debug("%s count zero %d\n", __func__, *count);
        ptr = cq->sw_cidx;
        while (ptr != cq->sw_pidx) {
                cqe = &cq->sw_queue[ptr];
                if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) &&
                    (CQE_QPID(cqe) == wq->sq.qid) && cqe_completes_wr(cqe, wq))
                        (*count)++;
                if (++ptr == cq->size)
                        ptr = 0;
        }
        pr_debug("%s cq %p count %d\n", __func__, cq, *count);
}

/*
 * poll_cq
 *
 * Caller must:
 *     check the validity of the first CQE,
 *     supply the wq associated with the qpid.
 *
 * credit: cq credit to return to sge.
 * cqe_flushed: 1 iff the CQE is flushed.
 * cqe: copy of the polled CQE.
 *
 * return value:
 *    0             CQE returned ok.
 *    -EAGAIN       CQE skipped, try again.
 *    -EOVERFLOW    CQ overflow detected.
 */
static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
                   u8 *cqe_flushed, u64 *cookie, u32 *credit)
{
        int ret = 0;
        struct t4_cqe *hw_cqe, read_cqe;

        *cqe_flushed = 0;
        *credit = 0;
        ret = t4_next_cqe(cq, &hw_cqe);
        if (ret)
                return ret;

        pr_debug("%s CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
                 __func__, CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe),
                 CQE_GENBIT(hw_cqe), CQE_TYPE(hw_cqe), CQE_STATUS(hw_cqe),
                 CQE_OPCODE(hw_cqe), CQE_LEN(hw_cqe), CQE_WRID_HI(hw_cqe),
                 CQE_WRID_LOW(hw_cqe));

        /*
         * skip cqe's not affiliated with a QP.
         */
        if (wq == NULL) {
                ret = -EAGAIN;
                goto skip_cqe;
        }

        /*
         * skip hw cqe's if the wq is flushed.
         */
        if (wq->flushed && !SW_CQE(hw_cqe)) {
                ret = -EAGAIN;
                goto skip_cqe;
        }

        /*
         * skip TERMINATE cqes...
         */
        if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE) {
                ret = -EAGAIN;
                goto skip_cqe;
        }

        /*
         * Special cqe for drain WR completions...
         */
        if (CQE_OPCODE(hw_cqe) == C4IW_DRAIN_OPCODE) {
                *cookie = CQE_DRAIN_COOKIE(hw_cqe);
                *cqe = *hw_cqe;
                goto skip_cqe;
        }

        /*
         * Gotta tweak READ completions:
         *      1) the cqe doesn't contain the sq_wptr from the wr.
         *      2) opcode not reflected from the wr.
         *      3) read_len not reflected from the wr.
         *      4) cq_type is RQ_TYPE not SQ_TYPE.
         */
        if (RQ_TYPE(hw_cqe) && (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP)) {

                /* If we have reached here because of async
                 * event or other error, and have egress error
                 * then drop
                 */
                if (CQE_TYPE(hw_cqe) == 1) {
                        if (CQE_STATUS(hw_cqe))
                                t4_set_wq_in_error(wq);
                        ret = -EAGAIN;
                        goto skip_cqe;
                }

                /* If this is an unsolicited read response, then the read
                 * was generated by the kernel driver as part of peer-2-peer
                 * connection setup.  So ignore the completion.
                 */
                if (CQE_WRID_STAG(hw_cqe) == 1) {
                        if (CQE_STATUS(hw_cqe))
                                t4_set_wq_in_error(wq);
                        ret = -EAGAIN;
                        goto skip_cqe;
                }

                /*
                 * Eat completions for unsignaled read WRs.
                 */
                if (!wq->sq.oldest_read->signaled) {
                        advance_oldest_read(wq);
                        ret = -EAGAIN;
                        goto skip_cqe;
                }

                /*
                 * Don't write to the HWCQ, so create a new read req CQE
                 * in local memory.
                 */
                create_read_req_cqe(wq, hw_cqe, &read_cqe);
                hw_cqe = &read_cqe;
                advance_oldest_read(wq);
        }

        if (CQE_STATUS(hw_cqe) || t4_wq_in_error(wq)) {
                *cqe_flushed = (CQE_STATUS(hw_cqe) == T4_ERR_SWFLUSH);
                t4_set_wq_in_error(wq);
        }

        /*
         * RECV completion.
         */
        if (RQ_TYPE(hw_cqe)) {

                /*
                 * HW only validates 4 bits of MSN.  So we must validate that
                 * the MSN in the SEND is the next expected MSN.  If it's not,
                 * then we complete this with T4_ERR_MSN and mark the wq in
                 * error.
                 */

                if (t4_rq_empty(wq)) {
                        t4_set_wq_in_error(wq);
                        ret = -EAGAIN;
                        goto skip_cqe;
                }
                if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) {
                        t4_set_wq_in_error(wq);
                        hw_cqe->header |= htonl(CQE_STATUS_V(T4_ERR_MSN));
                        goto proc_cqe;
                }
                goto proc_cqe;
        }

        /*
         * If we get here it's a send completion.
         *
         * Handle out of order completion. These get stuffed
         * in the SW SQ. Then the SW SQ is walked to move any
         * now in-order completions into the SW CQ.  This handles
         * 2 cases:
         *      1) reaping unsignaled WRs when the first subsequent
         *         signaled WR is completed.
         *      2) out of order read completions.
         */
        if (!SW_CQE(hw_cqe) && (CQE_WRID_SQ_IDX(hw_cqe) != wq->sq.cidx)) {
                struct t4_swsqe *swsqe;

                pr_debug("%s out of order completion going in sw_sq at idx %u\n",
                         __func__, CQE_WRID_SQ_IDX(hw_cqe));
                swsqe = &wq->sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
                swsqe->cqe = *hw_cqe;
                swsqe->complete = 1;
                ret = -EAGAIN;
                goto flush_wq;
        }

proc_cqe:
        *cqe = *hw_cqe;

        /*
         * Reap the associated WR(s) that are freed up with this
         * completion.
         */
        if (SQ_TYPE(hw_cqe)) {
                int idx = CQE_WRID_SQ_IDX(hw_cqe);
                BUG_ON(idx >= wq->sq.size);

                /*
                 * Account for any unsignaled completions completed by
                 * this signaled completion.  In this case, cidx points
                 * to the first unsignaled one, and idx points to the
                 * signaled one.  So adjust in_use based on this delta.
                 * If this is not completing any unsignaled WRs, then the
                 * delta will be 0. Handle wrapping also!
                 */
                if (idx < wq->sq.cidx)
                        wq->sq.in_use -= wq->sq.size + idx - wq->sq.cidx;
                else
                        wq->sq.in_use -= idx - wq->sq.cidx;
                BUG_ON(wq->sq.in_use <= 0 && wq->sq.in_use >= wq->sq.size);

                wq->sq.cidx = (uint16_t)idx;
                pr_debug("%s completing sq idx %u\n", __func__, wq->sq.cidx);
                *cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
                if (c4iw_wr_log)
                        c4iw_log_wr_stats(wq, hw_cqe);
                t4_sq_consume(wq);
        } else {
                pr_debug("%s completing rq idx %u\n", __func__, wq->rq.cidx);
                *cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
                BUG_ON(t4_rq_empty(wq));
                if (c4iw_wr_log)
                        c4iw_log_wr_stats(wq, hw_cqe);
                t4_rq_consume(wq);
                goto skip_cqe;
        }

flush_wq:
        /*
         * Flush any completed cqes that are now in-order.
         */
        flush_completed_wrs(wq, cq);

skip_cqe:
        if (SW_CQE(hw_cqe)) {
                pr_debug("%s cq %p cqid 0x%x skip sw cqe cidx %u\n",
                         __func__, cq, cq->cqid, cq->sw_cidx);
                t4_swcq_consume(cq);
        } else {
                pr_debug("%s cq %p cqid 0x%x skip hw cqe cidx %u\n",
                         __func__, cq, cq->cqid, cq->cidx);
                t4_hwcq_consume(cq);
        }
        return ret;
}

/*
 * Get one cq entry from c4iw and map it to openib.
 *
 * Returns:
 *      0                       cqe returned
 *      -ENODATA                EMPTY;
 *      -EAGAIN                 caller must try again
 *      any other -errno        fatal error
 */
static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
{
        struct c4iw_qp *qhp = NULL;
        struct t4_cqe uninitialized_var(cqe), *rd_cqe;
        struct t4_wq *wq;
        u32 credit = 0;
        u8 cqe_flushed;
        u64 cookie = 0;
        int ret;

        ret = t4_next_cqe(&chp->cq, &rd_cqe);

        if (ret)
                return ret;

        qhp = get_qhp(chp->rhp, CQE_QPID(rd_cqe));
        if (!qhp)
                wq = NULL;
        else {
                spin_lock(&qhp->lock);
                wq = &(qhp->wq);
        }
        ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit);
        if (ret)
                goto out;

        wc->wr_id = cookie;
        wc->qp = &qhp->ibqp;
        wc->vendor_err = CQE_STATUS(&cqe);
        wc->wc_flags = 0;

        pr_debug("%s qpid 0x%x type %d opcode %d status 0x%x len %u wrid hi 0x%x lo 0x%x cookie 0x%llx\n",
                 __func__, CQE_QPID(&cqe),
                 CQE_TYPE(&cqe), CQE_OPCODE(&cqe),
                 CQE_STATUS(&cqe), CQE_LEN(&cqe),
                 CQE_WRID_HI(&cqe), CQE_WRID_LOW(&cqe),
                 (unsigned long long)cookie);

        if (CQE_TYPE(&cqe) == 0) {
                if (!CQE_STATUS(&cqe))
                        wc->byte_len = CQE_LEN(&cqe);
                else
                        wc->byte_len = 0;
                wc->opcode = IB_WC_RECV;
                if (CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_INV ||
                    CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_SE_INV) {
                        wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe);
                        wc->wc_flags |= IB_WC_WITH_INVALIDATE;
                        c4iw_invalidate_mr(qhp->rhp, wc->ex.invalidate_rkey);
                }
        } else {
                switch (CQE_OPCODE(&cqe)) {
                case FW_RI_RDMA_WRITE:
                        wc->opcode = IB_WC_RDMA_WRITE;
                        break;
                case FW_RI_READ_REQ:
                        wc->opcode = IB_WC_RDMA_READ;
                        wc->byte_len = CQE_LEN(&cqe);
                        break;
                case FW_RI_SEND_WITH_INV:
                case FW_RI_SEND_WITH_SE_INV:
                        wc->opcode = IB_WC_SEND;
                        wc->wc_flags |= IB_WC_WITH_INVALIDATE;
                        break;
                case FW_RI_SEND:
                case FW_RI_SEND_WITH_SE:
                        wc->opcode = IB_WC_SEND;
                        break;

                case FW_RI_LOCAL_INV:
                        wc->opcode = IB_WC_LOCAL_INV;
                        break;
                case FW_RI_FAST_REGISTER:
                        wc->opcode = IB_WC_REG_MR;

                        /* Invalidate the MR if the fastreg failed */
                        if (CQE_STATUS(&cqe) != T4_ERR_SUCCESS)
                                c4iw_invalidate_mr(qhp->rhp,
                                                   CQE_WRID_FR_STAG(&cqe));
                        break;
                case C4IW_DRAIN_OPCODE:
                        wc->opcode = IB_WC_SEND;
                        break;
                default:
                        pr_err("Unexpected opcode %d in the CQE received for QPID=0x%0x\n",
                               CQE_OPCODE(&cqe), CQE_QPID(&cqe));
                        ret = -EINVAL;
                        goto out;
                }
        }

        if (cqe_flushed)
                wc->status = IB_WC_WR_FLUSH_ERR;
        else {

                switch (CQE_STATUS(&cqe)) {
                case T4_ERR_SUCCESS:
                        wc->status = IB_WC_SUCCESS;
                        break;
                case T4_ERR_STAG:
                        wc->status = IB_WC_LOC_ACCESS_ERR;
                        break;
                case T4_ERR_PDID:
                        wc->status = IB_WC_LOC_PROT_ERR;
                        break;
                case T4_ERR_QPID:
                case T4_ERR_ACCESS:
                        wc->status = IB_WC_LOC_ACCESS_ERR;
                        break;
                case T4_ERR_WRAP:
                        wc->status = IB_WC_GENERAL_ERR;
                        break;
                case T4_ERR_BOUND:
                        wc->status = IB_WC_LOC_LEN_ERR;
                        break;
                case T4_ERR_INVALIDATE_SHARED_MR:
                case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
                        wc->status = IB_WC_MW_BIND_ERR;
                        break;
                case T4_ERR_CRC:
                case T4_ERR_MARKER:
                case T4_ERR_PDU_LEN_ERR:
                case T4_ERR_OUT_OF_RQE:
                case T4_ERR_DDP_VERSION:
                case T4_ERR_RDMA_VERSION:
                case T4_ERR_DDP_QUEUE_NUM:
                case T4_ERR_MSN:
                case T4_ERR_TBIT:
                case T4_ERR_MO:
                case T4_ERR_MSN_RANGE:
                case T4_ERR_IRD_OVERFLOW:
                case T4_ERR_OPCODE:
                case T4_ERR_INTERNAL_ERR:
                        wc->status = IB_WC_FATAL_ERR;
                        break;
                case T4_ERR_SWFLUSH:
                        wc->status = IB_WC_WR_FLUSH_ERR;
                        break;
                default:
                        pr_err("Unexpected cqe_status 0x%x for QPID=0x%0x\n",
                               CQE_STATUS(&cqe), CQE_QPID(&cqe));
                        wc->status = IB_WC_FATAL_ERR;
                }
        }
out:
        if (wq)
                spin_unlock(&qhp->lock);
        return ret;
}

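/*
 * Poll up to num_entries completions from the CQ into the wc array.
 * Returns the number of entries polled, or a negative errno on a fatal
 * error.
 */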
int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
        struct c4iw_cq *chp;
        unsigned long flags;
        int npolled;
        int err = 0;

        chp = to_c4iw_cq(ibcq);

        spin_lock_irqsave(&chp->lock, flags);
        for (npolled = 0; npolled < num_entries; ++npolled) {
                do {
                        err = c4iw_poll_cq_one(chp, wc + npolled);
                } while (err == -EAGAIN);
                if (err)
                        break;
        }
        spin_unlock_irqrestore(&chp->lock, flags);
        return !err || err == -ENODATA ? npolled : err;
}

int c4iw_destroy_cq(struct ib_cq *ib_cq)
{
        struct c4iw_cq *chp;
        struct c4iw_ucontext *ucontext;

        pr_debug("%s ib_cq %p\n", __func__, ib_cq);
        chp = to_c4iw_cq(ib_cq);

        remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
        atomic_dec(&chp->refcnt);
        wait_event(chp->wait, !atomic_read(&chp->refcnt));

        ucontext = ib_cq->uobject ? to_c4iw_ucontext(ib_cq->uobject->context)
                                  : NULL;
        destroy_cq(&chp->rhp->rdev, &chp->cq,
                   ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx,
                   chp->destroy_skb);
        chp->destroy_skb = NULL;
        kfree(chp);
        return 0;
}

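/*
 * Create a completion queue.  The requested entry count is padded for the
 * status page and full-vs-empty detection, rounded up to a multiple of 16
 * and doubled for the hardware queue; user CQs additionally get their queue
 * and doorbell pages set up for mmap.
 */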
struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
                             const struct ib_cq_init_attr *attr,
                             struct ib_ucontext *ib_context,
                             struct ib_udata *udata)
{
        int entries = attr->cqe;
        int vector = attr->comp_vector;
        struct c4iw_dev *rhp;
        struct c4iw_cq *chp;
        struct c4iw_create_cq_resp uresp;
        struct c4iw_ucontext *ucontext = NULL;
        int ret, wr_len;
        size_t memsize, hwentries;
        struct c4iw_mm_entry *mm, *mm2;

        pr_debug("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
        if (attr->flags)
                return ERR_PTR(-EINVAL);

        rhp = to_c4iw_dev(ibdev);

        if (vector >= rhp->rdev.lldi.nciq)
                return ERR_PTR(-EINVAL);

        chp = kzalloc(sizeof(*chp), GFP_KERNEL);
        if (!chp)
                return ERR_PTR(-ENOMEM);

        wr_len = sizeof(struct fw_ri_res_wr) + sizeof(struct fw_ri_res);
        chp->destroy_skb = alloc_skb(wr_len, GFP_KERNEL);
        if (!chp->destroy_skb) {
                ret = -ENOMEM;
                goto err1;
        }

        if (ib_context)
                ucontext = to_c4iw_ucontext(ib_context);

        /* account for the status page. */
        entries++;

        /* IQ needs one extra entry to differentiate full vs empty. */
        entries++;

        /*
         * entries must be multiple of 16 for HW.
         */
        entries = roundup(entries, 16);

        /*
         * Make actual HW queue 2x to avoid cidx_inc overflows.
         */
        hwentries = min(entries * 2, rhp->rdev.hw_queue.t4_max_iq_size);

        /*
         * Make HW queue at least 64 entries so GTS updates aren't too
         * frequent.
         */
        if (hwentries < 64)
                hwentries = 64;

        memsize = hwentries * sizeof *chp->cq.queue;

        /*
         * memsize must be a multiple of the page size if it's a user cq.
         */
        if (ucontext)
                memsize = roundup(memsize, PAGE_SIZE);
        chp->cq.size = hwentries;
        chp->cq.memsize = memsize;
        chp->cq.vector = vector;

        ret = create_cq(&rhp->rdev, &chp->cq,
                        ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
        if (ret)
                goto err2;

        chp->rhp = rhp;
        chp->cq.size--;                         /* status page */
        chp->ibcq.cqe = entries - 2;
        spin_lock_init(&chp->lock);
        spin_lock_init(&chp->comp_handler_lock);
        atomic_set(&chp->refcnt, 1);
        init_waitqueue_head(&chp->wait);
        ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
        if (ret)
                goto err3;

        if (ucontext) {
                ret = -ENOMEM;
                mm = kmalloc(sizeof *mm, GFP_KERNEL);
                if (!mm)
                        goto err4;
                mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
                if (!mm2)
                        goto err5;

                uresp.qid_mask = rhp->rdev.cqmask;
                uresp.cqid = chp->cq.cqid;
                uresp.size = chp->cq.size;
                uresp.memsize = chp->cq.memsize;
                spin_lock(&ucontext->mmap_lock);
                uresp.key = ucontext->key;
                ucontext->key += PAGE_SIZE;
                uresp.gts_key = ucontext->key;
                ucontext->key += PAGE_SIZE;
                spin_unlock(&ucontext->mmap_lock);
                ret = ib_copy_to_udata(udata, &uresp,
                                       sizeof(uresp) - sizeof(uresp.reserved));
                if (ret)
                        goto err6;

                mm->key = uresp.key;
                mm->addr = virt_to_phys(chp->cq.queue);
                mm->len = chp->cq.memsize;
                insert_mmap(ucontext, mm);

                mm2->key = uresp.gts_key;
                mm2->addr = chp->cq.bar2_pa;
                mm2->len = PAGE_SIZE;
                insert_mmap(ucontext, mm2);
        }
        pr_debug("%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
                 __func__, chp->cq.cqid, chp, chp->cq.size,
                 chp->cq.memsize, (unsigned long long)chp->cq.dma_addr);
        return &chp->ibcq;
err6:
        kfree(mm2);
err5:
        kfree(mm);
err4:
        remove_handle(rhp, &rhp->cqidr, chp->cq.cqid);
err3:
        destroy_cq(&chp->rhp->rdev, &chp->cq,
                   ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
                   chp->destroy_skb);
err2:
        kfree_skb(chp->destroy_skb);
err1:
        kfree(chp);
        return ERR_PTR(ret);
}

int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
{
        return -ENOSYS;
}

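/*
 * Re-arm the CQ for notification.  With IB_CQ_REPORT_MISSED_EVENTS set,
 * returns nonzero if completions are already pending in the CQ.
 */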
int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
        struct c4iw_cq *chp;
        int ret = 0;
        unsigned long flag;

        chp = to_c4iw_cq(ibcq);
        spin_lock_irqsave(&chp->lock, flag);
        t4_arm_cq(&chp->cq,
                  (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
        if (flags & IB_CQ_REPORT_MISSED_EVENTS)
                ret = t4_cq_notempty(&chp->cq);
        spin_unlock_irqrestore(&chp->lock, flag);
        return ret;
}