linux/drivers/infiniband/hw/cxgb4/cq.c
/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "iw_cxgb4.h"

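/*
 * Free a hardware CQ: post a FW_RI_RES_WR with op RESET for the CQ's
 * ingress queue id, wait for the firmware reply, then release the
 * software queue, the DMA-coherent queue memory and the cqid.
 */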
static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
                      struct c4iw_dev_ucontext *uctx, struct sk_buff *skb)
{
        struct fw_ri_res_wr *res_wr;
        struct fw_ri_res *res;
        int wr_len;
        struct c4iw_wr_wait wr_wait;
        int ret;

        wr_len = sizeof *res_wr + sizeof *res;
        set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

        res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
        memset(res_wr, 0, wr_len);
        res_wr->op_nres = cpu_to_be32(
                        FW_WR_OP_V(FW_RI_RES_WR) |
                        FW_RI_RES_WR_NRES_V(1) |
                        FW_WR_COMPL_F);
        res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
        res_wr->cookie = (uintptr_t)&wr_wait;
        res = res_wr->res;
        res->u.cq.restype = FW_RI_RES_TYPE_CQ;
        res->u.cq.op = FW_RI_RES_OP_RESET;
        res->u.cq.iqid = cpu_to_be32(cq->cqid);

        c4iw_init_wr_wait(&wr_wait);
        ret = c4iw_ofld_send(rdev, skb);
        if (!ret) {
                ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
        }

        kfree(cq->sw_queue);
        dma_free_coherent(&(rdev->lldi.pdev->dev),
                          cq->memsize, cq->queue,
                          dma_unmap_addr(cq, mapping));
        c4iw_put_cqid(rdev, cq->cqid, uctx);
        return ret;
}

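/*
 * Create a hardware CQ: allocate a cqid and the DMA-coherent queue memory
 * (plus a software queue for kernel users), post a FW_RI_RES_WR with op
 * WRITE so the firmware programs the CQ context, then map the BAR2
 * doorbell area for this queue.
 */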
static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
                     struct c4iw_dev_ucontext *uctx)
{
        struct fw_ri_res_wr *res_wr;
        struct fw_ri_res *res;
        int wr_len;
        int user = (uctx != &rdev->uctx);
        struct c4iw_wr_wait wr_wait;
        int ret;
        struct sk_buff *skb;

        cq->cqid = c4iw_get_cqid(rdev, uctx);
        if (!cq->cqid) {
                ret = -ENOMEM;
                goto err1;
        }

        if (!user) {
                cq->sw_queue = kzalloc(cq->memsize, GFP_KERNEL);
                if (!cq->sw_queue) {
                        ret = -ENOMEM;
                        goto err2;
                }
        }
        cq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, cq->memsize,
                                       &cq->dma_addr, GFP_KERNEL);
        if (!cq->queue) {
                ret = -ENOMEM;
                goto err3;
        }
        dma_unmap_addr_set(cq, mapping, cq->dma_addr);
        memset(cq->queue, 0, cq->memsize);

        /* build fw_ri_res_wr */
        wr_len = sizeof *res_wr + sizeof *res;

        skb = alloc_skb(wr_len, GFP_KERNEL);
        if (!skb) {
                ret = -ENOMEM;
                goto err4;
        }
        set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

        res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
        memset(res_wr, 0, wr_len);
        res_wr->op_nres = cpu_to_be32(
                        FW_WR_OP_V(FW_RI_RES_WR) |
                        FW_RI_RES_WR_NRES_V(1) |
                        FW_WR_COMPL_F);
        res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
        res_wr->cookie = (uintptr_t)&wr_wait;
        res = res_wr->res;
        res->u.cq.restype = FW_RI_RES_TYPE_CQ;
        res->u.cq.op = FW_RI_RES_OP_WRITE;
        res->u.cq.iqid = cpu_to_be32(cq->cqid);
        res->u.cq.iqandst_to_iqandstindex = cpu_to_be32(
                        FW_RI_RES_WR_IQANUS_V(0) |
                        FW_RI_RES_WR_IQANUD_V(1) |
                        FW_RI_RES_WR_IQANDST_F |
                        FW_RI_RES_WR_IQANDSTINDEX_V(
                                rdev->lldi.ciq_ids[cq->vector]));
        res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(
                        FW_RI_RES_WR_IQDROPRSS_F |
                        FW_RI_RES_WR_IQPCIECH_V(2) |
                        FW_RI_RES_WR_IQINTCNTTHRESH_V(0) |
                        FW_RI_RES_WR_IQO_F |
                        FW_RI_RES_WR_IQESIZE_V(1));
        res->u.cq.iqsize = cpu_to_be16(cq->size);
        res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);

        c4iw_init_wr_wait(&wr_wait);

        ret = c4iw_ofld_send(rdev, skb);
        if (ret)
                goto err4;
        pr_debug("%s wait_event wr_wait %p\n", __func__, &wr_wait);
        ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
        if (ret)
                goto err4;

        cq->gen = 1;
        cq->gts = rdev->lldi.gts_reg;
        cq->rdev = rdev;

        cq->bar2_va = c4iw_bar2_addrs(rdev, cq->cqid, T4_BAR2_QTYPE_INGRESS,
                                      &cq->bar2_qid,
                                      user ? &cq->bar2_pa : NULL);
        if (user && !cq->bar2_pa) {
                pr_warn("%s: cqid %u not in BAR2 range\n",
                        pci_name(rdev->lldi.pdev), cq->cqid);
                ret = -EINVAL;
                goto err4;
        }
        return 0;
err4:
        dma_free_coherent(&rdev->lldi.pdev->dev, cq->memsize, cq->queue,
                          dma_unmap_addr(cq, mapping));
err3:
        kfree(cq->sw_queue);
err2:
        c4iw_put_cqid(rdev, cq->cqid, uctx);
err1:
        return ret;
}

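/*
 * Insert a software-generated flush CQE (status T4_ERR_SWFLUSH) for one
 * receive WR into the software CQ.
 */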
static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)
{
        struct t4_cqe cqe;

        pr_debug("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
                 wq, cq, cq->sw_cidx, cq->sw_pidx);
        memset(&cqe, 0, sizeof(cqe));
        cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
                                 CQE_OPCODE_V(FW_RI_SEND) |
                                 CQE_TYPE_V(0) |
                                 CQE_SWCQE_V(1) |
                                 CQE_QPID_V(wq->sq.qid));
        cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
        cq->sw_queue[cq->sw_pidx] = cqe;
        t4_swcq_produce(cq);
}

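/*
 * Insert a software flush CQE for each outstanding RQ entry not already
 * covered by the 'count' CQEs pending in the CQ; returns the number of
 * entries flushed.
 */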
int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count)
{
        int flushed = 0;
        int in_use = wq->rq.in_use - count;

        BUG_ON(in_use < 0);
        pr_debug("%s wq %p cq %p rq.in_use %u skip count %u\n", __func__,
                 wq, cq, wq->rq.in_use, count);
        while (in_use--) {
                insert_recv_cqe(wq, cq);
                flushed++;
        }
        return flushed;
}

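/*
 * Insert a software-generated flush CQE (status T4_ERR_SWFLUSH) for the
 * given SQ WR into the software CQ.
 */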
static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
                          struct t4_swsqe *swcqe)
{
        struct t4_cqe cqe;

        pr_debug("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
                 wq, cq, cq->sw_cidx, cq->sw_pidx);
        memset(&cqe, 0, sizeof(cqe));
        cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
                                 CQE_OPCODE_V(swcqe->opcode) |
                                 CQE_TYPE_V(1) |
                                 CQE_SWCQE_V(1) |
                                 CQE_QPID_V(wq->sq.qid));
        CQE_WRID_SQ_IDX(&cqe) = swcqe->idx;
        cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
        cq->sw_queue[cq->sw_pidx] = cqe;
        t4_swcq_produce(cq);
}

static void advance_oldest_read(struct t4_wq *wq);

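/*
 * Insert flush CQEs for every SQ WR posted since the last flush point,
 * advancing the oldest-read tracker past any flushed read requests;
 * returns the number of WRs flushed.
 */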
int c4iw_flush_sq(struct c4iw_qp *qhp)
{
        int flushed = 0;
        struct t4_wq *wq = &qhp->wq;
        struct c4iw_cq *chp = to_c4iw_cq(qhp->ibqp.send_cq);
        struct t4_cq *cq = &chp->cq;
        int idx;
        struct t4_swsqe *swsqe;

        if (wq->sq.flush_cidx == -1)
                wq->sq.flush_cidx = wq->sq.cidx;
        idx = wq->sq.flush_cidx;
        BUG_ON(idx >= wq->sq.size);
        while (idx != wq->sq.pidx) {
                swsqe = &wq->sq.sw_sq[idx];
                BUG_ON(swsqe->flushed);
                swsqe->flushed = 1;
                insert_sq_cqe(wq, cq, swsqe);
                if (wq->sq.oldest_read == swsqe) {
                        BUG_ON(swsqe->opcode != FW_RI_READ_REQ);
                        advance_oldest_read(wq);
                }
                flushed++;
                if (++idx == wq->sq.size)
                        idx = 0;
        }
        wq->sq.flush_cidx += flushed;
        if (wq->sq.flush_cidx >= wq->sq.size)
                wq->sq.flush_cidx -= wq->sq.size;
        return flushed;
}

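/*
 * Walk the SW SQ from the flush point and move any signaled WRs that have
 * already completed (possibly out of order) into the software CQ, stopping
 * at the first signaled WR that has not completed yet.
 */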
static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
{
        struct t4_swsqe *swsqe;
        int cidx;

        if (wq->sq.flush_cidx == -1)
                wq->sq.flush_cidx = wq->sq.cidx;
        cidx = wq->sq.flush_cidx;
        BUG_ON(cidx > wq->sq.size);

        while (cidx != wq->sq.pidx) {
                swsqe = &wq->sq.sw_sq[cidx];
                if (!swsqe->signaled) {
                        if (++cidx == wq->sq.size)
                                cidx = 0;
                } else if (swsqe->complete) {

                        BUG_ON(swsqe->flushed);

                        /*
                         * Insert this completed cqe into the swcq.
                         */
                        pr_debug("%s moving cqe into swcq sq idx %u cq idx %u\n",
                                 __func__, cidx, cq->sw_pidx);
                        swsqe->cqe.header |= htonl(CQE_SWCQE_V(1));
                        cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
                        t4_swcq_produce(cq);
                        swsqe->flushed = 1;
                        if (++cidx == wq->sq.size)
                                cidx = 0;
                        wq->sq.flush_cidx = cidx;
                } else
                        break;
        }
}

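/*
 * Build a synthetic SQ-type read-request CQE for the oldest outstanding
 * read WR, borrowing the QP id, SWCQE flag and timestamp bits from the
 * hardware read-response CQE.
 */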
static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe,
                struct t4_cqe *read_cqe)
{
        read_cqe->u.scqe.cidx = wq->sq.oldest_read->idx;
        read_cqe->len = htonl(wq->sq.oldest_read->read_len);
        read_cqe->header = htonl(CQE_QPID_V(CQE_QPID(hw_cqe)) |
                        CQE_SWCQE_V(SW_CQE(hw_cqe)) |
                        CQE_OPCODE_V(FW_RI_READ_REQ) |
                        CQE_TYPE_V(1));
        read_cqe->bits_type_ts = hw_cqe->bits_type_ts;
}

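/*
 * Advance wq->sq.oldest_read to the next outstanding read request in the
 * SW SQ, or set it to NULL if there is none.
 */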
static void advance_oldest_read(struct t4_wq *wq)
{

        u32 rptr = wq->sq.oldest_read - wq->sq.sw_sq + 1;

        if (rptr == wq->sq.size)
                rptr = 0;
        while (rptr != wq->sq.pidx) {
                wq->sq.oldest_read = &wq->sq.sw_sq[rptr];

                if (wq->sq.oldest_read->opcode == FW_RI_READ_REQ)
                        return;
                if (++rptr == wq->sq.size)
                        rptr = 0;
        }
        wq->sq.oldest_read = NULL;
}

/*
 * Move all CQEs from the HWCQ into the SWCQ.
 * Deal with out-of-order and/or completions that complete
 * prior unsignalled WRs.
 */
void c4iw_flush_hw_cq(struct c4iw_cq *chp)
{
        struct t4_cqe *hw_cqe, *swcqe, read_cqe;
        struct c4iw_qp *qhp;
        struct t4_swsqe *swsqe;
        int ret;

        pr_debug("%s  cqid 0x%x\n", __func__, chp->cq.cqid);
        ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);

        /*
         * This logic is similar to poll_cq(), but not quite the same
         * unfortunately.  Need to move pertinent HW CQEs to the SW CQ but
         * also do any translation magic that poll_cq() normally does.
         */
        while (!ret) {
                qhp = get_qhp(chp->rhp, CQE_QPID(hw_cqe));

                /*
                 * drop CQEs with no associated QP
                 */
                if (qhp == NULL)
                        goto next_cqe;

                if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE)
                        goto next_cqe;

                if (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP) {

                        /* If we have reached here because of async
                         * event or other error, and have egress error
                         * then drop
                         */
                        if (CQE_TYPE(hw_cqe) == 1)
                                goto next_cqe;

                        /* drop peer2peer RTR reads.
                         */
                        if (CQE_WRID_STAG(hw_cqe) == 1)
                                goto next_cqe;

                        /*
                         * Eat completions for unsignaled read WRs.
                         */
                        if (!qhp->wq.sq.oldest_read->signaled) {
                                advance_oldest_read(&qhp->wq);
                                goto next_cqe;
                        }

                        /*
                         * Don't write to the HWCQ, create a new read req CQE
                         * in local memory and move it into the swcq.
                         */
                        create_read_req_cqe(&qhp->wq, hw_cqe, &read_cqe);
                        hw_cqe = &read_cqe;
                        advance_oldest_read(&qhp->wq);
                }

                /* if it's a SQ completion, then do the magic to move all the
                 * unsignaled and now in-order completions into the swcq.
                 */
                if (SQ_TYPE(hw_cqe)) {
                        swsqe = &qhp->wq.sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
                        swsqe->cqe = *hw_cqe;
                        swsqe->complete = 1;
                        flush_completed_wrs(&qhp->wq, &chp->cq);
                } else {
                        swcqe = &chp->cq.sw_queue[chp->cq.sw_pidx];
                        *swcqe = *hw_cqe;
                        swcqe->header |= cpu_to_be32(CQE_SWCQE_V(1));
                        t4_swcq_produce(&chp->cq);
                }
next_cqe:
                t4_hwcq_consume(&chp->cq);
                ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);
        }
}

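/*
 * Return 1 if this CQE completes a WR that is still outstanding on the
 * given WQ, 0 for CQEs that should not be counted (terminates, RQ-type
 * write CQEs, SQ-type read-response CQEs, or sends when the RQ is empty).
 */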
static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)
{
        if (CQE_OPCODE(cqe) == FW_RI_TERMINATE)
                return 0;

        if ((CQE_OPCODE(cqe) == FW_RI_RDMA_WRITE) && RQ_TYPE(cqe))
                return 0;

        if ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) && SQ_TYPE(cqe))
                return 0;

        if (CQE_SEND_OPCODE(cqe) && RQ_TYPE(cqe) && t4_rq_empty(wq))
                return 0;
        return 1;
}

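/*
 * Count the CQEs already sitting in the software CQ that complete receive
 * WRs for the given WQ; these entries do not need to be flushed again.
 */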
void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
{
        struct t4_cqe *cqe;
        u32 ptr;

        *count = 0;
        pr_debug("%s count zero %d\n", __func__, *count);
        ptr = cq->sw_cidx;
        while (ptr != cq->sw_pidx) {
                cqe = &cq->sw_queue[ptr];
                if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) &&
                    (CQE_QPID(cqe) == wq->sq.qid) && cqe_completes_wr(cqe, wq))
                        (*count)++;
                if (++ptr == cq->size)
                        ptr = 0;
        }
        pr_debug("%s cq %p count %d\n", __func__, cq, *count);
}

/*
 * poll_cq
 *
 * Caller must:
 *     check the validity of the first CQE,
 *     supply the wq associated with the qpid.
 *
 * credit: cq credit to return to sge.
 * cqe_flushed: 1 iff the CQE is flushed.
 * cqe: copy of the polled CQE.
 *
 * return value:
 *    0             CQE returned ok.
 *    -EAGAIN       CQE skipped, try again.
 *    -EOVERFLOW    CQ overflow detected.
 */
static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
                   u8 *cqe_flushed, u64 *cookie, u32 *credit)
{
        int ret = 0;
        struct t4_cqe *hw_cqe, read_cqe;

        *cqe_flushed = 0;
        *credit = 0;
        ret = t4_next_cqe(cq, &hw_cqe);
        if (ret)
                return ret;

        pr_debug("%s CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
                 __func__, CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe),
                 CQE_GENBIT(hw_cqe), CQE_TYPE(hw_cqe), CQE_STATUS(hw_cqe),
                 CQE_OPCODE(hw_cqe), CQE_LEN(hw_cqe), CQE_WRID_HI(hw_cqe),
                 CQE_WRID_LOW(hw_cqe));

        /*
         * skip CQEs not affiliated with a QP.
         */
        if (wq == NULL) {
                ret = -EAGAIN;
                goto skip_cqe;
        }

        /*
         * skip HW CQEs if the wq is flushed.
         */
        if (wq->flushed && !SW_CQE(hw_cqe)) {
                ret = -EAGAIN;
                goto skip_cqe;
        }

        /*
         * skip TERMINATE cqes...
         */
        if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE) {
                ret = -EAGAIN;
                goto skip_cqe;
        }

        /*
         * Special cqe for drain WR completions...
         */
        if (CQE_OPCODE(hw_cqe) == C4IW_DRAIN_OPCODE) {
                *cookie = CQE_DRAIN_COOKIE(hw_cqe);
                *cqe = *hw_cqe;
                goto skip_cqe;
        }

        /*
         * Gotta tweak READ completions:
         *      1) the cqe doesn't contain the sq_wptr from the wr.
         *      2) opcode not reflected from the wr.
         *      3) read_len not reflected from the wr.
         *      4) cq_type is RQ_TYPE not SQ_TYPE.
         */
        if (RQ_TYPE(hw_cqe) && (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP)) {

                /* If we have reached here because of async
                 * event or other error, and have egress error
                 * then drop
                 */
                if (CQE_TYPE(hw_cqe) == 1) {
                        if (CQE_STATUS(hw_cqe))
                                t4_set_wq_in_error(wq);
                        ret = -EAGAIN;
                        goto skip_cqe;
                }

                /* If this is an unsolicited read response, then the read
                 * was generated by the kernel driver as part of peer-2-peer
                 * connection setup.  So ignore the completion.
                 */
                if (CQE_WRID_STAG(hw_cqe) == 1) {
                        if (CQE_STATUS(hw_cqe))
                                t4_set_wq_in_error(wq);
                        ret = -EAGAIN;
                        goto skip_cqe;
                }

                /*
                 * Eat completions for unsignaled read WRs.
                 */
                if (!wq->sq.oldest_read->signaled) {
                        advance_oldest_read(wq);
                        ret = -EAGAIN;
                        goto skip_cqe;
                }

                /*
                 * Don't write to the HWCQ, so create a new read req CQE
                 * in local memory.
                 */
                create_read_req_cqe(wq, hw_cqe, &read_cqe);
                hw_cqe = &read_cqe;
                advance_oldest_read(wq);
        }

        if (CQE_STATUS(hw_cqe) || t4_wq_in_error(wq)) {
                *cqe_flushed = (CQE_STATUS(hw_cqe) == T4_ERR_SWFLUSH);
                t4_set_wq_in_error(wq);
        }

        /*
         * RECV completion.
         */
        if (RQ_TYPE(hw_cqe)) {

                /*
                 * HW only validates 4 bits of MSN.  So we must validate that
                 * the MSN in the SEND is the next expected MSN.  If it's not,
                 * then we complete this with T4_ERR_MSN and mark the wq in
                 * error.
                 */

                if (t4_rq_empty(wq)) {
                        t4_set_wq_in_error(wq);
                        ret = -EAGAIN;
                        goto skip_cqe;
                }
                if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) {
                        t4_set_wq_in_error(wq);
                        hw_cqe->header |= htonl(CQE_STATUS_V(T4_ERR_MSN));
                        goto proc_cqe;
                }
                goto proc_cqe;
        }

        /*
         * If we get here it's a send completion.
         *
         * Handle out of order completion. These get stuffed
         * in the SW SQ. Then the SW SQ is walked to move any
         * now in-order completions into the SW CQ.  This handles
         * 2 cases:
         *      1) reaping unsignaled WRs when the first subsequent
         *         signaled WR is completed.
         *      2) out of order read completions.
         */
        if (!SW_CQE(hw_cqe) && (CQE_WRID_SQ_IDX(hw_cqe) != wq->sq.cidx)) {
                struct t4_swsqe *swsqe;

                pr_debug("%s out of order completion going in sw_sq at idx %u\n",
                         __func__, CQE_WRID_SQ_IDX(hw_cqe));
                swsqe = &wq->sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
                swsqe->cqe = *hw_cqe;
                swsqe->complete = 1;
                ret = -EAGAIN;
                goto flush_wq;
        }

proc_cqe:
        *cqe = *hw_cqe;

        /*
         * Reap the associated WR(s) that are freed up with this
         * completion.
         */
        if (SQ_TYPE(hw_cqe)) {
                int idx = CQE_WRID_SQ_IDX(hw_cqe);
                BUG_ON(idx >= wq->sq.size);

                /*
                 * Account for any unsignaled completions completed by
                 * this signaled completion.  In this case, cidx points
                 * to the first unsignaled one, and idx points to the
                 * signaled one.  So adjust in_use based on this delta.
                 * If this is not completing any unsignaled wrs, then the
                 * delta will be 0. Handle wrapping also!
                 */
                if (idx < wq->sq.cidx)
                        wq->sq.in_use -= wq->sq.size + idx - wq->sq.cidx;
                else
                        wq->sq.in_use -= idx - wq->sq.cidx;
                BUG_ON(wq->sq.in_use <= 0 && wq->sq.in_use >= wq->sq.size);

                wq->sq.cidx = (uint16_t)idx;
                pr_debug("%s completing sq idx %u\n", __func__, wq->sq.cidx);
                *cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
                if (c4iw_wr_log)
                        c4iw_log_wr_stats(wq, hw_cqe);
                t4_sq_consume(wq);
        } else {
                pr_debug("%s completing rq idx %u\n", __func__, wq->rq.cidx);
                *cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
                BUG_ON(t4_rq_empty(wq));
                if (c4iw_wr_log)
                        c4iw_log_wr_stats(wq, hw_cqe);
                t4_rq_consume(wq);
                goto skip_cqe;
        }

flush_wq:
        /*
         * Flush any completed cqes that are now in-order.
         */
        flush_completed_wrs(wq, cq);

skip_cqe:
        if (SW_CQE(hw_cqe)) {
                pr_debug("%s cq %p cqid 0x%x skip sw cqe cidx %u\n",
                         __func__, cq, cq->cqid, cq->sw_cidx);
                t4_swcq_consume(cq);
        } else {
                pr_debug("%s cq %p cqid 0x%x skip hw cqe cidx %u\n",
                         __func__, cq, cq->cqid, cq->cidx);
                t4_hwcq_consume(cq);
        }
        return ret;
}

/*
 * Get one cq entry from c4iw and map it to openib.
 *
 * Returns:
 *      0                       cqe returned
 *      -ENODATA                EMPTY;
 *      -EAGAIN                 caller must try again
 *      any other -errno        fatal error
 */
static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
{
        struct c4iw_qp *qhp = NULL;
        struct t4_cqe uninitialized_var(cqe), *rd_cqe;
        struct t4_wq *wq;
        u32 credit = 0;
        u8 cqe_flushed;
        u64 cookie = 0;
        int ret;

        ret = t4_next_cqe(&chp->cq, &rd_cqe);

        if (ret)
                return ret;

        qhp = get_qhp(chp->rhp, CQE_QPID(rd_cqe));
        if (!qhp)
                wq = NULL;
        else {
                spin_lock(&qhp->lock);
                wq = &(qhp->wq);
        }
        ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit);
        if (ret)
                goto out;

        wc->wr_id = cookie;
        wc->qp = &qhp->ibqp;
        wc->vendor_err = CQE_STATUS(&cqe);
        wc->wc_flags = 0;

        pr_debug("%s qpid 0x%x type %d opcode %d status 0x%x len %u wrid hi 0x%x lo 0x%x cookie 0x%llx\n",
                 __func__, CQE_QPID(&cqe),
                 CQE_TYPE(&cqe), CQE_OPCODE(&cqe),
                 CQE_STATUS(&cqe), CQE_LEN(&cqe),
                 CQE_WRID_HI(&cqe), CQE_WRID_LOW(&cqe),
                 (unsigned long long)cookie);

        if (CQE_TYPE(&cqe) == 0) {
                if (!CQE_STATUS(&cqe))
                        wc->byte_len = CQE_LEN(&cqe);
                else
                        wc->byte_len = 0;
                wc->opcode = IB_WC_RECV;
                if (CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_INV ||
                    CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_SE_INV) {
                        wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe);
                        wc->wc_flags |= IB_WC_WITH_INVALIDATE;
                        c4iw_invalidate_mr(qhp->rhp, wc->ex.invalidate_rkey);
                }
        } else {
                switch (CQE_OPCODE(&cqe)) {
                case FW_RI_RDMA_WRITE:
                        wc->opcode = IB_WC_RDMA_WRITE;
                        break;
                case FW_RI_READ_REQ:
                        wc->opcode = IB_WC_RDMA_READ;
                        wc->byte_len = CQE_LEN(&cqe);
                        break;
                case FW_RI_SEND_WITH_INV:
                case FW_RI_SEND_WITH_SE_INV:
                        wc->opcode = IB_WC_SEND;
                        wc->wc_flags |= IB_WC_WITH_INVALIDATE;
                        break;
                case FW_RI_SEND:
                case FW_RI_SEND_WITH_SE:
                        wc->opcode = IB_WC_SEND;
                        break;

                case FW_RI_LOCAL_INV:
                        wc->opcode = IB_WC_LOCAL_INV;
                        break;
                case FW_RI_FAST_REGISTER:
                        wc->opcode = IB_WC_REG_MR;

                        /* Invalidate the MR if the fastreg failed */
                        if (CQE_STATUS(&cqe) != T4_ERR_SUCCESS)
                                c4iw_invalidate_mr(qhp->rhp,
                                                   CQE_WRID_FR_STAG(&cqe));
                        break;
                case C4IW_DRAIN_OPCODE:
                        wc->opcode = IB_WC_SEND;
                        break;
                default:
                        pr_err("Unexpected opcode %d in the CQE received for QPID=0x%0x\n",
                               CQE_OPCODE(&cqe), CQE_QPID(&cqe));
                        ret = -EINVAL;
                        goto out;
                }
        }

        if (cqe_flushed)
                wc->status = IB_WC_WR_FLUSH_ERR;
        else {

                switch (CQE_STATUS(&cqe)) {
                case T4_ERR_SUCCESS:
                        wc->status = IB_WC_SUCCESS;
                        break;
                case T4_ERR_STAG:
                        wc->status = IB_WC_LOC_ACCESS_ERR;
                        break;
                case T4_ERR_PDID:
                        wc->status = IB_WC_LOC_PROT_ERR;
                        break;
                case T4_ERR_QPID:
                case T4_ERR_ACCESS:
                        wc->status = IB_WC_LOC_ACCESS_ERR;
                        break;
                case T4_ERR_WRAP:
                        wc->status = IB_WC_GENERAL_ERR;
                        break;
                case T4_ERR_BOUND:
                        wc->status = IB_WC_LOC_LEN_ERR;
                        break;
                case T4_ERR_INVALIDATE_SHARED_MR:
                case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
                        wc->status = IB_WC_MW_BIND_ERR;
                        break;
                case T4_ERR_CRC:
                case T4_ERR_MARKER:
                case T4_ERR_PDU_LEN_ERR:
                case T4_ERR_OUT_OF_RQE:
                case T4_ERR_DDP_VERSION:
                case T4_ERR_RDMA_VERSION:
                case T4_ERR_DDP_QUEUE_NUM:
                case T4_ERR_MSN:
                case T4_ERR_TBIT:
                case T4_ERR_MO:
                case T4_ERR_MSN_RANGE:
                case T4_ERR_IRD_OVERFLOW:
                case T4_ERR_OPCODE:
                case T4_ERR_INTERNAL_ERR:
                        wc->status = IB_WC_FATAL_ERR;
                        break;
                case T4_ERR_SWFLUSH:
                        wc->status = IB_WC_WR_FLUSH_ERR;
                        break;
                default:
                        pr_err("Unexpected cqe_status 0x%x for QPID=0x%0x\n",
                               CQE_STATUS(&cqe), CQE_QPID(&cqe));
                        wc->status = IB_WC_FATAL_ERR;
                }
        }
out:
        if (wq)
                spin_unlock(&qhp->lock);
        return ret;
}

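/*
 * Poll up to num_entries completions from the CQ into the wc array,
 * retrying any CQEs that poll_cq() asks to skip; returns the number of
 * completions polled, or a negative errno on a fatal error.
 */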
int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
        struct c4iw_cq *chp;
        unsigned long flags;
        int npolled;
        int err = 0;

        chp = to_c4iw_cq(ibcq);

        spin_lock_irqsave(&chp->lock, flags);
        for (npolled = 0; npolled < num_entries; ++npolled) {
                do {
                        err = c4iw_poll_cq_one(chp, wc + npolled);
                } while (err == -EAGAIN);
                if (err)
                        break;
        }
        spin_unlock_irqrestore(&chp->lock, flags);
        return !err || err == -ENODATA ? npolled : err;
}

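/*
 * Tear down a CQ: remove it from the cqid table, wait for all references
 * to drop, destroy the HW CQ and free the chp structure.
 */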
int c4iw_destroy_cq(struct ib_cq *ib_cq)
{
        struct c4iw_cq *chp;
        struct c4iw_ucontext *ucontext;

        pr_debug("%s ib_cq %p\n", __func__, ib_cq);
        chp = to_c4iw_cq(ib_cq);

        remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
        atomic_dec(&chp->refcnt);
        wait_event(chp->wait, !atomic_read(&chp->refcnt));

        ucontext = ib_cq->uobject ? to_c4iw_ucontext(ib_cq->uobject->context)
                                  : NULL;
        destroy_cq(&chp->rhp->rdev, &chp->cq,
                   ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx,
                   chp->destroy_skb);
        chp->destroy_skb = NULL;
        kfree(chp);
        return 0;
}

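/*
 * Create a CQ of at least attr->cqe entries.  The requested size is padded
 * for the status page and for full/empty detection, rounded up for the
 * hardware, and doubled to avoid cidx_inc overflows.  For user CQs the
 * queue and GTS doorbell regions are exported to userspace via mmap keys.
 */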
struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
                             const struct ib_cq_init_attr *attr,
                             struct ib_ucontext *ib_context,
                             struct ib_udata *udata)
{
        int entries = attr->cqe;
        int vector = attr->comp_vector;
        struct c4iw_dev *rhp;
        struct c4iw_cq *chp;
        struct c4iw_create_cq_resp uresp;
        struct c4iw_ucontext *ucontext = NULL;
        int ret, wr_len;
        size_t memsize, hwentries;
        struct c4iw_mm_entry *mm, *mm2;

        pr_debug("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
        if (attr->flags)
                return ERR_PTR(-EINVAL);

        rhp = to_c4iw_dev(ibdev);

        if (vector >= rhp->rdev.lldi.nciq)
                return ERR_PTR(-EINVAL);

        chp = kzalloc(sizeof(*chp), GFP_KERNEL);
        if (!chp)
                return ERR_PTR(-ENOMEM);

        wr_len = sizeof(struct fw_ri_res_wr) + sizeof(struct fw_ri_res);
        chp->destroy_skb = alloc_skb(wr_len, GFP_KERNEL);
        if (!chp->destroy_skb) {
                ret = -ENOMEM;
                goto err1;
        }

        if (ib_context)
                ucontext = to_c4iw_ucontext(ib_context);

        /* account for the status page. */
        entries++;

        /* IQ needs one extra entry to differentiate full vs empty. */
        entries++;

        /*
         * entries must be multiple of 16 for HW.
         */
        entries = roundup(entries, 16);

        /*
         * Make actual HW queue 2x to avoid cidx_inc overflows.
         */
        hwentries = min(entries * 2, rhp->rdev.hw_queue.t4_max_iq_size);

        /*
         * Make HW queue at least 64 entries so GTS updates aren't too
         * frequent.
         */
        if (hwentries < 64)
                hwentries = 64;

        memsize = hwentries * sizeof *chp->cq.queue;

        /*
         * memsize must be a multiple of the page size if it's a user cq.
         */
        if (ucontext)
                memsize = roundup(memsize, PAGE_SIZE);
        chp->cq.size = hwentries;
        chp->cq.memsize = memsize;
        chp->cq.vector = vector;

        ret = create_cq(&rhp->rdev, &chp->cq,
                        ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
        if (ret)
                goto err2;

        chp->rhp = rhp;
        chp->cq.size--;                         /* status page */
        chp->ibcq.cqe = entries - 2;
        spin_lock_init(&chp->lock);
        spin_lock_init(&chp->comp_handler_lock);
        atomic_set(&chp->refcnt, 1);
        init_waitqueue_head(&chp->wait);
        ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
        if (ret)
                goto err3;

        if (ucontext) {
                mm = kmalloc(sizeof *mm, GFP_KERNEL);
                if (!mm) {
                        ret = -ENOMEM;
                        goto err4;
                }
                mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
                if (!mm2) {
                        ret = -ENOMEM;
                        goto err5;
                }

                uresp.qid_mask = rhp->rdev.cqmask;
                uresp.cqid = chp->cq.cqid;
                uresp.size = chp->cq.size;
                uresp.memsize = chp->cq.memsize;
                spin_lock(&ucontext->mmap_lock);
                uresp.key = ucontext->key;
                ucontext->key += PAGE_SIZE;
                uresp.gts_key = ucontext->key;
                ucontext->key += PAGE_SIZE;
                spin_unlock(&ucontext->mmap_lock);
                ret = ib_copy_to_udata(udata, &uresp,
                                       sizeof(uresp) - sizeof(uresp.reserved));
                if (ret)
                        goto err6;

                mm->key = uresp.key;
                mm->addr = virt_to_phys(chp->cq.queue);
                mm->len = chp->cq.memsize;
                insert_mmap(ucontext, mm);

                mm2->key = uresp.gts_key;
                mm2->addr = chp->cq.bar2_pa;
                mm2->len = PAGE_SIZE;
                insert_mmap(ucontext, mm2);
        }
        pr_debug("%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
                 __func__, chp->cq.cqid, chp, chp->cq.size,
                 chp->cq.memsize, (unsigned long long)chp->cq.dma_addr);
        return &chp->ibcq;
err6:
        kfree(mm2);
err5:
        kfree(mm);
err4:
        remove_handle(rhp, &rhp->cqidr, chp->cq.cqid);
err3:
        destroy_cq(&chp->rhp->rdev, &chp->cq,
                   ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
                   chp->destroy_skb);
err2:
        kfree_skb(chp->destroy_skb);
err1:
        kfree(chp);
        return ERR_PTR(ret);
}

int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
{
        return -ENOSYS;
}

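/*
 * Re-arm the CQ for notification (solicited-only if requested).  With
 * IB_CQ_REPORT_MISSED_EVENTS, return 1 if CQEs are already pending.
 */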
int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
        struct c4iw_cq *chp;
        int ret = 0;
        unsigned long flag;

        chp = to_c4iw_cq(ibcq);
        spin_lock_irqsave(&chp->lock, flag);
        t4_arm_cq(&chp->cq,
                  (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
        if (flags & IB_CQ_REPORT_MISSED_EVENTS)
                ret = t4_cq_notempty(&chp->cq);
        spin_unlock_irqrestore(&chp->lock, flag);
        return ret;
}