linux/drivers/infiniband/hw/cxgb4/cq.c
/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "iw_cxgb4.h"

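/*
 * Post a FW_RI_RES_WR (op RESET) on the control queue to tear down the
 * hardware IQ backing this CQ, wait synchronously for the firmware reply
 * via the wr_wait cookie, then free the software queue and DMA memory and
 * return the CQID to its resource pool.
 */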
static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
                      struct c4iw_dev_ucontext *uctx)
{
        struct fw_ri_res_wr *res_wr;
        struct fw_ri_res *res;
        int wr_len;
        struct c4iw_wr_wait wr_wait;
        struct sk_buff *skb;
        int ret;

        wr_len = sizeof *res_wr + sizeof *res;
        skb = alloc_skb(wr_len, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;
        set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

        res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
        memset(res_wr, 0, wr_len);
        res_wr->op_nres = cpu_to_be32(
                        FW_WR_OP_V(FW_RI_RES_WR) |
                        FW_RI_RES_WR_NRES_V(1) |
                        FW_WR_COMPL_F);
        res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
        res_wr->cookie = (uintptr_t)&wr_wait;
        res = res_wr->res;
        res->u.cq.restype = FW_RI_RES_TYPE_CQ;
        res->u.cq.op = FW_RI_RES_OP_RESET;
        res->u.cq.iqid = cpu_to_be32(cq->cqid);

        c4iw_init_wr_wait(&wr_wait);
        ret = c4iw_ofld_send(rdev, skb);
        if (!ret) {
                ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
        }

        kfree(cq->sw_queue);
        dma_free_coherent(&(rdev->lldi.pdev->dev),
                          cq->memsize, cq->queue,
                          dma_unmap_addr(cq, mapping));
        c4iw_put_cqid(rdev, cq->cqid, uctx);
        return ret;
}

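/*
 * Allocate a CQID, a software shadow queue (kernel CQs only) and the
 * DMA-coherent queue memory, then describe the new ingress queue to the
 * firmware with a FW_RI_RES_WR (op WRITE) and wait for its reply.  On
 * success, map the BAR2 doorbell/GTS space for the queue.
 */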
static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
                     struct c4iw_dev_ucontext *uctx)
{
        struct fw_ri_res_wr *res_wr;
        struct fw_ri_res *res;
        int wr_len;
        int user = (uctx != &rdev->uctx);
        struct c4iw_wr_wait wr_wait;
        int ret;
        struct sk_buff *skb;

        cq->cqid = c4iw_get_cqid(rdev, uctx);
        if (!cq->cqid) {
                ret = -ENOMEM;
                goto err1;
        }

        if (!user) {
                cq->sw_queue = kzalloc(cq->memsize, GFP_KERNEL);
                if (!cq->sw_queue) {
                        ret = -ENOMEM;
                        goto err2;
                }
        }
        cq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, cq->memsize,
                                       &cq->dma_addr, GFP_KERNEL);
        if (!cq->queue) {
                ret = -ENOMEM;
                goto err3;
        }
        dma_unmap_addr_set(cq, mapping, cq->dma_addr);
        memset(cq->queue, 0, cq->memsize);

        /* build fw_ri_res_wr */
        wr_len = sizeof *res_wr + sizeof *res;

        skb = alloc_skb(wr_len, GFP_KERNEL);
        if (!skb) {
                ret = -ENOMEM;
                goto err4;
        }
        set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

        res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
        memset(res_wr, 0, wr_len);
        res_wr->op_nres = cpu_to_be32(
                        FW_WR_OP_V(FW_RI_RES_WR) |
                        FW_RI_RES_WR_NRES_V(1) |
                        FW_WR_COMPL_F);
        res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
        res_wr->cookie = (uintptr_t)&wr_wait;
        res = res_wr->res;
        res->u.cq.restype = FW_RI_RES_TYPE_CQ;
        res->u.cq.op = FW_RI_RES_OP_WRITE;
        res->u.cq.iqid = cpu_to_be32(cq->cqid);
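        /*
         * Deliver this IQ's completions to the CIQ bound to the requested
         * completion vector.
         */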
        res->u.cq.iqandst_to_iqandstindex = cpu_to_be32(
                        FW_RI_RES_WR_IQANUS_V(0) |
                        FW_RI_RES_WR_IQANUD_V(1) |
                        FW_RI_RES_WR_IQANDST_F |
                        FW_RI_RES_WR_IQANDSTINDEX_V(
                                rdev->lldi.ciq_ids[cq->vector]));
        res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(
                        FW_RI_RES_WR_IQDROPRSS_F |
                        FW_RI_RES_WR_IQPCIECH_V(2) |
                        FW_RI_RES_WR_IQINTCNTTHRESH_V(0) |
                        FW_RI_RES_WR_IQO_F |
                        FW_RI_RES_WR_IQESIZE_V(1));
        res->u.cq.iqsize = cpu_to_be16(cq->size);
        res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);

        c4iw_init_wr_wait(&wr_wait);

        ret = c4iw_ofld_send(rdev, skb);
        if (ret)
                goto err4;
        PDBG("%s wait_event wr_wait %p\n", __func__, &wr_wait);
        ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
        if (ret)
                goto err4;

        cq->gen = 1;
        cq->gts = rdev->lldi.gts_reg;
        cq->rdev = rdev;

        cq->bar2_va = c4iw_bar2_addrs(rdev, cq->cqid, T4_BAR2_QTYPE_INGRESS,
                                      &cq->bar2_qid,
                                      user ? &cq->bar2_pa : NULL);
        if (user && !cq->bar2_pa) {
                pr_warn(MOD "%s: cqid %u not in BAR2 range.\n",
                        pci_name(rdev->lldi.pdev), cq->cqid);
                ret = -EINVAL;
                goto err4;
        }
        return 0;
err4:
        dma_free_coherent(&rdev->lldi.pdev->dev, cq->memsize, cq->queue,
                          dma_unmap_addr(cq, mapping));
err3:
        kfree(cq->sw_queue);
err2:
        c4iw_put_cqid(rdev, cq->cqid, uctx);
err1:
        return ret;
}

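/*
 * Flush helpers: synthesize software CQEs with status T4_ERR_SWFLUSH so
 * that work requests still pending on the RQ or SQ complete with a flush
 * error once the QP has left RTS.
 */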
static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)
{
        struct t4_cqe cqe;

        PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
             wq, cq, cq->sw_cidx, cq->sw_pidx);
        memset(&cqe, 0, sizeof(cqe));
        cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
                                 CQE_OPCODE_V(FW_RI_SEND) |
                                 CQE_TYPE_V(0) |
                                 CQE_SWCQE_V(1) |
                                 CQE_QPID_V(wq->sq.qid));
        cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
        cq->sw_queue[cq->sw_pidx] = cqe;
        t4_swcq_produce(cq);
}

int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count)
{
        int flushed = 0;
        int in_use = wq->rq.in_use - count;

        BUG_ON(in_use < 0);
        PDBG("%s wq %p cq %p rq.in_use %u skip count %u\n", __func__,
             wq, cq, wq->rq.in_use, count);
        while (in_use--) {
                insert_recv_cqe(wq, cq);
                flushed++;
        }
        return flushed;
}

static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
                          struct t4_swsqe *swcqe)
{
        struct t4_cqe cqe;

        PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
             wq, cq, cq->sw_cidx, cq->sw_pidx);
        memset(&cqe, 0, sizeof(cqe));
        cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
                                 CQE_OPCODE_V(swcqe->opcode) |
                                 CQE_TYPE_V(1) |
                                 CQE_SWCQE_V(1) |
                                 CQE_QPID_V(wq->sq.qid));
        CQE_WRID_SQ_IDX(&cqe) = swcqe->idx;
        cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
        cq->sw_queue[cq->sw_pidx] = cqe;
        t4_swcq_produce(cq);
}

static void advance_oldest_read(struct t4_wq *wq);

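/*
 * Walk the send queue from flush_cidx to pidx and insert a flush CQE for
 * every WR that has not already been flushed, advancing oldest_read past
 * any flushed read request.  Returns the number of WRs flushed.
 */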
int c4iw_flush_sq(struct c4iw_qp *qhp)
{
        int flushed = 0;
        struct t4_wq *wq = &qhp->wq;
        struct c4iw_cq *chp = to_c4iw_cq(qhp->ibqp.send_cq);
        struct t4_cq *cq = &chp->cq;
        int idx;
        struct t4_swsqe *swsqe;

        if (wq->sq.flush_cidx == -1)
                wq->sq.flush_cidx = wq->sq.cidx;
        idx = wq->sq.flush_cidx;
        BUG_ON(idx >= wq->sq.size);
        while (idx != wq->sq.pidx) {
                swsqe = &wq->sq.sw_sq[idx];
                BUG_ON(swsqe->flushed);
                swsqe->flushed = 1;
                insert_sq_cqe(wq, cq, swsqe);
                if (wq->sq.oldest_read == swsqe) {
                        BUG_ON(swsqe->opcode != FW_RI_READ_REQ);
                        advance_oldest_read(wq);
                }
                flushed++;
                if (++idx == wq->sq.size)
                        idx = 0;
        }
        wq->sq.flush_cidx += flushed;
        if (wq->sq.flush_cidx >= wq->sq.size)
                wq->sq.flush_cidx -= wq->sq.size;
        return flushed;
}

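/*
 * Walk the send queue from flush_cidx, skipping unsignaled WRs, and move
 * the saved CQE of every signaled WR that has already completed into the
 * software CQ.  Stop at the first signaled WR that has not completed yet.
 */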
static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
{
        struct t4_swsqe *swsqe;
        int cidx;

        if (wq->sq.flush_cidx == -1)
                wq->sq.flush_cidx = wq->sq.cidx;
        cidx = wq->sq.flush_cidx;
        BUG_ON(cidx > wq->sq.size);

        while (cidx != wq->sq.pidx) {
                swsqe = &wq->sq.sw_sq[cidx];
                if (!swsqe->signaled) {
                        if (++cidx == wq->sq.size)
                                cidx = 0;
                } else if (swsqe->complete) {

                        BUG_ON(swsqe->flushed);

                        /*
                         * Insert this completed cqe into the swcq.
                         */
                        PDBG("%s moving cqe into swcq sq idx %u cq idx %u\n",
                                        __func__, cidx, cq->sw_pidx);
                        swsqe->cqe.header |= htonl(CQE_SWCQE_V(1));
                        cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
                        t4_swcq_produce(cq);
                        swsqe->flushed = 1;
                        if (++cidx == wq->sq.size)
                                cidx = 0;
                        wq->sq.flush_cidx = cidx;
                } else
                        break;
        }
}

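/*
 * Hardware reports RDMA read responses as RQ-type CQEs.  Build the SQ-type
 * FW_RI_READ_REQ CQE the consumer actually expects from the oldest
 * outstanding read WR, without touching the hardware CQ.
 */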
static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe,
                struct t4_cqe *read_cqe)
{
        read_cqe->u.scqe.cidx = wq->sq.oldest_read->idx;
        read_cqe->len = htonl(wq->sq.oldest_read->read_len);
        read_cqe->header = htonl(CQE_QPID_V(CQE_QPID(hw_cqe)) |
                        CQE_SWCQE_V(SW_CQE(hw_cqe)) |
                        CQE_OPCODE_V(FW_RI_READ_REQ) |
                        CQE_TYPE_V(1));
        read_cqe->bits_type_ts = hw_cqe->bits_type_ts;
}

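/*
 * Advance wq->sq.oldest_read to the next outstanding FW_RI_READ_REQ in the
 * send queue, or set it to NULL if there is none.
 */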
static void advance_oldest_read(struct t4_wq *wq)
{

        u32 rptr = wq->sq.oldest_read - wq->sq.sw_sq + 1;

        if (rptr == wq->sq.size)
                rptr = 0;
        while (rptr != wq->sq.pidx) {
                wq->sq.oldest_read = &wq->sq.sw_sq[rptr];

                if (wq->sq.oldest_read->opcode == FW_RI_READ_REQ)
                        return;
                if (++rptr == wq->sq.size)
                        rptr = 0;
        }
        wq->sq.oldest_read = NULL;
}

/*
 * Move all CQEs from the HWCQ into the SWCQ.
 * Deal with out-of-order completions and/or completions that complete
 * prior unsignaled WRs.
 */
void c4iw_flush_hw_cq(struct c4iw_cq *chp)
{
        struct t4_cqe *hw_cqe, *swcqe, read_cqe;
        struct c4iw_qp *qhp;
        struct t4_swsqe *swsqe;
        int ret;

        PDBG("%s  cqid 0x%x\n", __func__, chp->cq.cqid);
        ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);

        /*
         * This logic is similar to poll_cq(), but not quite the same
         * unfortunately.  Need to move pertinent HW CQEs to the SW CQ but
         * also do any translation magic that poll_cq() normally does.
         */
        while (!ret) {
                qhp = get_qhp(chp->rhp, CQE_QPID(hw_cqe));

                /*
                 * drop CQEs with no associated QP
                 */
                if (qhp == NULL)
                        goto next_cqe;

                if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE)
                        goto next_cqe;

                if (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP) {

                        /* If we got here because of an async event or
                         * other error and have an egress error, then drop
                         * the CQE.
                         */
                        if (CQE_TYPE(hw_cqe) == 1)
                                goto next_cqe;

                        /* drop peer2peer RTR reads.
                         */
                        if (CQE_WRID_STAG(hw_cqe) == 1)
                                goto next_cqe;

                        /*
                         * Eat completions for unsignaled read WRs.
                         */
                        if (!qhp->wq.sq.oldest_read->signaled) {
                                advance_oldest_read(&qhp->wq);
                                goto next_cqe;
                        }

                        /*
                         * Don't write to the HWCQ, create a new read req CQE
                         * in local memory and move it into the swcq.
                         */
                        create_read_req_cqe(&qhp->wq, hw_cqe, &read_cqe);
                        hw_cqe = &read_cqe;
                        advance_oldest_read(&qhp->wq);
                }

                /* If it's an SQ completion, then do the magic to move all the
                 * unsignaled and now in-order completions into the swcq.
                 */
                if (SQ_TYPE(hw_cqe)) {
                        swsqe = &qhp->wq.sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
                        swsqe->cqe = *hw_cqe;
                        swsqe->complete = 1;
                        flush_completed_wrs(&qhp->wq, &chp->cq);
                } else {
                        swcqe = &chp->cq.sw_queue[chp->cq.sw_pidx];
                        *swcqe = *hw_cqe;
                        swcqe->header |= cpu_to_be32(CQE_SWCQE_V(1));
                        t4_swcq_produce(&chp->cq);
                }
next_cqe:
                t4_hwcq_consume(&chp->cq);
                ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);
        }
}

static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)
{
        if (CQE_OPCODE(cqe) == FW_RI_TERMINATE)
                return 0;

        if ((CQE_OPCODE(cqe) == FW_RI_RDMA_WRITE) && RQ_TYPE(cqe))
                return 0;

        if ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) && SQ_TYPE(cqe))
                return 0;

        if (CQE_SEND_OPCODE(cqe) && RQ_TYPE(cqe) && t4_rq_empty(wq))
                return 0;
        return 1;
}

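/*
 * Count the software CQEs already queued on this CQ that will complete RQ
 * WRs for the given QP.  The flush path passes this count to
 * c4iw_flush_rq() so that RQ WRs whose completions are already in the SW
 * CQ are not flushed a second time.
 */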
void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
{
        struct t4_cqe *cqe;
        u32 ptr;

        *count = 0;
        PDBG("%s count zero %d\n", __func__, *count);
        ptr = cq->sw_cidx;
        while (ptr != cq->sw_pidx) {
                cqe = &cq->sw_queue[ptr];
                if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) &&
                    (CQE_QPID(cqe) == wq->sq.qid) && cqe_completes_wr(cqe, wq))
                        (*count)++;
                if (++ptr == cq->size)
                        ptr = 0;
        }
        PDBG("%s cq %p count %d\n", __func__, cq, *count);
}

/*
 * poll_cq
 *
 * Caller must:
 *     check the validity of the first CQE,
 *     supply the wq associated with the qpid.
 *
 * credit: cq credit to return to sge.
 * cqe_flushed: 1 iff the CQE is flushed.
 * cqe: copy of the polled CQE.
 *
 * return value:
 *    0             CQE returned ok.
 *    -EAGAIN       CQE skipped, try again.
 *    -EOVERFLOW    CQ overflow detected.
 */
static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
                   u8 *cqe_flushed, u64 *cookie, u32 *credit)
{
        int ret = 0;
        struct t4_cqe *hw_cqe, read_cqe;

        *cqe_flushed = 0;
        *credit = 0;
        ret = t4_next_cqe(cq, &hw_cqe);
        if (ret)
                return ret;

        PDBG("%s CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x"
             " opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
             __func__, CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe),
             CQE_GENBIT(hw_cqe), CQE_TYPE(hw_cqe), CQE_STATUS(hw_cqe),
             CQE_OPCODE(hw_cqe), CQE_LEN(hw_cqe), CQE_WRID_HI(hw_cqe),
             CQE_WRID_LOW(hw_cqe));

        /*
         * skip cqe's not affiliated with a QP.
         */
        if (wq == NULL) {
                ret = -EAGAIN;
                goto skip_cqe;
        }

        /*
         * skip hw cqe's if the wq is flushed.
         */
        if (wq->flushed && !SW_CQE(hw_cqe)) {
                ret = -EAGAIN;
                goto skip_cqe;
        }

        /*
         * skip TERMINATE cqes...
         */
        if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE) {
                ret = -EAGAIN;
                goto skip_cqe;
        }

        /*
         * Gotta tweak READ completions:
         *      1) the cqe doesn't contain the sq_wptr from the wr.
         *      2) opcode not reflected from the wr.
         *      3) read_len not reflected from the wr.
         *      4) cq_type is RQ_TYPE not SQ_TYPE.
         */
        if (RQ_TYPE(hw_cqe) && (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP)) {

                /* If we got here because of an async event or other error
                 * and have an egress error, then drop the CQE.
                 */
                if (CQE_TYPE(hw_cqe) == 1) {
                        if (CQE_STATUS(hw_cqe))
                                t4_set_wq_in_error(wq);
                        ret = -EAGAIN;
                        goto skip_cqe;
                }

                /* If this is an unsolicited read response, then the read
                 * was generated by the kernel driver as part of peer-2-peer
                 * connection setup.  So ignore the completion.
                 */
                if (CQE_WRID_STAG(hw_cqe) == 1) {
                        if (CQE_STATUS(hw_cqe))
                                t4_set_wq_in_error(wq);
                        ret = -EAGAIN;
                        goto skip_cqe;
                }

                /*
                 * Eat completions for unsignaled read WRs.
                 */
                if (!wq->sq.oldest_read->signaled) {
                        advance_oldest_read(wq);
                        ret = -EAGAIN;
                        goto skip_cqe;
                }

                /*
                 * Don't write to the HWCQ, so create a new read req CQE
                 * in local memory.
                 */
                create_read_req_cqe(wq, hw_cqe, &read_cqe);
                hw_cqe = &read_cqe;
                advance_oldest_read(wq);
        }

        if (CQE_STATUS(hw_cqe) || t4_wq_in_error(wq)) {
                *cqe_flushed = (CQE_STATUS(hw_cqe) == T4_ERR_SWFLUSH);
                t4_set_wq_in_error(wq);
        }

        /*
         * RECV completion.
         */
        if (RQ_TYPE(hw_cqe)) {

                /*
                 * HW only validates 4 bits of MSN.  So we must validate that
                 * the MSN in the SEND is the next expected MSN.  If it's not,
                 * then we complete this with T4_ERR_MSN and mark the wq in
                 * error.
                 */

                if (t4_rq_empty(wq)) {
                        t4_set_wq_in_error(wq);
                        ret = -EAGAIN;
                        goto skip_cqe;
                }
                if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) {
                        t4_set_wq_in_error(wq);
                        hw_cqe->header |= htonl(CQE_STATUS_V(T4_ERR_MSN));
                        goto proc_cqe;
                }
                goto proc_cqe;
        }

        /*
         * If we get here it's a send completion.
         *
         * Handle out of order completion. These get stuffed
         * in the SW SQ. Then the SW SQ is walked to move any
         * now in-order completions into the SW CQ.  This handles
         * 2 cases:
         *      1) reaping unsignaled WRs when the first subsequent
         *         signaled WR is completed.
         *      2) out of order read completions.
         */
        if (!SW_CQE(hw_cqe) && (CQE_WRID_SQ_IDX(hw_cqe) != wq->sq.cidx)) {
                struct t4_swsqe *swsqe;

                PDBG("%s out of order completion going in sw_sq at idx %u\n",
                     __func__, CQE_WRID_SQ_IDX(hw_cqe));
                swsqe = &wq->sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
                swsqe->cqe = *hw_cqe;
                swsqe->complete = 1;
                ret = -EAGAIN;
                goto flush_wq;
        }

proc_cqe:
        *cqe = *hw_cqe;

        /*
         * Reap the associated WR(s) that are freed up with this
         * completion.
         */
        if (SQ_TYPE(hw_cqe)) {
                int idx = CQE_WRID_SQ_IDX(hw_cqe);
                BUG_ON(idx >= wq->sq.size);

                /*
                 * Account for any unsignaled completions completed by
                 * this signaled completion.  In this case, cidx points
                 * to the first unsignaled one, and idx points to the
                 * signaled one.  So adjust in_use based on this delta.
                 * If this is not completing any unsignaled WRs, then the
                 * delta will be 0.  Handle wrapping also!
                 */
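                /*
                 * Example: with sq.size = 16, cidx = 14 and idx = 2, the
                 * delta is 16 + 2 - 14 = 4, reaping the unsignaled WRs in
                 * slots 14, 15, 0 and 1; the signaled WR at idx 2 itself
                 * is consumed via t4_sq_consume() below.
                 */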
                if (idx < wq->sq.cidx)
                        wq->sq.in_use -= wq->sq.size + idx - wq->sq.cidx;
                else
                        wq->sq.in_use -= idx - wq->sq.cidx;
                BUG_ON(wq->sq.in_use <= 0 && wq->sq.in_use >= wq->sq.size);

                wq->sq.cidx = (uint16_t)idx;
                PDBG("%s completing sq idx %u\n", __func__, wq->sq.cidx);
                *cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
                if (c4iw_wr_log)
                        c4iw_log_wr_stats(wq, hw_cqe);
                t4_sq_consume(wq);
        } else {
                PDBG("%s completing rq idx %u\n", __func__, wq->rq.cidx);
                *cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
                BUG_ON(t4_rq_empty(wq));
                if (c4iw_wr_log)
                        c4iw_log_wr_stats(wq, hw_cqe);
                t4_rq_consume(wq);
                goto skip_cqe;
        }

flush_wq:
        /*
         * Flush any completed cqes that are now in-order.
         */
        flush_completed_wrs(wq, cq);

skip_cqe:
        if (SW_CQE(hw_cqe)) {
                PDBG("%s cq %p cqid 0x%x skip sw cqe cidx %u\n",
                     __func__, cq, cq->cqid, cq->sw_cidx);
                t4_swcq_consume(cq);
        } else {
                PDBG("%s cq %p cqid 0x%x skip hw cqe cidx %u\n",
                     __func__, cq, cq->cqid, cq->cidx);
                t4_hwcq_consume(cq);
        }
        return ret;
}

/*
 * Get one cq entry from c4iw and map it to openib.
 *
 * Returns:
 *      0                       cqe returned
 *      -ENODATA                EMPTY;
 *      -EAGAIN                 caller must try again
 *      any other -errno        fatal error
 */
static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
{
        struct c4iw_qp *qhp = NULL;
        struct t4_cqe uninitialized_var(cqe), *rd_cqe;
        struct t4_wq *wq;
        u32 credit = 0;
        u8 cqe_flushed;
        u64 cookie = 0;
        int ret;

        ret = t4_next_cqe(&chp->cq, &rd_cqe);

        if (ret)
                return ret;

        qhp = get_qhp(chp->rhp, CQE_QPID(rd_cqe));
        if (!qhp)
                wq = NULL;
        else {
                spin_lock(&qhp->lock);
                wq = &(qhp->wq);
        }
        ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit);
        if (ret)
                goto out;

        wc->wr_id = cookie;
        wc->qp = &qhp->ibqp;
        wc->vendor_err = CQE_STATUS(&cqe);
        wc->wc_flags = 0;

        PDBG("%s qpid 0x%x type %d opcode %d status 0x%x len %u wrid hi 0x%x "
             "lo 0x%x cookie 0x%llx\n", __func__, CQE_QPID(&cqe),
             CQE_TYPE(&cqe), CQE_OPCODE(&cqe), CQE_STATUS(&cqe), CQE_LEN(&cqe),
             CQE_WRID_HI(&cqe), CQE_WRID_LOW(&cqe), (unsigned long long)cookie);

        if (CQE_TYPE(&cqe) == 0) {
                if (!CQE_STATUS(&cqe))
                        wc->byte_len = CQE_LEN(&cqe);
                else
                        wc->byte_len = 0;
                wc->opcode = IB_WC_RECV;
                if (CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_INV ||
                    CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_SE_INV) {
                        wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe);
                        wc->wc_flags |= IB_WC_WITH_INVALIDATE;
                }
        } else {
                switch (CQE_OPCODE(&cqe)) {
                case FW_RI_RDMA_WRITE:
                        wc->opcode = IB_WC_RDMA_WRITE;
                        break;
                case FW_RI_READ_REQ:
                        wc->opcode = IB_WC_RDMA_READ;
                        wc->byte_len = CQE_LEN(&cqe);
                        break;
                case FW_RI_SEND_WITH_INV:
                case FW_RI_SEND_WITH_SE_INV:
                        wc->opcode = IB_WC_SEND;
                        wc->wc_flags |= IB_WC_WITH_INVALIDATE;
                        break;
                case FW_RI_SEND:
                case FW_RI_SEND_WITH_SE:
                        wc->opcode = IB_WC_SEND;
                        break;

                case FW_RI_LOCAL_INV:
                        wc->opcode = IB_WC_LOCAL_INV;
                        break;
                case FW_RI_FAST_REGISTER:
                        wc->opcode = IB_WC_REG_MR;
                        break;
                default:
                        printk(KERN_ERR MOD "Unexpected opcode %d "
                               "in the CQE received for QPID=0x%0x\n",
                               CQE_OPCODE(&cqe), CQE_QPID(&cqe));
                        ret = -EINVAL;
                        goto out;
                }
        }

        if (cqe_flushed)
                wc->status = IB_WC_WR_FLUSH_ERR;
        else {

                switch (CQE_STATUS(&cqe)) {
                case T4_ERR_SUCCESS:
                        wc->status = IB_WC_SUCCESS;
                        break;
                case T4_ERR_STAG:
                        wc->status = IB_WC_LOC_ACCESS_ERR;
                        break;
                case T4_ERR_PDID:
                        wc->status = IB_WC_LOC_PROT_ERR;
                        break;
                case T4_ERR_QPID:
                case T4_ERR_ACCESS:
                        wc->status = IB_WC_LOC_ACCESS_ERR;
                        break;
                case T4_ERR_WRAP:
                        wc->status = IB_WC_GENERAL_ERR;
                        break;
                case T4_ERR_BOUND:
                        wc->status = IB_WC_LOC_LEN_ERR;
                        break;
                case T4_ERR_INVALIDATE_SHARED_MR:
                case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
                        wc->status = IB_WC_MW_BIND_ERR;
                        break;
                case T4_ERR_CRC:
                case T4_ERR_MARKER:
                case T4_ERR_PDU_LEN_ERR:
                case T4_ERR_OUT_OF_RQE:
                case T4_ERR_DDP_VERSION:
                case T4_ERR_RDMA_VERSION:
                case T4_ERR_DDP_QUEUE_NUM:
                case T4_ERR_MSN:
                case T4_ERR_TBIT:
                case T4_ERR_MO:
                case T4_ERR_MSN_RANGE:
                case T4_ERR_IRD_OVERFLOW:
                case T4_ERR_OPCODE:
                case T4_ERR_INTERNAL_ERR:
                        wc->status = IB_WC_FATAL_ERR;
                        break;
                case T4_ERR_SWFLUSH:
                        wc->status = IB_WC_WR_FLUSH_ERR;
                        break;
                default:
                        printk(KERN_ERR MOD
                               "Unexpected cqe_status 0x%x for QPID=0x%0x\n",
                               CQE_STATUS(&cqe), CQE_QPID(&cqe));
                        wc->status = IB_WC_FATAL_ERR;
                }
        }
out:
        if (wq) {
                if (unlikely(qhp->attr.state != C4IW_QP_STATE_RTS)) {
                        if (t4_sq_empty(wq))
                                complete(&qhp->sq_drained);
                        if (t4_rq_empty(wq))
                                complete(&qhp->rq_drained);
                }
                spin_unlock(&qhp->lock);
        }
        return ret;
}

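/*
 * Poll up to num_entries completions, retrying any CQE that poll_cq()
 * skips with -EAGAIN.  An empty CQ (-ENODATA) is reported as success with
 * the number of completions gathered so far.
 */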
int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
        struct c4iw_cq *chp;
        unsigned long flags;
        int npolled;
        int err = 0;

        chp = to_c4iw_cq(ibcq);

        spin_lock_irqsave(&chp->lock, flags);
        for (npolled = 0; npolled < num_entries; ++npolled) {
                do {
                        err = c4iw_poll_cq_one(chp, wc + npolled);
                } while (err == -EAGAIN);
                if (err)
                        break;
        }
        spin_unlock_irqrestore(&chp->lock, flags);
        return !err || err == -ENODATA ? npolled : err;
}

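/*
 * Remove the CQ from the cqid table, drop the creation reference and wait
 * for all remaining references to go away before destroying the hardware
 * queue and freeing the CQ structure.
 */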
int c4iw_destroy_cq(struct ib_cq *ib_cq)
{
        struct c4iw_cq *chp;
        struct c4iw_ucontext *ucontext;

        PDBG("%s ib_cq %p\n", __func__, ib_cq);
        chp = to_c4iw_cq(ib_cq);

        remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
        atomic_dec(&chp->refcnt);
        wait_event(chp->wait, !atomic_read(&chp->refcnt));

        ucontext = ib_cq->uobject ? to_c4iw_ucontext(ib_cq->uobject->context)
                                  : NULL;
        destroy_cq(&chp->rhp->rdev, &chp->cq,
                   ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx);
        kfree(chp);
        return 0;
}

struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
                             const struct ib_cq_init_attr *attr,
                             struct ib_ucontext *ib_context,
                             struct ib_udata *udata)
{
        int entries = attr->cqe;
        int vector = attr->comp_vector;
        struct c4iw_dev *rhp;
        struct c4iw_cq *chp;
        struct c4iw_create_cq_resp uresp;
        struct c4iw_ucontext *ucontext = NULL;
        int ret;
        size_t memsize, hwentries;
        struct c4iw_mm_entry *mm, *mm2;

        PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
        if (attr->flags)
                return ERR_PTR(-EINVAL);

        rhp = to_c4iw_dev(ibdev);

        if (vector >= rhp->rdev.lldi.nciq)
                return ERR_PTR(-EINVAL);

        chp = kzalloc(sizeof(*chp), GFP_KERNEL);
        if (!chp)
                return ERR_PTR(-ENOMEM);

        if (ib_context)
                ucontext = to_c4iw_ucontext(ib_context);

        /* account for the status page. */
        entries++;

        /* IQ needs one extra entry to differentiate full vs empty. */
        entries++;

        /*
         * entries must be multiple of 16 for HW.
         */
        entries = roundup(entries, 16);

        /*
         * Make actual HW queue 2x to avoid cidx_inc overflows.
         */
        hwentries = min(entries * 2, rhp->rdev.hw_queue.t4_max_iq_size);

        /*
         * Make HW queue at least 64 entries so GTS updates aren't too
         * frequent.
         */
        if (hwentries < 64)
                hwentries = 64;

        memsize = hwentries * sizeof *chp->cq.queue;

        /*
         * memsize must be a multiple of the page size if it's a user cq.
         */
        if (ucontext)
                memsize = roundup(memsize, PAGE_SIZE);
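        /*
         * Example: a request for 100 CQEs becomes 102 after the status page
         * and full/empty entries, 112 after rounding up to a multiple of 16,
         * and 224 hardware entries after doubling (capped at t4_max_iq_size);
         * memsize is then 224 * sizeof(*chp->cq.queue), rounded up to a page
         * for user CQs.
         */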
        chp->cq.size = hwentries;
        chp->cq.memsize = memsize;
        chp->cq.vector = vector;

        ret = create_cq(&rhp->rdev, &chp->cq,
                        ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
        if (ret)
                goto err1;

        chp->rhp = rhp;
        chp->cq.size--;                         /* status page */
        chp->ibcq.cqe = entries - 2;
        spin_lock_init(&chp->lock);
        spin_lock_init(&chp->comp_handler_lock);
        atomic_set(&chp->refcnt, 1);
        init_waitqueue_head(&chp->wait);
        ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
        if (ret)
                goto err2;

        if (ucontext) {
                mm = kmalloc(sizeof *mm, GFP_KERNEL);
                if (!mm) {
                        ret = -ENOMEM;
                        goto err3;
                }
                mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
                if (!mm2) {
                        ret = -ENOMEM;
                        goto err4;
                }

                uresp.qid_mask = rhp->rdev.cqmask;
                uresp.cqid = chp->cq.cqid;
                uresp.size = chp->cq.size;
                uresp.memsize = chp->cq.memsize;
                spin_lock(&ucontext->mmap_lock);
                uresp.key = ucontext->key;
                ucontext->key += PAGE_SIZE;
                uresp.gts_key = ucontext->key;
                ucontext->key += PAGE_SIZE;
                spin_unlock(&ucontext->mmap_lock);
                ret = ib_copy_to_udata(udata, &uresp,
                                       sizeof(uresp) - sizeof(uresp.reserved));
                if (ret)
                        goto err5;

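                /*
                 * Register two mmap regions for the user library: the CQ
                 * queue memory itself and the BAR2/GTS doorbell page, keyed
                 * by the offsets just returned in uresp.
                 */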
                mm->key = uresp.key;
                mm->addr = virt_to_phys(chp->cq.queue);
                mm->len = chp->cq.memsize;
                insert_mmap(ucontext, mm);

                mm2->key = uresp.gts_key;
                mm2->addr = chp->cq.bar2_pa;
                mm2->len = PAGE_SIZE;
                insert_mmap(ucontext, mm2);
        }
        PDBG("%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
             __func__, chp->cq.cqid, chp, chp->cq.size,
             chp->cq.memsize, (unsigned long long) chp->cq.dma_addr);
        return &chp->ibcq;
err5:
        kfree(mm2);
err4:
        kfree(mm);
err3:
        remove_handle(rhp, &rhp->cqidr, chp->cq.cqid);
err2:
        destroy_cq(&chp->rhp->rdev, &chp->cq,
                   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
err1:
        kfree(chp);
        return ERR_PTR(ret);
}

int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
{
        return -ENOSYS;
}

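/*
 * Re-arm the CQ for the next solicited or unsolicited completion.  A
 * non-zero return from t4_arm_cq() is only passed back to the caller when
 * IB_CQ_REPORT_MISSED_EVENTS was requested, as the verbs API expects.
 */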
int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
        struct c4iw_cq *chp;
        int ret;
        unsigned long flag;

        chp = to_c4iw_cq(ibcq);
        spin_lock_irqsave(&chp->lock, flag);
        ret = t4_arm_cq(&chp->cq,
                        (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
        spin_unlock_irqrestore(&chp->lock, flag);
        if (ret && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
                ret = 0;
        return ret;
}