linux/drivers/infiniband/hw/cxgb3/iwch_cq.c
/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "iwch_provider.h"
#include "iwch.h"

/*
 * Get one cq entry from cxio and map it to openib.
 *
 * Returns:
 *      0                       cq empty, no cqe returned
 *      1                       cqe returned
 *      -EAGAIN                 caller must try again
 *      any other -errno        fatal error
 */
static int iwch_poll_cq_one(struct iwch_dev *rhp, struct iwch_cq *chp,
                            struct ib_wc *wc)
{
        struct iwch_qp *qhp = NULL;
        struct t3_cqe cqe, *rd_cqe;
        struct t3_wq *wq;
        u32 credit = 0;
        u8 cqe_flushed;
        u64 cookie;
        int ret = 1;

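        /* Peek at the next CQE; a NULL return means the CQ is empty. */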
        rd_cqe = cxio_next_cqe(&chp->cq);

        if (!rd_cqe)
                return 0;

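        /*
         * Look up the QP this CQE belongs to and take its lock so its WQ
         * state cannot change while the CQE is processed.  If the QP is
         * already gone, poll with a NULL wq so the CQE is still consumed.
         */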
        qhp = get_qhp(rhp, CQE_QPID(*rd_cqe));
        if (!qhp)
                wq = NULL;
        else {
                spin_lock(&qhp->lock);
                wq = &(qhp->wq);
        }
        ret = cxio_poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie,
                           &credit);
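        /*
         * On T3A devices, return any CQ credits that cxio_poll_cq
         * accumulated while consuming CQEs.
         */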
        if (t3a_device(chp->rhp) && credit) {
                PDBG("%s updating %d cq credits on id %d\n", __func__,
                     credit, chp->cq.cqid);
                cxio_hal_cq_op(&rhp->rdev, &chp->cq, CQ_CREDIT_UPDATE, credit);
        }

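        /*
         * A nonzero return from cxio_poll_cq means this CQE did not
         * produce a work completion; report -EAGAIN so the caller polls
         * again.
         */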
        if (ret) {
                ret = -EAGAIN;
                goto out;
        }
        ret = 1;

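        /* Translate the T3 CQE into the ib_wc the caller expects. */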
        wc->wr_id = cookie;
        wc->qp = &qhp->ibqp;
        wc->vendor_err = CQE_STATUS(cqe);
        wc->wc_flags = 0;

        PDBG("%s qpid 0x%x type %d opcode %d status 0x%x wrid hi 0x%x "
             "lo 0x%x cookie 0x%llx\n", __func__,
             CQE_QPID(cqe), CQE_TYPE(cqe),
             CQE_OPCODE(cqe), CQE_STATUS(cqe), CQE_WRID_HI(cqe),
             CQE_WRID_LOW(cqe), (unsigned long long) cookie);

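        /*
         * CQE_TYPE 0 is a receive (RQ) completion; anything else completes
         * a send-queue WR.
         */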
        if (CQE_TYPE(cqe) == 0) {
                if (!CQE_STATUS(cqe))
                        wc->byte_len = CQE_LEN(cqe);
                else
                        wc->byte_len = 0;
                wc->opcode = IB_WC_RECV;
                if (CQE_OPCODE(cqe) == T3_SEND_WITH_INV ||
                    CQE_OPCODE(cqe) == T3_SEND_WITH_SE_INV) {
                        wc->ex.invalidate_rkey = CQE_WRID_STAG(cqe);
                        wc->wc_flags |= IB_WC_WITH_INVALIDATE;
                }
        } else {
                switch (CQE_OPCODE(cqe)) {
                case T3_RDMA_WRITE:
                        wc->opcode = IB_WC_RDMA_WRITE;
                        break;
                case T3_READ_REQ:
                        wc->opcode = IB_WC_RDMA_READ;
                        wc->byte_len = CQE_LEN(cqe);
                        break;
                case T3_SEND:
                case T3_SEND_WITH_SE:
                case T3_SEND_WITH_INV:
                case T3_SEND_WITH_SE_INV:
                        wc->opcode = IB_WC_SEND;
                        break;
                case T3_LOCAL_INV:
                        wc->opcode = IB_WC_LOCAL_INV;
                        break;
                case T3_FAST_REGISTER:
                        wc->opcode = IB_WC_REG_MR;
                        break;
                default:
                        printk(KERN_ERR MOD "Unexpected opcode %d "
                               "in the CQE received for QPID=0x%0x\n",
                               CQE_OPCODE(cqe), CQE_QPID(cqe));
                        ret = -EINVAL;
                        goto out;
                }
        }

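        /*
         * Map the hardware completion status onto an ib_wc status.  A
         * flushed CQE always reports IB_WC_WR_FLUSH_ERR, regardless of the
         * status carried in the CQE itself.
         */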
        if (cqe_flushed)
                wc->status = IB_WC_WR_FLUSH_ERR;
        else {
                switch (CQE_STATUS(cqe)) {
                case TPT_ERR_SUCCESS:
                        wc->status = IB_WC_SUCCESS;
                        break;
                case TPT_ERR_STAG:
                        wc->status = IB_WC_LOC_ACCESS_ERR;
                        break;
                case TPT_ERR_PDID:
                        wc->status = IB_WC_LOC_PROT_ERR;
                        break;
                case TPT_ERR_QPID:
                case TPT_ERR_ACCESS:
                        wc->status = IB_WC_LOC_ACCESS_ERR;
                        break;
                case TPT_ERR_WRAP:
                        wc->status = IB_WC_GENERAL_ERR;
                        break;
                case TPT_ERR_BOUND:
                        wc->status = IB_WC_LOC_LEN_ERR;
                        break;
                case TPT_ERR_INVALIDATE_SHARED_MR:
                case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
                        wc->status = IB_WC_MW_BIND_ERR;
                        break;
                case TPT_ERR_CRC:
                case TPT_ERR_MARKER:
                case TPT_ERR_PDU_LEN_ERR:
                case TPT_ERR_OUT_OF_RQE:
                case TPT_ERR_DDP_VERSION:
                case TPT_ERR_RDMA_VERSION:
                case TPT_ERR_DDP_QUEUE_NUM:
                case TPT_ERR_MSN:
                case TPT_ERR_TBIT:
                case TPT_ERR_MO:
                case TPT_ERR_MSN_RANGE:
                case TPT_ERR_IRD_OVERFLOW:
                case TPT_ERR_OPCODE:
                        wc->status = IB_WC_FATAL_ERR;
                        break;
                case TPT_ERR_SWFLUSH:
                        wc->status = IB_WC_WR_FLUSH_ERR;
                        break;
                default:
                        printk(KERN_ERR MOD "Unexpected cqe_status 0x%x for "
                               "QPID=0x%0x\n", CQE_STATUS(cqe), CQE_QPID(cqe));
                        ret = -EINVAL;
                }
        }
out:
        if (wq)
                spin_unlock(&qhp->lock);
        return ret;
}

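/*
 * Poll the CQ for up to num_entries completions.  Returns the number of
 * entries written to wc, or a negative errno on a fatal error.
 */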
int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
        struct iwch_dev *rhp;
        struct iwch_cq *chp;
        unsigned long flags;
        int npolled;
        int err = 0;

        chp = to_iwch_cq(ibcq);
        rhp = chp->rhp;

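        /* chp->lock serializes all pollers of this CQ. */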
        spin_lock_irqsave(&chp->lock, flags);
        for (npolled = 0; npolled < num_entries; ++npolled) {
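                /* In debug builds, bound the -EAGAIN retry loop below. */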
#ifdef DEBUG
                int i = 0;
#endif

                /*
                 * Because T3 can post CQEs that are _not_ associated
                 * with a WR, we might have to poll again after removing
                 * one of these.
                 */
                do {
                        err = iwch_poll_cq_one(rhp, chp, wc + npolled);
#ifdef DEBUG
                        BUG_ON(++i > 1000);
#endif
                } while (err == -EAGAIN);
                if (err <= 0)
                        break;
        }
        spin_unlock_irqrestore(&chp->lock, flags);

        if (err < 0)
                return err;

        return npolled;
}
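
/*
 * Illustrative only, not part of the driver: iwch_poll_cq() is registered
 * as the poll_cq verb in iwch_provider.c, so kernel consumers reach it
 * through the verbs core.  A minimal sketch, assuming a CQ created on an
 * iw_cxgb3 device and a hypothetical handle_wc() consumer:
 *
 *      struct ib_wc wc[8];
 *      int i, n;
 *
 *      n = ib_poll_cq(cq, ARRAY_SIZE(wc), wc); // dispatches to iwch_poll_cq()
 *      for (i = 0; i < n; i++)
 *              handle_wc(&wc[i]);              // inspect status, opcode, wr_id
 */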