linux/drivers/infiniband/hw/cxgb4/ev.c
/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/slab.h>
#include <linux/mman.h>
#include <net/sock.h>

#include "iw_cxgb4.h"

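/*
 * Post an affiliated asynchronous event for an error CQE: move an RTS QP
 * toward TERMINATE, deliver the ib_event to the consumer's event handler,
 * and then kick the CQ's completion handler.
 */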
static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp,
                          struct c4iw_qp *qhp,
                          struct t4_cqe *err_cqe,
                          enum ib_event_type ib_event)
{
        struct ib_event event;
        struct c4iw_qp_attributes attrs;
        unsigned long flag;

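        /*
         * If the QP has already left RTS (ERROR or TERMINATE), this AE is
         * stale: log it and drop it rather than re-posting to the consumer.
         */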
        if ((qhp->attr.state == C4IW_QP_STATE_ERROR) ||
            (qhp->attr.state == C4IW_QP_STATE_TERMINATE)) {
                pr_err("%s AE after RTS - qpid 0x%x opcode %d status 0x%x "
                       "type %d wrid.hi 0x%x wrid.lo 0x%x\n",
                       __func__, CQE_QPID(err_cqe), CQE_OPCODE(err_cqe),
                       CQE_STATUS(err_cqe), CQE_TYPE(err_cqe),
                       CQE_WRID_HI(err_cqe), CQE_WRID_LOW(err_cqe));
                return;
        }

        printk(KERN_ERR MOD "AE qpid 0x%x opcode %d status 0x%x "
               "type %d wrid.hi 0x%x wrid.lo 0x%x\n",
               CQE_QPID(err_cqe), CQE_OPCODE(err_cqe),
               CQE_STATUS(err_cqe), CQE_TYPE(err_cqe),
               CQE_WRID_HI(err_cqe), CQE_WRID_LOW(err_cqe));

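        /* An AE on an RTS QP kicks off the TERMINATE state transition. */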
        if (qhp->attr.state == C4IW_QP_STATE_RTS) {
                attrs.next_state = C4IW_QP_STATE_TERMINATE;
                c4iw_modify_qp(qhp->rhp, qhp, C4IW_QP_ATTR_NEXT_STATE,
                               &attrs, 0);
        }

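        /*
         * Deliver the asynchronous event to the consumer, attached to the
         * CQ for CQ errors and to the QP for everything else.
         */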
        event.event = ib_event;
        event.device = chp->ibcq.device;
        if (ib_event == IB_EVENT_CQ_ERR)
                event.element.cq = &chp->ibcq;
        else
                event.element.qp = &qhp->ibqp;
        if (qhp->ibqp.event_handler)
                (*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context);

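        /*
         * Kick the CQ's completion handler as well; comp_handler_lock
         * serializes completion upcalls on this CQ.
         */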
        spin_lock_irqsave(&chp->comp_handler_lock, flag);
        (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
        spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
}

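/*
 * Dispatch an async error CQE from the hardware: look up the QP and the
 * affected CQ (SQ errors map to the send CQ, RQ errors to the receive CQ),
 * pin both with references, and translate the T4 error status into the
 * appropriate ib_event_type.
 */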
void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
{
        struct c4iw_cq *chp;
        struct c4iw_qp *qhp;
        u32 cqid;

        spin_lock_irq(&dev->lock);
        qhp = get_qhp(dev, CQE_QPID(err_cqe));
        if (!qhp) {
                printk(KERN_ERR MOD "BAD AE qpid 0x%x opcode %d "
                       "status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
                       CQE_QPID(err_cqe),
                       CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe),
                       CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe),
                       CQE_WRID_LOW(err_cqe));
                spin_unlock_irq(&dev->lock);
                goto out;
        }

        if (SQ_TYPE(err_cqe))
                cqid = qhp->attr.scq;
        else
                cqid = qhp->attr.rcq;
        chp = get_chp(dev, cqid);
        if (!chp) {
                printk(KERN_ERR MOD "BAD AE cqid 0x%x qpid 0x%x opcode %d "
                       "status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
                       cqid, CQE_QPID(err_cqe),
                       CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe),
                       CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe),
                       CQE_WRID_LOW(err_cqe));
                spin_unlock_irq(&dev->lock);
                goto out;
        }

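        /*
         * Take references so neither the QP nor the CQ can be freed while
         * the event is posted; both are dropped at "done" below.
         */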
        c4iw_qp_add_ref(&qhp->ibqp);
        atomic_inc(&chp->refcnt);
        spin_unlock_irq(&dev->lock);

        /* Bad incoming write */
        if (RQ_TYPE(err_cqe) &&
            (CQE_OPCODE(err_cqe) == FW_RI_RDMA_WRITE)) {
                post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_REQ_ERR);
                goto done;
        }

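        /* Map the remaining T4 error statuses onto ib_event_type classes. */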
        switch (CQE_STATUS(err_cqe)) {

        /* Completion Events */
        case T4_ERR_SUCCESS:
                printk(KERN_ERR MOD "AE with status 0!\n");
                break;

        case T4_ERR_STAG:
        case T4_ERR_PDID:
        case T4_ERR_QPID:
        case T4_ERR_ACCESS:
        case T4_ERR_WRAP:
        case T4_ERR_BOUND:
        case T4_ERR_INVALIDATE_SHARED_MR:
        case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
                post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_ACCESS_ERR);
                break;

        /* Device Fatal Errors */
        case T4_ERR_ECC:
        case T4_ERR_ECC_PSTAG:
        case T4_ERR_INTERNAL_ERR:
                post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_DEVICE_FATAL);
                break;

        /* QP Fatal Errors */
        case T4_ERR_OUT_OF_RQE:
        case T4_ERR_PBL_ADDR_BOUND:
        case T4_ERR_CRC:
        case T4_ERR_MARKER:
        case T4_ERR_PDU_LEN_ERR:
        case T4_ERR_DDP_VERSION:
        case T4_ERR_RDMA_VERSION:
        case T4_ERR_OPCODE:
        case T4_ERR_DDP_QUEUE_NUM:
        case T4_ERR_MSN:
        case T4_ERR_TBIT:
        case T4_ERR_MO:
        case T4_ERR_MSN_GAP:
        case T4_ERR_MSN_RANGE:
        case T4_ERR_RQE_ADDR_BOUND:
        case T4_ERR_IRD_OVERFLOW:
                post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_FATAL);
                break;

        default:
                printk(KERN_ERR MOD "Unknown T4 status 0x%x QPID 0x%x\n",
                       CQE_STATUS(err_cqe), qhp->wq.sq.qid);
                post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_FATAL);
                break;
        }
done:
        if (atomic_dec_and_test(&chp->refcnt))
                wake_up(&chp->wait);
        c4iw_qp_rem_ref(&qhp->ibqp);
out:
        return;
}

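/*
 * CQ interrupt upcall: run the consumer's completion handler for the CQ
 * identified by qid, if one exists.
 */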
int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid)
{
        struct c4iw_cq *chp;
        unsigned long flag;

        chp = get_chp(dev, qid);
        if (chp) {
                spin_lock_irqsave(&chp->comp_handler_lock, flag);
                (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
                spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
        } else {
                PDBG("%s unknown cqid 0x%x\n", __func__, qid);
        }
        return 0;
}