linux/drivers/infiniband/hw/qib/qib_ud.c
/*
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_smi.h>
#include <rdma/ib_verbs.h>

#include "qib.h"
#include "qib_mad.h"

/**
 * qib_ud_loopback - handle send on loopback QPs
 * @sqp: the sending QP
 * @swqe: the send work request
 *
 * This is called from qib_make_ud_req() to forward a WQE addressed
 * to the same HCA.
 * Note that the receive interrupt handler may be calling qib_ud_rcv()
 * while this is being called.
 */
static void qib_ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
{
        struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
        struct qib_pportdata *ppd = ppd_from_ibp(ibp);
        struct qib_devdata *dd = ppd->dd;
        struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
        struct rvt_qp *qp;
        struct ib_ah_attr *ah_attr;
        unsigned long flags;
        struct rvt_sge_state ssge;
        struct rvt_sge *sge;
        struct ib_wc wc;
        u32 length;
        enum ib_qp_type sqptype, dqptype;

        rcu_read_lock();
        qp = rvt_lookup_qpn(rdi, &ibp->rvp, swqe->ud_wr.remote_qpn);
        if (!qp) {
                ibp->rvp.n_pkt_drops++;
                rcu_read_unlock();
                return;
        }

        sqptype = sqp->ibqp.qp_type == IB_QPT_GSI ?
                        IB_QPT_UD : sqp->ibqp.qp_type;
        dqptype = qp->ibqp.qp_type == IB_QPT_GSI ?
                        IB_QPT_UD : qp->ibqp.qp_type;

        if (dqptype != sqptype ||
            !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
                ibp->rvp.n_pkt_drops++;
                goto drop;
        }

        ah_attr = &ibah_to_rvtah(swqe->ud_wr.ah)->attr;
        ppd = ppd_from_ibp(ibp);

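        /*
         * The P_Key check only applies when the destination is neither
         * QP0 nor QP1: compare the P_Keys selected by the sending and
         * receiving QPs and raise a bad P_Key trap on a mismatch.
         */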
        if (qp->ibqp.qp_num > 1) {
                u16 pkey1;
                u16 pkey2;
                u16 lid;

                pkey1 = qib_get_pkey(ibp, sqp->s_pkey_index);
                pkey2 = qib_get_pkey(ibp, qp->s_pkey_index);
                if (unlikely(!qib_pkey_ok(pkey1, pkey2))) {
                        lid = ppd->lid | (ah_attr->src_path_bits &
                                          ((1 << ppd->lmc) - 1));
                        qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY, pkey1,
                                      ah_attr->sl,
                                      sqp->ibqp.qp_num, qp->ibqp.qp_num,
                                      cpu_to_be16(lid),
                                      cpu_to_be16(ah_attr->dlid));
                        goto drop;
                }
        }

        /*
         * Check that the qkey matches (except for QP0, see 9.6.1.4.1).
         * Qkeys with the high order bit set mean use the
         * qkey from the QP context instead of the WR (see 10.2.5).
         */
        if (qp->ibqp.qp_num) {
                u32 qkey;

                qkey = (int)swqe->ud_wr.remote_qkey < 0 ?
                        sqp->qkey : swqe->ud_wr.remote_qkey;
                if (unlikely(qkey != qp->qkey)) {
                        u16 lid;

                        lid = ppd->lid | (ah_attr->src_path_bits &
                                          ((1 << ppd->lmc) - 1));
                        qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_QKEY, qkey,
                                      ah_attr->sl,
                                      sqp->ibqp.qp_num, qp->ibqp.qp_num,
                                      cpu_to_be16(lid),
                                      cpu_to_be16(ah_attr->dlid));
                        goto drop;
                }
        }

        /*
         * A GRH is expected to precede the data even if not
         * present on the wire.
         */
        length = swqe->length;
        memset(&wc, 0, sizeof(wc));
        wc.byte_len = length + sizeof(struct ib_grh);

        if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
                wc.wc_flags = IB_WC_WITH_IMM;
                wc.ex.imm_data = swqe->wr.ex.imm_data;
        }

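        /*
         * Hold r_lock while an RWQE is consumed and the data is copied,
         * since qib_ud_rcv() may be running concurrently for the same QP.
         */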
        spin_lock_irqsave(&qp->r_lock, flags);

        /*
         * Get the next work request entry to find where to put the data.
         */
        if (qp->r_flags & RVT_R_REUSE_SGE)
                qp->r_flags &= ~RVT_R_REUSE_SGE;
        else {
                int ret;

                ret = qib_get_rwqe(qp, 0);
                if (ret < 0) {
                        qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
                        goto bail_unlock;
                }
                if (!ret) {
                        if (qp->ibqp.qp_num == 0)
                                ibp->rvp.n_vl15_dropped++;
                        goto bail_unlock;
                }
        }
        /* Silently drop packets which are too big. */
        if (unlikely(wc.byte_len > qp->r_len)) {
                qp->r_flags |= RVT_R_REUSE_SGE;
                ibp->rvp.n_pkt_drops++;
                goto bail_unlock;
        }

        if (ah_attr->ah_flags & IB_AH_GRH) {
                struct ib_grh grh;
                struct ib_global_route grd = ah_attr->grh;

                qib_make_grh(ibp, &grh, &grd, 0, 0);
                qib_copy_sge(&qp->r_sge, &grh,
                             sizeof(grh), 1);
                wc.wc_flags |= IB_WC_GRH;
        } else
                qib_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1);
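        /*
         * Copy the payload SGE by SGE from the sender's SG list into the
         * buffer posted by the receive work request.
         */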
        ssge.sg_list = swqe->sg_list + 1;
        ssge.sge = *swqe->sg_list;
        ssge.num_sge = swqe->wr.num_sge;
        sge = &ssge.sge;
        while (length) {
                u32 len = sge->length;

                if (len > length)
                        len = length;
                if (len > sge->sge_length)
                        len = sge->sge_length;
                BUG_ON(len == 0);
                qib_copy_sge(&qp->r_sge, sge->vaddr, len, 1);
                sge->vaddr += len;
                sge->length -= len;
                sge->sge_length -= len;
                if (sge->sge_length == 0) {
                        if (--ssge.num_sge)
                                *sge = *ssge.sg_list++;
                } else if (sge->length == 0 && sge->mr->lkey) {
                        if (++sge->n >= RVT_SEGSZ) {
                                if (++sge->m >= sge->mr->mapsz)
                                        break;
                                sge->n = 0;
                        }
                        sge->vaddr =
                                sge->mr->map[sge->m]->segs[sge->n].vaddr;
                        sge->length =
                                sge->mr->map[sge->m]->segs[sge->n].length;
                }
                length -= len;
        }
        rvt_put_ss(&qp->r_sge);
        if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
                goto bail_unlock;
        wc.wr_id = qp->r_wr_id;
        wc.status = IB_WC_SUCCESS;
        wc.opcode = IB_WC_RECV;
        wc.qp = &qp->ibqp;
        wc.src_qp = sqp->ibqp.qp_num;
        wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ?
                swqe->ud_wr.pkey_index : 0;
        wc.slid = ppd->lid | (ah_attr->src_path_bits & ((1 << ppd->lmc) - 1));
        wc.sl = ah_attr->sl;
        wc.dlid_path_bits = ah_attr->dlid & ((1 << ppd->lmc) - 1);
        wc.port_num = qp->port_num;
        /* Signal completion event if the solicited bit is set. */
        rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
                     swqe->wr.send_flags & IB_SEND_SOLICITED);
        ibp->rvp.n_loop_pkts++;
bail_unlock:
        spin_unlock_irqrestore(&qp->r_lock, flags);
drop:
        rcu_read_unlock();
}

/**
 * qib_make_ud_req - construct a UD request packet
 * @qp: the QP
 * @flags: the caller's saved IRQ flags for s_lock, updated if the lock
 *         is dropped and re-acquired for a loopback send
 *
 * Assumes the s_lock is held.
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags)
{
        struct qib_qp_priv *priv = qp->priv;
        struct qib_other_headers *ohdr;
        struct ib_ah_attr *ah_attr;
        struct qib_pportdata *ppd;
        struct qib_ibport *ibp;
        struct rvt_swqe *wqe;
        u32 nwords;
        u32 extra_bytes;
        u32 bth0;
        u16 lrh0;
        u16 lid;
        int ret = 0;
        int next_cur;

        if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
                if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
                        goto bail;
                /* We are in the error state, flush the work request. */
                smp_read_barrier_depends(); /* see post_one_send */
                if (qp->s_last == ACCESS_ONCE(qp->s_head))
                        goto bail;
                /* If DMAs are in progress, we can't flush immediately. */
                if (atomic_read(&priv->s_dma_busy)) {
                        qp->s_flags |= RVT_S_WAIT_DMA;
                        goto bail;
                }
                wqe = rvt_get_swqe_ptr(qp, qp->s_last);
                qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
                goto done;
        }

        /* see post_one_send() */
        smp_read_barrier_depends();
        if (qp->s_cur == ACCESS_ONCE(qp->s_head))
                goto bail;

        wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
        next_cur = qp->s_cur + 1;
        if (next_cur >= qp->s_size)
                next_cur = 0;

        /* Construct the header. */
        ibp = to_iport(qp->ibqp.device, qp->port_num);
        ppd = ppd_from_ibp(ibp);
        ah_attr = &ibah_to_rvtah(wqe->ud_wr.ah)->attr;
        if (ah_attr->dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) {
                if (ah_attr->dlid != be16_to_cpu(IB_LID_PERMISSIVE))
                        this_cpu_inc(ibp->pmastats->n_multicast_xmit);
                else
                        this_cpu_inc(ibp->pmastats->n_unicast_xmit);
        } else {
                this_cpu_inc(ibp->pmastats->n_unicast_xmit);
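                /*
                 * Mask off the LMC path bits; if the base DLID matches our
                 * own port LID, the packet is delivered via the local
                 * loopback path instead of being sent on the wire.
                 */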
                lid = ah_attr->dlid & ~((1 << ppd->lmc) - 1);
                if (unlikely(lid == ppd->lid)) {
                        unsigned long tflags = *flags;
                        /*
                         * If DMAs are in progress, we can't generate
                         * a completion for the loopback packet since
                         * it would be out of order.
                         * XXX Instead of waiting, we could queue a
                         * zero length descriptor so we get a callback.
                         */
                        if (atomic_read(&priv->s_dma_busy)) {
                                qp->s_flags |= RVT_S_WAIT_DMA;
                                goto bail;
                        }
                        qp->s_cur = next_cur;
                        spin_unlock_irqrestore(&qp->s_lock, tflags);
                        qib_ud_loopback(qp, wqe);
                        spin_lock_irqsave(&qp->s_lock, tflags);
                        *flags = tflags;
                        qib_send_complete(qp, wqe, IB_WC_SUCCESS);
                        goto done;
                }
        }

        qp->s_cur = next_cur;
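        /*
         * Round the payload up to a multiple of 4 bytes: extra_bytes is
         * the pad count placed in BTH bits 20-21 and nwords is the padded
         * payload length in 32-bit words.
         */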
        extra_bytes = -wqe->length & 3;
        nwords = (wqe->length + extra_bytes) >> 2;

        /* header size in 32-bit words LRH+BTH+DETH = (8+12+8)/4. */
        qp->s_hdrwords = 7;
        qp->s_cur_size = wqe->length;
        qp->s_cur_sge = &qp->s_sge;
        qp->s_srate = ah_attr->static_rate;
        qp->s_wqe = wqe;
        qp->s_sge.sge = wqe->sg_list[0];
        qp->s_sge.sg_list = wqe->sg_list + 1;
        qp->s_sge.num_sge = wqe->wr.num_sge;
        qp->s_sge.total_len = wqe->length;

        if (ah_attr->ah_flags & IB_AH_GRH) {
                /* Header size in 32-bit words. */
                qp->s_hdrwords += qib_make_grh(ibp, &priv->s_hdr->u.l.grh,
                                               &ah_attr->grh,
                                               qp->s_hdrwords, nwords);
                lrh0 = QIB_LRH_GRH;
                ohdr = &priv->s_hdr->u.l.oth;
                /*
                 * Don't worry about sending to locally attached multicast
                 * QPs; the spec leaves that behavior unspecified.
                 */
        } else {
                /* Header size in 32-bit words. */
                lrh0 = QIB_LRH_BTH;
                ohdr = &priv->s_hdr->u.oth;
        }
        if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
                qp->s_hdrwords++;
                ohdr->u.ud.imm_data = wqe->wr.ex.imm_data;
                bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
        } else
                bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
        lrh0 |= ah_attr->sl << 4;
        if (qp->ibqp.qp_type == IB_QPT_SMI)
                lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */
        else
                lrh0 |= ibp->sl_to_vl[ah_attr->sl] << 12;
        priv->s_hdr->lrh[0] = cpu_to_be16(lrh0);
        priv->s_hdr->lrh[1] = cpu_to_be16(ah_attr->dlid);  /* DEST LID */
        priv->s_hdr->lrh[2] =
                        cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
        lid = ppd->lid;
        if (lid) {
                lid |= ah_attr->src_path_bits & ((1 << ppd->lmc) - 1);
                priv->s_hdr->lrh[3] = cpu_to_be16(lid);
        } else
                priv->s_hdr->lrh[3] = IB_LID_PERMISSIVE;
        if (wqe->wr.send_flags & IB_SEND_SOLICITED)
                bth0 |= IB_BTH_SOLICITED;
        bth0 |= extra_bytes << 20;
        bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? QIB_DEFAULT_P_KEY :
                qib_get_pkey(ibp, qp->ibqp.qp_type == IB_QPT_GSI ?
                             wqe->ud_wr.pkey_index : qp->s_pkey_index);
        ohdr->bth[0] = cpu_to_be32(bth0);
        /*
         * Use the multicast QP if the destination LID is a multicast LID.
         */
        ohdr->bth[1] = ah_attr->dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE) &&
                ah_attr->dlid != be16_to_cpu(IB_LID_PERMISSIVE) ?
                cpu_to_be32(QIB_MULTICAST_QPN) :
                cpu_to_be32(wqe->ud_wr.remote_qpn);
        ohdr->bth[2] = cpu_to_be32(wqe->psn & QIB_PSN_MASK);
        /*
         * Qkeys with the high order bit set mean use the
         * qkey from the QP context instead of the WR (see 10.2.5).
         */
        ohdr->u.ud.deth[0] = cpu_to_be32((int)wqe->ud_wr.remote_qkey < 0 ?
                                         qp->qkey : wqe->ud_wr.remote_qkey);
        ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);

done:
        return 1;
bail:
        qp->s_flags &= ~RVT_S_BUSY;
        return ret;
}

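/*
 * qib_lookup_pkey - return the index of @pkey in the receive context's
 * P_Key table, ignoring the membership bit; index 0 is returned if the
 * P_Key is not found.
 */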
static unsigned qib_lookup_pkey(struct qib_ibport *ibp, u16 pkey)
{
        struct qib_pportdata *ppd = ppd_from_ibp(ibp);
        struct qib_devdata *dd = ppd->dd;
        unsigned ctxt = ppd->hw_pidx;
        unsigned i;

        pkey &= 0x7fff; /* remove limited/full membership bit */

        for (i = 0; i < ARRAY_SIZE(dd->rcd[ctxt]->pkeys); ++i)
                if ((dd->rcd[ctxt]->pkeys[i] & 0x7fff) == pkey)
                        return i;

        /*
         * Should not get here, this means hardware failed to validate pkeys.
         * Punt and return index 0.
         */
        return 0;
}

/**
 * qib_ud_rcv - receive an incoming UD packet
 * @ibp: the port the packet came in on
 * @hdr: the packet header
 * @has_grh: true if the packet has a GRH
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP the packet came on
 *
 * This is called from qib_qp_rcv() to process an incoming UD packet
 * for the given QP.
 * Called at interrupt level.
 */
void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
                int has_grh, void *data, u32 tlen, struct rvt_qp *qp)
{
        struct qib_other_headers *ohdr;
        int opcode;
        u32 hdrsize;
        u32 pad;
        struct ib_wc wc;
        u32 qkey;
        u32 src_qp;
        u16 dlid;

        /* Check for GRH */
        if (!has_grh) {
                ohdr = &hdr->u.oth;
                hdrsize = 8 + 12 + 8;   /* LRH + BTH + DETH */
        } else {
                ohdr = &hdr->u.l.oth;
                hdrsize = 8 + 40 + 12 + 8; /* LRH + GRH + BTH + DETH */
        }
        qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
        src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & RVT_QPN_MASK;

        /*
         * Get the number of bytes the message was padded by
         * and drop incomplete packets.
         */
        pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
        if (unlikely(tlen < (hdrsize + pad + 4)))
                goto drop;

        tlen -= hdrsize + pad + 4;

        /*
         * Check that the permissive LID is only used on QP0
         * and the QKEY matches (see 9.6.1.4.1 and 9.6.1.5.1).
         */
        if (qp->ibqp.qp_num) {
                if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE ||
                             hdr->lrh[3] == IB_LID_PERMISSIVE))
                        goto drop;
                if (qp->ibqp.qp_num > 1) {
                        u16 pkey1, pkey2;

                        pkey1 = be32_to_cpu(ohdr->bth[0]);
                        pkey2 = qib_get_pkey(ibp, qp->s_pkey_index);
                        if (unlikely(!qib_pkey_ok(pkey1, pkey2))) {
                                qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
                                              pkey1,
                                              (be16_to_cpu(hdr->lrh[0]) >> 4) &
                                                0xF,
                                              src_qp, qp->ibqp.qp_num,
                                              hdr->lrh[3], hdr->lrh[1]);
                                return;
                        }
                }
                if (unlikely(qkey != qp->qkey)) {
                        qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_QKEY, qkey,
                                      (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
                                      src_qp, qp->ibqp.qp_num,
                                      hdr->lrh[3], hdr->lrh[1]);
                        return;
                }
                /* Drop invalid MAD packets (see 13.5.3.1). */
                if (unlikely(qp->ibqp.qp_num == 1 &&
                             (tlen != 256 ||
                              (be16_to_cpu(hdr->lrh[0]) >> 12) == 15)))
                        goto drop;
        } else {
                struct ib_smp *smp;

                /* Drop invalid MAD packets (see 13.5.3.1). */
                if (tlen != 256 || (be16_to_cpu(hdr->lrh[0]) >> 12) != 15)
                        goto drop;
                smp = (struct ib_smp *) data;
                if ((hdr->lrh[1] == IB_LID_PERMISSIVE ||
                     hdr->lrh[3] == IB_LID_PERMISSIVE) &&
                    smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
                        goto drop;
        }

        /*
         * The opcode is in the low byte when it's in network order
         * (top byte when in host order).
         */
        opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
        if (qp->ibqp.qp_num > 1 &&
            opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
                wc.ex.imm_data = ohdr->u.ud.imm_data;
                wc.wc_flags = IB_WC_WITH_IMM;
                tlen -= sizeof(u32);
        } else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
                wc.ex.imm_data = 0;
                wc.wc_flags = 0;
        } else
                goto drop;

        /*
         * A GRH is expected to precede the data even if not
         * present on the wire.
         */
        wc.byte_len = tlen + sizeof(struct ib_grh);

        /*
         * Get the next work request entry to find where to put the data.
         */
        if (qp->r_flags & RVT_R_REUSE_SGE)
                qp->r_flags &= ~RVT_R_REUSE_SGE;
        else {
                int ret;

                ret = qib_get_rwqe(qp, 0);
                if (ret < 0) {
                        qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
                        return;
                }
                if (!ret) {
                        if (qp->ibqp.qp_num == 0)
                                ibp->rvp.n_vl15_dropped++;
                        return;
                }
        }
        /* Silently drop packets which are too big. */
        if (unlikely(wc.byte_len > qp->r_len)) {
                qp->r_flags |= RVT_R_REUSE_SGE;
                goto drop;
        }
        if (has_grh) {
                qib_copy_sge(&qp->r_sge, &hdr->u.l.grh,
                             sizeof(struct ib_grh), 1);
                wc.wc_flags |= IB_WC_GRH;
        } else
                qib_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1);
        qib_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh), 1);
        rvt_put_ss(&qp->r_sge);
        if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
                return;
        wc.wr_id = qp->r_wr_id;
        wc.status = IB_WC_SUCCESS;
        wc.opcode = IB_WC_RECV;
        wc.vendor_err = 0;
        wc.qp = &qp->ibqp;
        wc.src_qp = src_qp;
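        /*
         * For GSI (QP1), report the index of the P_Key carried in the BTH;
         * other QPs report P_Key index 0.
         */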
        wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ?
                qib_lookup_pkey(ibp, be32_to_cpu(ohdr->bth[0])) : 0;
        wc.slid = be16_to_cpu(hdr->lrh[3]);
        wc.sl = (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF;
        dlid = be16_to_cpu(hdr->lrh[1]);
        /*
         * Save the LMC lower bits if the destination LID is a unicast LID.
         */
        wc.dlid_path_bits = dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE) ? 0 :
                dlid & ((1 << ppd_from_ibp(ibp)->lmc) - 1);
        wc.port_num = qp->port_num;
        /* Signal completion event if the solicited bit is set. */
        rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
                     (ohdr->bth[0] &
                        cpu_to_be32(IB_BTH_SOLICITED)) != 0);
        return;

drop:
        ibp->rvp.n_pkt_drops++;
}