linux/drivers/infiniband/hw/qib/qib_ruc.c
/*
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/spinlock.h>

#include "qib.h"
#include "qib_mad.h"

/*
 * Convert the AETH RNR timeout code into the number of microseconds.
 */
const u32 ib_qib_rnr_table[32] = {
	655360, /* 00: 655.36 */
	10,     /* 01:    .01 */
	20,     /* 02:    .02 */
	30,     /* 03:    .03 */
	40,     /* 04:    .04 */
	60,     /* 05:    .06 */
	80,     /* 06:    .08 */
	120,    /* 07:    .12 */
	160,    /* 08:    .16 */
	240,    /* 09:    .24 */
	320,    /* 0A:    .32 */
	480,    /* 0B:    .48 */
	640,    /* 0C:    .64 */
	960,    /* 0D:    .96 */
	1280,   /* 0E:   1.28 */
	1920,   /* 0F:   1.92 */
	2560,   /* 10:   2.56 */
	3840,   /* 11:   3.84 */
	5120,   /* 12:   5.12 */
	7680,   /* 13:   7.68 */
	10240,  /* 14:  10.24 */
	15360,  /* 15:  15.36 */
	20480,  /* 16:  20.48 */
	30720,  /* 17:  30.72 */
	40960,  /* 18:  40.96 */
	61440,  /* 19:  61.44 */
	81920,  /* 1A:  81.92 */
	122880, /* 1B: 122.88 */
	163840, /* 1C: 163.84 */
	245760, /* 1D: 245.76 */
	327680, /* 1E: 327.68 */
	491520  /* 1F: 491.52 */
};
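
/*
 * The table is indexed by the 5-bit RNR NAK timer code carried in the
 * AETH; e.g. the loopback path below arms the requester's RNR timer
 * with usecs_to_jiffies(ib_qib_rnr_table[qp->r_min_rnr_timer]).
 */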

/*
 * Validate a RWQE and fill in the SGE state.
 * Return 1 if OK.
 */
static int qib_init_sge(struct qib_qp *qp, struct qib_rwqe *wqe)
{
	int i, j, ret;
	struct ib_wc wc;
	struct qib_lkey_table *rkt;
	struct qib_pd *pd;
	struct qib_sge_state *ss;

	rkt = &to_idev(qp->ibqp.device)->lk_table;
	pd = to_ipd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
	ss = &qp->r_sge;
	ss->sg_list = qp->r_sg_list;
	qp->r_len = 0;
	for (i = j = 0; i < wqe->num_sge; i++) {
		if (wqe->sg_list[i].length == 0)
			continue;
		/* Check LKEY */
		if (!qib_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
				 &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))
			goto bad_lkey;
		qp->r_len += wqe->sg_list[i].length;
		j++;
	}
	ss->num_sge = j;
	ss->total_len = qp->r_len;
	ret = 1;
	goto bail;

bad_lkey:
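	/*
	 * Release the MR references already taken above: SGE 0 lives in
	 * ss->sge, SGEs 1..j-1 in ss->sg_list[].
	 */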
	while (j) {
		struct qib_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;

		atomic_dec(&sge->mr->refcount);
	}
	ss->num_sge = 0;
	memset(&wc, 0, sizeof(wc));
	wc.wr_id = wqe->wr_id;
	wc.status = IB_WC_LOC_PROT_ERR;
	wc.opcode = IB_WC_RECV;
	wc.qp = &qp->ibqp;
	/* Signal solicited completion event. */
	qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
	ret = 0;
bail:
	return ret;
}

/**
 * qib_get_rwqe - copy the next RWQE into the QP's receive state
 * @qp: the QP
 * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
 *
 * Return -1 if there is a local error, 0 if no RWQE is available,
 * otherwise return 1.
 *
 * Can be called from interrupt level.
 */
int qib_get_rwqe(struct qib_qp *qp, int wr_id_only)
{
	unsigned long flags;
	struct qib_rq *rq;
	struct qib_rwq *wq;
	struct qib_srq *srq;
	struct qib_rwqe *wqe;
	void (*handler)(struct ib_event *, void *);
	u32 tail;
	int ret;

	if (qp->ibqp.srq) {
		srq = to_isrq(qp->ibqp.srq);
		handler = srq->ibsrq.event_handler;
		rq = &srq->rq;
	} else {
		srq = NULL;
		handler = NULL;
		rq = &qp->r_rq;
	}

	spin_lock_irqsave(&rq->lock, flags);
	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
		ret = 0;
		goto unlock;
	}

	wq = rq->wq;
	tail = wq->tail;
	/* Validate tail before using it since it is user writable. */
	if (tail >= rq->size)
		tail = 0;
	if (unlikely(tail == wq->head)) {
		ret = 0;
		goto unlock;
	}
	/* Make sure entry is read after head index is read. */
	smp_rmb();
	wqe = get_rwqe_ptr(rq, tail);
	/*
	 * Even though we update the tail index in memory, the verbs
	 * consumer is not supposed to post more entries until a
	 * completion is generated.
	 */
	if (++tail >= rq->size)
		tail = 0;
	wq->tail = tail;
	if (!wr_id_only && !qib_init_sge(qp, wqe)) {
		ret = -1;
		goto unlock;
	}
	qp->r_wr_id = wqe->wr_id;

	ret = 1;
	set_bit(QIB_R_WRID_VALID, &qp->r_aflags);
	if (handler) {
		u32 n;

		/*
		 * Validate head pointer value and compute
		 * the number of remaining WQEs.
		 */
		n = wq->head;
		if (n >= rq->size)
			n = 0;
		if (n < tail)
			n += rq->size - tail;
		else
			n -= tail;
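		/*
		 * The SRQ limit event is one-shot: once the count of
		 * available WQEs drops below the limit, disarm it
		 * (limit = 0) and call the consumer's event handler
		 * outside the lock.
		 */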
		if (n < srq->limit) {
			struct ib_event ev;

			srq->limit = 0;
			spin_unlock_irqrestore(&rq->lock, flags);
			ev.device = qp->ibqp.device;
			ev.element.srq = qp->ibqp.srq;
			ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
			handler(&ev, srq->ibsrq.srq_context);
			goto bail;
		}
	}
unlock:
	spin_unlock_irqrestore(&rq->lock, flags);
bail:
	return ret;
}

/*
 * Switch to alternate path.
 * The QP s_lock should be held and interrupts disabled.
 */
void qib_migrate_qp(struct qib_qp *qp)
{
	struct ib_event ev;

	qp->s_mig_state = IB_MIG_MIGRATED;
	qp->remote_ah_attr = qp->alt_ah_attr;
	qp->port_num = qp->alt_ah_attr.port_num;
	qp->s_pkey_index = qp->s_alt_pkey_index;

	ev.device = qp->ibqp.device;
	ev.element.qp = &qp->ibqp;
	ev.event = IB_EVENT_PATH_MIG;
	qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
}

static __be64 get_sguid(struct qib_ibport *ibp, unsigned index)
{
	if (!index) {
		struct qib_pportdata *ppd = ppd_from_ibp(ibp);

		return ppd->guid;
	} else
		return ibp->guids[index - 1];
}

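/*
 * A GID matches if the interface ID is exact and the subnet prefix
 * is either the expected prefix or the default GID prefix.
 */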
static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
{
	return (gid->global.interface_id == id &&
		(gid->global.subnet_prefix == gid_prefix ||
		 gid->global.subnet_prefix == IB_DEFAULT_GID_PREFIX));
}

/*
 * qib_ruc_check_hdr - validate the headers of a received packet
 * against the QP's primary path, or against the alternate path when
 * a path migration request is armed.
 * Return 0 if the packet is acceptable, 1 otherwise.
 *
 * This should be called with the QP r_lock held.
 *
 * The s_lock will be acquired around the qib_migrate_qp() call.
 */
int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr,
		      int has_grh, struct qib_qp *qp, u32 bth0)
{
	__be64 guid;
	unsigned long flags;

	if (qp->s_mig_state == IB_MIG_ARMED && (bth0 & IB_BTH_MIG_REQ)) {
		if (!has_grh) {
			if (qp->alt_ah_attr.ah_flags & IB_AH_GRH)
				goto err;
		} else {
			if (!(qp->alt_ah_attr.ah_flags & IB_AH_GRH))
				goto err;
			guid = get_sguid(ibp, qp->alt_ah_attr.grh.sgid_index);
			if (!gid_ok(&hdr->u.l.grh.dgid, ibp->gid_prefix, guid))
				goto err;
			if (!gid_ok(&hdr->u.l.grh.sgid,
			    qp->alt_ah_attr.grh.dgid.global.subnet_prefix,
			    qp->alt_ah_attr.grh.dgid.global.interface_id))
				goto err;
		}
		if (!qib_pkey_ok((u16)bth0,
				 qib_get_pkey(ibp, qp->s_alt_pkey_index))) {
			qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
				      (u16)bth0,
				      (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
				      0, qp->ibqp.qp_num,
				      hdr->lrh[3], hdr->lrh[1]);
			goto err;
		}
		/* Validate the SLID. See Ch. 9.6.1.5 and 17.2.8 */
		if (be16_to_cpu(hdr->lrh[3]) != qp->alt_ah_attr.dlid ||
		    ppd_from_ibp(ibp)->port != qp->alt_ah_attr.port_num)
			goto err;
		spin_lock_irqsave(&qp->s_lock, flags);
		qib_migrate_qp(qp);
		spin_unlock_irqrestore(&qp->s_lock, flags);
	} else {
		if (!has_grh) {
			if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
				goto err;
		} else {
			if (!(qp->remote_ah_attr.ah_flags & IB_AH_GRH))
				goto err;
			guid = get_sguid(ibp,
					 qp->remote_ah_attr.grh.sgid_index);
			if (!gid_ok(&hdr->u.l.grh.dgid, ibp->gid_prefix, guid))
				goto err;
			if (!gid_ok(&hdr->u.l.grh.sgid,
			    qp->remote_ah_attr.grh.dgid.global.subnet_prefix,
			    qp->remote_ah_attr.grh.dgid.global.interface_id))
				goto err;
		}
		if (!qib_pkey_ok((u16)bth0,
				 qib_get_pkey(ibp, qp->s_pkey_index))) {
			qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
				      (u16)bth0,
				      (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
				      0, qp->ibqp.qp_num,
				      hdr->lrh[3], hdr->lrh[1]);
			goto err;
		}
		/* Validate the SLID. See Ch. 9.6.1.5 */
		if (be16_to_cpu(hdr->lrh[3]) != qp->remote_ah_attr.dlid ||
		    ppd_from_ibp(ibp)->port != qp->port_num)
			goto err;
		if (qp->s_mig_state == IB_MIG_REARM &&
		    !(bth0 & IB_BTH_MIG_REQ))
			qp->s_mig_state = IB_MIG_ARMED;
	}

	return 0;

err:
	return 1;
}

/**
 * qib_ruc_loopback - handle UC and RC loopback requests
 * @sqp: the sending QP
 *
 * This is called from qib_do_send() to
 * forward a WQE addressed to the same HCA.
 * Note that although we are single threaded due to the tasklet, we still
 * have to protect against post_send().  We don't have to worry about
 * receive interrupts since this is a connected protocol and all packets
 * will pass through here.
 */
static void qib_ruc_loopback(struct qib_qp *sqp)
{
	struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
	struct qib_qp *qp;
	struct qib_swqe *wqe;
	struct qib_sge *sge;
	unsigned long flags;
	struct ib_wc wc;
	u64 sdata;
	atomic64_t *maddr;
	enum ib_wc_status send_status;
	int release;
	int ret;

	/*
	 * Note that we check the responder QP state after
	 * checking the requester's state.
	 */
	qp = qib_lookup_qpn(ibp, sqp->remote_qpn);

	spin_lock_irqsave(&sqp->s_lock, flags);

	/* Return if we are already busy processing a work request. */
	if ((sqp->s_flags & (QIB_S_BUSY | QIB_S_ANY_WAIT)) ||
	    !(ib_qib_state_ops[sqp->state] & QIB_PROCESS_OR_FLUSH_SEND))
		goto unlock;

	sqp->s_flags |= QIB_S_BUSY;

again:
	if (sqp->s_last == sqp->s_head)
		goto clr_busy;
	wqe = get_swqe_ptr(sqp, sqp->s_last);

	/* Return if it is not OK to start a new work request. */
	if (!(ib_qib_state_ops[sqp->state] & QIB_PROCESS_NEXT_SEND_OK)) {
		if (!(ib_qib_state_ops[sqp->state] & QIB_FLUSH_SEND))
			goto clr_busy;
		/* We are in the error state, flush the work request. */
		send_status = IB_WC_WR_FLUSH_ERR;
		goto flush_send;
	}

	/*
	 * We can rely on the entry not changing without the s_lock
	 * being held until we update s_last.
	 * We increment s_cur to indicate s_last is in progress.
	 */
	if (sqp->s_last == sqp->s_cur) {
		if (++sqp->s_cur >= sqp->s_size)
			sqp->s_cur = 0;
	}
	spin_unlock_irqrestore(&sqp->s_lock, flags);

	if (!qp || !(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) ||
	    qp->ibqp.qp_type != sqp->ibqp.qp_type) {
		ibp->n_pkt_drops++;
		/*
		 * For RC, the requester would timeout and retry so
		 * shortcut the timeouts and just signal too many retries.
		 */
		if (sqp->ibqp.qp_type == IB_QPT_RC)
			send_status = IB_WC_RETRY_EXC_ERR;
		else
			send_status = IB_WC_SUCCESS;
		goto serr;
	}

	memset(&wc, 0, sizeof wc);
	send_status = IB_WC_SUCCESS;

	release = 1;
	sqp->s_sge.sge = wqe->sg_list[0];
	sqp->s_sge.sg_list = wqe->sg_list + 1;
	sqp->s_sge.num_sge = wqe->wr.num_sge;
	sqp->s_len = wqe->length;
	switch (wqe->wr.opcode) {
	case IB_WR_SEND_WITH_IMM:
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = wqe->wr.ex.imm_data;
		/* FALLTHROUGH */
	case IB_WR_SEND:
		ret = qib_get_rwqe(qp, 0);
		if (ret < 0)
			goto op_err;
		if (!ret)
			goto rnr_nak;
		break;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto inv_err;
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = wqe->wr.ex.imm_data;
		ret = qib_get_rwqe(qp, 1);
		if (ret < 0)
			goto op_err;
		if (!ret)
			goto rnr_nak;
		/* FALLTHROUGH */
	case IB_WR_RDMA_WRITE:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto inv_err;
		if (wqe->length == 0)
			break;
		if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
					  wqe->wr.wr.rdma.remote_addr,
					  wqe->wr.wr.rdma.rkey,
					  IB_ACCESS_REMOTE_WRITE)))
			goto acc_err;
		qp->r_sge.sg_list = NULL;
		qp->r_sge.num_sge = 1;
		qp->r_sge.total_len = wqe->length;
		break;

	case IB_WR_RDMA_READ:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
			goto inv_err;
		if (unlikely(!qib_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
					  wqe->wr.wr.rdma.remote_addr,
					  wqe->wr.wr.rdma.rkey,
					  IB_ACCESS_REMOTE_READ)))
			goto acc_err;
		release = 0;
		sqp->s_sge.sg_list = NULL;
		sqp->s_sge.num_sge = 1;
		qp->r_sge.sge = wqe->sg_list[0];
		qp->r_sge.sg_list = wqe->sg_list + 1;
		qp->r_sge.num_sge = wqe->wr.num_sge;
		qp->r_sge.total_len = wqe->length;
		break;

	case IB_WR_ATOMIC_CMP_AND_SWP:
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
			goto inv_err;
		if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
					  wqe->wr.wr.atomic.remote_addr,
					  wqe->wr.wr.atomic.rkey,
					  IB_ACCESS_REMOTE_ATOMIC)))
			goto acc_err;
		/* Perform atomic OP and save result. */
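		/*
		 * Fetch-and-add must report the value before the add, so
		 * subtract sdata from atomic64_add_return()'s post-add
		 * result; cmpxchg() already returns the prior contents.
		 */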
		maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
		sdata = wqe->wr.wr.atomic.compare_add;
		*(u64 *) sqp->s_sge.sge.vaddr =
			(wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
			(u64) atomic64_add_return(sdata, maddr) - sdata :
			(u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
				      sdata, wqe->wr.wr.atomic.swap);
		atomic_dec(&qp->r_sge.sge.mr->refcount);
		qp->r_sge.num_sge = 0;
		goto send_comp;

	default:
		send_status = IB_WC_LOC_QP_OP_ERR;
		goto serr;
	}

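	/*
	 * Copy the data a segment at a time from the sender's SGE state
	 * into the receiver's (for RDMA reads the two QPs' roles were
	 * swapped above).  "release" selects which side's MR references
	 * are dropped as SGEs are consumed.
	 */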
	sge = &sqp->s_sge.sge;
	while (sqp->s_len) {
		u32 len = sqp->s_len;

		if (len > sge->length)
			len = sge->length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		qib_copy_sge(&qp->r_sge, sge->vaddr, len, release);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (!release)
				atomic_dec(&sge->mr->refcount);
			if (--sqp->s_sge.num_sge)
				*sge = *sqp->s_sge.sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= QIB_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		sqp->s_len -= len;
	}
	if (release)
		while (qp->r_sge.num_sge) {
			atomic_dec(&qp->r_sge.sge.mr->refcount);
			if (--qp->r_sge.num_sge)
				qp->r_sge.sge = *qp->r_sge.sg_list++;
		}

	if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
		goto send_comp;

	if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
	else
		wc.opcode = IB_WC_RECV;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.byte_len = wqe->length;
	wc.qp = &qp->ibqp;
	wc.src_qp = qp->remote_qpn;
	wc.slid = qp->remote_ah_attr.dlid;
	wc.sl = qp->remote_ah_attr.sl;
	wc.port_num = 1;
	/* Signal completion event if the solicited bit is set. */
	qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
		       wqe->wr.send_flags & IB_SEND_SOLICITED);

send_comp:
	spin_lock_irqsave(&sqp->s_lock, flags);
	ibp->n_loop_pkts++;
flush_send:
	sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
	qib_send_complete(sqp, wqe, send_status);
	goto again;

rnr_nak:
	/* Handle RNR NAK */
	if (qp->ibqp.qp_type == IB_QPT_UC)
		goto send_comp;
	ibp->n_rnr_naks++;
	/*
	 * Note: we don't need the s_lock held since the BUSY flag
	 * makes this single threaded.
	 */
	if (sqp->s_rnr_retry == 0) {
		send_status = IB_WC_RNR_RETRY_EXC_ERR;
		goto serr;
	}
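	/*
	 * An RNR retry count of 7 means "retry forever" (per the IB
	 * spec), so only decrement the remaining count when it is
	 * finite.
	 */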
	if (sqp->s_rnr_retry_cnt < 7)
		sqp->s_rnr_retry--;
	spin_lock_irqsave(&sqp->s_lock, flags);
	if (!(ib_qib_state_ops[sqp->state] & QIB_PROCESS_RECV_OK))
		goto clr_busy;
	sqp->s_flags |= QIB_S_WAIT_RNR;
	sqp->s_timer.function = qib_rc_rnr_retry;
	sqp->s_timer.expires = jiffies +
		usecs_to_jiffies(ib_qib_rnr_table[qp->r_min_rnr_timer]);
	add_timer(&sqp->s_timer);
	goto clr_busy;

op_err:
	send_status = IB_WC_REM_OP_ERR;
	wc.status = IB_WC_LOC_QP_OP_ERR;
	goto err;

inv_err:
	send_status = IB_WC_REM_INV_REQ_ERR;
	wc.status = IB_WC_LOC_QP_OP_ERR;
	goto err;

acc_err:
	send_status = IB_WC_REM_ACCESS_ERR;
	wc.status = IB_WC_LOC_PROT_ERR;
err:
	/* responder goes to error state */
	qib_rc_error(qp, wc.status);

serr:
	spin_lock_irqsave(&sqp->s_lock, flags);
	qib_send_complete(sqp, wqe, send_status);
	if (sqp->ibqp.qp_type == IB_QPT_RC) {
		int lastwqe = qib_error_qp(sqp, IB_WC_WR_FLUSH_ERR);

		sqp->s_flags &= ~QIB_S_BUSY;
		spin_unlock_irqrestore(&sqp->s_lock, flags);
		if (lastwqe) {
			struct ib_event ev;

			ev.device = sqp->ibqp.device;
			ev.element.qp = &sqp->ibqp;
			ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
			sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
		}
		goto done;
	}
clr_busy:
	sqp->s_flags &= ~QIB_S_BUSY;
unlock:
	spin_unlock_irqrestore(&sqp->s_lock, flags);
done:
	if (qp && atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}

/**
 * qib_make_grh - construct a GRH header
 * @ibp: a pointer to the IB port
 * @hdr: a pointer to the GRH header being constructed
 * @grh: the global route address to send to
 * @hwords: the number of 32 bit words of header being sent
 * @nwords: the number of 32 bit words of data being sent
 *
 * Return the size of the header in 32 bit words.
 */
u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
		 struct ib_global_route *grh, u32 hwords, u32 nwords)
{
	hdr->version_tclass_flow =
		cpu_to_be32((IB_GRH_VERSION << IB_GRH_VERSION_SHIFT) |
			    (grh->traffic_class << IB_GRH_TCLASS_SHIFT) |
			    (grh->flow_label << IB_GRH_FLOW_SHIFT));
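	/*
	 * PayLen counts the bytes that follow the GRH: hwords does not
	 * yet include the GRH itself here, so dropping the 2-word LRH
	 * leaves the post-GRH header words, plus the (padded) data
	 * words and one word of ICRC, all scaled to bytes.
	 */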
	hdr->paylen = cpu_to_be16((hwords - 2 + nwords + SIZE_OF_CRC) << 2);
	/* next_hdr is defined by C8-7 in ch. 8.4.1 */
	hdr->next_hdr = IB_GRH_NEXT_HDR;
	hdr->hop_limit = grh->hop_limit;
	/* The SGID is 32-bit aligned. */
	hdr->sgid.global.subnet_prefix = ibp->gid_prefix;
	hdr->sgid.global.interface_id = grh->sgid_index ?
		ibp->guids[grh->sgid_index - 1] : ppd_from_ibp(ibp)->guid;
	hdr->dgid = grh->dgid;

	/* GRH header size in 32-bit words. */
	return sizeof(struct ib_grh) / sizeof(u32);
}

void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr,
			 u32 bth0, u32 bth2)
{
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	u16 lrh0;
	u32 nwords;
	u32 extra_bytes;

	/* Construct the header. */
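	/*
	 * -size & 3 yields the 0-3 pad bytes needed to round the
	 * payload up to a 4-byte boundary; nwords is the padded size
	 * in 32-bit words.
	 */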
	extra_bytes = -qp->s_cur_size & 3;
	nwords = (qp->s_cur_size + extra_bytes) >> 2;
	lrh0 = QIB_LRH_BTH;
	if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
		qp->s_hdrwords += qib_make_grh(ibp, &qp->s_hdr.u.l.grh,
					       &qp->remote_ah_attr.grh,
					       qp->s_hdrwords, nwords);
		lrh0 = QIB_LRH_GRH;
	}
	lrh0 |= ibp->sl_to_vl[qp->remote_ah_attr.sl] << 12 |
		qp->remote_ah_attr.sl << 4;
	qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
	qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
	qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
	qp->s_hdr.lrh[3] = cpu_to_be16(ppd_from_ibp(ibp)->lid |
				       qp->remote_ah_attr.src_path_bits);
	bth0 |= qib_get_pkey(ibp, qp->s_pkey_index);
	bth0 |= extra_bytes << 20;
	if (qp->s_mig_state == IB_MIG_MIGRATED)
		bth0 |= IB_BTH_MIG_REQ;
	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
	ohdr->bth[2] = cpu_to_be32(bth2);
}

/**
 * qib_do_send - perform a send on a QP
 * @work: contains a pointer to the QP
 *
 * Process entries in the send work queue until credit or queue is
 * exhausted.  Only allow one CPU to send a packet per QP (tasklet).
 * Otherwise, two threads could send packets out of order.
 */
void qib_do_send(struct work_struct *work)
{
	struct qib_qp *qp = container_of(work, struct qib_qp, s_work);
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	int (*make_req)(struct qib_qp *qp);
	unsigned long flags;

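	/*
	 * If the destination LID, ignoring the low-order path bits
	 * covered by the LMC, is this port's own LID, the request never
	 * touches the wire: handle RC/UC entirely in software.
	 */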
	if ((qp->ibqp.qp_type == IB_QPT_RC ||
	     qp->ibqp.qp_type == IB_QPT_UC) &&
	    (qp->remote_ah_attr.dlid & ~((1 << ppd->lmc) - 1)) == ppd->lid) {
		qib_ruc_loopback(qp);
		return;
	}

	if (qp->ibqp.qp_type == IB_QPT_RC)
		make_req = qib_make_rc_req;
	else if (qp->ibqp.qp_type == IB_QPT_UC)
		make_req = qib_make_uc_req;
	else
		make_req = qib_make_ud_req;

	spin_lock_irqsave(&qp->s_lock, flags);

	/* Return if we are already busy processing a work request. */
	if (!qib_send_ok(qp)) {
		spin_unlock_irqrestore(&qp->s_lock, flags);
		return;
	}

	qp->s_flags |= QIB_S_BUSY;

	spin_unlock_irqrestore(&qp->s_lock, flags);

	do {
		/* Check for a constructed packet to be sent. */
		if (qp->s_hdrwords != 0) {
			/*
			 * If the packet cannot be sent now, return and
			 * the send tasklet will be woken up later.
			 */
			if (qib_verbs_send(qp, &qp->s_hdr, qp->s_hdrwords,
					   qp->s_cur_sge, qp->s_cur_size))
				break;
			/* Record that s_hdr is empty. */
			qp->s_hdrwords = 0;
		}
	} while (make_req(qp));
}

/*
 * This should be called with s_lock held.
 */
void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe,
		       enum ib_wc_status status)
{
	u32 old_last, last;
	unsigned i;

	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_OR_FLUSH_SEND))
		return;

	for (i = 0; i < wqe->wr.num_sge; i++) {
		struct qib_sge *sge = &wqe->sg_list[i];

		atomic_dec(&sge->mr->refcount);
	}
	if (qp->ibqp.qp_type == IB_QPT_UD ||
	    qp->ibqp.qp_type == IB_QPT_SMI ||
	    qp->ibqp.qp_type == IB_QPT_GSI)
		atomic_dec(&to_iah(wqe->wr.wr.ud.ah)->refcount);

	/* See ch. 11.2.4.1 and 10.7.3.1 */
	if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
	    (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
	    status != IB_WC_SUCCESS) {
		struct ib_wc wc;

		memset(&wc, 0, sizeof wc);
		wc.wr_id = wqe->wr.wr_id;
		wc.status = status;
		wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
		wc.qp = &qp->ibqp;
		if (status == IB_WC_SUCCESS)
			wc.byte_len = wqe->length;
		qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc,
			     status != IB_WC_SUCCESS);
	}

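	/*
	 * Advance s_last past the completed WQE and pull any other
	 * queue indices still parked on the old slot forward with it.
	 */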
	last = qp->s_last;
	old_last = last;
	if (++last >= qp->s_size)
		last = 0;
	qp->s_last = last;
	if (qp->s_acked == old_last)
		qp->s_acked = last;
	if (qp->s_cur == old_last)
		qp->s_cur = last;
	if (qp->s_tail == old_last)
		qp->s_tail = last;
	if (qp->state == IB_QPS_SQD && last == qp->s_cur)
		qp->s_draining = 0;
}