/* linux/drivers/infiniband/hw/qib/qib_uc.c */
   1/*
   2 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
   3 * All rights reserved.
   4 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
   5 *
   6 * This software is available to you under a choice of one of two
   7 * licenses.  You may choose to be licensed under the terms of the GNU
   8 * General Public License (GPL) Version 2, available from the file
   9 * COPYING in the main directory of this source tree, or the
  10 * OpenIB.org BSD license below:
  11 *
  12 *     Redistribution and use in source and binary forms, with or
  13 *     without modification, are permitted provided that the following
  14 *     conditions are met:
  15 *
  16 *      - Redistributions of source code must retain the above
  17 *        copyright notice, this list of conditions and the following
  18 *        disclaimer.
  19 *
  20 *      - Redistributions in binary form must reproduce the above
  21 *        copyright notice, this list of conditions and the following
  22 *        disclaimer in the documentation and/or other materials
  23 *        provided with the distribution.
  24 *
  25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  32 * SOFTWARE.
  33 */
  34
  35#include "qib.h"
  36
/* Cut down ridiculously long IB macro names: OP(SEND_FIRST) expands to
 * IB_OPCODE_UC_SEND_FIRST, etc., for the UC transport opcodes.
 */
#define OP(x) IB_OPCODE_UC_##x
  39
/**
 * qib_make_uc_req - construct a request packet (SEND, RDMA write)
 * @qp: a pointer to the QP
 * @flags: unused here; present so this handler matches the signature of
 *	the other qib_make_*_req() routines invoked through the same
 *	function pointer (assumption from the visible interface — confirm
 *	against the dispatch site)
 *
 * Assumes the s_lock is held.
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags)
{
	struct qib_qp_priv *priv = qp->priv;
	struct ib_other_headers *ohdr;
	struct rvt_swqe *wqe;
	u32 hwords;	/* header length so far, in 32-bit words */
	u32 bth0;	/* BTH word 0 flags (solicited event bit etc.) */
	u32 len;	/* payload bytes for this packet */
	u32 pmtu = qp->pmtu;
	int ret = 0;

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
		if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
			goto bail;
		/* We are in the error state, flush the work request. */
		smp_read_barrier_depends(); /* see post_one_send() */
		if (qp->s_last == ACCESS_ONCE(qp->s_head))
			goto bail;
		/* If DMAs are in progress, we can't flush immediately. */
		if (atomic_read(&priv->s_dma_busy)) {
			qp->s_flags |= RVT_S_WAIT_DMA;
			goto bail;
		}
		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
		qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
		goto done;
	}

	/* Headers after the LRH live at u.oth, or u.l.oth when a GRH is
	 * inserted between the LRH and BTH.
	 */
	ohdr = &priv->s_hdr->u.oth;
	if (rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)
		ohdr = &priv->s_hdr->u.l.oth;

	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
	hwords = 5;
	bth0 = 0;

	/* Get the next send request. */
	wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
	qp->s_wqe = NULL;
	switch (qp->s_state) {
	default:
		/* Not mid-message: start a new WQE if one is queued. */
		if (!(ib_rvt_state_ops[qp->state] &
		    RVT_PROCESS_NEXT_SEND_OK))
			goto bail;
		/* Check if send work queue is empty. */
		smp_read_barrier_depends(); /* see post_one_send() */
		if (qp->s_cur == ACCESS_ONCE(qp->s_head))
			goto bail;
		/*
		 * Start a new request.
		 */
		qp->s_psn = wqe->psn;
		qp->s_sge.sge = wqe->sg_list[0];
		qp->s_sge.sg_list = wqe->sg_list + 1;
		qp->s_sge.num_sge = wqe->wr.num_sge;
		qp->s_sge.total_len = wqe->length;
		len = wqe->length;
		qp->s_len = len;
		switch (wqe->wr.opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_IMM:
			/* Multi-packet message: emit FIRST, stay on this WQE. */
			if (len > pmtu) {
				qp->s_state = OP(SEND_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_SEND)
				qp->s_state = OP(SEND_ONLY);
			else {
				qp->s_state =
					OP(SEND_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after the BTH */
				ohdr->u.imm_data = wqe->wr.ex.imm_data;
				hwords += 1;
			}
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
			/* ONLY packet completes the WQE; advance s_cur. */
			qp->s_wqe = wqe;
			if (++qp->s_cur >= qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_RDMA_WRITE:
		case IB_WR_RDMA_WRITE_WITH_IMM:
			/* RETH carries the remote buffer description. */
			ohdr->u.rc.reth.vaddr =
				cpu_to_be64(wqe->rdma_wr.remote_addr);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->rdma_wr.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			hwords += sizeof(struct ib_reth) / 4;
			if (len > pmtu) {
				qp->s_state = OP(RDMA_WRITE_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
				qp->s_state = OP(RDMA_WRITE_ONLY);
			else {
				qp->s_state =
					OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after the RETH */
				ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
				hwords += 1;
				if (wqe->wr.send_flags & IB_SEND_SOLICITED)
					bth0 |= IB_BTH_SOLICITED;
			}
			qp->s_wqe = wqe;
			if (++qp->s_cur >= qp->s_size)
				qp->s_cur = 0;
			break;

		default:
			/* Opcode not supported on a UC QP. */
			goto bail;
		}
		break;

	case OP(SEND_FIRST):
		qp->s_state = OP(SEND_MIDDLE);
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
		len = qp->s_len;
		if (len > pmtu) {
			/* More than one packet still to go: stay MIDDLE. */
			len = pmtu;
			break;
		}
		if (wqe->wr.opcode == IB_WR_SEND)
			qp->s_state = OP(SEND_LAST);
		else {
			qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
		}
		if (wqe->wr.send_flags & IB_SEND_SOLICITED)
			bth0 |= IB_BTH_SOLICITED;
		/* LAST packet completes the WQE; advance s_cur. */
		qp->s_wqe = wqe;
		if (++qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;

	case OP(RDMA_WRITE_FIRST):
		qp->s_state = OP(RDMA_WRITE_MIDDLE);
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_MIDDLE):
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			break;
		}
		if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
			qp->s_state = OP(RDMA_WRITE_LAST);
		else {
			qp->s_state =
				OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
		}
		qp->s_wqe = wqe;
		if (++qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;
	}
	/* Account for this packet and hand the header to the send engine. */
	qp->s_len -= len;
	qp->s_hdrwords = hwords;
	qp->s_cur_sge = &qp->s_sge;
	qp->s_cur_size = len;
	qib_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24),
			    qp->s_psn++ & QIB_PSN_MASK);
done:
	return 1;
bail:
	qp->s_flags &= ~RVT_S_BUSY;
	return ret;
}
 225
/**
 * qib_uc_rcv - handle an incoming UC packet
 * @ibp: the port the packet came in on
 * @hdr: the header of the packet
 * @has_grh: true if the packet has a GRH
 * @data: the packet data
 * @tlen: the length of the packet
 * @qp: the QP for this packet.
 *
 * This is called from qib_qp_rcv() to process an incoming UC packet
 * for the given QP.
 * Called at interrupt level.
 *
 * UC has no ACKs/retries, so on any PSN or opcode sequence error the
 * current message is silently dropped and reception resynchronizes on
 * the next message boundary (FIRST/ONLY opcode).
 */
void qib_uc_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
		int has_grh, void *data, u32 tlen, struct rvt_qp *qp)
{
	struct ib_other_headers *ohdr;
	u32 opcode;
	u32 hdrsize;	/* bytes of header preceding the payload */
	u32 psn;
	u32 pad;	/* payload pad bytes, from BTH */
	struct ib_wc wc;
	u32 pmtu = qp->pmtu;
	struct ib_reth *reth;
	int ret;

	/* Check for GRH */
	if (!has_grh) {
		ohdr = &hdr->u.oth;
		hdrsize = 8 + 12;	/* LRH + BTH */
	} else {
		ohdr = &hdr->u.l.oth;
		hdrsize = 8 + 40 + 12;	/* LRH + GRH + BTH */
	}

	opcode = be32_to_cpu(ohdr->bth[0]);
	if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode))
		return;

	psn = be32_to_cpu(ohdr->bth[2]);
	opcode >>= 24;	/* opcode lives in the top byte of BTH word 0 */

	/* Compare the PSN verses the expected PSN. */
	if (unlikely(qib_cmp24(psn, qp->r_psn) != 0)) {
		/*
		 * Handle a sequence error.
		 * Silently drop any current message.
		 */
		qp->r_psn = psn;
inv:
		/* Release whatever the half-received message was using;
		 * a partially consumed RWQE is rewound so it can be reused.
		 */
		if (qp->r_state == OP(SEND_FIRST) ||
		    qp->r_state == OP(SEND_MIDDLE)) {
			set_bit(RVT_R_REWIND_SGE, &qp->r_aflags);
			qp->r_sge.num_sge = 0;
		} else
			rvt_put_ss(&qp->r_sge);
		qp->r_state = OP(SEND_LAST);
		/* Only a message-start opcode can resynchronize us. */
		switch (opcode) {
		case OP(SEND_FIRST):
		case OP(SEND_ONLY):
		case OP(SEND_ONLY_WITH_IMMEDIATE):
			goto send_first;

		case OP(RDMA_WRITE_FIRST):
		case OP(RDMA_WRITE_ONLY):
		case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
			goto rdma_first;

		default:
			goto drop;
		}
	}

	/* Check for opcode sequence errors. */
	switch (qp->r_state) {
	case OP(SEND_FIRST):
	case OP(SEND_MIDDLE):
		/* Mid-SEND: only MIDDLE/LAST may follow. */
		if (opcode == OP(SEND_MIDDLE) ||
		    opcode == OP(SEND_LAST) ||
		    opcode == OP(SEND_LAST_WITH_IMMEDIATE))
			break;
		goto inv;

	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_MIDDLE):
		/* Mid-RDMA-write: only MIDDLE/LAST may follow. */
		if (opcode == OP(RDMA_WRITE_MIDDLE) ||
		    opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
			break;
		goto inv;

	default:
		/* Between messages: only FIRST/ONLY opcodes are valid. */
		if (opcode == OP(SEND_FIRST) ||
		    opcode == OP(SEND_ONLY) ||
		    opcode == OP(SEND_ONLY_WITH_IMMEDIATE) ||
		    opcode == OP(RDMA_WRITE_FIRST) ||
		    opcode == OP(RDMA_WRITE_ONLY) ||
		    opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
			break;
		goto inv;
	}

	if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
		rvt_comm_est(qp);

	/* OK, process the packet. */
	switch (opcode) {
	case OP(SEND_FIRST):
	case OP(SEND_ONLY):
	case OP(SEND_ONLY_WITH_IMMEDIATE):
send_first:
		/* Reuse a rewound RWQE if the last message was aborted,
		 * otherwise consume a fresh one.
		 */
		if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
			qp->r_sge = qp->s_rdma_read_sge;
		else {
			ret = qib_get_rwqe(qp, 0);
			if (ret < 0)
				goto op_err;
			if (!ret)
				goto drop;
			/*
			 * qp->s_rdma_read_sge will be the owner
			 * of the mr references.
			 */
			qp->s_rdma_read_sge = qp->r_sge;
		}
		qp->r_rcv_len = 0;
		if (opcode == OP(SEND_ONLY))
			goto no_immediate_data;
		else if (opcode == OP(SEND_ONLY_WITH_IMMEDIATE))
			goto send_last_imm;
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
		/* Check for invalid length PMTU or posted rwqe len. */
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto rewind;
		qp->r_rcv_len += pmtu;
		if (unlikely(qp->r_rcv_len > qp->r_len))
			goto rewind;
		qib_copy_sge(&qp->r_sge, data, pmtu, 0);
		break;

	case OP(SEND_LAST_WITH_IMMEDIATE):
send_last_imm:
		wc.ex.imm_data = ohdr->u.imm_data;
		hdrsize += 4;	/* immediate data word */
		wc.wc_flags = IB_WC_WITH_IMM;
		goto send_last;
	case OP(SEND_LAST):
no_immediate_data:
		wc.ex.imm_data = 0;
		wc.wc_flags = 0;
send_last:
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/* Check for invalid length. */
		/* XXX LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto rewind;
		/* Don't count the CRC. */
		tlen -= (hdrsize + pad + 4);
		wc.byte_len = tlen + qp->r_rcv_len;
		if (unlikely(wc.byte_len > qp->r_len))
			goto rewind;
		wc.opcode = IB_WC_RECV;
		qib_copy_sge(&qp->r_sge, data, tlen, 0);
		rvt_put_ss(&qp->s_rdma_read_sge);
last_imm:
		/* Build and post the receive completion. */
		wc.wr_id = qp->r_wr_id;
		wc.status = IB_WC_SUCCESS;
		wc.qp = &qp->ibqp;
		wc.src_qp = qp->remote_qpn;
		wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr);
		wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
		/* zero fields that are N/A */
		wc.vendor_err = 0;
		wc.pkey_index = 0;
		wc.dlid_path_bits = 0;
		wc.port_num = 0;
		/* Signal completion event if the solicited bit is set. */
		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
			     (ohdr->bth[0] &
				cpu_to_be32(IB_BTH_SOLICITED)) != 0);
		break;

	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_ONLY):
	case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE): /* consume RWQE */
rdma_first:
		if (unlikely(!(qp->qp_access_flags &
			       IB_ACCESS_REMOTE_WRITE))) {
			goto drop;
		}
		/* RETH (after the BTH) describes the remote buffer. */
		reth = &ohdr->u.rc.reth;
		hdrsize += sizeof(*reth);
		qp->r_len = be32_to_cpu(reth->length);
		qp->r_rcv_len = 0;
		qp->r_sge.sg_list = NULL;
		if (qp->r_len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = be64_to_cpu(reth->vaddr);
			int ok;

			/* Check rkey */
			ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len,
					 vaddr, rkey, IB_ACCESS_REMOTE_WRITE);
			if (unlikely(!ok))
				goto drop;
			qp->r_sge.num_sge = 1;
		} else {
			/* Zero-length write: empty SGE, no MR reference. */
			qp->r_sge.num_sge = 0;
			qp->r_sge.sge.mr = NULL;
			qp->r_sge.sge.vaddr = NULL;
			qp->r_sge.sge.length = 0;
			qp->r_sge.sge.sge_length = 0;
		}
		if (opcode == OP(RDMA_WRITE_ONLY))
			goto rdma_last;
		else if (opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE)) {
			wc.ex.imm_data = ohdr->u.rc.imm_data;
			goto rdma_last_imm;
		}
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_MIDDLE):
		/* Check for invalid length PMTU or posted rwqe len. */
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto drop;
		qp->r_rcv_len += pmtu;
		if (unlikely(qp->r_rcv_len > qp->r_len))
			goto drop;
		qib_copy_sge(&qp->r_sge, data, pmtu, 1);
		break;

	case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
		wc.ex.imm_data = ohdr->u.imm_data;
rdma_last_imm:
		hdrsize += 4;	/* immediate data word */
		wc.wc_flags = IB_WC_WITH_IMM;

		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/* Check for invalid length. */
		/* XXX LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto drop;
		/* Don't count the CRC. */
		tlen -= (hdrsize + pad + 4);
		if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
			goto drop;
		/* Immediate data requires an RWQE to complete against;
		 * a rewound one is released, otherwise consume a new one.
		 */
		if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
			rvt_put_ss(&qp->s_rdma_read_sge);
		else {
			ret = qib_get_rwqe(qp, 1);
			if (ret < 0)
				goto op_err;
			if (!ret)
				goto drop;
		}
		wc.byte_len = qp->r_len;
		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
		qib_copy_sge(&qp->r_sge, data, tlen, 1);
		rvt_put_ss(&qp->r_sge);
		goto last_imm;

	case OP(RDMA_WRITE_LAST):
rdma_last:
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/* Check for invalid length. */
		/* XXX LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto drop;
		/* Don't count the CRC. */
		tlen -= (hdrsize + pad + 4);
		if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
			goto drop;
		qib_copy_sge(&qp->r_sge, data, tlen, 1);
		rvt_put_ss(&qp->r_sge);
		break;

	default:
		/* Drop packet for unknown opcodes. */
		goto drop;
	}
	/* Packet accepted: advance expected PSN and remember the opcode
	 * for the sequence checks on the next packet.
	 */
	qp->r_psn++;
	qp->r_state = opcode;
	return;

rewind:
	/* Undo partial SEND consumption so the RWQE can be reused. */
	set_bit(RVT_R_REWIND_SGE, &qp->r_aflags);
	qp->r_sge.num_sge = 0;
drop:
	ibp->rvp.n_pkt_drops++;
	return;

op_err:
	rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
	return;

}
 525