linux/drivers/infiniband/sw/rxe/rxe_req.c
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
                       u32 opcode);

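/* When retrying a send or RDMA write that was partially acknowledged,
 * advance the WQE's DMA state (and, for writes, its iova) past the
 * npsn packets the responder has already acknowledged, so the resend
 * starts at the first unacknowledged packet.
 */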
static inline void retry_first_write_send(struct rxe_qp *qp,
                                          struct rxe_send_wqe *wqe,
                                          unsigned mask, int npsn)
{
        int i;

        for (i = 0; i < npsn; i++) {
                int to_send = (wqe->dma.resid > qp->mtu) ?
                                qp->mtu : wqe->dma.resid;

                qp->req.opcode = next_opcode(qp, wqe,
                                             wqe->wr.opcode);

                if (wqe->wr.send_flags & IB_SEND_INLINE) {
                        wqe->dma.resid -= to_send;
                        wqe->dma.sge_offset += to_send;
                } else {
                        advance_dma_data(&wqe->dma, to_send);
                }
                if (mask & WR_WRITE_MASK)
                        wqe->iova += qp->mtu;
        }
}

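/* Rewind the requester to the last acknowledged PSN and walk the send
 * queue, resetting the DMA state of every unfinished WQE so it is
 * resent starting from qp->comp.psn.
 */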
static void req_retry(struct rxe_qp *qp)
{
        struct rxe_send_wqe *wqe;
        unsigned int wqe_index;
        unsigned int mask;
        int npsn;
        int first = 1;

        wqe = queue_head(qp->sq.queue);
        npsn = (qp->comp.psn - wqe->first_psn) & BTH_PSN_MASK;

        qp->req.wqe_index       = consumer_index(qp->sq.queue);
        qp->req.psn             = qp->comp.psn;
        qp->req.opcode          = -1;

        for (wqe_index = consumer_index(qp->sq.queue);
                wqe_index != producer_index(qp->sq.queue);
                wqe_index = next_index(qp->sq.queue, wqe_index)) {
                wqe = addr_from_index(qp->sq.queue, wqe_index);
                mask = wr_opcode_mask(wqe->wr.opcode, qp);

                if (wqe->state == wqe_state_posted)
                        break;

                if (wqe->state == wqe_state_done)
                        continue;

                wqe->iova = (mask & WR_ATOMIC_MASK) ?
                             wqe->wr.wr.atomic.remote_addr :
                             (mask & WR_READ_OR_WRITE_MASK) ?
                             wqe->wr.wr.rdma.remote_addr :
                             0;

                if (!first || (mask & WR_READ_MASK) == 0) {
                        wqe->dma.resid = wqe->dma.length;
                        wqe->dma.cur_sge = 0;
                        wqe->dma.sge_offset = 0;
                }

                if (first) {
                        first = 0;

                        if (mask & WR_WRITE_OR_SEND_MASK)
                                retry_first_write_send(qp, wqe, mask, npsn);

                        if (mask & WR_READ_MASK)
                                wqe->iova += npsn * qp->mtu;
                }

                wqe->state = wqe_state_posted;
        }
}

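/* RNR NAK timer callback: fires when the remote's requested retry
 * delay has elapsed; kick the requester task so it resumes sending.
 */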
void rnr_nak_timer(unsigned long data)
{
        struct rxe_qp *qp = (struct rxe_qp *)data;

        pr_debug("qp#%d rnr nak timer fired\n", qp_num(qp));
        rxe_run_task(&qp->req.task, 1);
}

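/* Return the next WQE for the requester to work on, or NULL if the
 * send queue is empty, the QP is draining/drained, or a fenced WQE
 * must wait for earlier work to complete.
 */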
static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
{
        struct rxe_send_wqe *wqe = queue_head(qp->sq.queue);
        unsigned long flags;

        if (unlikely(qp->req.state == QP_STATE_DRAIN)) {
                /* check to see if we are drained;
                 * state_lock used by requester and completer
                 */
                spin_lock_irqsave(&qp->state_lock, flags);
                do {
                        if (qp->req.state != QP_STATE_DRAIN) {
                                /* comp just finished */
                                spin_unlock_irqrestore(&qp->state_lock,
                                                       flags);
                                break;
                        }

                        if (wqe && ((qp->req.wqe_index !=
                                consumer_index(qp->sq.queue)) ||
                                (wqe->state != wqe_state_posted))) {
                                /* comp not done yet */
                                spin_unlock_irqrestore(&qp->state_lock,
                                                       flags);
                                break;
                        }

                        qp->req.state = QP_STATE_DRAINED;
                        spin_unlock_irqrestore(&qp->state_lock, flags);

                        if (qp->ibqp.event_handler) {
                                struct ib_event ev;

                                ev.device = qp->ibqp.device;
                                ev.element.qp = &qp->ibqp;
                                ev.event = IB_EVENT_SQ_DRAINED;
                                qp->ibqp.event_handler(&ev,
                                        qp->ibqp.qp_context);
                        }
                } while (0);
        }

        if (qp->req.wqe_index == producer_index(qp->sq.queue))
                return NULL;

        wqe = addr_from_index(qp->sq.queue, qp->req.wqe_index);

        if (unlikely((qp->req.state == QP_STATE_DRAIN ||
                      qp->req.state == QP_STATE_DRAINED) &&
                     (wqe->state != wqe_state_processing)))
                return NULL;

        if (unlikely((wqe->wr.send_flags & IB_SEND_FENCE) &&
                     (qp->req.wqe_index != consumer_index(qp->sq.queue)))) {
                qp->req.wait_fence = 1;
                return NULL;
        }

        wqe->mask = wr_opcode_mask(wqe->wr.opcode, qp);
        return wqe;
}

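/* Map an IB work request opcode to the RC wire opcode for the next
 * packet.  Multi-packet messages are split into FIRST/MIDDLE/LAST
 * packets; "fits" is true when the remaining payload fits in a single
 * MTU, i.e. this is the last (or only) packet of the message.
 */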
static int next_opcode_rc(struct rxe_qp *qp, u32 opcode, int fits)
{
        switch (opcode) {
        case IB_WR_RDMA_WRITE:
                if (qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_FIRST ||
                    qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_MIDDLE)
                        return fits ?
                                IB_OPCODE_RC_RDMA_WRITE_LAST :
                                IB_OPCODE_RC_RDMA_WRITE_MIDDLE;
                else
                        return fits ?
                                IB_OPCODE_RC_RDMA_WRITE_ONLY :
                                IB_OPCODE_RC_RDMA_WRITE_FIRST;

        case IB_WR_RDMA_WRITE_WITH_IMM:
                if (qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_FIRST ||
                    qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_MIDDLE)
                        return fits ?
                                IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE :
                                IB_OPCODE_RC_RDMA_WRITE_MIDDLE;
                else
                        return fits ?
                                IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE :
                                IB_OPCODE_RC_RDMA_WRITE_FIRST;

        case IB_WR_SEND:
                if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
                    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
                        return fits ?
                                IB_OPCODE_RC_SEND_LAST :
                                IB_OPCODE_RC_SEND_MIDDLE;
                else
                        return fits ?
                                IB_OPCODE_RC_SEND_ONLY :
                                IB_OPCODE_RC_SEND_FIRST;

        case IB_WR_SEND_WITH_IMM:
                if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
                    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
                        return fits ?
                                IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE :
                                IB_OPCODE_RC_SEND_MIDDLE;
                else
                        return fits ?
                                IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE :
                                IB_OPCODE_RC_SEND_FIRST;

        case IB_WR_RDMA_READ:
                return IB_OPCODE_RC_RDMA_READ_REQUEST;

        case IB_WR_ATOMIC_CMP_AND_SWP:
                return IB_OPCODE_RC_COMPARE_SWAP;

        case IB_WR_ATOMIC_FETCH_AND_ADD:
                return IB_OPCODE_RC_FETCH_ADD;

        case IB_WR_SEND_WITH_INV:
                if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
                    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
                        return fits ? IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE :
                                IB_OPCODE_RC_SEND_MIDDLE;
                else
                        return fits ? IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE :
                                IB_OPCODE_RC_SEND_FIRST;
        case IB_WR_REG_MR:
        case IB_WR_LOCAL_INV:
                return opcode;
        }

        return -EINVAL;
}

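/* Same mapping as next_opcode_rc(), but for UC QPs, which only carry
 * sends and RDMA writes.
 */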
static int next_opcode_uc(struct rxe_qp *qp, u32 opcode, int fits)
{
        switch (opcode) {
        case IB_WR_RDMA_WRITE:
                if (qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_FIRST ||
                    qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_MIDDLE)
                        return fits ?
                                IB_OPCODE_UC_RDMA_WRITE_LAST :
                                IB_OPCODE_UC_RDMA_WRITE_MIDDLE;
                else
                        return fits ?
                                IB_OPCODE_UC_RDMA_WRITE_ONLY :
                                IB_OPCODE_UC_RDMA_WRITE_FIRST;

        case IB_WR_RDMA_WRITE_WITH_IMM:
                if (qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_FIRST ||
                    qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_MIDDLE)
                        return fits ?
                                IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE :
                                IB_OPCODE_UC_RDMA_WRITE_MIDDLE;
                else
                        return fits ?
                                IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE :
                                IB_OPCODE_UC_RDMA_WRITE_FIRST;

        case IB_WR_SEND:
                if (qp->req.opcode == IB_OPCODE_UC_SEND_FIRST ||
                    qp->req.opcode == IB_OPCODE_UC_SEND_MIDDLE)
                        return fits ?
                                IB_OPCODE_UC_SEND_LAST :
                                IB_OPCODE_UC_SEND_MIDDLE;
                else
                        return fits ?
                                IB_OPCODE_UC_SEND_ONLY :
                                IB_OPCODE_UC_SEND_FIRST;

        case IB_WR_SEND_WITH_IMM:
                if (qp->req.opcode == IB_OPCODE_UC_SEND_FIRST ||
                    qp->req.opcode == IB_OPCODE_UC_SEND_MIDDLE)
                        return fits ?
                                IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE :
                                IB_OPCODE_UC_SEND_MIDDLE;
                else
                        return fits ?
                                IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE :
                                IB_OPCODE_UC_SEND_FIRST;
        }

        return -EINVAL;
}

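/* Pick the wire opcode for the next packet based on QP type; UD, SMI
 * and GSI messages always go out as a single SEND_ONLY packet.
 */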
static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
                       u32 opcode)
{
        int fits = (wqe->dma.resid <= qp->mtu);

        switch (qp_type(qp)) {
        case IB_QPT_RC:
                return next_opcode_rc(qp, opcode, fits);

        case IB_QPT_UC:
                return next_opcode_uc(qp, opcode, fits);

        case IB_QPT_SMI:
        case IB_QPT_UD:
        case IB_QPT_GSI:
                switch (opcode) {
                case IB_WR_SEND:
                        return IB_OPCODE_UD_SEND_ONLY;

                case IB_WR_SEND_WITH_IMM:
                        return IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
                }
                break;

        default:
                break;
        }

        return -EINVAL;
}

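/* Reserve one of the QP's outstanding RDMA read/atomic slots for this
 * WQE; returns -EAGAIN if none are free so the requester backs off
 * until a slot is released.
 */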
static inline int check_init_depth(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
        int depth;

        if (wqe->has_rd_atomic)
                return 0;

        qp->req.need_rd_atomic = 1;
        depth = atomic_dec_return(&qp->req.rd_atomic);

        if (depth >= 0) {
                qp->req.need_rd_atomic = 0;
                wqe->has_rd_atomic = 1;
                return 0;
        }

        atomic_inc(&qp->req.rd_atomic);
        return -EAGAIN;
}

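/* Connected QPs (RC/UC) use the MTU negotiated for the QP; datagram
 * QPs are limited by the port's MTU cap.
 */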
static inline int get_mtu(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
        struct rxe_port *port;
        struct rxe_av *av;

        if ((qp_type(qp) == IB_QPT_RC) || (qp_type(qp) == IB_QPT_UC))
                return qp->mtu;

        av = &wqe->av;
        port = &rxe->port;

        return port->mtu_cap;
}

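/* Allocate the skb for one request packet and build its headers: the
 * BTH plus whichever of RETH/IMMDT/IETH/ATMETH/DETH the opcode
 * requires.
 */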
static struct sk_buff *init_req_packet(struct rxe_qp *qp,
                                       struct rxe_send_wqe *wqe,
                                       int opcode, int payload,
                                       struct rxe_pkt_info *pkt)
{
        struct rxe_dev          *rxe = to_rdev(qp->ibqp.device);
        struct rxe_port         *port = &rxe->port;
        struct sk_buff          *skb;
        struct rxe_send_wr      *ibwr = &wqe->wr;
        struct rxe_av           *av;
        int                     pad = (-payload) & 0x3;
        int                     paylen;
        int                     solicited;
        u16                     pkey;
        u32                     qp_num;
        int                     ack_req;

        /* length from start of bth to end of icrc */
        paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;

        /* pkt->hdr, rxe, port_num and mask are initialized in ifc
         * layer
         */
        pkt->opcode     = opcode;
        pkt->qp         = qp;
        pkt->psn        = qp->req.psn;
        pkt->mask       = rxe_opcode[opcode].mask;
        pkt->paylen     = paylen;
        pkt->offset     = 0;
        pkt->wqe        = wqe;

        /* init skb */
        av = rxe_get_av(pkt);
        skb = rxe->ifc_ops->init_packet(rxe, av, paylen, pkt);
        if (unlikely(!skb))
                return NULL;

        /* init bth */
        solicited = (ibwr->send_flags & IB_SEND_SOLICITED) &&
                        (pkt->mask & RXE_END_MASK) &&
                        ((pkt->mask & (RXE_SEND_MASK)) ||
                        (pkt->mask & (RXE_WRITE_MASK | RXE_IMMDT_MASK)) ==
                        (RXE_WRITE_MASK | RXE_IMMDT_MASK));

        pkey = (qp_type(qp) == IB_QPT_GSI) ?
                 port->pkey_tbl[ibwr->wr.ud.pkey_index] :
                 port->pkey_tbl[qp->attr.pkey_index];

        qp_num = (pkt->mask & RXE_DETH_MASK) ? ibwr->wr.ud.remote_qpn :
                                         qp->attr.dest_qp_num;

        ack_req = ((pkt->mask & RXE_END_MASK) ||
                (qp->req.noack_pkts++ > RXE_MAX_PKT_PER_ACK));
        if (ack_req)
                qp->req.noack_pkts = 0;

        bth_init(pkt, pkt->opcode, solicited, 0, pad, pkey, qp_num,
                 ack_req, pkt->psn);

        /* init optional headers */
        if (pkt->mask & RXE_RETH_MASK) {
                reth_set_rkey(pkt, ibwr->wr.rdma.rkey);
                reth_set_va(pkt, wqe->iova);
                reth_set_len(pkt, wqe->dma.length);
        }

        if (pkt->mask & RXE_IMMDT_MASK)
                immdt_set_imm(pkt, ibwr->ex.imm_data);

        if (pkt->mask & RXE_IETH_MASK)
                ieth_set_rkey(pkt, ibwr->ex.invalidate_rkey);

        if (pkt->mask & RXE_ATMETH_MASK) {
                atmeth_set_va(pkt, wqe->iova);
                if (opcode == IB_OPCODE_RC_COMPARE_SWAP ||
                    opcode == IB_OPCODE_RD_COMPARE_SWAP) {
                        atmeth_set_swap_add(pkt, ibwr->wr.atomic.swap);
                        atmeth_set_comp(pkt, ibwr->wr.atomic.compare_add);
                } else {
                        atmeth_set_swap_add(pkt, ibwr->wr.atomic.compare_add);
                }
                atmeth_set_rkey(pkt, ibwr->wr.atomic.rkey);
        }

        if (pkt->mask & RXE_DETH_MASK) {
                if (qp->ibqp.qp_num == 1)
                        deth_set_qkey(pkt, GSI_QKEY);
                else
                        deth_set_qkey(pkt, ibwr->wr.ud.remote_qkey);
                deth_set_sqp(pkt, qp->ibqp.qp_num);
        }

        return skb;
}

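/* Copy the payload (inline data or data described by the SGEs) into
 * the packet and append the complemented CRC32 as the ICRC trailer.
 */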
static int fill_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
                       struct rxe_pkt_info *pkt, struct sk_buff *skb,
                       int paylen)
{
        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
        u32 crc = 0;
        u32 *p;
        int err;

        err = rxe->ifc_ops->prepare(rxe, pkt, skb, &crc);
        if (err)
                return err;

        if (pkt->mask & RXE_WRITE_OR_SEND) {
                if (wqe->wr.send_flags & IB_SEND_INLINE) {
                        u8 *tmp = &wqe->dma.inline_data[wqe->dma.sge_offset];

                        crc = crc32_le(crc, tmp, paylen);

                        memcpy(payload_addr(pkt), tmp, paylen);

                        wqe->dma.resid -= paylen;
                        wqe->dma.sge_offset += paylen;
                } else {
                        err = copy_data(rxe, qp->pd, 0, &wqe->dma,
                                        payload_addr(pkt), paylen,
                                        from_mem_obj,
                                        &crc);
                        if (err)
                                return err;
                }
        }
        p = payload_addr(pkt) + paylen + bth_pad(pkt);

        *p = ~crc;

        return 0;
}

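/* On the last packet of a message an RC WQE becomes pending, waiting
 * for its acknowledgement; packets in the middle of a message leave
 * the WQE in the processing state.
 */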
static void update_wqe_state(struct rxe_qp *qp,
                struct rxe_send_wqe *wqe,
                struct rxe_pkt_info *pkt)
{
        if (pkt->mask & RXE_END_MASK) {
                if (qp_type(qp) == IB_QPT_RC)
                        wqe->state = wqe_state_pending;
        } else {
                wqe->state = wqe_state_processing;
        }
}

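/* Record the PSN range covered by this WQE and advance the requester
 * PSN: a read request reserves one PSN per expected response packet,
 * everything else consumes one PSN per request packet.
 */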
static void update_wqe_psn(struct rxe_qp *qp,
                           struct rxe_send_wqe *wqe,
                           struct rxe_pkt_info *pkt,
                           int payload)
{
        /* number of packets left to send including current one */
        int num_pkt = (wqe->dma.resid + payload + qp->mtu - 1) / qp->mtu;

        /* handle zero length packet case */
        if (num_pkt == 0)
                num_pkt = 1;

        if (pkt->mask & RXE_START_MASK) {
                wqe->first_psn = qp->req.psn;
                wqe->last_psn = (qp->req.psn + num_pkt - 1) & BTH_PSN_MASK;
        }

        if (pkt->mask & RXE_READ_MASK)
                qp->req.psn = (wqe->first_psn + num_pkt) & BTH_PSN_MASK;
        else
                qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
}

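/* Snapshot (and, below, restore) the WQE state and requester PSN
 * around rxe_xmit_packet() so a failed transmit can be rolled back
 * without confusing the completer.
 */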
static void save_state(struct rxe_send_wqe *wqe,
                       struct rxe_qp *qp,
                       struct rxe_send_wqe *rollback_wqe,
                       u32 *rollback_psn)
{
        rollback_wqe->state     = wqe->state;
        rollback_wqe->first_psn = wqe->first_psn;
        rollback_wqe->last_psn  = wqe->last_psn;
        *rollback_psn           = qp->req.psn;
}

static void rollback_state(struct rxe_send_wqe *wqe,
                           struct rxe_qp *qp,
                           struct rxe_send_wqe *rollback_wqe,
                           u32 rollback_psn)
{
        wqe->state     = rollback_wqe->state;
        wqe->first_psn = rollback_wqe->first_psn;
        wqe->last_psn  = rollback_wqe->last_psn;
        qp->req.psn    = rollback_psn;
}

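/* Commit requester state after a successful transmit and arm the
 * retransmit timer if a timeout is configured.
 */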
static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
                         struct rxe_pkt_info *pkt, int payload)
{
        qp->req.opcode = pkt->opcode;

        if (pkt->mask & RXE_END_MASK)
                qp->req.wqe_index = next_index(qp->sq.queue, qp->req.wqe_index);

        qp->need_req_skb = 0;

        if (qp->qp_timeout_jiffies && !timer_pending(&qp->retrans_timer))
                mod_timer(&qp->retrans_timer,
                          jiffies + qp->qp_timeout_jiffies);
}

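/* Requester work function: pull WQEs off the send queue, build and
 * transmit one request packet per loop iteration, and stop when the
 * queue is empty or the QP must wait (PSN credits, fence, rd/atomic
 * depth or the in-flight skb limit).
 */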
int rxe_requester(void *arg)
{
        struct rxe_qp *qp = (struct rxe_qp *)arg;
        struct rxe_pkt_info pkt;
        struct sk_buff *skb;
        struct rxe_send_wqe *wqe;
        enum rxe_hdr_mask mask;
        int payload;
        int mtu;
        int opcode;
        int ret;
        struct rxe_send_wqe rollback_wqe;
        u32 rollback_psn;

        rxe_add_ref(qp);

next_wqe:
        if (unlikely(!qp->valid || qp->req.state == QP_STATE_ERROR))
                goto exit;

        if (unlikely(qp->req.state == QP_STATE_RESET)) {
                qp->req.wqe_index = consumer_index(qp->sq.queue);
                qp->req.opcode = -1;
                qp->req.need_rd_atomic = 0;
                qp->req.wait_psn = 0;
                qp->req.need_retry = 0;
                goto exit;
        }

        if (unlikely(qp->req.need_retry)) {
                req_retry(qp);
                qp->req.need_retry = 0;
        }

        wqe = req_next_wqe(qp);
        if (unlikely(!wqe))
                goto exit;

        if (wqe->mask & WR_REG_MASK) {
                if (wqe->wr.opcode == IB_WR_LOCAL_INV) {
                        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
                        struct rxe_mem *rmr;

                        rmr = rxe_pool_get_index(&rxe->mr_pool,
                                                 wqe->wr.ex.invalidate_rkey >> 8);
                        if (!rmr) {
                                pr_err("No mr for key %#x\n",
                                       wqe->wr.ex.invalidate_rkey);
                                wqe->state = wqe_state_error;
                                wqe->status = IB_WC_MW_BIND_ERR;
                                goto exit;
                        }
                        rmr->state = RXE_MEM_STATE_FREE;
                        wqe->state = wqe_state_done;
                        wqe->status = IB_WC_SUCCESS;
                } else if (wqe->wr.opcode == IB_WR_REG_MR) {
                        struct rxe_mem *rmr = to_rmr(wqe->wr.wr.reg.mr);

                        rmr->state = RXE_MEM_STATE_VALID;
                        rmr->access = wqe->wr.wr.reg.access;
                        rmr->lkey = wqe->wr.wr.reg.key;
                        rmr->rkey = wqe->wr.wr.reg.key;
                        wqe->state = wqe_state_done;
                        wqe->status = IB_WC_SUCCESS;
                } else {
                        goto exit;
                }
                qp->req.wqe_index = next_index(qp->sq.queue,
                                                qp->req.wqe_index);
                goto next_wqe;
        }

        if (unlikely(qp_type(qp) == IB_QPT_RC &&
                     qp->req.psn > (qp->comp.psn + RXE_MAX_UNACKED_PSNS))) {
                qp->req.wait_psn = 1;
                goto exit;
        }

        /* Limit the number of inflight SKBs per QP */
        if (unlikely(atomic_read(&qp->skb_out) >
                     RXE_INFLIGHT_SKBS_PER_QP_HIGH)) {
                qp->need_req_skb = 1;
                goto exit;
        }

        opcode = next_opcode(qp, wqe, wqe->wr.opcode);
        if (unlikely(opcode < 0)) {
                wqe->status = IB_WC_LOC_QP_OP_ERR;
                goto exit;
        }

        mask = rxe_opcode[opcode].mask;
        if (unlikely(mask & RXE_READ_OR_ATOMIC)) {
                if (check_init_depth(qp, wqe))
                        goto exit;
        }

        mtu = get_mtu(qp, wqe);
        payload = (mask & RXE_WRITE_OR_SEND) ? wqe->dma.resid : 0;
        if (payload > mtu) {
                if (qp_type(qp) == IB_QPT_UD) {
                        /* C10-93.1.1: If the total sum of all the buffer lengths specified for a
                         * UD message exceeds the MTU of the port as returned by QueryHCA, the CI
                         * shall not emit any packets for this message. Further, the CI shall not
                         * generate an error due to this condition.
                         */

                        /* fake a successful UD send */
                        wqe->first_psn = qp->req.psn;
                        wqe->last_psn = qp->req.psn;
                        qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
                        qp->req.opcode = IB_OPCODE_UD_SEND_ONLY;
                        qp->req.wqe_index = next_index(qp->sq.queue,
                                                       qp->req.wqe_index);
                        wqe->state = wqe_state_done;
                        wqe->status = IB_WC_SUCCESS;
                        __rxe_do_task(&qp->comp.task);
                        rxe_drop_ref(qp);
                        return 0;
                }
                payload = mtu;
        }

        skb = init_req_packet(qp, wqe, opcode, payload, &pkt);
        if (unlikely(!skb)) {
                pr_err("qp#%d Failed allocating skb\n", qp_num(qp));
                goto err;
        }

        if (fill_packet(qp, wqe, &pkt, skb, payload)) {
                pr_debug("qp#%d Error during fill packet\n", qp_num(qp));
                goto err;
        }

        /*
         * To prevent a race on wqe access between requester and completer,
         * wqe members state and psn need to be set before calling
         * rxe_xmit_packet().
         * Otherwise, completer might initiate an unjustified retry flow.
         */
        save_state(wqe, qp, &rollback_wqe, &rollback_psn);
        update_wqe_state(qp, wqe, &pkt);
        update_wqe_psn(qp, wqe, &pkt, payload);
        ret = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp, &pkt, skb);
        if (ret) {
                qp->need_req_skb = 1;
                kfree_skb(skb);

                rollback_state(wqe, qp, &rollback_wqe, rollback_psn);

                if (ret == -EAGAIN) {
                        rxe_run_task(&qp->req.task, 1);
                        goto exit;
                }

                goto err;
        }

        update_state(qp, wqe, &pkt, payload);

        goto next_wqe;

err:
        kfree_skb(skb);
        wqe->status = IB_WC_LOC_PROT_ERR;
        wqe->state = wqe_state_error;

        /*
         * IBA Spec. Section 10.7.3.1 SIGNALED COMPLETIONS
         * ---------8<---------8<-------------
         * ...Note that if a completion error occurs, a Work Completion
         * will always be generated, even if the signaling
         * indicator requests an Unsignaled Completion.
         * ---------8<---------8<-------------
         */
        wqe->wr.send_flags |= IB_SEND_SIGNALED;
        __rxe_do_task(&qp->comp.task);
exit:
        rxe_drop_ref(qp);
        return -EAGAIN;
}