linux/drivers/infiniband/sw/rxe/rxe_req.c
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/skbuff.h>
#include <crypto/hash.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
                       u32 opcode);

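/* if a partially completed send or write WQE is being retried, skip
 * over the npsn packets that were already acknowledged and advance the
 * dma state so only the remaining payload gets resent
 */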
static inline void retry_first_write_send(struct rxe_qp *qp,
                                          struct rxe_send_wqe *wqe,
                                          unsigned int mask, int npsn)
{
        int i;

        for (i = 0; i < npsn; i++) {
                int to_send = (wqe->dma.resid > qp->mtu) ?
                                qp->mtu : wqe->dma.resid;

                qp->req.opcode = next_opcode(qp, wqe,
                                             wqe->wr.opcode);

                if (wqe->wr.send_flags & IB_SEND_INLINE) {
                        wqe->dma.resid -= to_send;
                        wqe->dma.sge_offset += to_send;
                } else {
                        advance_dma_data(&wqe->dma, to_send);
                }
                if (mask & WR_WRITE_MASK)
                        wqe->iova += qp->mtu;
        }
}

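/* rewind the requester to the first unacknowledged WQE and reset the
 * request PSN to the completer PSN so the lost packets are resent;
 * the oldest WQE may already be partially acknowledged and is handled
 * by retry_first_write_send()
 */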
static void req_retry(struct rxe_qp *qp)
{
        struct rxe_send_wqe *wqe;
        unsigned int wqe_index;
        unsigned int mask;
        int npsn;
        int first = 1;

        qp->req.wqe_index       = consumer_index(qp->sq.queue);
        qp->req.psn             = qp->comp.psn;
        qp->req.opcode          = -1;

        for (wqe_index = consumer_index(qp->sq.queue);
                wqe_index != producer_index(qp->sq.queue);
                wqe_index = next_index(qp->sq.queue, wqe_index)) {
                wqe = addr_from_index(qp->sq.queue, wqe_index);
                mask = wr_opcode_mask(wqe->wr.opcode, qp);

                if (wqe->state == wqe_state_posted)
                        break;

                if (wqe->state == wqe_state_done)
                        continue;

                wqe->iova = (mask & WR_ATOMIC_MASK) ?
                             wqe->wr.wr.atomic.remote_addr :
                             (mask & WR_READ_OR_WRITE_MASK) ?
                             wqe->wr.wr.rdma.remote_addr :
                             0;

                if (!first || (mask & WR_READ_MASK) == 0) {
                        wqe->dma.resid = wqe->dma.length;
                        wqe->dma.cur_sge = 0;
                        wqe->dma.sge_offset = 0;
                }

                if (first) {
                        first = 0;

                        if (mask & WR_WRITE_OR_SEND_MASK) {
                                npsn = (qp->comp.psn - wqe->first_psn) &
                                        BTH_PSN_MASK;
                                retry_first_write_send(qp, wqe, mask, npsn);
                        }

                        if (mask & WR_READ_MASK) {
                                npsn = (wqe->dma.length - wqe->dma.resid) /
                                        qp->mtu;
                                wqe->iova += npsn * qp->mtu;
                        }
                }

                wqe->state = wqe_state_posted;
        }
}

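/* the RNR NAK timer has expired; kick the requester task so it can
 * retry the send queue
 */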
void rnr_nak_timer(struct timer_list *t)
{
        struct rxe_qp *qp = from_timer(qp, t, rnr_nak_timer);

        pr_debug("qp#%d rnr nak timer fired\n", qp_num(qp));
        rxe_run_task(&qp->req.task, 1);
}

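/* return the next WQE for the requester to work on, or NULL if there
 * is nothing to do; also completes the SQ drain transition and honors
 * the fence flag
 */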
static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
{
        struct rxe_send_wqe *wqe = queue_head(qp->sq.queue);
        unsigned long flags;

        if (unlikely(qp->req.state == QP_STATE_DRAIN)) {
                /* check to see if we are drained;
                 * state_lock used by requester and completer
                 */
                spin_lock_irqsave(&qp->state_lock, flags);
                do {
                        if (qp->req.state != QP_STATE_DRAIN) {
                                /* comp just finished */
                                spin_unlock_irqrestore(&qp->state_lock,
                                                       flags);
                                break;
                        }

                        if (wqe && ((qp->req.wqe_index !=
                                consumer_index(qp->sq.queue)) ||
                                (wqe->state != wqe_state_posted))) {
                                /* comp not done yet */
                                spin_unlock_irqrestore(&qp->state_lock,
                                                       flags);
                                break;
                        }

                        qp->req.state = QP_STATE_DRAINED;
                        spin_unlock_irqrestore(&qp->state_lock, flags);

                        if (qp->ibqp.event_handler) {
                                struct ib_event ev;

                                ev.device = qp->ibqp.device;
                                ev.element.qp = &qp->ibqp;
                                ev.event = IB_EVENT_SQ_DRAINED;
                                qp->ibqp.event_handler(&ev,
                                        qp->ibqp.qp_context);
                        }
                } while (0);
        }

        if (qp->req.wqe_index == producer_index(qp->sq.queue))
                return NULL;

        wqe = addr_from_index(qp->sq.queue, qp->req.wqe_index);

        if (unlikely((qp->req.state == QP_STATE_DRAIN ||
                      qp->req.state == QP_STATE_DRAINED) &&
                     (wqe->state != wqe_state_processing)))
                return NULL;

        if (unlikely((wqe->wr.send_flags & IB_SEND_FENCE) &&
                     (qp->req.wqe_index != consumer_index(qp->sq.queue)))) {
                qp->req.wait_fence = 1;
                return NULL;
        }

        wqe->mask = wr_opcode_mask(wqe->wr.opcode, qp);
        return wqe;
}

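/* pick the RC opcode for the next packet of a WQE from the work
 * request opcode, the opcode of the previous packet and whether the
 * remaining payload fits in one packet
 */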
static int next_opcode_rc(struct rxe_qp *qp, u32 opcode, int fits)
{
        switch (opcode) {
        case IB_WR_RDMA_WRITE:
                if (qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_FIRST ||
                    qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_MIDDLE)
                        return fits ?
                                IB_OPCODE_RC_RDMA_WRITE_LAST :
                                IB_OPCODE_RC_RDMA_WRITE_MIDDLE;
                else
                        return fits ?
                                IB_OPCODE_RC_RDMA_WRITE_ONLY :
                                IB_OPCODE_RC_RDMA_WRITE_FIRST;

        case IB_WR_RDMA_WRITE_WITH_IMM:
                if (qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_FIRST ||
                    qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_MIDDLE)
                        return fits ?
                                IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE :
                                IB_OPCODE_RC_RDMA_WRITE_MIDDLE;
                else
                        return fits ?
                                IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE :
                                IB_OPCODE_RC_RDMA_WRITE_FIRST;

        case IB_WR_SEND:
                if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
                    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
                        return fits ?
                                IB_OPCODE_RC_SEND_LAST :
                                IB_OPCODE_RC_SEND_MIDDLE;
                else
                        return fits ?
                                IB_OPCODE_RC_SEND_ONLY :
                                IB_OPCODE_RC_SEND_FIRST;

        case IB_WR_SEND_WITH_IMM:
                if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
                    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
                        return fits ?
                                IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE :
                                IB_OPCODE_RC_SEND_MIDDLE;
                else
                        return fits ?
                                IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE :
                                IB_OPCODE_RC_SEND_FIRST;

        case IB_WR_RDMA_READ:
                return IB_OPCODE_RC_RDMA_READ_REQUEST;

        case IB_WR_ATOMIC_CMP_AND_SWP:
                return IB_OPCODE_RC_COMPARE_SWAP;

        case IB_WR_ATOMIC_FETCH_AND_ADD:
                return IB_OPCODE_RC_FETCH_ADD;

        case IB_WR_SEND_WITH_INV:
                if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
                    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
                        return fits ? IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE :
                                IB_OPCODE_RC_SEND_MIDDLE;
                else
                        return fits ? IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE :
                                IB_OPCODE_RC_SEND_FIRST;
        case IB_WR_REG_MR:
        case IB_WR_LOCAL_INV:
                return opcode;
        }

        return -EINVAL;
}

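/* same as next_opcode_rc() but for the smaller set of work request
 * opcodes supported on UC QPs
 */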
static int next_opcode_uc(struct rxe_qp *qp, u32 opcode, int fits)
{
        switch (opcode) {
        case IB_WR_RDMA_WRITE:
                if (qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_FIRST ||
                    qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_MIDDLE)
                        return fits ?
                                IB_OPCODE_UC_RDMA_WRITE_LAST :
                                IB_OPCODE_UC_RDMA_WRITE_MIDDLE;
                else
                        return fits ?
                                IB_OPCODE_UC_RDMA_WRITE_ONLY :
                                IB_OPCODE_UC_RDMA_WRITE_FIRST;

        case IB_WR_RDMA_WRITE_WITH_IMM:
                if (qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_FIRST ||
                    qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_MIDDLE)
                        return fits ?
                                IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE :
                                IB_OPCODE_UC_RDMA_WRITE_MIDDLE;
                else
                        return fits ?
                                IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE :
                                IB_OPCODE_UC_RDMA_WRITE_FIRST;

        case IB_WR_SEND:
                if (qp->req.opcode == IB_OPCODE_UC_SEND_FIRST ||
                    qp->req.opcode == IB_OPCODE_UC_SEND_MIDDLE)
                        return fits ?
                                IB_OPCODE_UC_SEND_LAST :
                                IB_OPCODE_UC_SEND_MIDDLE;
                else
                        return fits ?
                                IB_OPCODE_UC_SEND_ONLY :
                                IB_OPCODE_UC_SEND_FIRST;

        case IB_WR_SEND_WITH_IMM:
                if (qp->req.opcode == IB_OPCODE_UC_SEND_FIRST ||
                    qp->req.opcode == IB_OPCODE_UC_SEND_MIDDLE)
                        return fits ?
                                IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE :
                                IB_OPCODE_UC_SEND_MIDDLE;
                else
                        return fits ?
                                IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE :
                                IB_OPCODE_UC_SEND_FIRST;
        }

        return -EINVAL;
}

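/* select the opcode of the next packet by QP type; UD, SMI and GSI
 * sends are always a single packet
 */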
static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
                       u32 opcode)
{
        int fits = (wqe->dma.resid <= qp->mtu);

        switch (qp_type(qp)) {
        case IB_QPT_RC:
                return next_opcode_rc(qp, opcode, fits);

        case IB_QPT_UC:
                return next_opcode_uc(qp, opcode, fits);

        case IB_QPT_SMI:
        case IB_QPT_UD:
        case IB_QPT_GSI:
                switch (opcode) {
                case IB_WR_SEND:
                        return IB_OPCODE_UD_SEND_ONLY;

                case IB_WR_SEND_WITH_IMM:
                        return IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
                }
                break;

        default:
                break;
        }

        return -EINVAL;
}

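/* reserve one of the initiator side rd_atomic slots for a read or
 * atomic WQE; returns -EAGAIN when none are free so the requester
 * waits until a response releases one
 */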
static inline int check_init_depth(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
        int depth;

        if (wqe->has_rd_atomic)
                return 0;

        qp->req.need_rd_atomic = 1;
        depth = atomic_dec_return(&qp->req.rd_atomic);

        if (depth >= 0) {
                qp->req.need_rd_atomic = 0;
                wqe->has_rd_atomic = 1;
                return 0;
        }

        atomic_inc(&qp->req.rd_atomic);
        return -EAGAIN;
}

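/* connected QPs use the negotiated path MTU; datagram QPs are limited
 * by the port MTU cap
 */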
static inline int get_mtu(struct rxe_qp *qp)
{
        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

        if ((qp_type(qp) == IB_QPT_RC) || (qp_type(qp) == IB_QPT_UC))
                return qp->mtu;

        return rxe->port.mtu_cap;
}

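/* allocate the skb for the next request packet and build the BTH plus
 * whatever optional headers (RETH, IMMDT, IETH, ATMETH, DETH) the
 * opcode requires from the WQE
 */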
static struct sk_buff *init_req_packet(struct rxe_qp *qp,
                                       struct rxe_send_wqe *wqe,
                                       int opcode, int payload,
                                       struct rxe_pkt_info *pkt)
{
        struct rxe_dev          *rxe = to_rdev(qp->ibqp.device);
        struct sk_buff          *skb;
        struct rxe_send_wr      *ibwr = &wqe->wr;
        struct rxe_av           *av;
        int                     pad = (-payload) & 0x3;
        int                     paylen;
        int                     solicited;
        u16                     pkey;
        u32                     qp_num;
        int                     ack_req;

        /* length from start of bth to end of icrc */
        paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;

        /* pkt->hdr, rxe, port_num and mask are initialized in ifc
         * layer
         */
        pkt->opcode     = opcode;
        pkt->qp         = qp;
        pkt->psn        = qp->req.psn;
        pkt->mask       = rxe_opcode[opcode].mask;
        pkt->paylen     = paylen;
        pkt->wqe        = wqe;

        /* init skb */
        av = rxe_get_av(pkt);
        skb = rxe_init_packet(rxe, av, paylen, pkt);
        if (unlikely(!skb))
                return NULL;

        /* init bth */
        solicited = (ibwr->send_flags & IB_SEND_SOLICITED) &&
                        (pkt->mask & RXE_END_MASK) &&
                        ((pkt->mask & (RXE_SEND_MASK)) ||
                        (pkt->mask & (RXE_WRITE_MASK | RXE_IMMDT_MASK)) ==
                        (RXE_WRITE_MASK | RXE_IMMDT_MASK));

        pkey = IB_DEFAULT_PKEY_FULL;

        qp_num = (pkt->mask & RXE_DETH_MASK) ? ibwr->wr.ud.remote_qpn :
                                         qp->attr.dest_qp_num;

        ack_req = ((pkt->mask & RXE_END_MASK) ||
                (qp->req.noack_pkts++ > RXE_MAX_PKT_PER_ACK));
        if (ack_req)
                qp->req.noack_pkts = 0;

        bth_init(pkt, pkt->opcode, solicited, 0, pad, pkey, qp_num,
                 ack_req, pkt->psn);

        /* init optional headers */
        if (pkt->mask & RXE_RETH_MASK) {
                reth_set_rkey(pkt, ibwr->wr.rdma.rkey);
                reth_set_va(pkt, wqe->iova);
                reth_set_len(pkt, wqe->dma.resid);
        }

        if (pkt->mask & RXE_IMMDT_MASK)
                immdt_set_imm(pkt, ibwr->ex.imm_data);

        if (pkt->mask & RXE_IETH_MASK)
                ieth_set_rkey(pkt, ibwr->ex.invalidate_rkey);

        if (pkt->mask & RXE_ATMETH_MASK) {
                atmeth_set_va(pkt, wqe->iova);
                if (opcode == IB_OPCODE_RC_COMPARE_SWAP ||
                    opcode == IB_OPCODE_RD_COMPARE_SWAP) {
                        atmeth_set_swap_add(pkt, ibwr->wr.atomic.swap);
                        atmeth_set_comp(pkt, ibwr->wr.atomic.compare_add);
                } else {
                        atmeth_set_swap_add(pkt, ibwr->wr.atomic.compare_add);
                }
                atmeth_set_rkey(pkt, ibwr->wr.atomic.rkey);
        }

        if (pkt->mask & RXE_DETH_MASK) {
                if (qp->ibqp.qp_num == 1)
                        deth_set_qkey(pkt, GSI_QKEY);
                else
                        deth_set_qkey(pkt, ibwr->wr.ud.remote_qkey);
                deth_set_sqp(pkt, qp->ibqp.qp_num);
        }

        return skb;
}

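/* copy this packet's payload into the skb, either from the inline data
 * or through the dma state, zero the pad bytes and append the inverted
 * CRC that forms the ICRC
 */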
static int fill_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
                       struct rxe_pkt_info *pkt, struct sk_buff *skb,
                       int paylen)
{
        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
        u32 crc = 0;
        u32 *p;
        int err;

        err = rxe_prepare(pkt, skb, &crc);
        if (err)
                return err;

        if (pkt->mask & RXE_WRITE_OR_SEND) {
                if (wqe->wr.send_flags & IB_SEND_INLINE) {
                        u8 *tmp = &wqe->dma.inline_data[wqe->dma.sge_offset];

                        crc = rxe_crc32(rxe, crc, tmp, paylen);
                        memcpy(payload_addr(pkt), tmp, paylen);

                        wqe->dma.resid -= paylen;
                        wqe->dma.sge_offset += paylen;
                } else {
                        err = copy_data(qp->pd, 0, &wqe->dma,
                                        payload_addr(pkt), paylen,
                                        from_mem_obj,
                                        &crc);
                        if (err)
                                return err;
                }
                if (bth_pad(pkt)) {
                        u8 *pad = payload_addr(pkt) + paylen;

                        memset(pad, 0, bth_pad(pkt));
                        crc = rxe_crc32(rxe, crc, pad, bth_pad(pkt));
                }
        }
        p = payload_addr(pkt) + paylen + bth_pad(pkt);

        *p = ~crc;

        return 0;
}

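/* an RC WQE whose last packet just went out waits in the pending state
 * for acknowledgements; a WQE still mid-message stays in processing
 */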
static void update_wqe_state(struct rxe_qp *qp,
                struct rxe_send_wqe *wqe,
                struct rxe_pkt_info *pkt)
{
        if (pkt->mask & RXE_END_MASK) {
                if (qp_type(qp) == IB_QPT_RC)
                        wqe->state = wqe_state_pending;
        } else {
                wqe->state = wqe_state_processing;
        }
}

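/* record the PSN range covered by a WQE when its first packet is built
 * and advance the request PSN; a read request consumes the PSNs of all
 * of its expected response packets
 */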
static void update_wqe_psn(struct rxe_qp *qp,
                           struct rxe_send_wqe *wqe,
                           struct rxe_pkt_info *pkt,
                           int payload)
{
        /* number of packets left to send including current one */
        int num_pkt = (wqe->dma.resid + payload + qp->mtu - 1) / qp->mtu;

        /* handle zero length packet case */
        if (num_pkt == 0)
                num_pkt = 1;

        if (pkt->mask & RXE_START_MASK) {
                wqe->first_psn = qp->req.psn;
                wqe->last_psn = (qp->req.psn + num_pkt - 1) & BTH_PSN_MASK;
        }

        if (pkt->mask & RXE_READ_MASK)
                qp->req.psn = (wqe->first_psn + num_pkt) & BTH_PSN_MASK;
        else
                qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
}

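/* save_state() and rollback_state() snapshot and restore the WQE state
 * and request PSN so a failed transmit can be undone and retried
 */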
static void save_state(struct rxe_send_wqe *wqe,
                       struct rxe_qp *qp,
                       struct rxe_send_wqe *rollback_wqe,
                       u32 *rollback_psn)
{
        rollback_wqe->state     = wqe->state;
        rollback_wqe->first_psn = wqe->first_psn;
        rollback_wqe->last_psn  = wqe->last_psn;
        *rollback_psn           = qp->req.psn;
}

static void rollback_state(struct rxe_send_wqe *wqe,
                           struct rxe_qp *qp,
                           struct rxe_send_wqe *rollback_wqe,
                           u32 rollback_psn)
{
        wqe->state     = rollback_wqe->state;
        wqe->first_psn = rollback_wqe->first_psn;
        wqe->last_psn  = rollback_wqe->last_psn;
        qp->req.psn    = rollback_psn;
}

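/* advance the requester bookkeeping once a packet has been handed to
 * the transmit path and (re)arm the retransmit timer
 */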
static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
                         struct rxe_pkt_info *pkt, int payload)
{
        qp->req.opcode = pkt->opcode;

        if (pkt->mask & RXE_END_MASK)
                qp->req.wqe_index = next_index(qp->sq.queue, qp->req.wqe_index);

        qp->need_req_skb = 0;

        if (qp->qp_timeout_jiffies && !timer_pending(&qp->retrans_timer))
                mod_timer(&qp->retrans_timer,
                          jiffies + qp->qp_timeout_jiffies);
}

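/* the requester task: pull WQEs off the send queue one packet at a
 * time, build and transmit request packets and update QP state until
 * there is no more work or a resource runs out
 */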
int rxe_requester(void *arg)
{
        struct rxe_qp *qp = (struct rxe_qp *)arg;
        struct rxe_pkt_info pkt;
        struct sk_buff *skb;
        struct rxe_send_wqe *wqe;
        enum rxe_hdr_mask mask;
        int payload;
        int mtu;
        int opcode;
        int ret;
        struct rxe_send_wqe rollback_wqe;
        u32 rollback_psn;

        rxe_add_ref(qp);

next_wqe:
        if (unlikely(!qp->valid || qp->req.state == QP_STATE_ERROR))
                goto exit;

        if (unlikely(qp->req.state == QP_STATE_RESET)) {
                qp->req.wqe_index = consumer_index(qp->sq.queue);
                qp->req.opcode = -1;
                qp->req.need_rd_atomic = 0;
                qp->req.wait_psn = 0;
                qp->req.need_retry = 0;
                goto exit;
        }

        if (unlikely(qp->req.need_retry)) {
                req_retry(qp);
                qp->req.need_retry = 0;
        }

        wqe = req_next_wqe(qp);
        if (unlikely(!wqe))
                goto exit;

        if (wqe->mask & WR_REG_MASK) {
                if (wqe->wr.opcode == IB_WR_LOCAL_INV) {
                        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
                        struct rxe_mem *rmr;

                        rmr = rxe_pool_get_index(&rxe->mr_pool,
                                                 wqe->wr.ex.invalidate_rkey >> 8);
                        if (!rmr) {
                                pr_err("No mr for key %#x\n",
                                       wqe->wr.ex.invalidate_rkey);
                                wqe->state = wqe_state_error;
                                wqe->status = IB_WC_MW_BIND_ERR;
                                goto exit;
                        }
                        rmr->state = RXE_MEM_STATE_FREE;
                        rxe_drop_ref(rmr);
                        wqe->state = wqe_state_done;
                        wqe->status = IB_WC_SUCCESS;
                } else if (wqe->wr.opcode == IB_WR_REG_MR) {
                        struct rxe_mem *rmr = to_rmr(wqe->wr.wr.reg.mr);

                        rmr->state = RXE_MEM_STATE_VALID;
                        rmr->access = wqe->wr.wr.reg.access;
                        rmr->ibmr.lkey = wqe->wr.wr.reg.key;
                        rmr->ibmr.rkey = wqe->wr.wr.reg.key;
                        rmr->iova = wqe->wr.wr.reg.mr->iova;
                        wqe->state = wqe_state_done;
                        wqe->status = IB_WC_SUCCESS;
                } else {
                        goto exit;
                }
                if ((wqe->wr.send_flags & IB_SEND_SIGNALED) ||
                    qp->sq_sig_type == IB_SIGNAL_ALL_WR)
                        rxe_run_task(&qp->comp.task, 1);
                qp->req.wqe_index = next_index(qp->sq.queue,
                                                qp->req.wqe_index);
                goto next_wqe;
        }

        if (unlikely(qp_type(qp) == IB_QPT_RC &&
                psn_compare(qp->req.psn, (qp->comp.psn +
                                RXE_MAX_UNACKED_PSNS)) > 0)) {
                qp->req.wait_psn = 1;
                goto exit;
        }

        /* Limit the number of inflight SKBs per QP */
        if (unlikely(atomic_read(&qp->skb_out) >
                     RXE_INFLIGHT_SKBS_PER_QP_HIGH)) {
                qp->need_req_skb = 1;
                goto exit;
        }

        opcode = next_opcode(qp, wqe, wqe->wr.opcode);
        if (unlikely(opcode < 0)) {
                wqe->status = IB_WC_LOC_QP_OP_ERR;
                goto exit;
        }

        mask = rxe_opcode[opcode].mask;
        if (unlikely(mask & RXE_READ_OR_ATOMIC)) {
                if (check_init_depth(qp, wqe))
                        goto exit;
        }

        mtu = get_mtu(qp);
        payload = (mask & RXE_WRITE_OR_SEND) ? wqe->dma.resid : 0;
        if (payload > mtu) {
                if (qp_type(qp) == IB_QPT_UD) {
                        /* C10-93.1.1: If the total sum of all the buffer lengths specified for a
                         * UD message exceeds the MTU of the port as returned by QueryHCA, the CI
                         * shall not emit any packets for this message. Further, the CI shall not
                         * generate an error due to this condition.
                         */

                        /* fake a successful UD send */
                        wqe->first_psn = qp->req.psn;
                        wqe->last_psn = qp->req.psn;
                        qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
                        qp->req.opcode = IB_OPCODE_UD_SEND_ONLY;
                        qp->req.wqe_index = next_index(qp->sq.queue,
                                                       qp->req.wqe_index);
                        wqe->state = wqe_state_done;
                        wqe->status = IB_WC_SUCCESS;
                        __rxe_do_task(&qp->comp.task);
                        rxe_drop_ref(qp);
                        return 0;
                }
                payload = mtu;
        }

        skb = init_req_packet(qp, wqe, opcode, payload, &pkt);
        if (unlikely(!skb)) {
                pr_err("qp#%d Failed allocating skb\n", qp_num(qp));
                goto err;
        }

        if (fill_packet(qp, wqe, &pkt, skb, payload)) {
                pr_debug("qp#%d Error during fill packet\n", qp_num(qp));
                kfree_skb(skb);
                goto err;
        }

        /*
         * To prevent a race on wqe access between requester and completer,
         * wqe members state and psn need to be set before calling
         * rxe_xmit_packet().
         * Otherwise, completer might initiate an unjustified retry flow.
         */
        save_state(wqe, qp, &rollback_wqe, &rollback_psn);
        update_wqe_state(qp, wqe, &pkt);
        update_wqe_psn(qp, wqe, &pkt, payload);
        ret = rxe_xmit_packet(qp, &pkt, skb);
        if (ret) {
                qp->need_req_skb = 1;

                rollback_state(wqe, qp, &rollback_wqe, rollback_psn);

                if (ret == -EAGAIN) {
                        rxe_run_task(&qp->req.task, 1);
                        goto exit;
                }

                goto err;
        }

        update_state(qp, wqe, &pkt, payload);

        goto next_wqe;

err:
        wqe->status = IB_WC_LOC_PROT_ERR;
        wqe->state = wqe_state_error;
        __rxe_do_task(&qp->comp.task);

exit:
        rxe_drop_ref(qp);
        return -EAGAIN;
}