linux/drivers/infiniband/sw/rxe/rxe_req.c
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>
#include <crypto/hash.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
                       u32 opcode);

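/* Advance the DMA state of a partially-acknowledged send/write WQE past
 * the npsn packets that have already been acked, so retransmission
 * resumes at the first unacknowledged packet.
 */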
static inline void retry_first_write_send(struct rxe_qp *qp,
                                          struct rxe_send_wqe *wqe,
                                          unsigned int mask, int npsn)
{
        int i;

        for (i = 0; i < npsn; i++) {
                int to_send = (wqe->dma.resid > qp->mtu) ?
                                qp->mtu : wqe->dma.resid;

                qp->req.opcode = next_opcode(qp, wqe,
                                             wqe->wr.opcode);

                if (wqe->wr.send_flags & IB_SEND_INLINE) {
                        wqe->dma.resid -= to_send;
                        wqe->dma.sge_offset += to_send;
                } else {
                        advance_dma_data(&wqe->dma, to_send);
                }
                if (mask & WR_WRITE_MASK)
                        wqe->iova += qp->mtu;
        }
}

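/* Rewind the requester to the completer's PSN and reset every
 * unacknowledged WQE on the send queue so it can be retransmitted.
 */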
static void req_retry(struct rxe_qp *qp)
{
        struct rxe_send_wqe *wqe;
        unsigned int wqe_index;
        unsigned int mask;
        int npsn;
        int first = 1;

        qp->req.wqe_index       = consumer_index(qp->sq.queue);
        qp->req.psn             = qp->comp.psn;
        qp->req.opcode          = -1;

        for (wqe_index = consumer_index(qp->sq.queue);
                wqe_index != producer_index(qp->sq.queue);
                wqe_index = next_index(qp->sq.queue, wqe_index)) {
                wqe = addr_from_index(qp->sq.queue, wqe_index);
                mask = wr_opcode_mask(wqe->wr.opcode, qp);

                if (wqe->state == wqe_state_posted)
                        break;

                if (wqe->state == wqe_state_done)
                        continue;

                wqe->iova = (mask & WR_ATOMIC_MASK) ?
                             wqe->wr.wr.atomic.remote_addr :
                             (mask & WR_READ_OR_WRITE_MASK) ?
                             wqe->wr.wr.rdma.remote_addr :
                             0;

                if (!first || (mask & WR_READ_MASK) == 0) {
                        wqe->dma.resid = wqe->dma.length;
                        wqe->dma.cur_sge = 0;
                        wqe->dma.sge_offset = 0;
                }

                if (first) {
                        first = 0;

                        if (mask & WR_WRITE_OR_SEND_MASK) {
                                npsn = (qp->comp.psn - wqe->first_psn) &
                                        BTH_PSN_MASK;
                                retry_first_write_send(qp, wqe, mask, npsn);
                        }

                        if (mask & WR_READ_MASK) {
                                npsn = (wqe->dma.length - wqe->dma.resid) /
                                        qp->mtu;
                                wqe->iova += npsn * qp->mtu;
                        }
                }

                wqe->state = wqe_state_posted;
        }
}

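/* The RNR NAK backoff period has expired; reschedule the requester task
 * so the send queue is retried.
 */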
void rnr_nak_timer(struct timer_list *t)
{
        struct rxe_qp *qp = from_timer(qp, t, rnr_nak_timer);

        pr_debug("qp#%d rnr nak timer fired\n", qp_num(qp));
        rxe_run_task(&qp->req.task, 1);
}

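/* Return the next send WQE that is ready to be processed, or NULL if the
 * queue is empty, the QP is draining, or a fenced WQE must wait for
 * earlier work requests to complete.
 */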
static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
{
        struct rxe_send_wqe *wqe = queue_head(qp->sq.queue);
        unsigned long flags;

        if (unlikely(qp->req.state == QP_STATE_DRAIN)) {
                /* check to see if we are drained;
                 * state_lock used by requester and completer
                 */
                spin_lock_irqsave(&qp->state_lock, flags);
                do {
                        if (qp->req.state != QP_STATE_DRAIN) {
                                /* comp just finished */
                                spin_unlock_irqrestore(&qp->state_lock,
                                                       flags);
                                break;
                        }

                        if (wqe && ((qp->req.wqe_index !=
                                consumer_index(qp->sq.queue)) ||
                                (wqe->state != wqe_state_posted))) {
                                /* comp not done yet */
                                spin_unlock_irqrestore(&qp->state_lock,
                                                       flags);
                                break;
                        }

                        qp->req.state = QP_STATE_DRAINED;
                        spin_unlock_irqrestore(&qp->state_lock, flags);

                        if (qp->ibqp.event_handler) {
                                struct ib_event ev;

                                ev.device = qp->ibqp.device;
                                ev.element.qp = &qp->ibqp;
                                ev.event = IB_EVENT_SQ_DRAINED;
                                qp->ibqp.event_handler(&ev,
                                        qp->ibqp.qp_context);
                        }
                } while (0);
        }

        if (qp->req.wqe_index == producer_index(qp->sq.queue))
                return NULL;

        wqe = addr_from_index(qp->sq.queue, qp->req.wqe_index);

        if (unlikely((qp->req.state == QP_STATE_DRAIN ||
                      qp->req.state == QP_STATE_DRAINED) &&
                     (wqe->state != wqe_state_processing)))
                return NULL;

        if (unlikely((wqe->wr.send_flags & IB_SEND_FENCE) &&
                     (qp->req.wqe_index != consumer_index(qp->sq.queue)))) {
                qp->req.wait_fence = 1;
                return NULL;
        }

        wqe->mask = wr_opcode_mask(wqe->wr.opcode, qp);
        return wqe;
}

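/* Select the RC wire opcode for the next packet of a work request.
 * Multi-packet messages use the FIRST/MIDDLE/LAST opcodes depending on
 * whether the previous packet belonged to the same message and whether
 * the remaining payload fits in a single MTU; otherwise the ONLY form
 * is used.
 */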
static int next_opcode_rc(struct rxe_qp *qp, u32 opcode, int fits)
{
        switch (opcode) {
        case IB_WR_RDMA_WRITE:
                if (qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_FIRST ||
                    qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_MIDDLE)
                        return fits ?
                                IB_OPCODE_RC_RDMA_WRITE_LAST :
                                IB_OPCODE_RC_RDMA_WRITE_MIDDLE;
                else
                        return fits ?
                                IB_OPCODE_RC_RDMA_WRITE_ONLY :
                                IB_OPCODE_RC_RDMA_WRITE_FIRST;

        case IB_WR_RDMA_WRITE_WITH_IMM:
                if (qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_FIRST ||
                    qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_MIDDLE)
                        return fits ?
                                IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE :
                                IB_OPCODE_RC_RDMA_WRITE_MIDDLE;
                else
                        return fits ?
                                IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE :
                                IB_OPCODE_RC_RDMA_WRITE_FIRST;

        case IB_WR_SEND:
                if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
                    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
                        return fits ?
                                IB_OPCODE_RC_SEND_LAST :
                                IB_OPCODE_RC_SEND_MIDDLE;
                else
                        return fits ?
                                IB_OPCODE_RC_SEND_ONLY :
                                IB_OPCODE_RC_SEND_FIRST;

        case IB_WR_SEND_WITH_IMM:
                if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
                    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
                        return fits ?
                                IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE :
                                IB_OPCODE_RC_SEND_MIDDLE;
                else
                        return fits ?
                                IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE :
                                IB_OPCODE_RC_SEND_FIRST;

        case IB_WR_RDMA_READ:
                return IB_OPCODE_RC_RDMA_READ_REQUEST;

        case IB_WR_ATOMIC_CMP_AND_SWP:
                return IB_OPCODE_RC_COMPARE_SWAP;

        case IB_WR_ATOMIC_FETCH_AND_ADD:
                return IB_OPCODE_RC_FETCH_ADD;

        case IB_WR_SEND_WITH_INV:
                if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
                    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
                        return fits ? IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE :
                                IB_OPCODE_RC_SEND_MIDDLE;
                else
                        return fits ? IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE :
                                IB_OPCODE_RC_SEND_FIRST;
        case IB_WR_REG_MR:
        case IB_WR_LOCAL_INV:
                return opcode;
        }

        return -EINVAL;
}

static int next_opcode_uc(struct rxe_qp *qp, u32 opcode, int fits)
{
        switch (opcode) {
        case IB_WR_RDMA_WRITE:
                if (qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_FIRST ||
                    qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_MIDDLE)
                        return fits ?
                                IB_OPCODE_UC_RDMA_WRITE_LAST :
                                IB_OPCODE_UC_RDMA_WRITE_MIDDLE;
                else
                        return fits ?
                                IB_OPCODE_UC_RDMA_WRITE_ONLY :
                                IB_OPCODE_UC_RDMA_WRITE_FIRST;

        case IB_WR_RDMA_WRITE_WITH_IMM:
                if (qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_FIRST ||
                    qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_MIDDLE)
                        return fits ?
                                IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE :
                                IB_OPCODE_UC_RDMA_WRITE_MIDDLE;
                else
                        return fits ?
                                IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE :
                                IB_OPCODE_UC_RDMA_WRITE_FIRST;

        case IB_WR_SEND:
                if (qp->req.opcode == IB_OPCODE_UC_SEND_FIRST ||
                    qp->req.opcode == IB_OPCODE_UC_SEND_MIDDLE)
                        return fits ?
                                IB_OPCODE_UC_SEND_LAST :
                                IB_OPCODE_UC_SEND_MIDDLE;
                else
                        return fits ?
                                IB_OPCODE_UC_SEND_ONLY :
                                IB_OPCODE_UC_SEND_FIRST;

        case IB_WR_SEND_WITH_IMM:
                if (qp->req.opcode == IB_OPCODE_UC_SEND_FIRST ||
                    qp->req.opcode == IB_OPCODE_UC_SEND_MIDDLE)
                        return fits ?
                                IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE :
                                IB_OPCODE_UC_SEND_MIDDLE;
                else
                        return fits ?
                                IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE :
                                IB_OPCODE_UC_SEND_FIRST;
        }

        return -EINVAL;
}

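/* Pick the IBA opcode of the next outgoing packet based on QP type, the
 * work request opcode and whether the remaining payload fits in one MTU.
 */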
static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
                       u32 opcode)
{
        int fits = (wqe->dma.resid <= qp->mtu);

        switch (qp_type(qp)) {
        case IB_QPT_RC:
                return next_opcode_rc(qp, opcode, fits);

        case IB_QPT_UC:
                return next_opcode_uc(qp, opcode, fits);

        case IB_QPT_SMI:
        case IB_QPT_UD:
        case IB_QPT_GSI:
                switch (opcode) {
                case IB_WR_SEND:
                        return IB_OPCODE_UD_SEND_ONLY;

                case IB_WR_SEND_WITH_IMM:
                        return IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
                }
                break;

        default:
                break;
        }

        return -EINVAL;
}

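/* Reserve one slot of the initiator's outstanding RDMA read/atomic
 * budget for this WQE; return -EAGAIN if the limit is already reached
 * so the requester retries later.
 */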
static inline int check_init_depth(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
        int depth;

        if (wqe->has_rd_atomic)
                return 0;

        qp->req.need_rd_atomic = 1;
        depth = atomic_dec_return(&qp->req.rd_atomic);

        if (depth >= 0) {
                qp->req.need_rd_atomic = 0;
                wqe->has_rd_atomic = 1;
                return 0;
        }

        atomic_inc(&qp->req.rd_atomic);
        return -EAGAIN;
}

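/* Connected QPs use the negotiated path MTU; UD, SMI and GSI QPs are
 * limited by the port's MTU cap.
 */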
static inline int get_mtu(struct rxe_qp *qp)
{
        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

        if ((qp_type(qp) == IB_QPT_RC) || (qp_type(qp) == IB_QPT_UC))
                return qp->mtu;

        return rxe->port.mtu_cap;
}

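/* Allocate an skb for the next request packet and build its BTH plus
 * any optional headers (RETH, IMMDT, IETH, ATMETH, DETH) from the work
 * request.
 */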
static struct sk_buff *init_req_packet(struct rxe_qp *qp,
                                       struct rxe_send_wqe *wqe,
                                       int opcode, int payload,
                                       struct rxe_pkt_info *pkt)
{
        struct rxe_dev          *rxe = to_rdev(qp->ibqp.device);
        struct rxe_port         *port = &rxe->port;
        struct sk_buff          *skb;
        struct rxe_send_wr      *ibwr = &wqe->wr;
        struct rxe_av           *av;
        int                     pad = (-payload) & 0x3;
        int                     paylen;
        int                     solicited;
        u16                     pkey;
        u32                     qp_num;
        int                     ack_req;

        /* length from start of bth to end of icrc */
        paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;

        /* pkt->hdr, rxe, port_num and mask are initialized in ifc
         * layer
         */
        pkt->opcode     = opcode;
        pkt->qp         = qp;
        pkt->psn        = qp->req.psn;
        pkt->mask       = rxe_opcode[opcode].mask;
        pkt->paylen     = paylen;
        pkt->offset     = 0;
        pkt->wqe        = wqe;

        /* init skb */
        av = rxe_get_av(pkt);
        skb = rxe_init_packet(rxe, av, paylen, pkt);
        if (unlikely(!skb))
                return NULL;

        /* init bth */
        solicited = (ibwr->send_flags & IB_SEND_SOLICITED) &&
                        (pkt->mask & RXE_END_MASK) &&
                        ((pkt->mask & (RXE_SEND_MASK)) ||
                        (pkt->mask & (RXE_WRITE_MASK | RXE_IMMDT_MASK)) ==
                        (RXE_WRITE_MASK | RXE_IMMDT_MASK));

        pkey = (qp_type(qp) == IB_QPT_GSI) ?
                 port->pkey_tbl[ibwr->wr.ud.pkey_index] :
                 port->pkey_tbl[qp->attr.pkey_index];

        qp_num = (pkt->mask & RXE_DETH_MASK) ? ibwr->wr.ud.remote_qpn :
                                         qp->attr.dest_qp_num;

        ack_req = ((pkt->mask & RXE_END_MASK) ||
                (qp->req.noack_pkts++ > RXE_MAX_PKT_PER_ACK));
        if (ack_req)
                qp->req.noack_pkts = 0;

        bth_init(pkt, pkt->opcode, solicited, 0, pad, pkey, qp_num,
                 ack_req, pkt->psn);

        /* init optional headers */
        if (pkt->mask & RXE_RETH_MASK) {
                reth_set_rkey(pkt, ibwr->wr.rdma.rkey);
                reth_set_va(pkt, wqe->iova);
                reth_set_len(pkt, wqe->dma.resid);
        }

        if (pkt->mask & RXE_IMMDT_MASK)
                immdt_set_imm(pkt, ibwr->ex.imm_data);

        if (pkt->mask & RXE_IETH_MASK)
                ieth_set_rkey(pkt, ibwr->ex.invalidate_rkey);

        if (pkt->mask & RXE_ATMETH_MASK) {
                atmeth_set_va(pkt, wqe->iova);
                if (opcode == IB_OPCODE_RC_COMPARE_SWAP ||
                    opcode == IB_OPCODE_RD_COMPARE_SWAP) {
                        atmeth_set_swap_add(pkt, ibwr->wr.atomic.swap);
                        atmeth_set_comp(pkt, ibwr->wr.atomic.compare_add);
                } else {
                        atmeth_set_swap_add(pkt, ibwr->wr.atomic.compare_add);
                }
                atmeth_set_rkey(pkt, ibwr->wr.atomic.rkey);
        }

        if (pkt->mask & RXE_DETH_MASK) {
                if (qp->ibqp.qp_num == 1)
                        deth_set_qkey(pkt, GSI_QKEY);
                else
                        deth_set_qkey(pkt, ibwr->wr.ud.remote_qkey);
                deth_set_sqp(pkt, qp->ibqp.qp_num);
        }

        return skb;
}

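/* Copy the payload (inline data or SGE data) into the packet and append
 * the inverted ICRC computed over the headers and payload.
 */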
static int fill_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
                       struct rxe_pkt_info *pkt, struct sk_buff *skb,
                       int paylen)
{
        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
        u32 crc = 0;
        u32 *p;
        int err;

        err = rxe_prepare(pkt, skb, &crc);
        if (err)
                return err;

        if (pkt->mask & RXE_WRITE_OR_SEND) {
                if (wqe->wr.send_flags & IB_SEND_INLINE) {
                        u8 *tmp = &wqe->dma.inline_data[wqe->dma.sge_offset];

                        crc = rxe_crc32(rxe, crc, tmp, paylen);
                        memcpy(payload_addr(pkt), tmp, paylen);

                        wqe->dma.resid -= paylen;
                        wqe->dma.sge_offset += paylen;
                } else {
                        err = copy_data(qp->pd, 0, &wqe->dma,
                                        payload_addr(pkt), paylen,
                                        from_mem_obj,
                                        &crc);
                        if (err)
                                return err;
                }
        }
        p = payload_addr(pkt) + paylen + bth_pad(pkt);

        *p = ~crc;

        return 0;
}

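/* Once the last packet of an RC request has been built the WQE waits in
 * the pending state for its ack; WQEs with packets still to send remain
 * in the processing state.
 */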
static void update_wqe_state(struct rxe_qp *qp,
                struct rxe_send_wqe *wqe,
                struct rxe_pkt_info *pkt)
{
        if (pkt->mask & RXE_END_MASK) {
                if (qp_type(qp) == IB_QPT_RC)
                        wqe->state = wqe_state_pending;
        } else {
                wqe->state = wqe_state_processing;
        }
}

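/* Record the PSN range covered by the WQE on its first packet and
 * advance the requester PSN: reads consume one PSN per expected response
 * packet, everything else consumes one PSN per packet sent.
 */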
static void update_wqe_psn(struct rxe_qp *qp,
                           struct rxe_send_wqe *wqe,
                           struct rxe_pkt_info *pkt,
                           int payload)
{
        /* number of packets left to send including current one */
        int num_pkt = (wqe->dma.resid + payload + qp->mtu - 1) / qp->mtu;

        /* handle zero length packet case */
        if (num_pkt == 0)
                num_pkt = 1;

        if (pkt->mask & RXE_START_MASK) {
                wqe->first_psn = qp->req.psn;
                wqe->last_psn = (qp->req.psn + num_pkt - 1) & BTH_PSN_MASK;
        }

        if (pkt->mask & RXE_READ_MASK)
                qp->req.psn = (wqe->first_psn + num_pkt) & BTH_PSN_MASK;
        else
                qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
}

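/* Save and restore the WQE/PSN state that update_wqe_state() and
 * update_wqe_psn() modify, so a failed transmit can be rolled back.
 */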
static void save_state(struct rxe_send_wqe *wqe,
                       struct rxe_qp *qp,
                       struct rxe_send_wqe *rollback_wqe,
                       u32 *rollback_psn)
{
        rollback_wqe->state     = wqe->state;
        rollback_wqe->first_psn = wqe->first_psn;
        rollback_wqe->last_psn  = wqe->last_psn;
        *rollback_psn           = qp->req.psn;
}

static void rollback_state(struct rxe_send_wqe *wqe,
                           struct rxe_qp *qp,
                           struct rxe_send_wqe *rollback_wqe,
                           u32 rollback_psn)
{
        wqe->state     = rollback_wqe->state;
        wqe->first_psn = rollback_wqe->first_psn;
        wqe->last_psn  = rollback_wqe->last_psn;
        qp->req.psn    = rollback_psn;
}

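/* Commit the per-packet requester state after a successful transmit and
 * (re)arm the retransmit timer.
 */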
static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
                         struct rxe_pkt_info *pkt, int payload)
{
        qp->req.opcode = pkt->opcode;

        if (pkt->mask & RXE_END_MASK)
                qp->req.wqe_index = next_index(qp->sq.queue, qp->req.wqe_index);

        qp->need_req_skb = 0;

        if (qp->qp_timeout_jiffies && !timer_pending(&qp->retrans_timer))
                mod_timer(&qp->retrans_timer,
                          jiffies + qp->qp_timeout_jiffies);
}

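/* Main requester task: pull WQEs off the send queue, build and transmit
 * one packet per pass, and stop when the queue is empty, the QP cannot
 * make progress, or an error completes the WQE.
 */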
int rxe_requester(void *arg)
{
        struct rxe_qp *qp = (struct rxe_qp *)arg;
        struct rxe_pkt_info pkt;
        struct sk_buff *skb;
        struct rxe_send_wqe *wqe;
        enum rxe_hdr_mask mask;
        int payload;
        int mtu;
        int opcode;
        int ret;
        struct rxe_send_wqe rollback_wqe;
        u32 rollback_psn;

        rxe_add_ref(qp);

next_wqe:
        if (unlikely(!qp->valid || qp->req.state == QP_STATE_ERROR))
                goto exit;

        if (unlikely(qp->req.state == QP_STATE_RESET)) {
                qp->req.wqe_index = consumer_index(qp->sq.queue);
                qp->req.opcode = -1;
                qp->req.need_rd_atomic = 0;
                qp->req.wait_psn = 0;
                qp->req.need_retry = 0;
                goto exit;
        }

        if (unlikely(qp->req.need_retry)) {
                req_retry(qp);
                qp->req.need_retry = 0;
        }

        wqe = req_next_wqe(qp);
        if (unlikely(!wqe))
                goto exit;

        if (wqe->mask & WR_REG_MASK) {
                if (wqe->wr.opcode == IB_WR_LOCAL_INV) {
                        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
                        struct rxe_mem *rmr;

                        rmr = rxe_pool_get_index(&rxe->mr_pool,
                                                 wqe->wr.ex.invalidate_rkey >> 8);
                        if (!rmr) {
                                pr_err("No mr for key %#x\n",
                                       wqe->wr.ex.invalidate_rkey);
                                wqe->state = wqe_state_error;
                                wqe->status = IB_WC_MW_BIND_ERR;
                                goto exit;
                        }
                        rmr->state = RXE_MEM_STATE_FREE;
                        rxe_drop_ref(rmr);
                        wqe->state = wqe_state_done;
                        wqe->status = IB_WC_SUCCESS;
                } else if (wqe->wr.opcode == IB_WR_REG_MR) {
                        struct rxe_mem *rmr = to_rmr(wqe->wr.wr.reg.mr);

                        rmr->state = RXE_MEM_STATE_VALID;
                        rmr->access = wqe->wr.wr.reg.access;
                        rmr->lkey = wqe->wr.wr.reg.key;
                        rmr->rkey = wqe->wr.wr.reg.key;
                        rmr->iova = wqe->wr.wr.reg.mr->iova;
                        wqe->state = wqe_state_done;
                        wqe->status = IB_WC_SUCCESS;
                } else {
                        goto exit;
                }
                if ((wqe->wr.send_flags & IB_SEND_SIGNALED) ||
                    qp->sq_sig_type == IB_SIGNAL_ALL_WR)
                        rxe_run_task(&qp->comp.task, 1);
                qp->req.wqe_index = next_index(qp->sq.queue,
                                                qp->req.wqe_index);
                goto next_wqe;
        }

        if (unlikely(qp_type(qp) == IB_QPT_RC &&
                     qp->req.psn > (qp->comp.psn + RXE_MAX_UNACKED_PSNS))) {
                qp->req.wait_psn = 1;
                goto exit;
        }

        /* Limit the number of inflight SKBs per QP */
        if (unlikely(atomic_read(&qp->skb_out) >
                     RXE_INFLIGHT_SKBS_PER_QP_HIGH)) {
                qp->need_req_skb = 1;
                goto exit;
        }

        opcode = next_opcode(qp, wqe, wqe->wr.opcode);
        if (unlikely(opcode < 0)) {
                wqe->status = IB_WC_LOC_QP_OP_ERR;
                goto exit;
        }

        mask = rxe_opcode[opcode].mask;
        if (unlikely(mask & RXE_READ_OR_ATOMIC)) {
                if (check_init_depth(qp, wqe))
                        goto exit;
        }

        mtu = get_mtu(qp);
        payload = (mask & RXE_WRITE_OR_SEND) ? wqe->dma.resid : 0;
        if (payload > mtu) {
                if (qp_type(qp) == IB_QPT_UD) {
                        /* C10-93.1.1: If the total sum of all the buffer lengths specified for a
                         * UD message exceeds the MTU of the port as returned by QueryHCA, the CI
                         * shall not emit any packets for this message. Further, the CI shall not
                         * generate an error due to this condition.
                         */

                        /* fake a successful UD send */
                        wqe->first_psn = qp->req.psn;
                        wqe->last_psn = qp->req.psn;
                        qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
                        qp->req.opcode = IB_OPCODE_UD_SEND_ONLY;
                        qp->req.wqe_index = next_index(qp->sq.queue,
                                                       qp->req.wqe_index);
                        wqe->state = wqe_state_done;
                        wqe->status = IB_WC_SUCCESS;
                        __rxe_do_task(&qp->comp.task);
                        rxe_drop_ref(qp);
                        return 0;
                }
                payload = mtu;
        }

        skb = init_req_packet(qp, wqe, opcode, payload, &pkt);
        if (unlikely(!skb)) {
                pr_err("qp#%d Failed allocating skb\n", qp_num(qp));
                goto err;
        }

        if (fill_packet(qp, wqe, &pkt, skb, payload)) {
                pr_debug("qp#%d Error during fill packet\n", qp_num(qp));
                kfree_skb(skb);
                goto err;
        }

        /*
         * To prevent a race on wqe access between requester and completer,
         * wqe members state and psn need to be set before calling
         * rxe_xmit_packet().
         * Otherwise, completer might initiate an unjustified retry flow.
         */
        save_state(wqe, qp, &rollback_wqe, &rollback_psn);
        update_wqe_state(qp, wqe, &pkt);
        update_wqe_psn(qp, wqe, &pkt, payload);
        ret = rxe_xmit_packet(qp, &pkt, skb);
        if (ret) {
                qp->need_req_skb = 1;

                rollback_state(wqe, qp, &rollback_wqe, rollback_psn);

                if (ret == -EAGAIN) {
                        rxe_run_task(&qp->req.task, 1);
                        goto exit;
                }

                goto err;
        }

        update_state(qp, wqe, &pkt, payload);

        goto next_wqe;

err:
        wqe->status = IB_WC_LOC_PROT_ERR;
        wqe->state = wqe_state_error;
        __rxe_do_task(&qp->comp.task);

exit:
        rxe_drop_ref(qp);
        return -EAGAIN;
}