linux/drivers/infiniband/sw/rxe/rxe_resp.c
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

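/* States of the responder state machine. rxe_responder() below steps each
 * inbound request packet through these states; the RESPST_ERR_* states
 * translate protocol violations into NAKs and completion errors.
 */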
enum resp_states {
        RESPST_NONE,
        RESPST_GET_REQ,
        RESPST_CHK_PSN,
        RESPST_CHK_OP_SEQ,
        RESPST_CHK_OP_VALID,
        RESPST_CHK_RESOURCE,
        RESPST_CHK_LENGTH,
        RESPST_CHK_RKEY,
        RESPST_EXECUTE,
        RESPST_READ_REPLY,
        RESPST_COMPLETE,
        RESPST_ACKNOWLEDGE,
        RESPST_CLEANUP,
        RESPST_DUPLICATE_REQUEST,
        RESPST_ERR_MALFORMED_WQE,
        RESPST_ERR_UNSUPPORTED_OPCODE,
        RESPST_ERR_MISALIGNED_ATOMIC,
        RESPST_ERR_PSN_OUT_OF_SEQ,
        RESPST_ERR_MISSING_OPCODE_FIRST,
        RESPST_ERR_MISSING_OPCODE_LAST_C,
        RESPST_ERR_MISSING_OPCODE_LAST_D1E,
        RESPST_ERR_TOO_MANY_RDMA_ATM_REQ,
        RESPST_ERR_RNR,
        RESPST_ERR_RKEY_VIOLATION,
        RESPST_ERR_LENGTH,
        RESPST_ERR_CQ_OVERFLOW,
        RESPST_ERROR,
        RESPST_RESET,
        RESPST_DONE,
        RESPST_EXIT,
};

static char *resp_state_name[] = {
        [RESPST_NONE]                           = "NONE",
        [RESPST_GET_REQ]                        = "GET_REQ",
        [RESPST_CHK_PSN]                        = "CHK_PSN",
        [RESPST_CHK_OP_SEQ]                     = "CHK_OP_SEQ",
        [RESPST_CHK_OP_VALID]                   = "CHK_OP_VALID",
        [RESPST_CHK_RESOURCE]                   = "CHK_RESOURCE",
        [RESPST_CHK_LENGTH]                     = "CHK_LENGTH",
        [RESPST_CHK_RKEY]                       = "CHK_RKEY",
        [RESPST_EXECUTE]                        = "EXECUTE",
        [RESPST_READ_REPLY]                     = "READ_REPLY",
        [RESPST_COMPLETE]                       = "COMPLETE",
        [RESPST_ACKNOWLEDGE]                    = "ACKNOWLEDGE",
        [RESPST_CLEANUP]                        = "CLEANUP",
        [RESPST_DUPLICATE_REQUEST]              = "DUPLICATE_REQUEST",
        [RESPST_ERR_MALFORMED_WQE]              = "ERR_MALFORMED_WQE",
        [RESPST_ERR_UNSUPPORTED_OPCODE]         = "ERR_UNSUPPORTED_OPCODE",
        [RESPST_ERR_MISALIGNED_ATOMIC]          = "ERR_MISALIGNED_ATOMIC",
        [RESPST_ERR_PSN_OUT_OF_SEQ]             = "ERR_PSN_OUT_OF_SEQ",
        [RESPST_ERR_MISSING_OPCODE_FIRST]       = "ERR_MISSING_OPCODE_FIRST",
        [RESPST_ERR_MISSING_OPCODE_LAST_C]      = "ERR_MISSING_OPCODE_LAST_C",
        [RESPST_ERR_MISSING_OPCODE_LAST_D1E]    = "ERR_MISSING_OPCODE_LAST_D1E",
        [RESPST_ERR_TOO_MANY_RDMA_ATM_REQ]      = "ERR_TOO_MANY_RDMA_ATM_REQ",
        [RESPST_ERR_RNR]                        = "ERR_RNR",
        [RESPST_ERR_RKEY_VIOLATION]             = "ERR_RKEY_VIOLATION",
        [RESPST_ERR_LENGTH]                     = "ERR_LENGTH",
        [RESPST_ERR_CQ_OVERFLOW]                = "ERR_CQ_OVERFLOW",
        [RESPST_ERROR]                          = "ERROR",
        [RESPST_RESET]                          = "RESET",
        [RESPST_DONE]                           = "DONE",
        [RESPST_EXIT]                           = "EXIT",
};

/* rxe_recv calls here to add a request packet to the input queue */
void rxe_resp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
{
        int must_sched;
        struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);

        skb_queue_tail(&qp->req_pkts, skb);

        must_sched = (pkt->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST) ||
                        (skb_queue_len(&qp->req_pkts) > 1);

        rxe_run_task(&qp->resp.task, must_sched);
}

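/* Peek at the next queued request packet. If the QP has moved to the error
 * state, drop all queued packets and go flush the recv queue instead. A
 * non-NULL qp->resp.res means an RDMA read reply is in progress and is
 * resumed first.
 */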
static inline enum resp_states get_req(struct rxe_qp *qp,
                                       struct rxe_pkt_info **pkt_p)
{
        struct sk_buff *skb;

        if (qp->resp.state == QP_STATE_ERROR) {
                while ((skb = skb_dequeue(&qp->req_pkts))) {
                        rxe_drop_ref(qp);
                        kfree_skb(skb);
                }

                /* go drain recv wr queue */
                return RESPST_CHK_RESOURCE;
        }

        skb = skb_peek(&qp->req_pkts);
        if (!skb)
                return RESPST_EXIT;

        *pkt_p = SKB_TO_PKT(skb);

        return (qp->resp.res) ? RESPST_READ_REPLY : RESPST_CHK_PSN;
}

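/* Check the packet PSN against the PSN the responder expects. For RC, a PSN
 * from the future is NAKed once and a PSN from the past is handled as a
 * duplicate; for UC, an out-of-sequence packet drops the current message
 * until the start of a new one arrives.
 */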
static enum resp_states check_psn(struct rxe_qp *qp,
                                  struct rxe_pkt_info *pkt)
{
        int diff = psn_compare(pkt->psn, qp->resp.psn);
        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

        switch (qp_type(qp)) {
        case IB_QPT_RC:
                if (diff > 0) {
                        if (qp->resp.sent_psn_nak)
                                return RESPST_CLEANUP;

                        qp->resp.sent_psn_nak = 1;
                        rxe_counter_inc(rxe, RXE_CNT_OUT_OF_SEQ_REQ);
                        return RESPST_ERR_PSN_OUT_OF_SEQ;

                } else if (diff < 0) {
                        rxe_counter_inc(rxe, RXE_CNT_DUP_REQ);
                        return RESPST_DUPLICATE_REQUEST;
                }

                if (qp->resp.sent_psn_nak)
                        qp->resp.sent_psn_nak = 0;

                break;

        case IB_QPT_UC:
                if (qp->resp.drop_msg || diff != 0) {
                        if (pkt->mask & RXE_START_MASK) {
                                qp->resp.drop_msg = 0;
                                return RESPST_CHK_OP_SEQ;
                        }

                        qp->resp.drop_msg = 1;
                        return RESPST_CLEANUP;
                }
                break;
        default:
                break;
        }

        return RESPST_CHK_OP_SEQ;
}

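/* Check that this opcode is a legal successor of the previous opcode
 * received on this QP, e.g. that only a MIDDLE or LAST packet follows a
 * FIRST or MIDDLE packet of the same operation.
 */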
static enum resp_states check_op_seq(struct rxe_qp *qp,
                                     struct rxe_pkt_info *pkt)
{
        switch (qp_type(qp)) {
        case IB_QPT_RC:
                switch (qp->resp.opcode) {
                case IB_OPCODE_RC_SEND_FIRST:
                case IB_OPCODE_RC_SEND_MIDDLE:
                        switch (pkt->opcode) {
                        case IB_OPCODE_RC_SEND_MIDDLE:
                        case IB_OPCODE_RC_SEND_LAST:
                        case IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE:
                        case IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE:
                                return RESPST_CHK_OP_VALID;
                        default:
                                return RESPST_ERR_MISSING_OPCODE_LAST_C;
                        }

                case IB_OPCODE_RC_RDMA_WRITE_FIRST:
                case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
                        switch (pkt->opcode) {
                        case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
                        case IB_OPCODE_RC_RDMA_WRITE_LAST:
                        case IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
                                return RESPST_CHK_OP_VALID;
                        default:
                                return RESPST_ERR_MISSING_OPCODE_LAST_C;
                        }

                default:
                        switch (pkt->opcode) {
                        case IB_OPCODE_RC_SEND_MIDDLE:
                        case IB_OPCODE_RC_SEND_LAST:
                        case IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE:
                        case IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE:
                        case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
                        case IB_OPCODE_RC_RDMA_WRITE_LAST:
                        case IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
                                return RESPST_ERR_MISSING_OPCODE_FIRST;
                        default:
                                return RESPST_CHK_OP_VALID;
                        }
                }
                break;

        case IB_QPT_UC:
                switch (qp->resp.opcode) {
                case IB_OPCODE_UC_SEND_FIRST:
                case IB_OPCODE_UC_SEND_MIDDLE:
                        switch (pkt->opcode) {
                        case IB_OPCODE_UC_SEND_MIDDLE:
                        case IB_OPCODE_UC_SEND_LAST:
                        case IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE:
                                return RESPST_CHK_OP_VALID;
                        default:
                                return RESPST_ERR_MISSING_OPCODE_LAST_D1E;
                        }

                case IB_OPCODE_UC_RDMA_WRITE_FIRST:
                case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
                        switch (pkt->opcode) {
                        case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
                        case IB_OPCODE_UC_RDMA_WRITE_LAST:
                        case IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
                                return RESPST_CHK_OP_VALID;
                        default:
                                return RESPST_ERR_MISSING_OPCODE_LAST_D1E;
                        }

                default:
                        switch (pkt->opcode) {
                        case IB_OPCODE_UC_SEND_MIDDLE:
                        case IB_OPCODE_UC_SEND_LAST:
                        case IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE:
                        case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
                        case IB_OPCODE_UC_RDMA_WRITE_LAST:
                        case IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
                                qp->resp.drop_msg = 1;
                                return RESPST_CLEANUP;
                        default:
                                return RESPST_CHK_OP_VALID;
                        }
                }
                break;

        default:
                return RESPST_CHK_OP_VALID;
        }
}

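/* Check that the QP's access flags permit the requested operation. A
 * violation is fatal for RC but only drops the message for UC.
 */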
static enum resp_states check_op_valid(struct rxe_qp *qp,
                                       struct rxe_pkt_info *pkt)
{
        switch (qp_type(qp)) {
        case IB_QPT_RC:
                if (((pkt->mask & RXE_READ_MASK) &&
                     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_READ)) ||
                    ((pkt->mask & RXE_WRITE_MASK) &&
                     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) ||
                    ((pkt->mask & RXE_ATOMIC_MASK) &&
                     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) {
                        return RESPST_ERR_UNSUPPORTED_OPCODE;
                }

                break;

        case IB_QPT_UC:
                if ((pkt->mask & RXE_WRITE_MASK) &&
                    !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) {
                        qp->resp.drop_msg = 1;
                        return RESPST_CLEANUP;
                }

                break;

        case IB_QPT_UD:
        case IB_QPT_SMI:
        case IB_QPT_GSI:
                break;

        default:
                WARN_ON_ONCE(1);
                break;
        }

        return RESPST_CHK_RESOURCE;
}

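/* Take a recv WQE from the shared receive queue. The WQE is copied into the
 * QP so the SRQ slot can be recycled immediately, and the SRQ limit event is
 * generated if the queue drops below the armed limit.
 */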
static enum resp_states get_srq_wqe(struct rxe_qp *qp)
{
        struct rxe_srq *srq = qp->srq;
        struct rxe_queue *q = srq->rq.queue;
        struct rxe_recv_wqe *wqe;
        struct ib_event ev;

        if (srq->error)
                return RESPST_ERR_RNR;

        spin_lock_bh(&srq->rq.consumer_lock);

        wqe = queue_head(q);
        if (!wqe) {
                spin_unlock_bh(&srq->rq.consumer_lock);
                return RESPST_ERR_RNR;
        }

        /* note kernel and user space recv wqes have same size */
        memcpy(&qp->resp.srq_wqe, wqe, sizeof(qp->resp.srq_wqe));

        qp->resp.wqe = &qp->resp.srq_wqe.wqe;
        advance_consumer(q);

        if (srq->limit && srq->ibsrq.event_handler &&
            (queue_count(q) < srq->limit)) {
                srq->limit = 0;
                goto event;
        }

        spin_unlock_bh(&srq->rq.consumer_lock);
        return RESPST_CHK_LENGTH;

event:
        spin_unlock_bh(&srq->rq.consumer_lock);
        ev.device = qp->ibqp.device;
        ev.element.srq = qp->ibqp.srq;
        ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
        srq->ibsrq.event_handler(&ev, srq->ibsrq.srq_context);
        return RESPST_CHK_LENGTH;
}

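/* Check that a resource is available for this request: a rd/atomic slot for
 * reads and atomics, or a recv WQE for operations that consume one. In the
 * error state, flush any remaining recv WQEs instead.
 */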
static enum resp_states check_resource(struct rxe_qp *qp,
                                       struct rxe_pkt_info *pkt)
{
        struct rxe_srq *srq = qp->srq;

        if (qp->resp.state == QP_STATE_ERROR) {
                if (qp->resp.wqe) {
                        qp->resp.status = IB_WC_WR_FLUSH_ERR;
                        return RESPST_COMPLETE;
                } else if (!srq) {
                        qp->resp.wqe = queue_head(qp->rq.queue);
                        if (qp->resp.wqe) {
                                qp->resp.status = IB_WC_WR_FLUSH_ERR;
                                return RESPST_COMPLETE;
                        } else {
                                return RESPST_EXIT;
                        }
                } else {
                        return RESPST_EXIT;
                }
        }

        if (pkt->mask & RXE_READ_OR_ATOMIC) {
                /* it is the requester's job not to send
                 * too many read/atomic ops; we just
                 * recycle the responder resource queue
                 */
                if (likely(qp->attr.max_dest_rd_atomic > 0))
                        return RESPST_CHK_LENGTH;
                else
                        return RESPST_ERR_TOO_MANY_RDMA_ATM_REQ;
        }

        if (pkt->mask & RXE_RWR_MASK) {
                if (srq)
                        return get_srq_wqe(qp);

                qp->resp.wqe = queue_head(qp->rq.queue);
                return (qp->resp.wqe) ? RESPST_CHK_LENGTH : RESPST_ERR_RNR;
        }

        return RESPST_CHK_LENGTH;
}

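/* Length checking is currently a no-op; every QP type falls straight
 * through to the rkey check.
 */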
static enum resp_states check_length(struct rxe_qp *qp,
                                     struct rxe_pkt_info *pkt)
{
        switch (qp_type(qp)) {
        case IB_QPT_RC:
                return RESPST_CHK_RKEY;

        case IB_QPT_UC:
                return RESPST_CHK_RKEY;

        default:
                return RESPST_CHK_RKEY;
        }
}

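/* Validate the rkey carried by an RDMA or atomic request: look up the MR,
 * check its state and range, and for writes make sure the payload length
 * and padding are consistent with the residual length. On success,
 * qp->resp.mr holds a reference to the MR.
 */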
static enum resp_states check_rkey(struct rxe_qp *qp,
                                   struct rxe_pkt_info *pkt)
{
        struct rxe_mem *mem = NULL;
        u64 va;
        u32 rkey;
        u32 resid;
        u32 pktlen;
        int mtu = qp->mtu;
        enum resp_states state;
        int access;

        if (pkt->mask & (RXE_READ_MASK | RXE_WRITE_MASK)) {
                if (pkt->mask & RXE_RETH_MASK) {
                        qp->resp.va = reth_va(pkt);
                        qp->resp.rkey = reth_rkey(pkt);
                        qp->resp.resid = reth_len(pkt);
                        qp->resp.length = reth_len(pkt);
                }
                access = (pkt->mask & RXE_READ_MASK) ? IB_ACCESS_REMOTE_READ
                                                     : IB_ACCESS_REMOTE_WRITE;
        } else if (pkt->mask & RXE_ATOMIC_MASK) {
                qp->resp.va = atmeth_va(pkt);
                qp->resp.rkey = atmeth_rkey(pkt);
                qp->resp.resid = sizeof(u64);
                access = IB_ACCESS_REMOTE_ATOMIC;
        } else {
                return RESPST_EXECUTE;
        }

        /* A zero-byte op is not required to set an addr or rkey. */
        if ((pkt->mask & (RXE_READ_MASK | RXE_WRITE_OR_SEND)) &&
            (pkt->mask & RXE_RETH_MASK) &&
            reth_len(pkt) == 0) {
                return RESPST_EXECUTE;
        }

        va      = qp->resp.va;
        rkey    = qp->resp.rkey;
        resid   = qp->resp.resid;
        pktlen  = payload_size(pkt);

        mem = lookup_mem(qp->pd, access, rkey, lookup_remote);
        if (!mem) {
                state = RESPST_ERR_RKEY_VIOLATION;
                goto err;
        }

        if (unlikely(mem->state == RXE_MEM_STATE_FREE)) {
                state = RESPST_ERR_RKEY_VIOLATION;
                goto err;
        }

        if (mem_check_range(mem, va, resid)) {
                state = RESPST_ERR_RKEY_VIOLATION;
                goto err;
        }

        if (pkt->mask & RXE_WRITE_MASK) {
                if (resid > mtu) {
                        if (pktlen != mtu || bth_pad(pkt)) {
                                state = RESPST_ERR_LENGTH;
                                goto err;
                        }
                } else {
                        if (pktlen != resid) {
                                state = RESPST_ERR_LENGTH;
                                goto err;
                        }
                        if ((bth_pad(pkt) != (0x3 & (-resid)))) {
                                /* This case may not be exactly that,
                                 * but nothing else fits.
                                 */
                                state = RESPST_ERR_LENGTH;
                                goto err;
                        }
                }
        }

        WARN_ON_ONCE(qp->resp.mr);

        qp->resp.mr = mem;
        return RESPST_EXECUTE;

err:
        if (mem)
                rxe_drop_ref(mem);
        return state;
}

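/* Copy inbound send payload (or the constructed network header) into the
 * memory described by the current recv WQE.
 */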
static enum resp_states send_data_in(struct rxe_qp *qp, void *data_addr,
                                     int data_len)
{
        int err;

        err = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE, &qp->resp.wqe->dma,
                        data_addr, data_len, to_mem_obj, NULL);
        if (unlikely(err))
                return (err == -ENOSPC) ? RESPST_ERR_LENGTH
                                        : RESPST_ERR_MALFORMED_WQE;

        return RESPST_NONE;
}

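/* Copy inbound RDMA write payload into the target MR at the current virtual
 * address, then advance the address and residual count.
 */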
static enum resp_states write_data_in(struct rxe_qp *qp,
                                      struct rxe_pkt_info *pkt)
{
        enum resp_states rc = RESPST_NONE;
        int err;
        int data_len = payload_size(pkt);

        err = rxe_mem_copy(qp->resp.mr, qp->resp.va, payload_addr(pkt),
                           data_len, to_mem_obj, NULL);
        if (err) {
                rc = RESPST_ERR_RKEY_VIOLATION;
                goto out;
        }

        qp->resp.va += data_len;
        qp->resp.resid -= data_len;

out:
        return rc;
}

/* Guarantee atomicity of atomic operations at the machine level. */
static DEFINE_SPINLOCK(atomic_ops_lock);

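/* Perform a compare & swap or fetch & add on the target MR under
 * atomic_ops_lock, saving the original value for the atomic ack.
 */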
static enum resp_states process_atomic(struct rxe_qp *qp,
                                       struct rxe_pkt_info *pkt)
{
        u64 iova = atmeth_va(pkt);
        u64 *vaddr;
        enum resp_states ret;
        struct rxe_mem *mr = qp->resp.mr;

        if (mr->state != RXE_MEM_STATE_VALID) {
                ret = RESPST_ERR_RKEY_VIOLATION;
                goto out;
        }

        vaddr = iova_to_vaddr(mr, iova, sizeof(u64));

        /* check vaddr is 8 bytes aligned. */
        if (!vaddr || (uintptr_t)vaddr & 7) {
                ret = RESPST_ERR_MISALIGNED_ATOMIC;
                goto out;
        }

        spin_lock_bh(&atomic_ops_lock);

        qp->resp.atomic_orig = *vaddr;

        if (pkt->opcode == IB_OPCODE_RC_COMPARE_SWAP ||
            pkt->opcode == IB_OPCODE_RD_COMPARE_SWAP) {
                if (*vaddr == atmeth_comp(pkt))
                        *vaddr = atmeth_swap_add(pkt);
        } else {
                *vaddr += atmeth_swap_add(pkt);
        }

        spin_unlock_bh(&atomic_ops_lock);

        ret = RESPST_NONE;
out:
        return ret;
}

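/* Build an ack, atomic ack, or read response packet. The BTH is cloned from
 * the request and the opcode, PSN, and AETH/ATMACK fields are overridden.
 * If crcp is set, the partial ICRC is returned so the caller can fold in a
 * payload; otherwise the ICRC is finalized here.
 */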
static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
                                          struct rxe_pkt_info *pkt,
                                          struct rxe_pkt_info *ack,
                                          int opcode,
                                          int payload,
                                          u32 psn,
                                          u8 syndrome,
                                          u32 *crcp)
{
        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
        struct sk_buff *skb;
        u32 crc = 0;
        u32 *p;
        int paylen;
        int pad;
        int err;

        /*
         * allocate packet
         */
        pad = (-payload) & 0x3;
        paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;

        skb = rxe_init_packet(rxe, &qp->pri_av, paylen, ack);
        if (!skb)
                return NULL;

        ack->qp = qp;
        ack->opcode = opcode;
        ack->mask = rxe_opcode[opcode].mask;
        ack->offset = pkt->offset;
        ack->paylen = paylen;

        /* fill in bth using the request packet headers */
        memcpy(ack->hdr, pkt->hdr, pkt->offset + RXE_BTH_BYTES);

        bth_set_opcode(ack, opcode);
        bth_set_qpn(ack, qp->attr.dest_qp_num);
        bth_set_pad(ack, pad);
        bth_set_se(ack, 0);
        bth_set_psn(ack, psn);
        bth_set_ack(ack, 0);
        ack->psn = psn;

        if (ack->mask & RXE_AETH_MASK) {
                aeth_set_syn(ack, syndrome);
                aeth_set_msn(ack, qp->resp.msn);
        }

        if (ack->mask & RXE_ATMACK_MASK)
                atmack_set_orig(ack, qp->resp.atomic_orig);

        err = rxe_prepare(ack, skb, &crc);
        if (err) {
                kfree_skb(skb);
                return NULL;
        }

        if (crcp) {
                /* CRC computation will be continued by the caller */
                *crcp = crc;
        } else {
                p = payload_addr(ack) + payload + bth_pad(ack);
                *p = ~crc;
        }

        return skb;
}

/* RDMA read response. If res is not NULL, then we have a current RDMA request
 * being processed or replayed.
 */
static enum resp_states read_reply(struct rxe_qp *qp,
                                   struct rxe_pkt_info *req_pkt)
{
        struct rxe_pkt_info ack_pkt;
        struct sk_buff *skb;
        int mtu = qp->mtu;
        enum resp_states state;
        int payload;
        int opcode;
        int err;
        struct resp_res *res = qp->resp.res;
        u32 icrc;
        u32 *p;

        if (!res) {
                /* This is the first time we process that request. Get a
                 * resource
                 */
                res = &qp->resp.resources[qp->resp.res_head];

                free_rd_atomic_resource(qp, res);
                rxe_advance_resp_resource(qp);

                res->type               = RXE_READ_MASK;
                res->replay             = 0;

                res->read.va            = qp->resp.va;
                res->read.va_org        = qp->resp.va;

                res->first_psn          = req_pkt->psn;

                if (reth_len(req_pkt)) {
                        res->last_psn   = (req_pkt->psn +
                                           (reth_len(req_pkt) + mtu - 1) /
                                           mtu - 1) & BTH_PSN_MASK;
                } else {
                        res->last_psn   = res->first_psn;
                }
                res->cur_psn            = req_pkt->psn;

                res->read.resid         = qp->resp.resid;
                res->read.length        = qp->resp.resid;
                res->read.rkey          = qp->resp.rkey;

                /* note res inherits the reference to mr from qp */
                res->read.mr            = qp->resp.mr;
                qp->resp.mr             = NULL;

                qp->resp.res            = res;
                res->state              = rdatm_res_state_new;
        }

        if (res->state == rdatm_res_state_new) {
                if (res->read.resid <= mtu)
                        opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY;
                else
                        opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST;
        } else {
                if (res->read.resid > mtu)
                        opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE;
                else
                        opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST;
        }

        res->state = rdatm_res_state_next;

        payload = min_t(int, res->read.resid, mtu);

        skb = prepare_ack_packet(qp, req_pkt, &ack_pkt, opcode, payload,
                                 res->cur_psn, AETH_ACK_UNLIMITED, &icrc);
        if (!skb)
                return RESPST_ERR_RNR;

        err = rxe_mem_copy(res->read.mr, res->read.va, payload_addr(&ack_pkt),
                           payload, from_mem_obj, &icrc);
        if (err)
                pr_err("Failed copying memory\n");

        p = payload_addr(&ack_pkt) + payload + bth_pad(&ack_pkt);
        *p = ~icrc;

        err = rxe_xmit_packet(qp, &ack_pkt, skb);
        if (err) {
                pr_err("Failed sending RDMA reply.\n");
                return RESPST_ERR_RNR;
        }

        res->read.va += payload;
        res->read.resid -= payload;
        res->cur_psn = (res->cur_psn + 1) & BTH_PSN_MASK;

        if (res->read.resid > 0) {
                state = RESPST_DONE;
        } else {
                qp->resp.res = NULL;
                if (!res->replay)
                        qp->resp.opcode = -1;
                if (psn_compare(res->cur_psn, qp->resp.psn) >= 0)
                        qp->resp.psn = res->cur_psn;
                state = RESPST_CLEANUP;
        }

        return state;
}

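/* Reconstruct the network header (IPv4 header or IPv6 GRH) of the request
 * packet so it can be handed to UD consumers ahead of the payload.
 */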
static void build_rdma_network_hdr(union rdma_network_hdr *hdr,
                                   struct rxe_pkt_info *pkt)
{
        struct sk_buff *skb = PKT_TO_SKB(pkt);

        memset(hdr, 0, sizeof(*hdr));
        if (skb->protocol == htons(ETH_P_IP))
                memcpy(&hdr->roce4grh, ip_hdr(skb), sizeof(hdr->roce4grh));
        else if (skb->protocol == htons(ETH_P_IPV6))
                memcpy(&hdr->ibgrh, ipv6_hdr(skb), sizeof(hdr->ibgrh));
}

/* Executes a new request. A retried request never reaches this function
 * (sends and writes are discarded, and reads and atomics are retried
 * elsewhere).
 */
static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
{
        enum resp_states err;

        if (pkt->mask & RXE_SEND_MASK) {
                if (qp_type(qp) == IB_QPT_UD ||
                    qp_type(qp) == IB_QPT_SMI ||
                    qp_type(qp) == IB_QPT_GSI) {
                        union rdma_network_hdr hdr;

                        build_rdma_network_hdr(&hdr, pkt);

                        err = send_data_in(qp, &hdr, sizeof(hdr));
                        if (err)
                                return err;
                }
                err = send_data_in(qp, payload_addr(pkt), payload_size(pkt));
                if (err)
                        return err;
        } else if (pkt->mask & RXE_WRITE_MASK) {
                err = write_data_in(qp, pkt);
                if (err)
                        return err;
        } else if (pkt->mask & RXE_READ_MASK) {
                /* For RDMA Read we can increment the msn now. See C9-148. */
                qp->resp.msn++;
                return RESPST_READ_REPLY;
        } else if (pkt->mask & RXE_ATOMIC_MASK) {
                err = process_atomic(qp, pkt);
                if (err)
                        return err;
        } else {
                /* Unreachable */
                WARN_ON_ONCE(1);
        }

        /* next expected psn, read handles this separately */
        qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
        qp->resp.ack_psn = qp->resp.psn;

        qp->resp.opcode = pkt->opcode;
        qp->resp.status = IB_WC_SUCCESS;

        if (pkt->mask & RXE_COMP_MASK) {
                /* We successfully processed this new request. */
                qp->resp.msn++;
                return RESPST_COMPLETE;
        } else if (qp_type(qp) == IB_QPT_RC)
                return RESPST_ACKNOWLEDGE;
        else
                return RESPST_CLEANUP;
}

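/* Retire the current recv WQE, if any, and post a work completion for it to
 * the receive CQ, filling either the kernel or the user-space flavor of the
 * completion structure.
 */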
static enum resp_states do_complete(struct rxe_qp *qp,
                                    struct rxe_pkt_info *pkt)
{
        struct rxe_cqe cqe;
        struct ib_wc *wc = &cqe.ibwc;
        struct ib_uverbs_wc *uwc = &cqe.uibwc;
        struct rxe_recv_wqe *wqe = qp->resp.wqe;
        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

        if (unlikely(!wqe))
                return RESPST_CLEANUP;

        memset(&cqe, 0, sizeof(cqe));

        if (qp->rcq->is_user) {
                uwc->status             = qp->resp.status;
                uwc->qp_num             = qp->ibqp.qp_num;
                uwc->wr_id              = wqe->wr_id;
        } else {
                wc->status              = qp->resp.status;
                wc->qp                  = &qp->ibqp;
                wc->wr_id               = wqe->wr_id;
        }

        if (wc->status == IB_WC_SUCCESS) {
                rxe_counter_inc(rxe, RXE_CNT_RDMA_RECV);
                wc->opcode = (pkt->mask & RXE_IMMDT_MASK &&
                                pkt->mask & RXE_WRITE_MASK) ?
                                        IB_WC_RECV_RDMA_WITH_IMM : IB_WC_RECV;
                wc->vendor_err = 0;
                wc->byte_len = (pkt->mask & RXE_IMMDT_MASK &&
                                pkt->mask & RXE_WRITE_MASK) ?
                                        qp->resp.length : wqe->dma.length - wqe->dma.resid;

                /* fields after byte_len are different between kernel and user
                 * space
                 */
                if (qp->rcq->is_user) {
                        uwc->wc_flags = IB_WC_GRH;

                        if (pkt->mask & RXE_IMMDT_MASK) {
                                uwc->wc_flags |= IB_WC_WITH_IMM;
                                uwc->ex.imm_data = immdt_imm(pkt);
                        }

                        if (pkt->mask & RXE_IETH_MASK) {
                                uwc->wc_flags |= IB_WC_WITH_INVALIDATE;
                                uwc->ex.invalidate_rkey = ieth_rkey(pkt);
                        }

                        uwc->qp_num             = qp->ibqp.qp_num;

                        if (pkt->mask & RXE_DETH_MASK)
                                uwc->src_qp = deth_sqp(pkt);

                        uwc->port_num           = qp->attr.port_num;
                } else {
                        struct sk_buff *skb = PKT_TO_SKB(pkt);

                        wc->wc_flags = IB_WC_GRH | IB_WC_WITH_NETWORK_HDR_TYPE;
                        if (skb->protocol == htons(ETH_P_IP))
                                wc->network_hdr_type = RDMA_NETWORK_IPV4;
                        else
                                wc->network_hdr_type = RDMA_NETWORK_IPV6;

                        if (is_vlan_dev(skb->dev)) {
                                wc->wc_flags |= IB_WC_WITH_VLAN;
                                wc->vlan_id = vlan_dev_vlan_id(skb->dev);
                        }

                        if (pkt->mask & RXE_IMMDT_MASK) {
                                wc->wc_flags |= IB_WC_WITH_IMM;
                                wc->ex.imm_data = immdt_imm(pkt);
                        }

                        if (pkt->mask & RXE_IETH_MASK) {
                                struct rxe_mem *rmr;

                                wc->wc_flags |= IB_WC_WITH_INVALIDATE;
                                wc->ex.invalidate_rkey = ieth_rkey(pkt);

                                rmr = rxe_pool_get_index(&rxe->mr_pool,
                                                         wc->ex.invalidate_rkey >> 8);
                                if (unlikely(!rmr)) {
                                        pr_err("Bad rkey %#x invalidation\n",
                                               wc->ex.invalidate_rkey);
                                        return RESPST_ERROR;
                                }
                                rmr->state = RXE_MEM_STATE_FREE;
                                rxe_drop_ref(rmr);
                        }

                        wc->qp                  = &qp->ibqp;

                        if (pkt->mask & RXE_DETH_MASK)
                                wc->src_qp = deth_sqp(pkt);

                        wc->port_num            = qp->attr.port_num;
                }
        }

        /* have copy for srq and reference for !srq */
        if (!qp->srq)
                advance_consumer(qp->rq.queue);

        qp->resp.wqe = NULL;

        if (rxe_cq_post(qp->rcq, &cqe, pkt ? bth_se(pkt) : 1))
                return RESPST_ERR_CQ_OVERFLOW;

        if (qp->resp.state == QP_STATE_ERROR)
                return RESPST_CHK_RESOURCE;

        if (!pkt)
                return RESPST_DONE;
        else if (qp_type(qp) == IB_QPT_RC)
                return RESPST_ACKNOWLEDGE;
        else
                return RESPST_CLEANUP;
}

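/* Send a bare acknowledge packet with the given AETH syndrome and PSN. */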
static int send_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
                    u8 syndrome, u32 psn)
{
        int err = 0;
        struct rxe_pkt_info ack_pkt;
        struct sk_buff *skb;

        skb = prepare_ack_packet(qp, pkt, &ack_pkt, IB_OPCODE_RC_ACKNOWLEDGE,
                                 0, psn, syndrome, NULL);
        if (!skb) {
                err = -ENOMEM;
                goto err1;
        }

        err = rxe_xmit_packet(qp, &ack_pkt, skb);
        if (err)
                pr_err_ratelimited("Failed sending ack\n");

err1:
        return err;
}

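/* Send an atomic acknowledge carrying the original value and keep a
 * reference to the skb in a responder resource so that a duplicate atomic
 * request can be answered by retransmitting it.
 */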
static int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
                           u8 syndrome)
{
        int rc = 0;
        struct rxe_pkt_info ack_pkt;
        struct sk_buff *skb;
        struct resp_res *res;

        skb = prepare_ack_packet(qp, pkt, &ack_pkt,
                                 IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE, 0, pkt->psn,
                                 syndrome, NULL);
        if (!skb) {
                rc = -ENOMEM;
                goto out;
        }

        rxe_add_ref(qp);

        res = &qp->resp.resources[qp->resp.res_head];
        free_rd_atomic_resource(qp, res);
        rxe_advance_resp_resource(qp);

        memcpy(SKB_TO_PKT(skb), &ack_pkt, sizeof(ack_pkt));
        memset((unsigned char *)SKB_TO_PKT(skb) + sizeof(ack_pkt), 0,
               sizeof(skb->cb) - sizeof(ack_pkt));

        skb_get(skb);
        res->type = RXE_ATOMIC_MASK;
        res->atomic.skb = skb;
        res->first_psn = ack_pkt.psn;
        res->last_psn  = ack_pkt.psn;
        res->cur_psn   = ack_pkt.psn;

        rc = rxe_xmit_packet(qp, &ack_pkt, skb);
        if (rc) {
                pr_err_ratelimited("Failed sending ack\n");
                rxe_drop_ref(qp);
        }
out:
        return rc;
}

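/* Send whatever acknowledgement an RC request calls for: a NAK if an error
 * syndrome is pending, an atomic ack for atomic operations, or a plain ack
 * when the requester set the ack-request bit.
 */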
static enum resp_states acknowledge(struct rxe_qp *qp,
                                    struct rxe_pkt_info *pkt)
{
        if (qp_type(qp) != IB_QPT_RC)
                return RESPST_CLEANUP;

        if (qp->resp.aeth_syndrome != AETH_ACK_UNLIMITED)
                send_ack(qp, pkt, qp->resp.aeth_syndrome, pkt->psn);
        else if (pkt->mask & RXE_ATOMIC_MASK)
                send_atomic_ack(qp, pkt, AETH_ACK_UNLIMITED);
        else if (bth_ack(pkt))
                send_ack(qp, pkt, AETH_ACK_UNLIMITED, pkt->psn);

        return RESPST_CLEANUP;
}

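/* Consume the request packet and drop the MR reference taken in
 * check_rkey().
 */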
static enum resp_states cleanup(struct rxe_qp *qp,
                                struct rxe_pkt_info *pkt)
{
        struct sk_buff *skb;

        if (pkt) {
                skb = skb_dequeue(&qp->req_pkts);
                rxe_drop_ref(qp);
                kfree_skb(skb);
        }

        if (qp->resp.mr) {
                rxe_drop_ref(qp->resp.mr);
                qp->resp.mr = NULL;
        }

        return RESPST_DONE;
}

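/* Find the responder resource, if any, whose PSN range covers the given
 * PSN.
 */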
static struct resp_res *find_resource(struct rxe_qp *qp, u32 psn)
{
        int i;

        for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
                struct resp_res *res = &qp->resp.resources[i];

                if (res->type == 0)
                        continue;

                if (psn_compare(psn, res->first_psn) >= 0 &&
                    psn_compare(psn, res->last_psn) <= 0) {
                        return res;
                }
        }

        return NULL;
}

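/* Handle a retried request: re-ack duplicate sends and writes, replay read
 * replies from the saved resource, and retransmit the saved atomic ack.
 */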
static enum resp_states duplicate_request(struct rxe_qp *qp,
                                          struct rxe_pkt_info *pkt)
{
        enum resp_states rc;
        u32 prev_psn = (qp->resp.ack_psn - 1) & BTH_PSN_MASK;

        if (pkt->mask & RXE_SEND_MASK ||
            pkt->mask & RXE_WRITE_MASK) {
                /* SEND or WRITE. Ack again and clean up. C9-105. */
                if (bth_ack(pkt))
                        send_ack(qp, pkt, AETH_ACK_UNLIMITED, prev_psn);
                rc = RESPST_CLEANUP;
                goto out;
        } else if (pkt->mask & RXE_READ_MASK) {
                struct resp_res *res;

                res = find_resource(qp, pkt->psn);
                if (!res) {
                        /* Resource not found. Class D error. Drop the
                         * request.
                         */
                        rc = RESPST_CLEANUP;
                        goto out;
                } else {
                        /* Ensure this new request is the same as the previous
                         * one or a subset of it.
                         */
                        u64 iova = reth_va(pkt);
                        u32 resid = reth_len(pkt);

                        if (iova < res->read.va_org ||
                            resid > res->read.length ||
                            (iova + resid) > (res->read.va_org +
                                              res->read.length)) {
                                rc = RESPST_CLEANUP;
                                goto out;
                        }

                        if (reth_rkey(pkt) != res->read.rkey) {
                                rc = RESPST_CLEANUP;
                                goto out;
                        }

                        res->cur_psn = pkt->psn;
                        res->state = (pkt->psn == res->first_psn) ?
                                        rdatm_res_state_new :
                                        rdatm_res_state_replay;
                        res->replay = 1;

                        /* Reset the resource, except length. */
                        res->read.va_org = iova;
                        res->read.va = iova;
                        res->read.resid = resid;

                        /* Replay the RDMA read reply. */
                        qp->resp.res = res;
                        rc = RESPST_READ_REPLY;
                        goto out;
                }
        } else {
                struct resp_res *res;

                /* Find the operation in our list of responder resources. */
                res = find_resource(qp, pkt->psn);
                if (res) {
                        skb_get(res->atomic.skb);
                        /* Resend the result. */
                        rc = rxe_xmit_packet(qp, pkt, res->atomic.skb);
                        if (rc) {
                                pr_err("Failed resending result. This flow is not handled - skb ignored\n");
                                rc = RESPST_CLEANUP;
                                goto out;
                        }
                }

                /* Resource not found. Class D error. Drop the request. */
                rc = RESPST_CLEANUP;
                goto out;
        }
out:
        return rc;
}

/* Process a class A or C error. Both are treated the same in this
 * implementation.
 */
static void do_class_ac_error(struct rxe_qp *qp, u8 syndrome,
                              enum ib_wc_status status)
{
        qp->resp.aeth_syndrome  = syndrome;
        qp->resp.status         = status;

        /* indicate that we should go through the ERROR state */
        qp->resp.goto_error     = 1;
}

static enum resp_states do_class_d1e_error(struct rxe_qp *qp)
{
        /* UC */
        if (qp->srq) {
                /* Class E */
                qp->resp.drop_msg = 1;
                if (qp->resp.wqe) {
                        qp->resp.status = IB_WC_REM_INV_REQ_ERR;
                        return RESPST_COMPLETE;
                } else {
                        return RESPST_CLEANUP;
                }
        } else {
                /* Class D1. This packet may be the start of a
                 * new message and could be valid. The previous
                 * message is invalid and ignored; reset the
                 * recv wr to its original state.
                 */
                if (qp->resp.wqe) {
                        qp->resp.wqe->dma.resid = qp->resp.wqe->dma.length;
                        qp->resp.wqe->dma.cur_sge = 0;
                        qp->resp.wqe->dma.sge_offset = 0;
                        qp->resp.opcode = -1;
                }

                if (qp->resp.mr) {
                        rxe_drop_ref(qp->resp.mr);
                        qp->resp.mr = NULL;
                }

                return RESPST_CLEANUP;
        }
}

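/* Drop all queued request packets. If notify is set, posted recv WQEs are
 * left in place so they can be flushed with errors; otherwise they are
 * silently consumed.
 */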
static void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify)
{
        struct sk_buff *skb;

        while ((skb = skb_dequeue(&qp->req_pkts))) {
                rxe_drop_ref(qp);
                kfree_skb(skb);
        }

        if (notify)
                return;

        while (!qp->srq && qp->rq.queue && queue_head(qp->rq.queue))
                advance_consumer(qp->rq.queue);
}

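/* The responder task. Steps the state machine over each queued request
 * packet until the input queue is empty (RESPST_EXIT) or the current packet
 * has been fully handled (RESPST_DONE).
 */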
int rxe_responder(void *arg)
{
        struct rxe_qp *qp = (struct rxe_qp *)arg;
        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
        enum resp_states state;
        struct rxe_pkt_info *pkt = NULL;
        int ret = 0;

        rxe_add_ref(qp);

        qp->resp.aeth_syndrome = AETH_ACK_UNLIMITED;

        if (!qp->valid) {
                ret = -EINVAL;
                goto done;
        }

        switch (qp->resp.state) {
        case QP_STATE_RESET:
                state = RESPST_RESET;
                break;

        default:
                state = RESPST_GET_REQ;
                break;
        }

        while (1) {
                pr_debug("qp#%d state = %s\n", qp_num(qp),
                         resp_state_name[state]);
                switch (state) {
                case RESPST_GET_REQ:
                        state = get_req(qp, &pkt);
                        break;
                case RESPST_CHK_PSN:
                        state = check_psn(qp, pkt);
                        break;
                case RESPST_CHK_OP_SEQ:
                        state = check_op_seq(qp, pkt);
                        break;
                case RESPST_CHK_OP_VALID:
                        state = check_op_valid(qp, pkt);
                        break;
                case RESPST_CHK_RESOURCE:
                        state = check_resource(qp, pkt);
                        break;
                case RESPST_CHK_LENGTH:
                        state = check_length(qp, pkt);
                        break;
                case RESPST_CHK_RKEY:
                        state = check_rkey(qp, pkt);
                        break;
                case RESPST_EXECUTE:
                        state = execute(qp, pkt);
                        break;
                case RESPST_COMPLETE:
                        state = do_complete(qp, pkt);
                        break;
                case RESPST_READ_REPLY:
                        state = read_reply(qp, pkt);
                        break;
                case RESPST_ACKNOWLEDGE:
                        state = acknowledge(qp, pkt);
                        break;
                case RESPST_CLEANUP:
                        state = cleanup(qp, pkt);
                        break;
                case RESPST_DUPLICATE_REQUEST:
                        state = duplicate_request(qp, pkt);
                        break;
                case RESPST_ERR_PSN_OUT_OF_SEQ:
                        /* RC only - Class B. Drop packet. */
                        send_ack(qp, pkt, AETH_NAK_PSN_SEQ_ERROR, qp->resp.psn);
                        state = RESPST_CLEANUP;
                        break;

                case RESPST_ERR_TOO_MANY_RDMA_ATM_REQ:
                case RESPST_ERR_MISSING_OPCODE_FIRST:
                case RESPST_ERR_MISSING_OPCODE_LAST_C:
                case RESPST_ERR_UNSUPPORTED_OPCODE:
                case RESPST_ERR_MISALIGNED_ATOMIC:
                        /* RC only - Class C. */
                        do_class_ac_error(qp, AETH_NAK_INVALID_REQ,
                                          IB_WC_REM_INV_REQ_ERR);
                        state = RESPST_COMPLETE;
                        break;

                case RESPST_ERR_MISSING_OPCODE_LAST_D1E:
                        state = do_class_d1e_error(qp);
                        break;
                case RESPST_ERR_RNR:
                        if (qp_type(qp) == IB_QPT_RC) {
                                rxe_counter_inc(rxe, RXE_CNT_SND_RNR);
                                /* RC - class B */
                                send_ack(qp, pkt, AETH_RNR_NAK |
                                         (~AETH_TYPE_MASK &
                                         qp->attr.min_rnr_timer),
                                         pkt->psn);
                        } else {
                                /* UD/UC - class D */
                                qp->resp.drop_msg = 1;
                        }
                        state = RESPST_CLEANUP;
                        break;

                case RESPST_ERR_RKEY_VIOLATION:
                        if (qp_type(qp) == IB_QPT_RC) {
                                /* Class C */
                                do_class_ac_error(qp, AETH_NAK_REM_ACC_ERR,
                                                  IB_WC_REM_ACCESS_ERR);
                                state = RESPST_COMPLETE;
                        } else {
                                qp->resp.drop_msg = 1;
                                if (qp->srq) {
                                        /* UC/SRQ Class D */
                                        qp->resp.status = IB_WC_REM_ACCESS_ERR;
                                        state = RESPST_COMPLETE;
                                } else {
                                        /* UC/non-SRQ Class E. */
                                        state = RESPST_CLEANUP;
                                }
                        }
                        break;

                case RESPST_ERR_LENGTH:
                        if (qp_type(qp) == IB_QPT_RC) {
                                /* Class C */
                                do_class_ac_error(qp, AETH_NAK_INVALID_REQ,
                                                  IB_WC_REM_INV_REQ_ERR);
                                state = RESPST_COMPLETE;
                        } else if (qp->srq) {
                                /* UC/UD - class E */
                                qp->resp.status = IB_WC_REM_INV_REQ_ERR;
                                state = RESPST_COMPLETE;
                        } else {
                                /* UC/UD - class D */
                                qp->resp.drop_msg = 1;
                                state = RESPST_CLEANUP;
                        }
                        break;

                case RESPST_ERR_MALFORMED_WQE:
                        /* All, Class A. */
                        do_class_ac_error(qp, AETH_NAK_REM_OP_ERR,
                                          IB_WC_LOC_QP_OP_ERR);
                        state = RESPST_COMPLETE;
                        break;

                case RESPST_ERR_CQ_OVERFLOW:
                        /* All - Class G */
                        state = RESPST_ERROR;
                        break;

                case RESPST_DONE:
                        if (qp->resp.goto_error) {
                                state = RESPST_ERROR;
                                break;
                        }

                        goto done;

                case RESPST_EXIT:
                        if (qp->resp.goto_error) {
                                state = RESPST_ERROR;
                                break;
                        }

                        goto exit;

                case RESPST_RESET:
                        rxe_drain_req_pkts(qp, false);
                        qp->resp.wqe = NULL;
                        goto exit;

                case RESPST_ERROR:
                        qp->resp.goto_error = 0;
                        pr_warn("qp#%d moved to error state\n", qp_num(qp));
                        rxe_qp_error(qp);
                        goto exit;

                default:
                        WARN_ON_ONCE(1);
                }
        }

exit:
        ret = -EAGAIN;
done:
        rxe_drop_ref(qp);
        return ret;
}