linux/drivers/infiniband/sw/rxe/rxe_resp.c
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/skbuff.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

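/* States of the responder state machine. rxe_responder() below walks each
 * inbound request packet through the RESPST_CHK_* validation states,
 * executes it, generates any reply, and then completes or cleans up.
 * The RESPST_ERR_* states correspond to the IBA error classes handled at
 * the bottom of rxe_responder().
 */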
enum resp_states {
        RESPST_NONE,
        RESPST_GET_REQ,
        RESPST_CHK_PSN,
        RESPST_CHK_OP_SEQ,
        RESPST_CHK_OP_VALID,
        RESPST_CHK_RESOURCE,
        RESPST_CHK_LENGTH,
        RESPST_CHK_RKEY,
        RESPST_EXECUTE,
        RESPST_READ_REPLY,
        RESPST_COMPLETE,
        RESPST_ACKNOWLEDGE,
        RESPST_CLEANUP,
        RESPST_DUPLICATE_REQUEST,
        RESPST_ERR_MALFORMED_WQE,
        RESPST_ERR_UNSUPPORTED_OPCODE,
        RESPST_ERR_MISALIGNED_ATOMIC,
        RESPST_ERR_PSN_OUT_OF_SEQ,
        RESPST_ERR_MISSING_OPCODE_FIRST,
        RESPST_ERR_MISSING_OPCODE_LAST_C,
        RESPST_ERR_MISSING_OPCODE_LAST_D1E,
        RESPST_ERR_TOO_MANY_RDMA_ATM_REQ,
        RESPST_ERR_RNR,
        RESPST_ERR_RKEY_VIOLATION,
        RESPST_ERR_INVALIDATE_RKEY,
        RESPST_ERR_LENGTH,
        RESPST_ERR_CQ_OVERFLOW,
        RESPST_ERROR,
        RESPST_RESET,
        RESPST_DONE,
        RESPST_EXIT,
};

static char *resp_state_name[] = {
        [RESPST_NONE]                           = "NONE",
        [RESPST_GET_REQ]                        = "GET_REQ",
        [RESPST_CHK_PSN]                        = "CHK_PSN",
        [RESPST_CHK_OP_SEQ]                     = "CHK_OP_SEQ",
        [RESPST_CHK_OP_VALID]                   = "CHK_OP_VALID",
        [RESPST_CHK_RESOURCE]                   = "CHK_RESOURCE",
        [RESPST_CHK_LENGTH]                     = "CHK_LENGTH",
        [RESPST_CHK_RKEY]                       = "CHK_RKEY",
        [RESPST_EXECUTE]                        = "EXECUTE",
        [RESPST_READ_REPLY]                     = "READ_REPLY",
        [RESPST_COMPLETE]                       = "COMPLETE",
        [RESPST_ACKNOWLEDGE]                    = "ACKNOWLEDGE",
        [RESPST_CLEANUP]                        = "CLEANUP",
        [RESPST_DUPLICATE_REQUEST]              = "DUPLICATE_REQUEST",
        [RESPST_ERR_MALFORMED_WQE]              = "ERR_MALFORMED_WQE",
        [RESPST_ERR_UNSUPPORTED_OPCODE]         = "ERR_UNSUPPORTED_OPCODE",
        [RESPST_ERR_MISALIGNED_ATOMIC]          = "ERR_MISALIGNED_ATOMIC",
        [RESPST_ERR_PSN_OUT_OF_SEQ]             = "ERR_PSN_OUT_OF_SEQ",
        [RESPST_ERR_MISSING_OPCODE_FIRST]       = "ERR_MISSING_OPCODE_FIRST",
        [RESPST_ERR_MISSING_OPCODE_LAST_C]      = "ERR_MISSING_OPCODE_LAST_C",
        [RESPST_ERR_MISSING_OPCODE_LAST_D1E]    = "ERR_MISSING_OPCODE_LAST_D1E",
        [RESPST_ERR_TOO_MANY_RDMA_ATM_REQ]      = "ERR_TOO_MANY_RDMA_ATM_REQ",
        [RESPST_ERR_RNR]                        = "ERR_RNR",
        [RESPST_ERR_RKEY_VIOLATION]             = "ERR_RKEY_VIOLATION",
        [RESPST_ERR_INVALIDATE_RKEY]            = "ERR_INVALIDATE_RKEY",
        [RESPST_ERR_LENGTH]                     = "ERR_LENGTH",
        [RESPST_ERR_CQ_OVERFLOW]                = "ERR_CQ_OVERFLOW",
        [RESPST_ERROR]                          = "ERROR",
        [RESPST_RESET]                          = "RESET",
        [RESPST_DONE]                           = "DONE",
        [RESPST_EXIT]                           = "EXIT",
};

/* rxe_recv calls here to add a request packet to the input queue */
void rxe_resp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
{
        int must_sched;
        struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);

        skb_queue_tail(&qp->req_pkts, skb);

        /* schedule the responder task for RDMA reads or when packets are
         * already queued; otherwise it may run directly in this context
         */
        must_sched = (pkt->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST) ||
                        (skb_queue_len(&qp->req_pkts) > 1);

        rxe_run_task(&qp->resp.task, must_sched);
}

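/* Peek at the next request packet. If the QP has moved to the error
 * state, drop everything queued and go flush the receive queue instead.
 * A non-NULL qp->resp.res means an RDMA read reply is still in progress,
 * so resume it before looking at new requests.
 */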
static inline enum resp_states get_req(struct rxe_qp *qp,
                                       struct rxe_pkt_info **pkt_p)
{
        struct sk_buff *skb;

        if (qp->resp.state == QP_STATE_ERROR) {
                while ((skb = skb_dequeue(&qp->req_pkts))) {
                        rxe_drop_ref(qp);
                        kfree_skb(skb);
                        ib_device_put(qp->ibqp.device);
                }

                /* go drain recv wr queue */
                return RESPST_CHK_RESOURCE;
        }

        skb = skb_peek(&qp->req_pkts);
        if (!skb)
                return RESPST_EXIT;

        *pkt_p = SKB_TO_PKT(skb);

        return (qp->resp.res) ? RESPST_READ_REPLY : RESPST_CHK_PSN;
}

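/* Validate the packet's PSN against the PSN the responder expects.
 * For RC, ahead-of-sequence packets get a single NAK and duplicates are
 * replayed; for UC, an out-of-sequence packet causes the current message
 * to be dropped until a new first packet arrives.
 */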
static enum resp_states check_psn(struct rxe_qp *qp,
                                  struct rxe_pkt_info *pkt)
{
        int diff = psn_compare(pkt->psn, qp->resp.psn);
        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

        switch (qp_type(qp)) {
        case IB_QPT_RC:
                if (diff > 0) {
                        if (qp->resp.sent_psn_nak)
                                return RESPST_CLEANUP;

                        qp->resp.sent_psn_nak = 1;
                        rxe_counter_inc(rxe, RXE_CNT_OUT_OF_SEQ_REQ);
                        return RESPST_ERR_PSN_OUT_OF_SEQ;

                } else if (diff < 0) {
                        rxe_counter_inc(rxe, RXE_CNT_DUP_REQ);
                        return RESPST_DUPLICATE_REQUEST;
                }

                if (qp->resp.sent_psn_nak)
                        qp->resp.sent_psn_nak = 0;

                break;

        case IB_QPT_UC:
                if (qp->resp.drop_msg || diff != 0) {
                        if (pkt->mask & RXE_START_MASK) {
                                qp->resp.drop_msg = 0;
                                return RESPST_CHK_OP_SEQ;
                        }

                        qp->resp.drop_msg = 1;
                        return RESPST_CLEANUP;
                }
                break;
        default:
                break;
        }

        return RESPST_CHK_OP_SEQ;
}

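/* Check that the packet opcode is a legal successor of the previous
 * opcode seen on this QP, e.g. that a SEND_MIDDLE only follows a
 * SEND_FIRST or SEND_MIDDLE.
 */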
static enum resp_states check_op_seq(struct rxe_qp *qp,
                                     struct rxe_pkt_info *pkt)
{
        switch (qp_type(qp)) {
        case IB_QPT_RC:
                switch (qp->resp.opcode) {
                case IB_OPCODE_RC_SEND_FIRST:
                case IB_OPCODE_RC_SEND_MIDDLE:
                        switch (pkt->opcode) {
                        case IB_OPCODE_RC_SEND_MIDDLE:
                        case IB_OPCODE_RC_SEND_LAST:
                        case IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE:
                        case IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE:
                                return RESPST_CHK_OP_VALID;
                        default:
                                return RESPST_ERR_MISSING_OPCODE_LAST_C;
                        }

                case IB_OPCODE_RC_RDMA_WRITE_FIRST:
                case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
                        switch (pkt->opcode) {
                        case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
                        case IB_OPCODE_RC_RDMA_WRITE_LAST:
                        case IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
                                return RESPST_CHK_OP_VALID;
                        default:
                                return RESPST_ERR_MISSING_OPCODE_LAST_C;
                        }

                default:
                        switch (pkt->opcode) {
                        case IB_OPCODE_RC_SEND_MIDDLE:
                        case IB_OPCODE_RC_SEND_LAST:
                        case IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE:
                        case IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE:
                        case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
                        case IB_OPCODE_RC_RDMA_WRITE_LAST:
                        case IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
                                return RESPST_ERR_MISSING_OPCODE_FIRST;
                        default:
                                return RESPST_CHK_OP_VALID;
                        }
                }
                break;

        case IB_QPT_UC:
                switch (qp->resp.opcode) {
                case IB_OPCODE_UC_SEND_FIRST:
                case IB_OPCODE_UC_SEND_MIDDLE:
                        switch (pkt->opcode) {
                        case IB_OPCODE_UC_SEND_MIDDLE:
                        case IB_OPCODE_UC_SEND_LAST:
                        case IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE:
                                return RESPST_CHK_OP_VALID;
                        default:
                                return RESPST_ERR_MISSING_OPCODE_LAST_D1E;
                        }

                case IB_OPCODE_UC_RDMA_WRITE_FIRST:
                case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
                        switch (pkt->opcode) {
                        case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
                        case IB_OPCODE_UC_RDMA_WRITE_LAST:
                        case IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
                                return RESPST_CHK_OP_VALID;
                        default:
                                return RESPST_ERR_MISSING_OPCODE_LAST_D1E;
                        }

                default:
                        switch (pkt->opcode) {
                        case IB_OPCODE_UC_SEND_MIDDLE:
                        case IB_OPCODE_UC_SEND_LAST:
                        case IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE:
                        case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
                        case IB_OPCODE_UC_RDMA_WRITE_LAST:
                        case IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
                                qp->resp.drop_msg = 1;
                                return RESPST_CLEANUP;
                        default:
                                return RESPST_CHK_OP_VALID;
                        }
                }
                break;

        default:
                return RESPST_CHK_OP_VALID;
        }
}

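/* Verify that the QP's access flags permit the requested operation */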
static enum resp_states check_op_valid(struct rxe_qp *qp,
                                       struct rxe_pkt_info *pkt)
{
        switch (qp_type(qp)) {
        case IB_QPT_RC:
                if (((pkt->mask & RXE_READ_MASK) &&
                     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_READ)) ||
                    ((pkt->mask & RXE_WRITE_MASK) &&
                     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) ||
                    ((pkt->mask & RXE_ATOMIC_MASK) &&
                     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) {
                        return RESPST_ERR_UNSUPPORTED_OPCODE;
                }

                break;

        case IB_QPT_UC:
                if ((pkt->mask & RXE_WRITE_MASK) &&
                    !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) {
                        qp->resp.drop_msg = 1;
                        return RESPST_CLEANUP;
                }

                break;

        case IB_QPT_UD:
        case IB_QPT_SMI:
        case IB_QPT_GSI:
                break;

        default:
                WARN_ON_ONCE(1);
                break;
        }

        return RESPST_CHK_RESOURCE;
}

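/* Copy the next receive WQE off the shared receive queue into the QP so
 * it remains usable after the consumer index is advanced, and raise the
 * SRQ limit event if the queue has dropped below the armed limit.
 */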
static enum resp_states get_srq_wqe(struct rxe_qp *qp)
{
        struct rxe_srq *srq = qp->srq;
        struct rxe_queue *q = srq->rq.queue;
        struct rxe_recv_wqe *wqe;
        struct ib_event ev;
        unsigned int count;
        size_t size;

        if (srq->error)
                return RESPST_ERR_RNR;

        spin_lock_bh(&srq->rq.consumer_lock);

        wqe = queue_head(q, QUEUE_TYPE_FROM_CLIENT);
        if (!wqe) {
                spin_unlock_bh(&srq->rq.consumer_lock);
                return RESPST_ERR_RNR;
        }

        /* don't trust user space data */
        if (unlikely(wqe->dma.num_sge > srq->rq.max_sge)) {
                spin_unlock_bh(&srq->rq.consumer_lock);
                pr_warn("%s: invalid num_sge in SRQ entry\n", __func__);
                return RESPST_ERR_MALFORMED_WQE;
        }
        size = sizeof(*wqe) + wqe->dma.num_sge * sizeof(struct rxe_sge);
        memcpy(&qp->resp.srq_wqe, wqe, size);

        qp->resp.wqe = &qp->resp.srq_wqe.wqe;
        queue_advance_consumer(q, QUEUE_TYPE_FROM_CLIENT);
        count = queue_count(q, QUEUE_TYPE_FROM_CLIENT);

        if (srq->limit && srq->ibsrq.event_handler && (count < srq->limit)) {
                srq->limit = 0;
                goto event;
        }

        spin_unlock_bh(&srq->rq.consumer_lock);
        return RESPST_CHK_LENGTH;

event:
        spin_unlock_bh(&srq->rq.consumer_lock);
        ev.device = qp->ibqp.device;
        ev.element.srq = qp->ibqp.srq;
        ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
        srq->ibsrq.event_handler(&ev, srq->ibsrq.srq_context);
        return RESPST_CHK_LENGTH;
}

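/* Make sure the responder has what it needs to process the packet:
 * flush completions if the QP is in the error state, verify that
 * read/atomic resources are configured, and claim a receive WQE for
 * operations that consume one.
 */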
static enum resp_states check_resource(struct rxe_qp *qp,
                                       struct rxe_pkt_info *pkt)
{
        struct rxe_srq *srq = qp->srq;

        if (qp->resp.state == QP_STATE_ERROR) {
                if (qp->resp.wqe) {
                        qp->resp.status = IB_WC_WR_FLUSH_ERR;
                        return RESPST_COMPLETE;
                } else if (!srq) {
                        qp->resp.wqe = queue_head(qp->rq.queue,
                                        QUEUE_TYPE_FROM_CLIENT);
                        if (qp->resp.wqe) {
                                qp->resp.status = IB_WC_WR_FLUSH_ERR;
                                return RESPST_COMPLETE;
                        } else {
                                return RESPST_EXIT;
                        }
                } else {
                        return RESPST_EXIT;
                }
        }

        if (pkt->mask & RXE_READ_OR_ATOMIC_MASK) {
                /* it is the requester's job not to send too many
                 * read/atomic ops; we just recycle the responder
                 * resource queue
                 */
                if (likely(qp->attr.max_dest_rd_atomic > 0))
                        return RESPST_CHK_LENGTH;
                else
                        return RESPST_ERR_TOO_MANY_RDMA_ATM_REQ;
        }

        if (pkt->mask & RXE_RWR_MASK) {
                if (srq)
                        return get_srq_wqe(qp);

                qp->resp.wqe = queue_head(qp->rq.queue,
                                QUEUE_TYPE_FROM_CLIENT);
                return (qp->resp.wqe) ? RESPST_CHK_LENGTH : RESPST_ERR_RNR;
        }

        return RESPST_CHK_LENGTH;
}

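/* Length checking is currently a no-op: every QP type falls straight
 * through to the rkey check.
 */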
static enum resp_states check_length(struct rxe_qp *qp,
                                     struct rxe_pkt_info *pkt)
{
        switch (qp_type(qp)) {
        case IB_QPT_RC:
                return RESPST_CHK_RKEY;

        case IB_QPT_UC:
                return RESPST_CHK_RKEY;

        default:
                return RESPST_CHK_RKEY;
        }
}

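/* Look up and validate the memory region or window referenced by the
 * packet's rkey, check the requested range and, for writes, that the
 * packet length is consistent with the residual and the MTU. On success
 * a reference to the MR is stashed in qp->resp.mr.
 */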
static enum resp_states check_rkey(struct rxe_qp *qp,
                                   struct rxe_pkt_info *pkt)
{
        struct rxe_mr *mr = NULL;
        struct rxe_mw *mw = NULL;
        u64 va;
        u32 rkey;
        u32 resid;
        u32 pktlen;
        int mtu = qp->mtu;
        enum resp_states state;
        int access;

        if (pkt->mask & RXE_READ_OR_WRITE_MASK) {
                if (pkt->mask & RXE_RETH_MASK) {
                        qp->resp.va = reth_va(pkt);
                        qp->resp.offset = 0;
                        qp->resp.rkey = reth_rkey(pkt);
                        qp->resp.resid = reth_len(pkt);
                        qp->resp.length = reth_len(pkt);
                }
                access = (pkt->mask & RXE_READ_MASK) ? IB_ACCESS_REMOTE_READ
                                                     : IB_ACCESS_REMOTE_WRITE;
        } else if (pkt->mask & RXE_ATOMIC_MASK) {
                qp->resp.va = atmeth_va(pkt);
                qp->resp.offset = 0;
                qp->resp.rkey = atmeth_rkey(pkt);
                qp->resp.resid = sizeof(u64);
                access = IB_ACCESS_REMOTE_ATOMIC;
        } else {
                return RESPST_EXECUTE;
        }

        /* A zero-byte op is not required to set an addr or rkey. */
        if ((pkt->mask & RXE_READ_OR_WRITE_MASK) &&
            (pkt->mask & RXE_RETH_MASK) &&
            reth_len(pkt) == 0) {
                return RESPST_EXECUTE;
        }

        va      = qp->resp.va;
        rkey    = qp->resp.rkey;
        resid   = qp->resp.resid;
        pktlen  = payload_size(pkt);

        if (rkey_is_mw(rkey)) {
                mw = rxe_lookup_mw(qp, access, rkey);
                if (!mw) {
                        pr_err("%s: no MW matches rkey %#x\n", __func__, rkey);
                        state = RESPST_ERR_RKEY_VIOLATION;
                        goto err;
                }

                mr = mw->mr;
                if (!mr) {
                        pr_err("%s: MW doesn't have an MR\n", __func__);
                        state = RESPST_ERR_RKEY_VIOLATION;
                        goto err;
                }

                if (mw->access & IB_ZERO_BASED)
                        qp->resp.offset = mw->addr;

                rxe_drop_ref(mw);
                rxe_add_ref(mr);
        } else {
                mr = lookup_mr(qp->pd, access, rkey, RXE_LOOKUP_REMOTE);
                if (!mr) {
                        pr_err("%s: no MR matches rkey %#x\n", __func__, rkey);
                        state = RESPST_ERR_RKEY_VIOLATION;
                        goto err;
                }
        }

        if (mr_check_range(mr, va + qp->resp.offset, resid)) {
                state = RESPST_ERR_RKEY_VIOLATION;
                goto err;
        }

        if (pkt->mask & RXE_WRITE_MASK) {
                if (resid > mtu) {
                        if (pktlen != mtu || bth_pad(pkt)) {
                                state = RESPST_ERR_LENGTH;
                                goto err;
                        }
                } else {
                        if (pktlen != resid) {
                                state = RESPST_ERR_LENGTH;
                                goto err;
                        }
                        if (bth_pad(pkt) != (0x3 & (-resid))) {
                                /* A length error may not be exactly the
                                 * right diagnosis here, but nothing else
                                 * fits.
                                 */
                                state = RESPST_ERR_LENGTH;
                                goto err;
                        }
                }
        }

        WARN_ON_ONCE(qp->resp.mr);

        qp->resp.mr = mr;
        return RESPST_EXECUTE;

err:
        if (mr)
                rxe_drop_ref(mr);
        if (mw)
                rxe_drop_ref(mw);

        return state;
}

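/* Copy payload data into the receive WQE's scatter list via the DMA
 * state in qp->resp.wqe
 */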
static enum resp_states send_data_in(struct rxe_qp *qp, void *data_addr,
                                     int data_len)
{
        int err;

        err = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE, &qp->resp.wqe->dma,
                        data_addr, data_len, RXE_TO_MR_OBJ);
        if (unlikely(err))
                return (err == -ENOSPC) ? RESPST_ERR_LENGTH
                                        : RESPST_ERR_MALFORMED_WQE;

        return RESPST_NONE;
}

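/* Copy an RDMA write payload directly into the target MR and advance
 * the responder's virtual address and residual count
 */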
static enum resp_states write_data_in(struct rxe_qp *qp,
                                      struct rxe_pkt_info *pkt)
{
        enum resp_states rc = RESPST_NONE;
        int     err;
        int data_len = payload_size(pkt);

        err = rxe_mr_copy(qp->resp.mr, qp->resp.va + qp->resp.offset,
                          payload_addr(pkt), data_len, RXE_TO_MR_OBJ);
        if (err) {
                rc = RESPST_ERR_RKEY_VIOLATION;
                goto out;
        }

        qp->resp.va += data_len;
        qp->resp.resid -= data_len;

out:
        return rc;
}

/* Guarantee atomicity of atomic operations at the machine level. */
static DEFINE_SPINLOCK(atomic_ops_lock);

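/* Execute a compare&swap or fetch&add on the 64-bit value addressed by
 * the request, saving the original value so it can be returned in the
 * atomic ack. The global spinlock above serializes all atomic ops
 * handled by rxe, since requests on different QPs may target the same
 * memory word.
 */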
static enum resp_states process_atomic(struct rxe_qp *qp,
                                       struct rxe_pkt_info *pkt)
{
        u64 *vaddr;
        enum resp_states ret;
        struct rxe_mr *mr = qp->resp.mr;

        if (mr->state != RXE_MR_STATE_VALID) {
                ret = RESPST_ERR_RKEY_VIOLATION;
                goto out;
        }

        vaddr = iova_to_vaddr(mr, qp->resp.va + qp->resp.offset, sizeof(u64));

        /* check vaddr is 8 bytes aligned. */
        if (!vaddr || (uintptr_t)vaddr & 7) {
                ret = RESPST_ERR_MISALIGNED_ATOMIC;
                goto out;
        }

        spin_lock_bh(&atomic_ops_lock);

        qp->resp.atomic_orig = *vaddr;

        if (pkt->opcode == IB_OPCODE_RC_COMPARE_SWAP ||
            pkt->opcode == IB_OPCODE_RD_COMPARE_SWAP) {
                if (*vaddr == atmeth_comp(pkt))
                        *vaddr = atmeth_swap_add(pkt);
        } else {
                *vaddr += atmeth_swap_add(pkt);
        }

        spin_unlock_bh(&atomic_ops_lock);

        ret = RESPST_NONE;
out:
        return ret;
}

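/* Allocate and initialize an ack, read response or atomic ack packet
 * headed back to the requester
 */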
static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
                                          struct rxe_pkt_info *pkt,
                                          struct rxe_pkt_info *ack,
                                          int opcode,
                                          int payload,
                                          u32 psn,
                                          u8 syndrome)
{
        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
        struct sk_buff *skb;
        int paylen;
        int pad;
        int err;

        /*
         * allocate packet
         */
        pad = (-payload) & 0x3;
        paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;

        skb = rxe_init_packet(rxe, &qp->pri_av, paylen, ack);
        if (!skb)
                return NULL;

        ack->qp = qp;
        ack->opcode = opcode;
        ack->mask = rxe_opcode[opcode].mask;
        ack->paylen = paylen;
        ack->psn = psn;

        bth_init(ack, opcode, 0, 0, pad, IB_DEFAULT_PKEY_FULL,
                 qp->attr.dest_qp_num, 0, psn);

        if (ack->mask & RXE_AETH_MASK) {
                aeth_set_syn(ack, syndrome);
                aeth_set_msn(ack, qp->resp.msn);
        }

        if (ack->mask & RXE_ATMACK_MASK)
                atmack_set_orig(ack, qp->resp.atomic_orig);

        err = rxe_prepare(ack, skb);
        if (err) {
                kfree_skb(skb);
                return NULL;
        }

        return skb;
}

/* RDMA read response. If res is not NULL, then we have a current RDMA request
 * being processed or replayed.
 */
static enum resp_states read_reply(struct rxe_qp *qp,
                                   struct rxe_pkt_info *req_pkt)
{
        struct rxe_pkt_info ack_pkt;
        struct sk_buff *skb;
        int mtu = qp->mtu;
        enum resp_states state;
        int payload;
        int opcode;
        int err;
        struct resp_res *res = qp->resp.res;

        if (!res) {
                /* This is the first time we process this request. Get a
                 * resource
                 */
                res = &qp->resp.resources[qp->resp.res_head];

                free_rd_atomic_resource(qp, res);
                rxe_advance_resp_resource(qp);

                res->type               = RXE_READ_MASK;
                res->replay             = 0;

                res->read.va            = qp->resp.va +
                                          qp->resp.offset;
                res->read.va_org        = qp->resp.va +
                                          qp->resp.offset;

                res->first_psn          = req_pkt->psn;

                if (reth_len(req_pkt)) {
                        res->last_psn   = (req_pkt->psn +
                                           (reth_len(req_pkt) + mtu - 1) /
                                           mtu - 1) & BTH_PSN_MASK;
                } else {
                        res->last_psn   = res->first_psn;
                }
                res->cur_psn            = req_pkt->psn;

                res->read.resid         = qp->resp.resid;
                res->read.length        = qp->resp.resid;
                res->read.rkey          = qp->resp.rkey;

                /* note res inherits the reference to mr from qp */
                res->read.mr            = qp->resp.mr;
                qp->resp.mr             = NULL;

                qp->resp.res            = res;
                res->state              = rdatm_res_state_new;
        }

        if (res->state == rdatm_res_state_new) {
                if (res->read.resid <= mtu)
                        opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY;
                else
                        opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST;
        } else {
                if (res->read.resid > mtu)
                        opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE;
                else
                        opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST;
        }

        res->state = rdatm_res_state_next;

        payload = min_t(int, res->read.resid, mtu);

        skb = prepare_ack_packet(qp, req_pkt, &ack_pkt, opcode, payload,
                                 res->cur_psn, AETH_ACK_UNLIMITED);
        if (!skb)
                return RESPST_ERR_RNR;

        err = rxe_mr_copy(res->read.mr, res->read.va, payload_addr(&ack_pkt),
                          payload, RXE_FROM_MR_OBJ);
        if (err)
                pr_err("Failed copying memory\n");

        if (bth_pad(&ack_pkt)) {
                u8 *pad = payload_addr(&ack_pkt) + payload;

                memset(pad, 0, bth_pad(&ack_pkt));
        }

        err = rxe_xmit_packet(qp, &ack_pkt, skb);
        if (err) {
                pr_err("Failed sending RDMA reply.\n");
                return RESPST_ERR_RNR;
        }

        res->read.va += payload;
        res->read.resid -= payload;
        res->cur_psn = (res->cur_psn + 1) & BTH_PSN_MASK;

        if (res->read.resid > 0) {
                state = RESPST_DONE;
        } else {
                qp->resp.res = NULL;
                if (!res->replay)
                        qp->resp.opcode = -1;
                if (psn_compare(res->cur_psn, qp->resp.psn) >= 0)
                        qp->resp.psn = res->cur_psn;
                state = RESPST_CLEANUP;
        }

        return state;
}

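/* Invalidate the MW or MR named by rkey on behalf of a
 * send-with-invalidate request
 */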
static int invalidate_rkey(struct rxe_qp *qp, u32 rkey)
{
        if (rkey_is_mw(rkey))
                return rxe_invalidate_mw(qp, rkey);
        else
                return rxe_invalidate_mr(qp, rkey);
}

/* Executes a new request. A retried request never reaches this function
 * (sends and writes are discarded, and reads and atomics are retried
 * elsewhere).
 */
static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
{
        enum resp_states err;
        struct sk_buff *skb = PKT_TO_SKB(pkt);
        union rdma_network_hdr hdr;

        if (pkt->mask & RXE_SEND_MASK) {
                if (qp_type(qp) == IB_QPT_UD ||
                    qp_type(qp) == IB_QPT_SMI ||
                    qp_type(qp) == IB_QPT_GSI) {
                        if (skb->protocol == htons(ETH_P_IP)) {
                                memset(&hdr.reserved, 0,
                                                sizeof(hdr.reserved));
                                memcpy(&hdr.roce4grh, ip_hdr(skb),
                                                sizeof(hdr.roce4grh));
                                err = send_data_in(qp, &hdr, sizeof(hdr));
                        } else {
                                err = send_data_in(qp, ipv6_hdr(skb),
                                                sizeof(hdr));
                        }
                        if (err)
                                return err;
                }
                err = send_data_in(qp, payload_addr(pkt), payload_size(pkt));
                if (err)
                        return err;
        } else if (pkt->mask & RXE_WRITE_MASK) {
                err = write_data_in(qp, pkt);
                if (err)
                        return err;
        } else if (pkt->mask & RXE_READ_MASK) {
                /* For RDMA Read we can increment the msn now. See C9-148. */
                qp->resp.msn++;
                return RESPST_READ_REPLY;
        } else if (pkt->mask & RXE_ATOMIC_MASK) {
                err = process_atomic(qp, pkt);
                if (err)
                        return err;
        } else {
                /* Unreachable */
                WARN_ON_ONCE(1);
        }

        if (pkt->mask & RXE_IETH_MASK) {
                u32 rkey = ieth_rkey(pkt);

                err = invalidate_rkey(qp, rkey);
                if (err)
                        return RESPST_ERR_INVALIDATE_RKEY;
        }

        /* next expected psn, read handles this separately */
        qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
        qp->resp.ack_psn = qp->resp.psn;

        qp->resp.opcode = pkt->opcode;
        qp->resp.status = IB_WC_SUCCESS;

        if (pkt->mask & RXE_COMP_MASK) {
                /* We successfully processed this new request. */
                qp->resp.msn++;
                return RESPST_COMPLETE;
        } else if (qp_type(qp) == IB_QPT_RC)
                return RESPST_ACKNOWLEDGE;
        else
                return RESPST_CLEANUP;
}

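/* Build a completion for the current receive WQE and post it to the
 * receive CQ. User and kernel CQs use different completion layouts, so
 * both arms of the cqe union are handled here.
 */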
static enum resp_states do_complete(struct rxe_qp *qp,
                                    struct rxe_pkt_info *pkt)
{
        struct rxe_cqe cqe;
        struct ib_wc *wc = &cqe.ibwc;
        struct ib_uverbs_wc *uwc = &cqe.uibwc;
        struct rxe_recv_wqe *wqe = qp->resp.wqe;
        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

        if (!wqe)
                goto finish;

        memset(&cqe, 0, sizeof(cqe));

        if (qp->rcq->is_user) {
                uwc->status             = qp->resp.status;
                uwc->qp_num             = qp->ibqp.qp_num;
                uwc->wr_id              = wqe->wr_id;
        } else {
                wc->status              = qp->resp.status;
                wc->qp                  = &qp->ibqp;
                wc->wr_id               = wqe->wr_id;
        }

        if (wc->status == IB_WC_SUCCESS) {
                rxe_counter_inc(rxe, RXE_CNT_RDMA_RECV);
                wc->opcode = (pkt->mask & RXE_IMMDT_MASK &&
                                pkt->mask & RXE_WRITE_MASK) ?
                                        IB_WC_RECV_RDMA_WITH_IMM : IB_WC_RECV;
                wc->byte_len = (pkt->mask & RXE_IMMDT_MASK &&
                                pkt->mask & RXE_WRITE_MASK) ?
                                        qp->resp.length : wqe->dma.length - wqe->dma.resid;

                /* fields after byte_len are different between kernel and user
                 * space
                 */
                if (qp->rcq->is_user) {
                        uwc->wc_flags = IB_WC_GRH;

                        if (pkt->mask & RXE_IMMDT_MASK) {
                                uwc->wc_flags |= IB_WC_WITH_IMM;
                                uwc->ex.imm_data = immdt_imm(pkt);
                        }

                        if (pkt->mask & RXE_IETH_MASK) {
                                uwc->wc_flags |= IB_WC_WITH_INVALIDATE;
                                uwc->ex.invalidate_rkey = ieth_rkey(pkt);
                        }

                        if (pkt->mask & RXE_DETH_MASK)
                                uwc->src_qp = deth_sqp(pkt);

                        uwc->port_num           = qp->attr.port_num;
                } else {
                        struct sk_buff *skb = PKT_TO_SKB(pkt);

                        wc->wc_flags = IB_WC_GRH | IB_WC_WITH_NETWORK_HDR_TYPE;
                        if (skb->protocol == htons(ETH_P_IP))
                                wc->network_hdr_type = RDMA_NETWORK_IPV4;
                        else
                                wc->network_hdr_type = RDMA_NETWORK_IPV6;

                        if (is_vlan_dev(skb->dev)) {
                                wc->wc_flags |= IB_WC_WITH_VLAN;
                                wc->vlan_id = vlan_dev_vlan_id(skb->dev);
                        }

                        if (pkt->mask & RXE_IMMDT_MASK) {
                                wc->wc_flags |= IB_WC_WITH_IMM;
                                wc->ex.imm_data = immdt_imm(pkt);
                        }

                        if (pkt->mask & RXE_IETH_MASK) {
                                wc->wc_flags |= IB_WC_WITH_INVALIDATE;
                                wc->ex.invalidate_rkey = ieth_rkey(pkt);
                        }

                        if (pkt->mask & RXE_DETH_MASK)
                                wc->src_qp = deth_sqp(pkt);

                        wc->port_num            = qp->attr.port_num;
                }
        }

        /* srq entries were copied out in get_srq_wqe(); for !srq the wqe
         * still lives in the receive queue, so advance past it now
         */
        if (!qp->srq)
                queue_advance_consumer(qp->rq.queue, QUEUE_TYPE_FROM_CLIENT);

        qp->resp.wqe = NULL;

        if (rxe_cq_post(qp->rcq, &cqe, pkt ? bth_se(pkt) : 1))
                return RESPST_ERR_CQ_OVERFLOW;

finish:
        if (unlikely(qp->resp.state == QP_STATE_ERROR))
                return RESPST_CHK_RESOURCE;
        if (unlikely(!pkt))
                return RESPST_DONE;
        if (qp_type(qp) == IB_QPT_RC)
                return RESPST_ACKNOWLEDGE;
        else
                return RESPST_CLEANUP;
}

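/* Send a bare acknowledge packet carrying the given syndrome and PSN */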
static int send_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
                    u8 syndrome, u32 psn)
{
        int err = 0;
        struct rxe_pkt_info ack_pkt;
        struct sk_buff *skb;

        skb = prepare_ack_packet(qp, pkt, &ack_pkt, IB_OPCODE_RC_ACKNOWLEDGE,
                                 0, psn, syndrome);
        if (!skb) {
                err = -ENOMEM;
                goto err1;
        }

        err = rxe_xmit_packet(qp, &ack_pkt, skb);
        if (err)
                pr_err_ratelimited("Failed sending ack\n");

err1:
        return err;
}

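/* Send an atomic ack and park a reference to its skb in a responder
 * resource, so a duplicate atomic request can resend the same result
 */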
static int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
                           u8 syndrome)
{
        int rc = 0;
        struct rxe_pkt_info ack_pkt;
        struct sk_buff *skb;
        struct resp_res *res;

        skb = prepare_ack_packet(qp, pkt, &ack_pkt,
                                 IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE, 0, pkt->psn,
                                 syndrome);
        if (!skb) {
                rc = -ENOMEM;
                goto out;
        }

        res = &qp->resp.resources[qp->resp.res_head];
        free_rd_atomic_resource(qp, res);
        rxe_advance_resp_resource(qp);

        skb_get(skb);
        res->type = RXE_ATOMIC_MASK;
        res->atomic.skb = skb;
        res->first_psn = ack_pkt.psn;
        res->last_psn  = ack_pkt.psn;
        res->cur_psn   = ack_pkt.psn;

        rc = rxe_xmit_packet(qp, &ack_pkt, skb);
        if (rc) {
                pr_err_ratelimited("Failed sending ack\n");
                rxe_drop_ref(qp);
        }
out:
        return rc;
}

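/* Generate whatever acknowledge the executed request calls for; only RC
 * QPs send acks
 */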
static enum resp_states acknowledge(struct rxe_qp *qp,
                                    struct rxe_pkt_info *pkt)
{
        if (qp_type(qp) != IB_QPT_RC)
                return RESPST_CLEANUP;

        if (qp->resp.aeth_syndrome != AETH_ACK_UNLIMITED)
                send_ack(qp, pkt, qp->resp.aeth_syndrome, pkt->psn);
        else if (pkt->mask & RXE_ATOMIC_MASK)
                send_atomic_ack(qp, pkt, AETH_ACK_UNLIMITED);
        else if (bth_ack(pkt))
                send_ack(qp, pkt, AETH_ACK_UNLIMITED, pkt->psn);

        return RESPST_CLEANUP;
}

/* drop the just-processed packet and any MR reference taken by check_rkey */
static enum resp_states cleanup(struct rxe_qp *qp,
                                struct rxe_pkt_info *pkt)
{
        struct sk_buff *skb;

        if (pkt) {
                skb = skb_dequeue(&qp->req_pkts);
                rxe_drop_ref(qp);
                kfree_skb(skb);
                ib_device_put(qp->ibqp.device);
        }

        if (qp->resp.mr) {
                rxe_drop_ref(qp->resp.mr);
                qp->resp.mr = NULL;
        }

        return RESPST_DONE;
}

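/* Find the responder resource (an earlier read or atomic) whose PSN
 * range covers the given PSN
 */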
static struct resp_res *find_resource(struct rxe_qp *qp, u32 psn)
{
        int i;

        for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
                struct resp_res *res = &qp->resp.resources[i];

                if (res->type == 0)
                        continue;

                if (psn_compare(psn, res->first_psn) >= 0 &&
                    psn_compare(psn, res->last_psn) <= 0) {
                        return res;
                }
        }

        return NULL;
}

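/* Handle a duplicate request packet: re-ack duplicate sends and writes,
 * replay read responses from the saved responder resource, and resend
 * the stored result for duplicate atomics.
 */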
static enum resp_states duplicate_request(struct rxe_qp *qp,
                                          struct rxe_pkt_info *pkt)
{
        enum resp_states rc;
        u32 prev_psn = (qp->resp.ack_psn - 1) & BTH_PSN_MASK;

        if (pkt->mask & RXE_SEND_MASK ||
            pkt->mask & RXE_WRITE_MASK) {
                /* Duplicate send or write. Ack again and cleanup. C9-105. */
                send_ack(qp, pkt, AETH_ACK_UNLIMITED, prev_psn);
                return RESPST_CLEANUP;
        } else if (pkt->mask & RXE_READ_MASK) {
                struct resp_res *res;

                res = find_resource(qp, pkt->psn);
                if (!res) {
                        /* Resource not found. Class D error.  Drop the
                         * request.
                         */
                        rc = RESPST_CLEANUP;
                        goto out;
                } else {
                        /* Ensure this new request is the same as the previous
                         * one or a subset of it.
                         */
                        u64 iova = reth_va(pkt);
                        u32 resid = reth_len(pkt);

                        if (iova < res->read.va_org ||
                            resid > res->read.length ||
                            (iova + resid) > (res->read.va_org +
                                              res->read.length)) {
                                rc = RESPST_CLEANUP;
                                goto out;
                        }

                        if (reth_rkey(pkt) != res->read.rkey) {
                                rc = RESPST_CLEANUP;
                                goto out;
                        }

                        res->cur_psn = pkt->psn;
                        res->state = (pkt->psn == res->first_psn) ?
                                        rdatm_res_state_new :
                                        rdatm_res_state_replay;
                        res->replay = 1;

                        /* Reset the resource, except length. */
                        res->read.va_org = iova;
                        res->read.va = iova;
                        res->read.resid = resid;

                        /* Replay the RDMA read reply. */
                        qp->resp.res = res;
                        rc = RESPST_READ_REPLY;
                        goto out;
                }
        } else {
                struct resp_res *res;

                /* Find the operation in our list of responder resources. */
                res = find_resource(qp, pkt->psn);
                if (res) {
                        skb_get(res->atomic.skb);
                        /* Resend the result. */
                        rc = rxe_xmit_packet(qp, pkt, res->atomic.skb);
                        if (rc) {
                                pr_err("Failed resending result. This flow is not handled - skb ignored\n");
                                rc = RESPST_CLEANUP;
                                goto out;
                        }
                }

                /* Resource not found (Class D error) or the result was
                 * resent; drop the request either way.
                 */
                rc = RESPST_CLEANUP;
                goto out;
        }
out:
        return rc;
}

/* Process a class A or C error. Both are treated the same in this
 * implementation.
 */
static void do_class_ac_error(struct rxe_qp *qp, u8 syndrome,
                              enum ib_wc_status status)
{
        qp->resp.aeth_syndrome  = syndrome;
        qp->resp.status         = status;

        /* indicate that we should go through the ERROR state */
        qp->resp.goto_error     = 1;
}

static enum resp_states do_class_d1e_error(struct rxe_qp *qp)
{
        /* UC */
        if (qp->srq) {
                /* Class E */
                qp->resp.drop_msg = 1;
                if (qp->resp.wqe) {
                        qp->resp.status = IB_WC_REM_INV_REQ_ERR;
                        return RESPST_COMPLETE;
                } else {
                        return RESPST_CLEANUP;
                }
        } else {
                /* Class D1. This packet may be the start of a
                 * new message and could be valid. The previous
                 * message is invalid and ignored. Reset the
                 * recv wr to its original state
                 */
                if (qp->resp.wqe) {
                        qp->resp.wqe->dma.resid = qp->resp.wqe->dma.length;
                        qp->resp.wqe->dma.cur_sge = 0;
                        qp->resp.wqe->dma.sge_offset = 0;
                        qp->resp.opcode = -1;
                }

                if (qp->resp.mr) {
                        rxe_drop_ref(qp->resp.mr);
                        qp->resp.mr = NULL;
                }

                return RESPST_CLEANUP;
        }
}

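/* Flush any queued request packets. If notify is set, the receive queue
 * is left intact so its WQEs can be completed with flush errors;
 * otherwise the remaining receive WQEs are consumed silently.
 */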
static void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify)
{
        struct sk_buff *skb;
        struct rxe_queue *q = qp->rq.queue;

        while ((skb = skb_dequeue(&qp->req_pkts))) {
                rxe_drop_ref(qp);
                kfree_skb(skb);
                ib_device_put(qp->ibqp.device);
        }

        if (notify)
                return;

        while (!qp->srq && q && queue_head(q, q->type))
                queue_advance_consumer(q, q->type);
}

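/* The responder state machine proper: pull request packets off the
 * input queue and step each one through the resp_states above until it
 * has been executed, acknowledged and cleaned up
 */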
int rxe_responder(void *arg)
{
        struct rxe_qp *qp = (struct rxe_qp *)arg;
        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
        enum resp_states state;
        struct rxe_pkt_info *pkt = NULL;
        int ret = 0;

        rxe_add_ref(qp);

        qp->resp.aeth_syndrome = AETH_ACK_UNLIMITED;

        if (!qp->valid) {
                ret = -EINVAL;
                goto done;
        }

        switch (qp->resp.state) {
        case QP_STATE_RESET:
                state = RESPST_RESET;
                break;

        default:
                state = RESPST_GET_REQ;
                break;
        }

        while (1) {
                pr_debug("qp#%d state = %s\n", qp_num(qp),
                         resp_state_name[state]);
                switch (state) {
                case RESPST_GET_REQ:
                        state = get_req(qp, &pkt);
                        break;
                case RESPST_CHK_PSN:
                        state = check_psn(qp, pkt);
                        break;
                case RESPST_CHK_OP_SEQ:
                        state = check_op_seq(qp, pkt);
                        break;
                case RESPST_CHK_OP_VALID:
                        state = check_op_valid(qp, pkt);
                        break;
                case RESPST_CHK_RESOURCE:
                        state = check_resource(qp, pkt);
                        break;
                case RESPST_CHK_LENGTH:
                        state = check_length(qp, pkt);
                        break;
                case RESPST_CHK_RKEY:
                        state = check_rkey(qp, pkt);
                        break;
                case RESPST_EXECUTE:
                        state = execute(qp, pkt);
                        break;
                case RESPST_COMPLETE:
                        state = do_complete(qp, pkt);
                        break;
                case RESPST_READ_REPLY:
                        state = read_reply(qp, pkt);
                        break;
                case RESPST_ACKNOWLEDGE:
                        state = acknowledge(qp, pkt);
                        break;
                case RESPST_CLEANUP:
                        state = cleanup(qp, pkt);
                        break;
                case RESPST_DUPLICATE_REQUEST:
                        state = duplicate_request(qp, pkt);
                        break;
                case RESPST_ERR_PSN_OUT_OF_SEQ:
                        /* RC only - Class B. Drop packet. */
                        send_ack(qp, pkt, AETH_NAK_PSN_SEQ_ERROR, qp->resp.psn);
                        state = RESPST_CLEANUP;
                        break;

                case RESPST_ERR_TOO_MANY_RDMA_ATM_REQ:
                case RESPST_ERR_MISSING_OPCODE_FIRST:
                case RESPST_ERR_MISSING_OPCODE_LAST_C:
                case RESPST_ERR_UNSUPPORTED_OPCODE:
                case RESPST_ERR_MISALIGNED_ATOMIC:
                        /* RC Only - Class C. */
                        do_class_ac_error(qp, AETH_NAK_INVALID_REQ,
                                          IB_WC_REM_INV_REQ_ERR);
                        state = RESPST_COMPLETE;
                        break;

                case RESPST_ERR_MISSING_OPCODE_LAST_D1E:
                        state = do_class_d1e_error(qp);
                        break;
                case RESPST_ERR_RNR:
                        if (qp_type(qp) == IB_QPT_RC) {
                                rxe_counter_inc(rxe, RXE_CNT_SND_RNR);
                                /* RC - class B */
                                send_ack(qp, pkt, AETH_RNR_NAK |
                                         (~AETH_TYPE_MASK &
                                         qp->attr.min_rnr_timer),
                                         pkt->psn);
                        } else {
                                /* UD/UC - class D */
                                qp->resp.drop_msg = 1;
                        }
                        state = RESPST_CLEANUP;
                        break;

                case RESPST_ERR_RKEY_VIOLATION:
                        if (qp_type(qp) == IB_QPT_RC) {
                                /* Class C */
                                do_class_ac_error(qp, AETH_NAK_REM_ACC_ERR,
                                                  IB_WC_REM_ACCESS_ERR);
                                state = RESPST_COMPLETE;
                        } else {
                                qp->resp.drop_msg = 1;
                                if (qp->srq) {
                                        /* UC/SRQ Class D */
                                        qp->resp.status = IB_WC_REM_ACCESS_ERR;
                                        state = RESPST_COMPLETE;
                                } else {
                                        /* UC/non-SRQ Class E. */
                                        state = RESPST_CLEANUP;
                                }
                        }
                        break;

                case RESPST_ERR_INVALIDATE_RKEY:
                        /* RC - Class J. */
                        qp->resp.goto_error = 1;
                        qp->resp.status = IB_WC_REM_INV_REQ_ERR;
                        state = RESPST_COMPLETE;
                        break;

                case RESPST_ERR_LENGTH:
                        if (qp_type(qp) == IB_QPT_RC) {
                                /* Class C */
                                do_class_ac_error(qp, AETH_NAK_INVALID_REQ,
                                                  IB_WC_REM_INV_REQ_ERR);
                                state = RESPST_COMPLETE;
                        } else if (qp->srq) {
                                /* UC/UD - class E */
                                qp->resp.status = IB_WC_REM_INV_REQ_ERR;
                                state = RESPST_COMPLETE;
                        } else {
                                /* UC/UD - class D */
                                qp->resp.drop_msg = 1;
                                state = RESPST_CLEANUP;
                        }
                        break;

                case RESPST_ERR_MALFORMED_WQE:
                        /* All, Class A. */
                        do_class_ac_error(qp, AETH_NAK_REM_OP_ERR,
                                          IB_WC_LOC_QP_OP_ERR);
                        state = RESPST_COMPLETE;
                        break;

                case RESPST_ERR_CQ_OVERFLOW:
                        /* All - Class G */
                        state = RESPST_ERROR;
                        break;

                case RESPST_DONE:
                        if (qp->resp.goto_error) {
                                state = RESPST_ERROR;
                                break;
                        }

                        goto done;

                case RESPST_EXIT:
                        if (qp->resp.goto_error) {
                                state = RESPST_ERROR;
                                break;
                        }

                        goto exit;

                case RESPST_RESET:
                        rxe_drain_req_pkts(qp, false);
                        qp->resp.wqe = NULL;
                        goto exit;

                case RESPST_ERROR:
                        qp->resp.goto_error = 0;
                        pr_warn("qp#%d moved to error state\n", qp_num(qp));
                        rxe_qp_error(qp);
                        goto exit;

                default:
                        WARN_ON_ONCE(1);
                }
        }

exit:
        ret = -EAGAIN;
done:
        rxe_drop_ref(qp);
        return ret;
}