linux/drivers/infiniband/sw/rxe/rxe_comp.c
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_task.h"

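/* states of the completer state machine. each response packet (or a
 * retransmit timeout) drives the completer through these states until
 * it reaches COMPST_DONE or COMPST_EXIT.
 */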
enum comp_state {
        COMPST_GET_ACK,
        COMPST_GET_WQE,
        COMPST_COMP_WQE,
        COMPST_COMP_ACK,
        COMPST_CHECK_PSN,
        COMPST_CHECK_ACK,
        COMPST_READ,
        COMPST_ATOMIC,
        COMPST_WRITE_SEND,
        COMPST_UPDATE_COMP,
        COMPST_ERROR_RETRY,
        COMPST_RNR_RETRY,
        COMPST_ERROR,
        COMPST_EXIT, /* We are done for now; the completer task should stop */
        COMPST_DONE, /* The completer finished successfully */
};

static char *comp_state_name[] = {
        [COMPST_GET_ACK]                = "GET ACK",
        [COMPST_GET_WQE]                = "GET WQE",
        [COMPST_COMP_WQE]               = "COMP WQE",
        [COMPST_COMP_ACK]               = "COMP ACK",
        [COMPST_CHECK_PSN]              = "CHECK PSN",
        [COMPST_CHECK_ACK]              = "CHECK ACK",
        [COMPST_READ]                   = "READ",
        [COMPST_ATOMIC]                 = "ATOMIC",
        [COMPST_WRITE_SEND]             = "WRITE/SEND",
        [COMPST_UPDATE_COMP]            = "UPDATE COMP",
        [COMPST_ERROR_RETRY]            = "ERROR RETRY",
        [COMPST_RNR_RETRY]              = "RNR RETRY",
        [COMPST_ERROR]                  = "ERROR",
        [COMPST_EXIT]                   = "EXIT",
        [COMPST_DONE]                   = "DONE",
};

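/* delay, in microseconds, for each encoded IB RNR NAK timer value */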
static unsigned long rnrnak_usec[32] = {
        [IB_RNR_TIMER_655_36] = 655360,
        [IB_RNR_TIMER_000_01] = 10,
        [IB_RNR_TIMER_000_02] = 20,
        [IB_RNR_TIMER_000_03] = 30,
        [IB_RNR_TIMER_000_04] = 40,
        [IB_RNR_TIMER_000_06] = 60,
        [IB_RNR_TIMER_000_08] = 80,
        [IB_RNR_TIMER_000_12] = 120,
        [IB_RNR_TIMER_000_16] = 160,
        [IB_RNR_TIMER_000_24] = 240,
        [IB_RNR_TIMER_000_32] = 320,
        [IB_RNR_TIMER_000_48] = 480,
        [IB_RNR_TIMER_000_64] = 640,
        [IB_RNR_TIMER_000_96] = 960,
        [IB_RNR_TIMER_001_28] = 1280,
        [IB_RNR_TIMER_001_92] = 1920,
        [IB_RNR_TIMER_002_56] = 2560,
        [IB_RNR_TIMER_003_84] = 3840,
        [IB_RNR_TIMER_005_12] = 5120,
        [IB_RNR_TIMER_007_68] = 7680,
        [IB_RNR_TIMER_010_24] = 10240,
        [IB_RNR_TIMER_015_36] = 15360,
        [IB_RNR_TIMER_020_48] = 20480,
        [IB_RNR_TIMER_030_72] = 30720,
        [IB_RNR_TIMER_040_96] = 40960,
        [IB_RNR_TIMER_061_44] = 61440,
        [IB_RNR_TIMER_081_92] = 81920,
        [IB_RNR_TIMER_122_88] = 122880,
        [IB_RNR_TIMER_163_84] = 163840,
        [IB_RNR_TIMER_245_76] = 245760,
        [IB_RNR_TIMER_327_68] = 327680,
        [IB_RNR_TIMER_491_52] = 491520,
};

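/* convert an RNR NAK timer code to a delay in jiffies, at least one jiffy */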
static inline unsigned long rnrnak_jiffies(u8 timeout)
{
        return max_t(unsigned long,
                usecs_to_jiffies(rnrnak_usec[timeout]), 1);
}

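/* map a send WR opcode to the matching work completion opcode;
 * returns 0xff for opcodes with no WC equivalent
 */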
static enum ib_wc_opcode wr_to_wc_opcode(enum ib_wr_opcode opcode)
{
        switch (opcode) {
        case IB_WR_RDMA_WRITE:                  return IB_WC_RDMA_WRITE;
        case IB_WR_RDMA_WRITE_WITH_IMM:         return IB_WC_RDMA_WRITE;
        case IB_WR_SEND:                        return IB_WC_SEND;
        case IB_WR_SEND_WITH_IMM:               return IB_WC_SEND;
        case IB_WR_RDMA_READ:                   return IB_WC_RDMA_READ;
        case IB_WR_ATOMIC_CMP_AND_SWP:          return IB_WC_COMP_SWAP;
        case IB_WR_ATOMIC_FETCH_AND_ADD:        return IB_WC_FETCH_ADD;
        case IB_WR_LSO:                         return IB_WC_LSO;
        case IB_WR_SEND_WITH_INV:               return IB_WC_SEND;
        case IB_WR_RDMA_READ_WITH_INV:          return IB_WC_RDMA_READ;
        case IB_WR_LOCAL_INV:                   return IB_WC_LOCAL_INV;
        case IB_WR_REG_MR:                      return IB_WC_REG_MR;

        default:
                return 0xff;
        }
}

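/* retransmit timer handler: flag a completer timeout and schedule the
 * completer task so it can decide whether to retry
 */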
void retransmit_timer(unsigned long data)
{
        struct rxe_qp *qp = (struct rxe_qp *)data;

        if (qp->valid) {
                qp->comp.timeout = 1;
                rxe_run_task(&qp->comp.task, 1);
        }
}

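/* hand a response packet to the completer: run it inline unless responses
 * are already queued, in which case defer to the tasklet
 */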
void rxe_comp_queue_pkt(struct rxe_dev *rxe, struct rxe_qp *qp,
                        struct sk_buff *skb)
{
        int must_sched;

        skb_queue_tail(&qp->resp_pkts, skb);

        must_sched = skb_queue_len(&qp->resp_pkts) > 1;
        if (must_sched)
                rxe_counter_inc(rxe, RXE_CNT_COMPLETER_SCHED);
        rxe_run_task(&qp->comp.task, must_sched);
}

static inline enum comp_state get_wqe(struct rxe_qp *qp,
                                      struct rxe_pkt_info *pkt,
                                      struct rxe_send_wqe **wqe_p)
{
        struct rxe_send_wqe *wqe;

        /* we come here whether or not we found a response packet to see if
         * there are any posted WQEs
         */
        wqe = queue_head(qp->sq.queue);
        *wqe_p = wqe;

        /* no WQE or requester has not started it yet */
        if (!wqe || wqe->state == wqe_state_posted)
                return pkt ? COMPST_DONE : COMPST_EXIT;

        /* WQE does not require an ack */
        if (wqe->state == wqe_state_done)
                return COMPST_COMP_WQE;

        /* WQE caused an error */
        if (wqe->state == wqe_state_error)
                return COMPST_ERROR;

        /* we have a WQE, if we also have an ack check its PSN */
        return pkt ? COMPST_CHECK_PSN : COMPST_EXIT;
}

static inline void reset_retry_counters(struct rxe_qp *qp)
{
        qp->comp.retry_cnt = qp->attr.retry_cnt;
        qp->comp.rnr_retry = qp->attr.rnr_retry;
}

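/* compare the response PSN against the oldest outstanding WQE and the
 * PSN the completer expects next, and decide whether to complete the
 * WQE, ignore the packet, or go on to validate the ack
 */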
static inline enum comp_state check_psn(struct rxe_qp *qp,
                                        struct rxe_pkt_info *pkt,
                                        struct rxe_send_wqe *wqe)
{
        s32 diff;

        /* check to see if response is past the oldest WQE. if it is, complete
         * send/write or error read/atomic
         */
        diff = psn_compare(pkt->psn, wqe->last_psn);
        if (diff > 0) {
                if (wqe->state == wqe_state_pending) {
                        if (wqe->mask & WR_ATOMIC_OR_READ_MASK)
                                return COMPST_ERROR_RETRY;

                        reset_retry_counters(qp);
                        return COMPST_COMP_WQE;
                } else {
                        return COMPST_DONE;
                }
        }

        /* compare response packet to expected response */
        diff = psn_compare(pkt->psn, qp->comp.psn);
        if (diff < 0) {
                /* response is most likely a retried packet; if it matches an
                 * uncompleted WQE, complete it, else ignore it
                 */
                if (pkt->psn == wqe->last_psn)
                        return COMPST_COMP_ACK;
                else
                        return COMPST_DONE;
        } else if ((diff > 0) && (wqe->mask & WR_ATOMIC_OR_READ_MASK)) {
                return COMPST_DONE;
        } else {
                return COMPST_CHECK_ACK;
        }
}

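/* validate a response packet against the current WQE: first check that it
 * continues the opcode sequence the completer expects, then dispatch on
 * the packet opcode and AETH syndrome
 */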
static inline enum comp_state check_ack(struct rxe_qp *qp,
                                        struct rxe_pkt_info *pkt,
                                        struct rxe_send_wqe *wqe)
{
        unsigned int mask = pkt->mask;
        u8 syn;
        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

        /* Check the sequence only */
        switch (qp->comp.opcode) {
        case -1:
                /* Will catch all *_ONLY cases. */
                if (!(mask & RXE_START_MASK))
                        return COMPST_ERROR;

                break;

        case IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST:
        case IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE:
                if (pkt->opcode != IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE &&
                    pkt->opcode != IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST) {
                        return COMPST_ERROR;
                }
                break;
        default:
                WARN_ON_ONCE(1);
        }

        /* Check operation validity. */
        switch (pkt->opcode) {
        case IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST:
        case IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST:
        case IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY:
                syn = aeth_syn(pkt);

                if ((syn & AETH_TYPE_MASK) != AETH_ACK)
                        return COMPST_ERROR;

                /* Fall through (IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE
                 * doesn't have an AETH)
                 */
        case IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE:
                if (wqe->wr.opcode != IB_WR_RDMA_READ &&
                    wqe->wr.opcode != IB_WR_RDMA_READ_WITH_INV) {
                        return COMPST_ERROR;
                }
                reset_retry_counters(qp);
                return COMPST_READ;

        case IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE:
                syn = aeth_syn(pkt);

                if ((syn & AETH_TYPE_MASK) != AETH_ACK)
                        return COMPST_ERROR;

                if (wqe->wr.opcode != IB_WR_ATOMIC_CMP_AND_SWP &&
                    wqe->wr.opcode != IB_WR_ATOMIC_FETCH_AND_ADD)
                        return COMPST_ERROR;
                reset_retry_counters(qp);
                return COMPST_ATOMIC;

        case IB_OPCODE_RC_ACKNOWLEDGE:
                syn = aeth_syn(pkt);
                switch (syn & AETH_TYPE_MASK) {
                case AETH_ACK:
                        reset_retry_counters(qp);
                        return COMPST_WRITE_SEND;

                case AETH_RNR_NAK:
                        rxe_counter_inc(rxe, RXE_CNT_RCV_RNR);
                        return COMPST_RNR_RETRY;

                case AETH_NAK:
                        switch (syn) {
                        case AETH_NAK_PSN_SEQ_ERROR:
                                /* a nak implicitly acks all packets with psns
                                 * before
                                 */
                                if (psn_compare(pkt->psn, qp->comp.psn) > 0) {
                                        rxe_counter_inc(rxe,
                                                        RXE_CNT_RCV_SEQ_ERR);
                                        qp->comp.psn = pkt->psn;
                                        if (qp->req.wait_psn) {
                                                qp->req.wait_psn = 0;
                                                rxe_run_task(&qp->req.task, 1);
                                        }
                                }
                                return COMPST_ERROR_RETRY;

                        case AETH_NAK_INVALID_REQ:
                                wqe->status = IB_WC_REM_INV_REQ_ERR;
                                return COMPST_ERROR;

                        case AETH_NAK_REM_ACC_ERR:
                                wqe->status = IB_WC_REM_ACCESS_ERR;
                                return COMPST_ERROR;

                        case AETH_NAK_REM_OP_ERR:
                                wqe->status = IB_WC_REM_OP_ERR;
                                return COMPST_ERROR;

                        default:
                                pr_warn("unexpected nak %x\n", syn);
                                wqe->status = IB_WC_REM_OP_ERR;
                                return COMPST_ERROR;
                        }

                default:
                        return COMPST_ERROR;
                }
                break;

        default:
                pr_warn("unexpected opcode\n");
        }

        return COMPST_ERROR;
}

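/* copy the payload of a read response packet into the WQE's destination
 * buffer; complete once the last packet's data has been copied
 */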
static inline enum comp_state do_read(struct rxe_qp *qp,
                                      struct rxe_pkt_info *pkt,
                                      struct rxe_send_wqe *wqe)
{
        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
        int ret;

        ret = copy_data(rxe, qp->pd, IB_ACCESS_LOCAL_WRITE,
                        &wqe->dma, payload_addr(pkt),
                        payload_size(pkt), to_mem_obj, NULL);
        if (ret)
                return COMPST_ERROR;

        if (wqe->dma.resid == 0 && (pkt->mask & RXE_END_MASK))
                return COMPST_COMP_ACK;
        else
                return COMPST_UPDATE_COMP;
}

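/* copy the original value carried in an atomic ack into the WQE's
 * destination buffer
 */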
static inline enum comp_state do_atomic(struct rxe_qp *qp,
                                        struct rxe_pkt_info *pkt,
                                        struct rxe_send_wqe *wqe)
{
        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
        int ret;

        u64 atomic_orig = atmack_orig(pkt);

        ret = copy_data(rxe, qp->pd, IB_ACCESS_LOCAL_WRITE,
                        &wqe->dma, &atomic_orig,
                        sizeof(u64), to_mem_obj, NULL);
        if (ret)
                return COMPST_ERROR;
        else
                return COMPST_COMP_ACK;
}

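/* build a completion queue entry for the WQE, in kernel (ib_wc) or
 * userspace (ib_uverbs_wc) format depending on who owns the QP
 */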
static void make_send_cqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
                          struct rxe_cqe *cqe)
{
        memset(cqe, 0, sizeof(*cqe));

        if (!qp->is_user) {
                struct ib_wc            *wc     = &cqe->ibwc;

                wc->wr_id               = wqe->wr.wr_id;
                wc->status              = wqe->status;
                wc->opcode              = wr_to_wc_opcode(wqe->wr.opcode);
                if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
                    wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
                        wc->wc_flags = IB_WC_WITH_IMM;
                wc->byte_len            = wqe->dma.length;
                wc->qp                  = &qp->ibqp;
        } else {
                struct ib_uverbs_wc     *uwc    = &cqe->uibwc;

                uwc->wr_id              = wqe->wr.wr_id;
                uwc->status             = wqe->status;
                uwc->opcode             = wr_to_wc_opcode(wqe->wr.opcode);
                if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
                    wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
                        uwc->wc_flags = IB_WC_WITH_IMM;
                uwc->byte_len           = wqe->dma.length;
                uwc->qp_num             = qp->ibqp.qp_num;
        }
}

/*
 * IBA Spec. Section 10.7.3.1 SIGNALED COMPLETIONS
 * ---------8<---------8<-------------
 * ...Note that if a completion error occurs, a Work Completion
 * will always be generated, even if the signaling
 * indicator requests an Unsignaled Completion.
 * ---------8<---------8<-------------
 */
static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
        struct rxe_cqe cqe;

        if ((qp->sq_sig_type == IB_SIGNAL_ALL_WR) ||
            (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
            wqe->status != IB_WC_SUCCESS) {
                make_send_cqe(qp, wqe, &cqe);
                advance_consumer(qp->sq.queue);
                rxe_cq_post(qp->scq, &cqe, 0);
        } else {
                advance_consumer(qp->sq.queue);
        }

        /*
         * we completed something so let req run again
         * if it is trying to fence
         */
        if (qp->req.wait_fence) {
                qp->req.wait_fence = 0;
                rxe_run_task(&qp->req.task, 1);
        }
}

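/* complete a WQE whose ack has arrived: return any read/atomic resource
 * it held, handle the SQ drain transition, then post the completion
 */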
static inline enum comp_state complete_ack(struct rxe_qp *qp,
                                           struct rxe_pkt_info *pkt,
                                           struct rxe_send_wqe *wqe)
{
        unsigned long flags;

        if (wqe->has_rd_atomic) {
                wqe->has_rd_atomic = 0;
                atomic_inc(&qp->req.rd_atomic);
                if (qp->req.need_rd_atomic) {
                        qp->comp.timeout_retry = 0;
                        qp->req.need_rd_atomic = 0;
                        rxe_run_task(&qp->req.task, 1);
                }
        }

        if (unlikely(qp->req.state == QP_STATE_DRAIN)) {
                /* state_lock used by requester & completer */
                spin_lock_irqsave(&qp->state_lock, flags);
                if ((qp->req.state == QP_STATE_DRAIN) &&
                    (qp->comp.psn == qp->req.psn)) {
                        qp->req.state = QP_STATE_DRAINED;
                        spin_unlock_irqrestore(&qp->state_lock, flags);

                        if (qp->ibqp.event_handler) {
                                struct ib_event ev;

                                ev.device = qp->ibqp.device;
                                ev.element.qp = &qp->ibqp;
                                ev.event = IB_EVENT_SQ_DRAINED;
                                qp->ibqp.event_handler(&ev,
                                        qp->ibqp.qp_context);
                        }
                } else {
                        spin_unlock_irqrestore(&qp->state_lock, flags);
                }
        }

        do_complete(qp, wqe);

        if (psn_compare(pkt->psn, qp->comp.psn) >= 0)
                return COMPST_UPDATE_COMP;
        else
                return COMPST_DONE;
}

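/* complete a WQE that does not need an ack and advance the completer
 * past its PSN
 */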
static inline enum comp_state complete_wqe(struct rxe_qp *qp,
                                           struct rxe_pkt_info *pkt,
                                           struct rxe_send_wqe *wqe)
{
        qp->comp.opcode = -1;

        if (pkt) {
                if (psn_compare(pkt->psn, qp->comp.psn) >= 0)
                        qp->comp.psn = (pkt->psn + 1) & BTH_PSN_MASK;

                if (qp->req.wait_psn) {
                        qp->req.wait_psn = 0;
                        rxe_run_task(&qp->req.task, 1);
                }
        }

        do_complete(qp, wqe);

        return COMPST_GET_WQE;
}

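/* drop all queued response packets and flush the send queue, posting
 * flush-error completions if notify is set
 */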
static void rxe_drain_resp_pkts(struct rxe_qp *qp, bool notify)
{
        struct sk_buff *skb;
        struct rxe_send_wqe *wqe;

        while ((skb = skb_dequeue(&qp->resp_pkts))) {
                rxe_drop_ref(qp);
                kfree_skb(skb);
        }

        while ((wqe = queue_head(qp->sq.queue))) {
                if (notify) {
                        wqe->status = IB_WC_WR_FLUSH_ERR;
                        do_complete(qp, wqe);
                } else {
                        advance_consumer(qp->sq.queue);
                }
        }
}

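/* main completer task: consume response packets and retransmit timeouts
 * and walk the state machine until there is nothing left to do
 */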
int rxe_completer(void *arg)
{
        struct rxe_qp *qp = (struct rxe_qp *)arg;
        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
        struct rxe_send_wqe *wqe = NULL;
        struct sk_buff *skb = NULL;
        struct rxe_pkt_info *pkt = NULL;
        enum comp_state state;

        rxe_add_ref(qp);

        if (!qp->valid || qp->req.state == QP_STATE_ERROR ||
            qp->req.state == QP_STATE_RESET) {
                rxe_drain_resp_pkts(qp, qp->valid &&
                                    qp->req.state == QP_STATE_ERROR);
                goto exit;
        }

        if (qp->comp.timeout) {
                qp->comp.timeout_retry = 1;
                qp->comp.timeout = 0;
        } else {
                qp->comp.timeout_retry = 0;
        }

        if (qp->req.need_retry)
                goto exit;

        state = COMPST_GET_ACK;

        while (1) {
                pr_debug("qp#%d state = %s\n", qp_num(qp),
                         comp_state_name[state]);
                switch (state) {
                case COMPST_GET_ACK:
                        skb = skb_dequeue(&qp->resp_pkts);
                        if (skb) {
                                pkt = SKB_TO_PKT(skb);
                                qp->comp.timeout_retry = 0;
                        }
                        state = COMPST_GET_WQE;
                        break;

                case COMPST_GET_WQE:
                        state = get_wqe(qp, pkt, &wqe);
                        break;

                case COMPST_CHECK_PSN:
                        state = check_psn(qp, pkt, wqe);
                        break;

                case COMPST_CHECK_ACK:
                        state = check_ack(qp, pkt, wqe);
                        break;

                case COMPST_READ:
                        state = do_read(qp, pkt, wqe);
                        break;

                case COMPST_ATOMIC:
                        state = do_atomic(qp, pkt, wqe);
                        break;

                case COMPST_WRITE_SEND:
                        if (wqe->state == wqe_state_pending &&
                            wqe->last_psn == pkt->psn)
                                state = COMPST_COMP_ACK;
                        else
                                state = COMPST_UPDATE_COMP;
                        break;

                case COMPST_COMP_ACK:
                        state = complete_ack(qp, pkt, wqe);
                        break;

                case COMPST_COMP_WQE:
                        state = complete_wqe(qp, pkt, wqe);
                        break;

                case COMPST_UPDATE_COMP:
                        if (pkt->mask & RXE_END_MASK)
                                qp->comp.opcode = -1;
                        else
                                qp->comp.opcode = pkt->opcode;

                        if (psn_compare(pkt->psn, qp->comp.psn) >= 0)
                                qp->comp.psn = (pkt->psn + 1) & BTH_PSN_MASK;

                        if (qp->req.wait_psn) {
                                qp->req.wait_psn = 0;
                                rxe_run_task(&qp->req.task, 1);
                        }

                        state = COMPST_DONE;
                        break;

                case COMPST_DONE:
                        if (pkt) {
                                rxe_drop_ref(pkt->qp);
                                kfree_skb(skb);
                                skb = NULL;
                        }
                        goto done;

                case COMPST_EXIT:
                        if (qp->comp.timeout_retry && wqe) {
                                state = COMPST_ERROR_RETRY;
                                break;
                        }

                        /* restart the retransmit timer if
                         * (1) QP is type RC
                         * (2) the QP is alive
                         * (3) there is a packet sent by the requester that
                         *     might be acked (we still might get spurious
                         *     timeouts but try to keep them as few as possible)
                         * (4) the timeout parameter is set
                         */
                        if ((qp_type(qp) == IB_QPT_RC) &&
                            (qp->req.state == QP_STATE_READY) &&
                            (psn_compare(qp->req.psn, qp->comp.psn) > 0) &&
                            qp->qp_timeout_jiffies)
                                mod_timer(&qp->retrans_timer,
                                          jiffies + qp->qp_timeout_jiffies);
                        WARN_ON_ONCE(skb);
                        goto exit;

                case COMPST_ERROR_RETRY:
                        /* we come here if the retry timer fired and we did
                         * not receive a response packet. try to retry the send
                         * queue if that makes sense and the limits have not
                         * been exceeded. remember that some timeouts are
                         * spurious since we do not reset the timer but kick
                         * it down the road or let it expire
                         */

                        /* there is nothing to retry in this case */
                        if (!wqe || (wqe->state == wqe_state_posted)) {
                                WARN_ON_ONCE(skb);
                                goto exit;
                        }

                        if (qp->comp.retry_cnt > 0) {
                                if (qp->comp.retry_cnt != 7)
                                        qp->comp.retry_cnt--;

                                /* no point in retrying if we have already
                                 * seen the last ack that the requester could
                                 * have caused
                                 */
                                if (psn_compare(qp->req.psn,
                                                qp->comp.psn) > 0) {
                                        /* tell the requester to retry the
                                         * send queue next time around
                                         */
                                        rxe_counter_inc(rxe,
                                                        RXE_CNT_COMP_RETRY);
                                        qp->req.need_retry = 1;
                                        rxe_run_task(&qp->req.task, 1);
                                }

                                if (pkt) {
                                        rxe_drop_ref(pkt->qp);
                                        kfree_skb(skb);
                                        skb = NULL;
                                }

                                WARN_ON_ONCE(skb);
                                goto exit;

                        } else {
                                rxe_counter_inc(rxe, RXE_CNT_RETRY_EXCEEDED);
                                wqe->status = IB_WC_RETRY_EXC_ERR;
                                state = COMPST_ERROR;
                        }
                        break;

                case COMPST_RNR_RETRY:
                        if (qp->comp.rnr_retry > 0) {
                                if (qp->comp.rnr_retry != 7)
                                        qp->comp.rnr_retry--;

                                qp->req.need_retry = 1;
                                pr_debug("qp#%d set rnr nak timer\n",
                                         qp_num(qp));
                                mod_timer(&qp->rnr_nak_timer,
                                          jiffies + rnrnak_jiffies(aeth_syn(pkt)
                                                & ~AETH_TYPE_MASK));
                                rxe_drop_ref(pkt->qp);
                                kfree_skb(skb);
                                skb = NULL;
                                goto exit;
                        } else {
                                rxe_counter_inc(rxe,
                                                RXE_CNT_RNR_RETRY_EXCEEDED);
                                wqe->status = IB_WC_RNR_RETRY_EXC_ERR;
                                state = COMPST_ERROR;
                        }
                        break;

                case COMPST_ERROR:
                        WARN_ON_ONCE(wqe->status == IB_WC_SUCCESS);
                        do_complete(qp, wqe);
                        rxe_qp_error(qp);

                        if (pkt) {
                                rxe_drop_ref(pkt->qp);
                                kfree_skb(skb);
                                skb = NULL;
                        }

                        WARN_ON_ONCE(skb);
                        goto exit;
                }
        }

exit:
        /* we come here if we are done with processing and want the task to
         * exit from the loop that is calling us
         */
        WARN_ON_ONCE(skb);
        rxe_drop_ref(qp);
        return -EAGAIN;

done:
        /* we come here if we have processed a packet; we want the task to
         * call us again to see if there is anything else to do
         */
        WARN_ON_ONCE(skb);
        rxe_drop_ref(qp);
        return 0;
}