linux/drivers/infiniband/sw/rxe/rxe_qp.c
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <rdma/uverbs_ioctl.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_task.h"

static int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap,
                          int has_srq)
{
        if (cap->max_send_wr > rxe->attr.max_qp_wr) {
                pr_warn("invalid send wr = %d > %d\n",
                        cap->max_send_wr, rxe->attr.max_qp_wr);
                goto err1;
        }

        if (cap->max_send_sge > rxe->attr.max_send_sge) {
                pr_warn("invalid send sge = %d > %d\n",
                        cap->max_send_sge, rxe->attr.max_send_sge);
                goto err1;
        }

        if (!has_srq) {
                if (cap->max_recv_wr > rxe->attr.max_qp_wr) {
                        pr_warn("invalid recv wr = %d > %d\n",
                                cap->max_recv_wr, rxe->attr.max_qp_wr);
                        goto err1;
                }

                if (cap->max_recv_sge > rxe->attr.max_recv_sge) {
                        pr_warn("invalid recv sge = %d > %d\n",
                                cap->max_recv_sge, rxe->attr.max_recv_sge);
                        goto err1;
                }
        }

        if (cap->max_inline_data > rxe->max_inline_data) {
                pr_warn("invalid max inline data = %d > %d\n",
                        cap->max_inline_data, rxe->max_inline_data);
                goto err1;
        }

        return 0;

err1:
        return -EINVAL;
}

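/* rxe_qp_chk_init() - validate the attributes passed to create_qp;
 * returns 0 on success, -EOPNOTSUPP for an unsupported QP type and
 * -EINVAL for any other invalid attribute
 */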
int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init)
{
        struct ib_qp_cap *cap = &init->cap;
        struct rxe_port *port;
        int port_num = init->port_num;

        switch (init->qp_type) {
        case IB_QPT_SMI:
        case IB_QPT_GSI:
        case IB_QPT_RC:
        case IB_QPT_UC:
        case IB_QPT_UD:
                break;
        default:
                return -EOPNOTSUPP;
        }

        if (!init->recv_cq || !init->send_cq) {
                pr_warn("missing cq\n");
                goto err1;
        }

        if (rxe_qp_chk_cap(rxe, cap, !!init->srq))
                goto err1;

        if (init->qp_type == IB_QPT_SMI || init->qp_type == IB_QPT_GSI) {
                if (!rdma_is_port_valid(&rxe->ib_dev, port_num)) {
                        pr_warn("invalid port = %d\n", port_num);
                        goto err1;
                }

                port = &rxe->port;

                if (init->qp_type == IB_QPT_SMI && port->qp_smi_index) {
                        pr_warn("SMI QP exists for port %d\n", port_num);
                        goto err1;
                }

                if (init->qp_type == IB_QPT_GSI && port->qp_gsi_index) {
                        pr_warn("GSI QP exists for port %d\n", port_num);
                        goto err1;
                }
        }

        return 0;

err1:
        return -EINVAL;
}

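/* the responder keeps an array of resp_res entries, sized by
 * max_dest_rd_atomic, holding per-operation state for inbound RDMA READ
 * and ATOMIC requests so that duplicate requests can be replayed
 */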
static int alloc_rd_atomic_resources(struct rxe_qp *qp, unsigned int n)
{
        qp->resp.res_head = 0;
        qp->resp.res_tail = 0;
        qp->resp.resources = kcalloc(n, sizeof(struct resp_res), GFP_KERNEL);

        if (!qp->resp.resources)
                return -ENOMEM;

        return 0;
}

static void free_rd_atomic_resources(struct rxe_qp *qp)
{
        if (qp->resp.resources) {
                int i;

                for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
                        struct resp_res *res = &qp->resp.resources[i];

                        free_rd_atomic_resource(qp, res);
                }
                kfree(qp->resp.resources);
                qp->resp.resources = NULL;
        }
}

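/* release whatever a single resource entry holds: the saved reply skb
 * for an atomic, or the MR reference taken for an in-progress read
 */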
void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res)
{
        if (res->type == RXE_ATOMIC_MASK) {
                kfree_skb(res->atomic.skb);
        } else if (res->type == RXE_READ_MASK) {
                if (res->read.mr)
                        rxe_drop_ref(res->read.mr);
        }
        res->type = 0;
}

static void cleanup_rd_atomic_resources(struct rxe_qp *qp)
{
        int i;
        struct resp_res *res;

        if (qp->resp.resources) {
                for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
                        res = &qp->resp.resources[i];
                        free_rd_atomic_resource(qp, res);
                }
        }
}

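/* initialize state that is common to every QP type */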
static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
                             struct ib_qp_init_attr *init)
{
        struct rxe_port *port;
        u32 qpn;

        qp->sq_sig_type         = init->sq_sig_type;
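        /* default to the smallest MTU; 1 is the enum value of IB_MTU_256 */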
        qp->attr.path_mtu       = 1;
        qp->mtu                 = ib_mtu_enum_to_int(qp->attr.path_mtu);

        qpn                     = qp->pelem.index;
        port                    = &rxe->port;

        switch (init->qp_type) {
        case IB_QPT_SMI:
                qp->ibqp.qp_num         = 0;
                port->qp_smi_index      = qpn;
                qp->attr.port_num       = init->port_num;
                break;

        case IB_QPT_GSI:
                qp->ibqp.qp_num         = 1;
                port->qp_gsi_index      = qpn;
                qp->attr.port_num       = init->port_num;
                break;

        default:
                qp->ibqp.qp_num         = qpn;
                break;
        }

        INIT_LIST_HEAD(&qp->grp_list);

        skb_queue_head_init(&qp->send_pkts);

        spin_lock_init(&qp->grp_lock);
        spin_lock_init(&qp->state_lock);

        atomic_set(&qp->ssn, 0);
        atomic_set(&qp->skb_out, 0);
}

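/* set up the requester side: the kernel UDP socket used to send
 * packets, the send queue and its mmap info for user QPs, and the
 * req/comp tasks
 */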
static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
                           struct ib_qp_init_attr *init, struct ib_udata *udata,
                           struct rxe_create_qp_resp __user *uresp)
{
        int err;
        int wqe_size;
        enum queue_type type;

        err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
        if (err < 0)
                return err;
        qp->sk->sk->sk_user_data = qp;

        /* pick a source UDP port number for this QP based on
         * the source QPN. this spreads traffic for different QPs
         * across different NIC RX queues (while using a single
         * flow for a given QP to maintain packet order).
         * the port number must be in the Dynamic Ports range
         * (0xc000 - 0xffff).
         */
        qp->src_port = RXE_ROCE_V2_SPORT +
                (hash_32_generic(qp_num(qp), 14) & 0x3fff);
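        /* e.g. with RXE_ROCE_V2_SPORT == 0xc000 the 14-bit hash lies in
         * [0, 0x3fff], so src_port always falls inside 0xc000..0xffff
         */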
        qp->sq.max_wr           = init->cap.max_send_wr;

        /* These caps are limited by rxe_qp_chk_cap() done by the caller */
        wqe_size = max_t(int, init->cap.max_send_sge * sizeof(struct ib_sge),
                         init->cap.max_inline_data);
        qp->sq.max_sge = init->cap.max_send_sge =
                wqe_size / sizeof(struct ib_sge);
        qp->sq.max_inline = init->cap.max_inline_data = wqe_size;
        wqe_size += sizeof(struct rxe_send_wqe);
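        /* worked example: max_send_sge = 4, max_inline_data = 0 gives
         * wqe_size = 4 * sizeof(struct ib_sge) = 64 bytes, so up to 64
         * bytes of inline data reuse the SGE space before the fixed
         * rxe_send_wqe header is added on top
         */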

        type = uresp ? QUEUE_TYPE_FROM_USER : QUEUE_TYPE_KERNEL;
        qp->sq.queue = rxe_queue_init(rxe, &qp->sq.max_wr,
                                wqe_size, type);
        if (!qp->sq.queue)
                return -ENOMEM;

        err = do_mmap_info(rxe, uresp ? &uresp->sq_mi : NULL, udata,
                           qp->sq.queue->buf, qp->sq.queue->buf_size,
                           &qp->sq.queue->ip);

        if (err) {
                vfree(qp->sq.queue->buf);
                kfree(qp->sq.queue);
                qp->sq.queue = NULL;
                return err;
        }

        if (qp->is_user)
                qp->req.wqe_index = producer_index(qp->sq.queue,
                                                QUEUE_TYPE_FROM_USER);
        else
                qp->req.wqe_index = producer_index(qp->sq.queue,
                                                QUEUE_TYPE_KERNEL);

        qp->req.state           = QP_STATE_RESET;
        qp->req.opcode          = -1;
        qp->comp.opcode         = -1;

        spin_lock_init(&qp->sq.sq_lock);
        skb_queue_head_init(&qp->req_pkts);

        rxe_init_task(rxe, &qp->req.task, qp,
                      rxe_requester, "req");
        rxe_init_task(rxe, &qp->comp.task, qp,
                      rxe_completer, "comp");

        qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */
        if (init->qp_type == IB_QPT_RC) {
                timer_setup(&qp->rnr_nak_timer, rnr_nak_timer, 0);
                timer_setup(&qp->retrans_timer, retransmit_timer, 0);
        }
        return 0;
}

static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
                            struct ib_qp_init_attr *init,
                            struct ib_udata *udata,
                            struct rxe_create_qp_resp __user *uresp)
{
        int err;
        int wqe_size;
        enum queue_type type;

        if (!qp->srq) {
                qp->rq.max_wr           = init->cap.max_recv_wr;
                qp->rq.max_sge          = init->cap.max_recv_sge;

                wqe_size = rcv_wqe_size(qp->rq.max_sge);

                pr_debug("qp#%d max_wr = %d, max_sge = %d, wqe_size = %d\n",
                         qp_num(qp), qp->rq.max_wr, qp->rq.max_sge, wqe_size);

                type = uresp ? QUEUE_TYPE_FROM_USER : QUEUE_TYPE_KERNEL;
                qp->rq.queue = rxe_queue_init(rxe, &qp->rq.max_wr,
                                        wqe_size, type);
                if (!qp->rq.queue)
                        return -ENOMEM;

                err = do_mmap_info(rxe, uresp ? &uresp->rq_mi : NULL, udata,
                                   qp->rq.queue->buf, qp->rq.queue->buf_size,
                                   &qp->rq.queue->ip);
                if (err) {
                        vfree(qp->rq.queue->buf);
                        kfree(qp->rq.queue);
                        qp->rq.queue = NULL;
                        return err;
                }
        }

        spin_lock_init(&qp->rq.producer_lock);
        spin_lock_init(&qp->rq.consumer_lock);

        qp->rq.is_user = qp->is_user;

        skb_queue_head_init(&qp->resp_pkts);

        rxe_init_task(rxe, &qp->resp.task, qp,
                      rxe_responder, "resp");

        qp->resp.opcode         = OPCODE_NONE;
        qp->resp.msn            = 0;
        qp->resp.state          = QP_STATE_RESET;

        return 0;
}

/* called by the create qp verb */
int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
                     struct ib_qp_init_attr *init,
                     struct rxe_create_qp_resp __user *uresp,
                     struct ib_pd *ibpd,
                     struct ib_udata *udata)
{
        int err;
        struct rxe_cq *rcq = to_rcq(init->recv_cq);
        struct rxe_cq *scq = to_rcq(init->send_cq);
        struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL;

        rxe_add_ref(pd);
        rxe_add_ref(rcq);
        rxe_add_ref(scq);
        if (srq)
                rxe_add_ref(srq);

        qp->pd                  = pd;
        qp->rcq                 = rcq;
        qp->scq                 = scq;
        qp->srq                 = srq;

        rxe_qp_init_misc(rxe, qp, init);

        err = rxe_qp_init_req(rxe, qp, init, udata, uresp);
        if (err)
                goto err1;

        err = rxe_qp_init_resp(rxe, qp, init, udata, uresp);
        if (err)
                goto err2;

        qp->attr.qp_state = IB_QPS_RESET;
        qp->valid = 1;

        return 0;

err2:
        rxe_queue_cleanup(qp->sq.queue);
err1:
        qp->pd = NULL;
        qp->rcq = NULL;
        qp->scq = NULL;
        qp->srq = NULL;

        if (srq)
                rxe_drop_ref(srq);
        rxe_drop_ref(scq);
        rxe_drop_ref(rcq);
        rxe_drop_ref(pd);

        return err;
}

/* called by the query qp verb */
int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init)
{
        init->event_handler             = qp->ibqp.event_handler;
        init->qp_context                = qp->ibqp.qp_context;
        init->send_cq                   = qp->ibqp.send_cq;
        init->recv_cq                   = qp->ibqp.recv_cq;
        init->srq                       = qp->ibqp.srq;

        init->cap.max_send_wr           = qp->sq.max_wr;
        init->cap.max_send_sge          = qp->sq.max_sge;
        init->cap.max_inline_data       = qp->sq.max_inline;

        if (!qp->srq) {
                init->cap.max_recv_wr           = qp->rq.max_wr;
                init->cap.max_recv_sge          = qp->rq.max_sge;
        }

        init->sq_sig_type               = qp->sq_sig_type;

        init->qp_type                   = qp->ibqp.qp_type;
        init->port_num                  = 1;

        return 0;
}

/* called by the modify qp verb, this routine checks all the parameters before
 * making any changes
 */
int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
                    struct ib_qp_attr *attr, int mask)
{
        enum ib_qp_state cur_state = (mask & IB_QP_CUR_STATE) ?
                                        attr->cur_qp_state : qp->attr.qp_state;
        enum ib_qp_state new_state = (mask & IB_QP_STATE) ?
                                        attr->qp_state : cur_state;

        if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask)) {
                pr_warn("invalid mask or state for qp\n");
                goto err1;
        }

        if (mask & IB_QP_STATE) {
                if (cur_state == IB_QPS_SQD) {
                        if (qp->req.state == QP_STATE_DRAIN &&
                            new_state != IB_QPS_ERR)
                                goto err1;
                }
        }

        if (mask & IB_QP_PORT) {
                if (!rdma_is_port_valid(&rxe->ib_dev, attr->port_num)) {
                        pr_warn("invalid port %d\n", attr->port_num);
                        goto err1;
                }
        }

        if (mask & IB_QP_CAP && rxe_qp_chk_cap(rxe, &attr->cap, !!qp->srq))
                goto err1;

        if (mask & IB_QP_AV && rxe_av_chk_attr(rxe, &attr->ah_attr))
                goto err1;

        if (mask & IB_QP_ALT_PATH) {
                if (rxe_av_chk_attr(rxe, &attr->alt_ah_attr))
                        goto err1;
                if (!rdma_is_port_valid(&rxe->ib_dev, attr->alt_port_num)) {
                        pr_warn("invalid alt port %d\n", attr->alt_port_num);
                        goto err1;
                }
                if (attr->alt_timeout > 31) {
                        pr_warn("invalid QP alt timeout %d > 31\n",
                                attr->alt_timeout);
                        goto err1;
                }
        }

        if (mask & IB_QP_PATH_MTU) {
                struct rxe_port *port = &rxe->port;

                enum ib_mtu max_mtu = port->attr.max_mtu;
                enum ib_mtu mtu = attr->path_mtu;

                if (mtu > max_mtu) {
                        pr_debug("invalid mtu (%d) > (%d)\n",
                                 ib_mtu_enum_to_int(mtu),
                                 ib_mtu_enum_to_int(max_mtu));
                        goto err1;
                }
        }

        if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
                if (attr->max_rd_atomic > rxe->attr.max_qp_rd_atom) {
                        pr_warn("invalid max_rd_atomic %d > %d\n",
                                attr->max_rd_atomic,
                                rxe->attr.max_qp_rd_atom);
                        goto err1;
                }
        }

        if (mask & IB_QP_TIMEOUT) {
                if (attr->timeout > 31) {
                        pr_warn("invalid QP timeout %d > 31\n",
                                attr->timeout);
                        goto err1;
                }
        }

        return 0;

err1:
        return -EINVAL;
}

/* move the qp to the reset state */
static void rxe_qp_reset(struct rxe_qp *qp)
{
        /* stop tasks from running */
        rxe_disable_task(&qp->resp.task);

        /* stop request/comp */
        if (qp->sq.queue) {
                if (qp_type(qp) == IB_QPT_RC)
                        rxe_disable_task(&qp->comp.task);
                rxe_disable_task(&qp->req.task);
        }

        /* move qp to the reset state */
        qp->req.state = QP_STATE_RESET;
        qp->resp.state = QP_STATE_RESET;

        /* let state machines reset themselves, drain work and packet
         * queues, etc.
         */
        __rxe_do_task(&qp->resp.task);

        if (qp->sq.queue) {
                __rxe_do_task(&qp->comp.task);
                __rxe_do_task(&qp->req.task);
                rxe_queue_reset(qp->sq.queue);
        }

        /* cleanup attributes */
        atomic_set(&qp->ssn, 0);
        qp->req.opcode = -1;
        qp->req.need_retry = 0;
        qp->req.noack_pkts = 0;
        qp->resp.msn = 0;
        qp->resp.opcode = -1;
        qp->resp.drop_msg = 0;
        qp->resp.goto_error = 0;
        qp->resp.sent_psn_nak = 0;

        if (qp->resp.mr) {
                rxe_drop_ref(qp->resp.mr);
                qp->resp.mr = NULL;
        }

        cleanup_rd_atomic_resources(qp);

        /* reenable tasks */
        rxe_enable_task(&qp->resp.task);

        if (qp->sq.queue) {
                if (qp_type(qp) == IB_QPT_RC)
                        rxe_enable_task(&qp->comp.task);

                rxe_enable_task(&qp->req.task);
        }
}

/* drain the send queue */
static void rxe_qp_drain(struct rxe_qp *qp)
{
        if (qp->sq.queue) {
                if (qp->req.state != QP_STATE_DRAINED) {
                        qp->req.state = QP_STATE_DRAIN;
                        if (qp_type(qp) == IB_QPT_RC)
                                rxe_run_task(&qp->comp.task, 1);
                        else
                                __rxe_do_task(&qp->comp.task);
                        rxe_run_task(&qp->req.task, 1);
                }
        }
}

/* move the qp to the error state */
void rxe_qp_error(struct rxe_qp *qp)
{
        qp->req.state = QP_STATE_ERROR;
        qp->resp.state = QP_STATE_ERROR;
        qp->attr.qp_state = IB_QPS_ERR;

        /* drain work and packet queues */
        rxe_run_task(&qp->resp.task, 1);

        if (qp_type(qp) == IB_QPT_RC)
                rxe_run_task(&qp->comp.task, 1);
        else
                __rxe_do_task(&qp->comp.task);
        rxe_run_task(&qp->req.task, 1);
}

/* called by the modify qp verb */
int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
                     struct ib_udata *udata)
{
        int err;

        if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
                int max_rd_atomic = attr->max_rd_atomic ?
                        roundup_pow_of_two(attr->max_rd_atomic) : 0;
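                /* e.g. a requested depth of 5 rounds up to 8; 0 stays 0 */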

                qp->attr.max_rd_atomic = max_rd_atomic;
                atomic_set(&qp->req.rd_atomic, max_rd_atomic);
        }

        if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
                int max_dest_rd_atomic = attr->max_dest_rd_atomic ?
                        roundup_pow_of_two(attr->max_dest_rd_atomic) : 0;

                qp->attr.max_dest_rd_atomic = max_dest_rd_atomic;

                free_rd_atomic_resources(qp);

                err = alloc_rd_atomic_resources(qp, max_dest_rd_atomic);
                if (err)
                        return err;
        }

        if (mask & IB_QP_CUR_STATE)
                qp->attr.cur_qp_state = attr->qp_state;

        if (mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
                qp->attr.en_sqd_async_notify = attr->en_sqd_async_notify;

        if (mask & IB_QP_ACCESS_FLAGS)
                qp->attr.qp_access_flags = attr->qp_access_flags;

        if (mask & IB_QP_PKEY_INDEX)
                qp->attr.pkey_index = attr->pkey_index;

        if (mask & IB_QP_PORT)
                qp->attr.port_num = attr->port_num;

        if (mask & IB_QP_QKEY)
                qp->attr.qkey = attr->qkey;

        if (mask & IB_QP_AV)
                rxe_init_av(&attr->ah_attr, &qp->pri_av);

        if (mask & IB_QP_ALT_PATH) {
                rxe_init_av(&attr->alt_ah_attr, &qp->alt_av);
                qp->attr.alt_port_num = attr->alt_port_num;
                qp->attr.alt_pkey_index = attr->alt_pkey_index;
                qp->attr.alt_timeout = attr->alt_timeout;
        }

        if (mask & IB_QP_PATH_MTU) {
                qp->attr.path_mtu = attr->path_mtu;
                qp->mtu = ib_mtu_enum_to_int(attr->path_mtu);
        }

        if (mask & IB_QP_TIMEOUT) {
                qp->attr.timeout = attr->timeout;
                if (attr->timeout == 0) {
                        qp->qp_timeout_jiffies = 0;
                } else {
                        /* According to the spec, timeout = 4.096 * 2 ^ attr->timeout [us] */
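                        /* e.g. attr->timeout == 14 gives 4096ns << 14,
                         * roughly 67ms
                         */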
                        int j = nsecs_to_jiffies(4096ULL << attr->timeout);

                        qp->qp_timeout_jiffies = j ? j : 1;
                }
        }

        if (mask & IB_QP_RETRY_CNT) {
                qp->attr.retry_cnt = attr->retry_cnt;
                qp->comp.retry_cnt = attr->retry_cnt;
                pr_debug("qp#%d set retry count = %d\n", qp_num(qp),
                         attr->retry_cnt);
        }

        if (mask & IB_QP_RNR_RETRY) {
                qp->attr.rnr_retry = attr->rnr_retry;
                qp->comp.rnr_retry = attr->rnr_retry;
                pr_debug("qp#%d set rnr retry count = %d\n", qp_num(qp),
                         attr->rnr_retry);
        }

        if (mask & IB_QP_RQ_PSN) {
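                /* BTH PSNs are 24 bits on the wire, hence BTH_PSN_MASK */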
                qp->attr.rq_psn = (attr->rq_psn & BTH_PSN_MASK);
                qp->resp.psn = qp->attr.rq_psn;
                pr_debug("qp#%d set resp psn = 0x%x\n", qp_num(qp),
                         qp->resp.psn);
        }

        if (mask & IB_QP_MIN_RNR_TIMER) {
                qp->attr.min_rnr_timer = attr->min_rnr_timer;
                pr_debug("qp#%d set min rnr timer = 0x%x\n", qp_num(qp),
                         attr->min_rnr_timer);
        }

        if (mask & IB_QP_SQ_PSN) {
                qp->attr.sq_psn = (attr->sq_psn & BTH_PSN_MASK);
                qp->req.psn = qp->attr.sq_psn;
                qp->comp.psn = qp->attr.sq_psn;
                pr_debug("qp#%d set req psn = 0x%x\n", qp_num(qp), qp->req.psn);
        }

        if (mask & IB_QP_PATH_MIG_STATE)
                qp->attr.path_mig_state = attr->path_mig_state;

        if (mask & IB_QP_DEST_QPN)
                qp->attr.dest_qp_num = attr->dest_qp_num;

        if (mask & IB_QP_STATE) {
                qp->attr.qp_state = attr->qp_state;

                switch (attr->qp_state) {
                case IB_QPS_RESET:
                        pr_debug("qp#%d state -> RESET\n", qp_num(qp));
                        rxe_qp_reset(qp);
                        break;

                case IB_QPS_INIT:
                        pr_debug("qp#%d state -> INIT\n", qp_num(qp));
                        qp->req.state = QP_STATE_INIT;
                        qp->resp.state = QP_STATE_INIT;
                        break;

                case IB_QPS_RTR:
                        pr_debug("qp#%d state -> RTR\n", qp_num(qp));
                        qp->resp.state = QP_STATE_READY;
                        break;

                case IB_QPS_RTS:
                        pr_debug("qp#%d state -> RTS\n", qp_num(qp));
                        qp->req.state = QP_STATE_READY;
                        break;

                case IB_QPS_SQD:
                        pr_debug("qp#%d state -> SQD\n", qp_num(qp));
                        rxe_qp_drain(qp);
                        break;

                case IB_QPS_SQE:
                        pr_warn("qp#%d state -> SQE !!?\n", qp_num(qp));
                        /* Not possible from modify_qp. */
                        break;

                case IB_QPS_ERR:
                        pr_debug("qp#%d state -> ERR\n", qp_num(qp));
                        rxe_qp_error(qp);
                        break;
                }
        }

        return 0;
}

/* called by the query qp verb */
int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask)
{
        *attr = qp->attr;

        attr->rq_psn                            = qp->resp.psn;
        attr->sq_psn                            = qp->req.psn;

        attr->cap.max_send_wr                   = qp->sq.max_wr;
        attr->cap.max_send_sge                  = qp->sq.max_sge;
        attr->cap.max_inline_data               = qp->sq.max_inline;

        if (!qp->srq) {
                attr->cap.max_recv_wr           = qp->rq.max_wr;
                attr->cap.max_recv_sge          = qp->rq.max_sge;
        }

        rxe_av_to_attr(&qp->pri_av, &attr->ah_attr);
        rxe_av_to_attr(&qp->alt_av, &attr->alt_ah_attr);

        if (qp->req.state == QP_STATE_DRAIN) {
                attr->sq_draining = 1;
                /* applications that get this state
                 * typically spin on it. yield the
                 * processor
                 */
                cond_resched();
        } else {
                attr->sq_draining = 0;
        }

        pr_debug("attr->sq_draining = %d\n", attr->sq_draining);

        return 0;
}

/* called by the destroy qp verb */
void rxe_qp_destroy(struct rxe_qp *qp)
{
        qp->valid = 0;
        qp->qp_timeout_jiffies = 0;
        rxe_cleanup_task(&qp->resp.task);

        if (qp_type(qp) == IB_QPT_RC) {
                del_timer_sync(&qp->retrans_timer);
                del_timer_sync(&qp->rnr_nak_timer);
        }

        rxe_cleanup_task(&qp->req.task);
        rxe_cleanup_task(&qp->comp.task);

        /* flush out any receive wr's or pending requests */
        __rxe_do_task(&qp->req.task);
        if (qp->sq.queue) {
                __rxe_do_task(&qp->comp.task);
                __rxe_do_task(&qp->req.task);
        }
}

/* called when the last reference to the qp is dropped */
static void rxe_qp_do_cleanup(struct work_struct *work)
{
        struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work);

        rxe_drop_all_mcast_groups(qp);

        if (qp->sq.queue)
                rxe_queue_cleanup(qp->sq.queue);

        if (qp->srq)
                rxe_drop_ref(qp->srq);

        if (qp->rq.queue)
                rxe_queue_cleanup(qp->rq.queue);

        if (qp->scq)
                rxe_drop_ref(qp->scq);
        if (qp->rcq)
                rxe_drop_ref(qp->rcq);
        if (qp->pd)
                rxe_drop_ref(qp->pd);

        if (qp->resp.mr) {
                rxe_drop_ref(qp->resp.mr);
                qp->resp.mr = NULL;
        }

        if (qp_type(qp) == IB_QPT_RC)
                sk_dst_reset(qp->sk->sk);

        free_rd_atomic_resources(qp);

        kernel_sock_shutdown(qp->sk, SHUT_RDWR);
        sock_release(qp->sk);
}

/* called when the last reference to the qp is dropped */
void rxe_qp_cleanup(struct rxe_pool_entry *arg)
{
        struct rxe_qp *qp = container_of(arg, typeof(*qp), pelem);

        execute_in_process_context(rxe_qp_do_cleanup, &qp->cleanup_work);
}