linux/net/rds/iw_cm.c
/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/in.h>
#include <linux/vmalloc.h>

#include "rds.h"
#include "iw.h"

/*
 * Set the selected protocol version
 */
static void rds_iw_set_protocol(struct rds_connection *conn, unsigned int version)
{
	conn->c_version = version;
}

/*
 * Set up flow control
 */
static void rds_iw_set_flow_control(struct rds_connection *conn, u32 credits)
{
	struct rds_iw_connection *ic = conn->c_transport_data;

	if (rds_iw_sysctl_flow_control && credits != 0) {
		/* We're doing flow control */
		ic->i_flowctl = 1;
		rds_iw_send_add_credits(conn, credits);
	} else {
		ic->i_flowctl = 0;
	}
}

/*
 * Connection established.
 * We get here for both outgoing and incoming connections.
 */
void rds_iw_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_event *event)
{
	const struct rds_iw_connect_private *dp = NULL;
	struct rds_iw_connection *ic = conn->c_transport_data;
	struct rds_iw_device *rds_iwdev;
	int err;

	if (event->param.conn.private_data_len) {
		dp = event->param.conn.private_data;

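		/* The peer's private data carries the protocol version as
		 * separate major/minor bytes; RDS_PROTOCOL() (rds.h) packs
		 * them back into the single word kept in conn->c_version. */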
		rds_iw_set_protocol(conn,
				RDS_PROTOCOL(dp->dp_protocol_major,
					dp->dp_protocol_minor));
		rds_iw_set_flow_control(conn, be32_to_cpu(dp->dp_credit));
	}

	/* update ib_device with this local ipaddr & conn */
	rds_iwdev = ib_get_client_data(ic->i_cm_id->device, &rds_iw_client);
	err = rds_iw_update_cm_id(rds_iwdev, ic->i_cm_id);
	if (err)
		printk(KERN_ERR "rds_iw_update_cm_id failed (%d)\n", err);
	rds_iw_add_conn(rds_iwdev, conn);

	/* If the peer gave us the last packet it saw, process this as if
	 * we had received a regular ACK. */
	if (dp && dp->dp_ack_seq)
		rds_send_drop_acked(conn, be64_to_cpu(dp->dp_ack_seq), NULL);

	printk(KERN_NOTICE "RDS/IW: connected to %pI4<->%pI4 version %u.%u%s\n",
			&conn->c_laddr, &conn->c_faddr,
			RDS_PROTOCOL_MAJOR(conn->c_version),
			RDS_PROTOCOL_MINOR(conn->c_version),
			ic->i_flowctl ? ", flow control" : "");

	rds_connect_complete(conn);
}

static void rds_iw_cm_fill_conn_param(struct rds_connection *conn,
			struct rdma_conn_param *conn_param,
			struct rds_iw_connect_private *dp,
			u32 protocol_version)
{
	struct rds_iw_connection *ic = conn->c_transport_data;

	memset(conn_param, 0, sizeof(struct rdma_conn_param));
	/* XXX tune these? */
	conn_param->responder_resources = 1;
	conn_param->initiator_depth = 1;

	if (dp) {
		memset(dp, 0, sizeof(*dp));
		dp->dp_saddr = conn->c_laddr;
		dp->dp_daddr = conn->c_faddr;
		dp->dp_protocol_major = RDS_PROTOCOL_MAJOR(protocol_version);
		dp->dp_protocol_minor = RDS_PROTOCOL_MINOR(protocol_version);
		dp->dp_protocol_minor_mask = cpu_to_be16(RDS_IW_SUPPORTED_PROTOCOLS);
		dp->dp_ack_seq = rds_iw_piggyb_ack(ic);

		/* Advertise flow control */
		if (ic->i_flowctl) {
			unsigned int credits;

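			/* i_credits packs two counters into one atomic_t:
			 * send credits in the low 16 bits and newly posted
			 * receive buffers in the high 16 bits (the
			 * IB_GET/SET_POST_CREDITS helpers in iw.h).  Hand
			 * the posted count to the peer and clear it locally
			 * in a single step. */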
			credits = IB_GET_POST_CREDITS(atomic_read(&ic->i_credits));
			dp->dp_credit = cpu_to_be32(credits);
			atomic_sub(IB_SET_POST_CREDITS(credits), &ic->i_credits);
		}

		conn_param->private_data = dp;
		conn_param->private_data_len = sizeof(*dp);
	}
}

static void rds_iw_cq_event_handler(struct ib_event *event, void *data)
{
	rdsdebug("event %u data %p\n", event->event, data);
}

static void rds_iw_qp_event_handler(struct ib_event *event, void *data)
{
	struct rds_connection *conn = data;
	struct rds_iw_connection *ic = conn->c_transport_data;

	rdsdebug("conn %p ic %p event %u\n", conn, ic, event->event);

	switch (event->event) {
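	/* COMM_EST means data arrived before the CM handshake finished;
	 * rdma_notify() lets the connection manager catch up. */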
	case IB_EVENT_COMM_EST:
		rdma_notify(ic->i_cm_id, IB_EVENT_COMM_EST);
		break;
	case IB_EVENT_QP_REQ_ERR:
	case IB_EVENT_QP_FATAL:
	default:
		rds_iw_conn_error(conn, "RDS/IW: Fatal QP Event %u - connection %pI4->%pI4...reconnecting\n",
			event->event, &conn->c_laddr,
			&conn->c_faddr);
		break;
	}
}

/*
 * Create a QP
 */
static int rds_iw_init_qp_attrs(struct ib_qp_init_attr *attr,
		struct rds_iw_device *rds_iwdev,
		struct rds_iw_work_ring *send_ring,
		void (*send_cq_handler)(struct ib_cq *, void *),
		struct rds_iw_work_ring *recv_ring,
		void (*recv_cq_handler)(struct ib_cq *, void *),
		void *context)
{
	struct ib_device *dev = rds_iwdev->dev;
	unsigned int send_size, recv_size;
	int ret;

	/* The offset of 1 is to accommodate the additional ACK WR:
	 * the QP and CQs are sized for send_size/recv_size entries,
	 * while the rings expose one slot less to regular traffic. */
	send_size = min_t(unsigned int, rds_iwdev->max_wrs, rds_iw_sysctl_max_send_wr + 1);
	recv_size = min_t(unsigned int, rds_iwdev->max_wrs, rds_iw_sysctl_max_recv_wr + 1);
	rds_iw_ring_resize(send_ring, send_size - 1);
	rds_iw_ring_resize(recv_ring, recv_size - 1);

	memset(attr, 0, sizeof(*attr));
	attr->event_handler = rds_iw_qp_event_handler;
	attr->qp_context = context;
	attr->cap.max_send_wr = send_size;
	attr->cap.max_recv_wr = recv_size;
	attr->cap.max_send_sge = rds_iwdev->max_sge;
	attr->cap.max_recv_sge = RDS_IW_RECV_SGE;
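	/* IB_SIGNAL_REQ_WR: only send WRs flagged IB_SEND_SIGNALED
	 * generate completions, which keeps send CQ traffic down. */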
	attr->sq_sig_type = IB_SIGNAL_REQ_WR;
	attr->qp_type = IB_QPT_RC;

	attr->send_cq = ib_create_cq(dev, send_cq_handler,
				     rds_iw_cq_event_handler,
				     context, send_size, 0);
	if (IS_ERR(attr->send_cq)) {
		ret = PTR_ERR(attr->send_cq);
		attr->send_cq = NULL;
		rdsdebug("ib_create_cq send failed: %d\n", ret);
		goto out;
	}

	attr->recv_cq = ib_create_cq(dev, recv_cq_handler,
				     rds_iw_cq_event_handler,
				     context, recv_size, 0);
	if (IS_ERR(attr->recv_cq)) {
		ret = PTR_ERR(attr->recv_cq);
		attr->recv_cq = NULL;
		rdsdebug("ib_create_cq recv failed: %d\n", ret);
		goto out;
	}

	ret = ib_req_notify_cq(attr->send_cq, IB_CQ_NEXT_COMP);
	if (ret) {
		rdsdebug("ib_req_notify_cq send failed: %d\n", ret);
		goto out;
	}

	ret = ib_req_notify_cq(attr->recv_cq, IB_CQ_SOLICITED);
	if (ret) {
		rdsdebug("ib_req_notify_cq recv failed: %d\n", ret);
		goto out;
	}

out:
	if (ret) {
		if (attr->send_cq)
			ib_destroy_cq(attr->send_cq);
		if (attr->recv_cq)
			ib_destroy_cq(attr->recv_cq);
	}
	return ret;
}

/*
 * This needs to be very careful to not leave IS_ERR pointers around for
 * cleanup to trip over.
 */
static int rds_iw_setup_qp(struct rds_connection *conn)
{
	struct rds_iw_connection *ic = conn->c_transport_data;
	struct ib_device *dev = ic->i_cm_id->device;
	struct ib_qp_init_attr attr;
	struct rds_iw_device *rds_iwdev;
	int ret;

	/* rds_iw_add_one creates a rds_iw_device object per IB device,
	 * and allocates a protection domain, memory range and MR pool
	 * for each.  If that fails for any reason, it will not register
	 * the rds_iwdev at all.
	 */
	rds_iwdev = ib_get_client_data(dev, &rds_iw_client);
	if (rds_iwdev == NULL) {
		if (printk_ratelimit())
			printk(KERN_NOTICE "RDS/IW: No client_data for device %s\n",
					dev->name);
		return -EOPNOTSUPP;
	}

	/* Protection domain and memory range */
	ic->i_pd = rds_iwdev->pd;
	ic->i_mr = rds_iwdev->mr;

	ret = rds_iw_init_qp_attrs(&attr, rds_iwdev,
			&ic->i_send_ring, rds_iw_send_cq_comp_handler,
			&ic->i_recv_ring, rds_iw_recv_cq_comp_handler,
			conn);
	if (ret < 0)
		goto out;

	ic->i_send_cq = attr.send_cq;
	ic->i_recv_cq = attr.recv_cq;

	/*
	 * XXX this can fail if max_*_wr is too large?  Are we supposed
	 * to back off until we get a value that the hardware can support?
	 */
	ret = rdma_create_qp(ic->i_cm_id, ic->i_pd, &attr);
	if (ret) {
		rdsdebug("rdma_create_qp failed: %d\n", ret);
		goto out;
	}

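	/* One rds_header per ring entry, allocated as a single
	 * DMA-coherent block so the CPU can fill headers in and the
	 * HCA can read them without per-message mapping or syncing. */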
	ic->i_send_hdrs = ib_dma_alloc_coherent(dev,
					   ic->i_send_ring.w_nr *
						sizeof(struct rds_header),
					   &ic->i_send_hdrs_dma, GFP_KERNEL);
	if (ic->i_send_hdrs == NULL) {
		ret = -ENOMEM;
		rdsdebug("ib_dma_alloc_coherent send failed\n");
		goto out;
	}

	ic->i_recv_hdrs = ib_dma_alloc_coherent(dev,
					   ic->i_recv_ring.w_nr *
						sizeof(struct rds_header),
					   &ic->i_recv_hdrs_dma, GFP_KERNEL);
	if (ic->i_recv_hdrs == NULL) {
		ret = -ENOMEM;
		rdsdebug("ib_dma_alloc_coherent recv failed\n");
		goto out;
	}

	ic->i_ack = ib_dma_alloc_coherent(dev, sizeof(struct rds_header),
				       &ic->i_ack_dma, GFP_KERNEL);
	if (ic->i_ack == NULL) {
		ret = -ENOMEM;
		rdsdebug("ib_dma_alloc_coherent ack failed\n");
		goto out;
	}

	ic->i_sends = vmalloc(ic->i_send_ring.w_nr * sizeof(struct rds_iw_send_work));
	if (ic->i_sends == NULL) {
		ret = -ENOMEM;
		rdsdebug("send allocation failed\n");
		goto out;
	}
	rds_iw_send_init_ring(ic);

	ic->i_recvs = vmalloc(ic->i_recv_ring.w_nr * sizeof(struct rds_iw_recv_work));
	if (ic->i_recvs == NULL) {
		ret = -ENOMEM;
		rdsdebug("recv allocation failed\n");
		goto out;
	}

	rds_iw_recv_init_ring(ic);
	rds_iw_recv_init_ack(ic);

	/* Post receive buffers - as a side effect, this will update
	 * the posted credit count. */
	rds_iw_recv_refill(conn, GFP_KERNEL, GFP_HIGHUSER, 1);

	rdsdebug("conn %p pd %p mr %p cq %p %p\n", conn, ic->i_pd, ic->i_mr,
		 ic->i_send_cq, ic->i_recv_cq);

out:
	return ret;
}

static u32 rds_iw_protocol_compatible(const struct rds_iw_connect_private *dp)
{
	u16 common;
	u32 version = 0;

	/* rdma_cm private data is odd - when there is any private data in the
	 * request, we will be given a pretty large buffer without telling us the
	 * original size. The only way to tell the difference is by looking at
	 * the contents, which are initialized to zero.
	 * If the protocol version fields aren't set, this is a connection attempt
	 * from an older version. This could be 3.0 or 2.0 - we can't tell.
	 * We really should have changed this for OFED 1.3 :-( */
	if (dp->dp_protocol_major == 0)
		return RDS_PROTOCOL_3_0;

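	/* dp_protocol_minor_mask has bit i set for each minor version i
	 * the peer supports; walking up to the highest common set bit
	 * picks the newest minor version both sides speak. */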
	common = be16_to_cpu(dp->dp_protocol_minor_mask) & RDS_IW_SUPPORTED_PROTOCOLS;
	if (dp->dp_protocol_major == 3 && common) {
		version = RDS_PROTOCOL_3_0;
		while ((common >>= 1) != 0)
			version++;
	} else if (printk_ratelimit()) {
		printk(KERN_NOTICE "RDS: Connection from %pI4 using "
			"incompatible protocol version %u.%u\n",
			&dp->dp_saddr,
			dp->dp_protocol_major,
			dp->dp_protocol_minor);
	}
	return version;
}

int rds_iw_cm_handle_connect(struct rdma_cm_id *cm_id,
				    struct rdma_cm_event *event)
{
	const struct rds_iw_connect_private *dp = event->param.conn.private_data;
	struct rds_iw_connect_private dp_rep;
	struct rds_connection *conn = NULL;
	struct rds_iw_connection *ic = NULL;
	struct rdma_conn_param conn_param;
	struct rds_iw_device *rds_iwdev;
	u32 version;
	int err, destroy = 1;
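
	/* A nonzero return tells the rdma_cm core to destroy the cm_id.
	 * Once it is stored in ic->i_cm_id below we own it and must
	 * return 0. */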

	/* Check whether the remote protocol version matches ours. */
	version = rds_iw_protocol_compatible(dp);
	if (!version)
		goto out;

	rdsdebug("saddr %pI4 daddr %pI4 RDSv%u.%u\n",
		 &dp->dp_saddr, &dp->dp_daddr,
		 RDS_PROTOCOL_MAJOR(version), RDS_PROTOCOL_MINOR(version));

	conn = rds_conn_create(dp->dp_daddr, dp->dp_saddr, &rds_iw_transport,
			       GFP_KERNEL);
	if (IS_ERR(conn)) {
		rdsdebug("rds_conn_create failed (%ld)\n", PTR_ERR(conn));
		conn = NULL;
		goto out;
	}

	/*
	 * The connection request may occur while the
	 * previous connection still exists, e.g. in case of failover.
	 * But as connections may be initiated simultaneously
	 * by both hosts, we have a random backoff mechanism -
	 * see the comment above rds_queue_reconnect()
	 */
	mutex_lock(&conn->c_cm_lock);
	if (!rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_CONNECTING)) {
		if (rds_conn_state(conn) == RDS_CONN_UP) {
			rdsdebug("incoming connect while connecting\n");
			rds_conn_drop(conn);
			rds_iw_stats_inc(s_iw_listen_closed_stale);
		} else if (rds_conn_state(conn) == RDS_CONN_CONNECTING) {
			/* Wait and see - our connect may still be succeeding */
			rds_iw_stats_inc(s_iw_connect_raced);
		}
		mutex_unlock(&conn->c_cm_lock);
		goto out;
	}

	ic = conn->c_transport_data;

	rds_iw_set_protocol(conn, version);
	rds_iw_set_flow_control(conn, be32_to_cpu(dp->dp_credit));

	/* If the peer gave us the last packet it saw, process this as if
	 * we had received a regular ACK. */
	if (dp->dp_ack_seq)
		rds_send_drop_acked(conn, be64_to_cpu(dp->dp_ack_seq), NULL);

	BUG_ON(cm_id->context);
	BUG_ON(ic->i_cm_id);

	ic->i_cm_id = cm_id;
	cm_id->context = conn;

	rds_iwdev = ib_get_client_data(cm_id->device, &rds_iw_client);
	ic->i_dma_local_lkey = rds_iwdev->dma_local_lkey;

	/* We got halfway through setting up the iw_connection, if we
	 * fail now, we have to take the long route out of this mess. */
	destroy = 0;

	err = rds_iw_setup_qp(conn);
	if (err) {
		rds_iw_conn_error(conn, "rds_iw_setup_qp failed (%d)\n", err);
		goto out;
	}

	rds_iw_cm_fill_conn_param(conn, &conn_param, &dp_rep, version);

	/* rdma_accept() calls rdma_reject() internally if it fails */
	err = rdma_accept(cm_id, &conn_param);
	mutex_unlock(&conn->c_cm_lock);
	if (err) {
		rds_iw_conn_error(conn, "rdma_accept failed (%d)\n", err);
		goto out;
	}

	return 0;

out:
	rdma_reject(cm_id, NULL, 0);
	return destroy;
}

int rds_iw_cm_initiate_connect(struct rdma_cm_id *cm_id)
{
	struct rds_connection *conn = cm_id->context;
	struct rds_iw_connection *ic = conn->c_transport_data;
	struct rdma_conn_param conn_param;
	struct rds_iw_connect_private dp;
	int ret;

	/* If the peer doesn't do protocol negotiation, we must
	 * default to RDSv3.0 */
	rds_iw_set_protocol(conn, RDS_PROTOCOL_3_0);
	ic->i_flowctl = rds_iw_sysctl_flow_control;	/* advertise flow control */

	ret = rds_iw_setup_qp(conn);
	if (ret) {
		rds_iw_conn_error(conn, "rds_iw_setup_qp failed (%d)\n", ret);
		goto out;
	}

	rds_iw_cm_fill_conn_param(conn, &conn_param, &dp, RDS_PROTOCOL_VERSION);

	ret = rdma_connect(cm_id, &conn_param);
	if (ret)
		rds_iw_conn_error(conn, "rdma_connect failed (%d)\n", ret);

out:
	/* Beware - returning non-zero tells the rdma_cm to destroy
	 * the cm_id. We should certainly not do that as long as we
	 * still "own" the cm_id. */
	if (ret && ic->i_cm_id == cm_id)
		ret = 0;
	return ret;
}

int rds_iw_conn_connect(struct rds_connection *conn)
{
	struct rds_iw_connection *ic = conn->c_transport_data;
	struct rds_iw_device *rds_iwdev;
	struct sockaddr_in src, dest;
	int ret;

	/* XXX I wonder what effect the port space has */
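	/* (RDMA_PS_TCP selects the connection-oriented rdma_cm port
	 * space, which is separate from the kernel's regular TCP port
	 * table.) */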
	/* delegate cm event handler to rdma_transport */
	ic->i_cm_id = rdma_create_id(rds_rdma_cm_event_handler, conn,
				     RDMA_PS_TCP);
	if (IS_ERR(ic->i_cm_id)) {
		ret = PTR_ERR(ic->i_cm_id);
		ic->i_cm_id = NULL;
		rdsdebug("rdma_create_id() failed: %d\n", ret);
		goto out;
	}

	rdsdebug("created cm id %p for conn %p\n", ic->i_cm_id, conn);

	src.sin_family = AF_INET;
	src.sin_addr.s_addr = (__force u32)conn->c_laddr;
	src.sin_port = (__force u16)htons(0);

	/* First, bind to the local address and device. */
	ret = rdma_bind_addr(ic->i_cm_id, (struct sockaddr *) &src);
	if (ret) {
		rdsdebug("rdma_bind_addr(%pI4) failed: %d\n",
				&conn->c_laddr, ret);
		rdma_destroy_id(ic->i_cm_id);
		ic->i_cm_id = NULL;
		goto out;
	}

	rds_iwdev = ib_get_client_data(ic->i_cm_id->device, &rds_iw_client);
	ic->i_dma_local_lkey = rds_iwdev->dma_local_lkey;

	dest.sin_family = AF_INET;
	dest.sin_addr.s_addr = (__force u32)conn->c_faddr;
	dest.sin_port = (__force u16)htons(RDS_PORT);

	ret = rdma_resolve_addr(ic->i_cm_id, (struct sockaddr *)&src,
				(struct sockaddr *)&dest,
				RDS_RDMA_RESOLVE_TIMEOUT_MS);
	if (ret) {
		rdsdebug("addr resolve failed for cm id %p: %d\n", ic->i_cm_id,
			 ret);
		rdma_destroy_id(ic->i_cm_id);
		ic->i_cm_id = NULL;
	}

out:
	return ret;
}

/*
 * This is careful to clean up only the resources that were actually
 * built up, so that it can be called at any point during startup.
 * In fact it can be called multiple times for a given connection.
 */
void rds_iw_conn_shutdown(struct rds_connection *conn)
{
	struct rds_iw_connection *ic = conn->c_transport_data;
	int err = 0;
	struct ib_qp_attr qp_attr;

	rdsdebug("cm %p pd %p cq %p %p qp %p\n", ic->i_cm_id,
		 ic->i_pd, ic->i_send_cq, ic->i_recv_cq,
		 ic->i_cm_id ? ic->i_cm_id->qp : NULL);

	if (ic->i_cm_id) {
		struct ib_device *dev = ic->i_cm_id->device;

		rdsdebug("disconnecting cm %p\n", ic->i_cm_id);
		err = rdma_disconnect(ic->i_cm_id);
		if (err) {
			/* Actually this may happen quite frequently, when
			 * an outgoing connect raced with an incoming connect.
			 */
			rdsdebug("rds_iw_conn_shutdown: failed to disconnect,"
				   " cm: %p err %d\n", ic->i_cm_id, err);
		}

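		/* Move the QP to the error state so the hardware flushes
		 * any outstanding work requests; their flush completions
		 * are what allow the ring-empty wait below to finish. */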
		if (ic->i_cm_id->qp) {
			qp_attr.qp_state = IB_QPS_ERR;
			ib_modify_qp(ic->i_cm_id->qp, &qp_attr, IB_QP_STATE);
		}

		wait_event(rds_iw_ring_empty_wait,
			rds_iw_ring_empty(&ic->i_send_ring) &&
			rds_iw_ring_empty(&ic->i_recv_ring));

		if (ic->i_send_hdrs)
			ib_dma_free_coherent(dev,
					   ic->i_send_ring.w_nr *
						sizeof(struct rds_header),
					   ic->i_send_hdrs,
					   ic->i_send_hdrs_dma);

		if (ic->i_recv_hdrs)
			ib_dma_free_coherent(dev,
					   ic->i_recv_ring.w_nr *
						sizeof(struct rds_header),
					   ic->i_recv_hdrs,
					   ic->i_recv_hdrs_dma);

		if (ic->i_ack)
			ib_dma_free_coherent(dev, sizeof(struct rds_header),
					     ic->i_ack, ic->i_ack_dma);

		if (ic->i_sends)
			rds_iw_send_clear_ring(ic);
		if (ic->i_recvs)
			rds_iw_recv_clear_ring(ic);

		if (ic->i_cm_id->qp)
			rdma_destroy_qp(ic->i_cm_id);
		if (ic->i_send_cq)
			ib_destroy_cq(ic->i_send_cq);
		if (ic->i_recv_cq)
			ib_destroy_cq(ic->i_recv_cq);

		/*
		 * If associated with an rds_iw_device:
		 *	Move connection back to the nodev list.
		 *	Remove cm_id from the device cm_id list.
		 */
		if (ic->rds_iwdev)
			rds_iw_remove_conn(ic->rds_iwdev, conn);

		rdma_destroy_id(ic->i_cm_id);

		ic->i_cm_id = NULL;
		ic->i_pd = NULL;
		ic->i_mr = NULL;
		ic->i_send_cq = NULL;
		ic->i_recv_cq = NULL;
		ic->i_send_hdrs = NULL;
		ic->i_recv_hdrs = NULL;
		ic->i_ack = NULL;
	}
	BUG_ON(ic->rds_iwdev);

	/* Clear pending transmit */
	if (ic->i_rm) {
		rds_message_put(ic->i_rm);
		ic->i_rm = NULL;
	}

	/* Clear the ACK state */
	clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
#ifdef KERNEL_HAS_ATOMIC64
	atomic64_set(&ic->i_ack_next, 0);
#else
	ic->i_ack_next = 0;
#endif
	ic->i_ack_recv = 0;

	/* Clear flow control state */
	ic->i_flowctl = 0;
	atomic_set(&ic->i_credits, 0);

	rds_iw_ring_init(&ic->i_send_ring, rds_iw_sysctl_max_send_wr);
	rds_iw_ring_init(&ic->i_recv_ring, rds_iw_sysctl_max_recv_wr);

	if (ic->i_iwinc) {
		rds_inc_put(&ic->i_iwinc->ii_inc);
		ic->i_iwinc = NULL;
	}

	vfree(ic->i_sends);
	ic->i_sends = NULL;
	vfree(ic->i_recvs);
	ic->i_recvs = NULL;
	rdsdebug("shutdown complete\n");
}

int rds_iw_conn_alloc(struct rds_connection *conn, gfp_t gfp)
{
	struct rds_iw_connection *ic;
	unsigned long flags;

	/* XXX too lazy? */
	ic = kzalloc(sizeof(struct rds_iw_connection), gfp);
	if (ic == NULL)
		return -ENOMEM;

	INIT_LIST_HEAD(&ic->iw_node);
	mutex_init(&ic->i_recv_mutex);
#ifndef KERNEL_HAS_ATOMIC64
	spin_lock_init(&ic->i_ack_lock);
#endif

	/*
	 * rds_iw_conn_shutdown() waits for these to be emptied so they
	 * must be initialized before it can be called.
	 */
	rds_iw_ring_init(&ic->i_send_ring, rds_iw_sysctl_max_send_wr);
	rds_iw_ring_init(&ic->i_recv_ring, rds_iw_sysctl_max_recv_wr);

	ic->conn = conn;
	conn->c_transport_data = ic;

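	/* No device is associated yet, so park the connection on the
	 * global nodev list; it moves to a device's list once the
	 * cm_id binds it to one (see rds_iw_add_conn()). */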
	spin_lock_irqsave(&iw_nodev_conns_lock, flags);
	list_add_tail(&ic->iw_node, &iw_nodev_conns);
	spin_unlock_irqrestore(&iw_nodev_conns_lock, flags);

	rdsdebug("conn %p conn ic %p\n", conn, conn->c_transport_data);
	return 0;
}

/*
 * Free a connection. Connection must be shut down and not set for reconnect.
 */
void rds_iw_conn_free(void *arg)
{
	struct rds_iw_connection *ic = arg;
	spinlock_t	*lock_ptr;

	rdsdebug("ic %p\n", ic);

	/*
	 * Conn is either on a dev's list or on the nodev list.
	 * A race with shutdown() or connect() would cause problems
	 * (since rds_iwdev would change) but that should never happen.
	 */
	lock_ptr = ic->rds_iwdev ? &ic->rds_iwdev->spinlock : &iw_nodev_conns_lock;

	spin_lock_irq(lock_ptr);
	list_del(&ic->iw_node);
	spin_unlock_irq(lock_ptr);

	kfree(ic);
}

/*
 * An error occurred on the connection
 */
void
__rds_iw_conn_error(struct rds_connection *conn, const char *fmt, ...)
{
	va_list ap;

	rds_conn_drop(conn);

	va_start(ap, fmt);
	vprintk(fmt, ap);
	va_end(ap);
}