linux/drivers/infiniband/core/iwcm.c
   1/*
   2 * Copyright (c) 2004, 2005 Intel Corporation.  All rights reserved.
   3 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
   4 * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
   5 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
   6 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
   7 * Copyright (c) 2005 Network Appliance, Inc. All rights reserved.
   8 *
   9 * This software is available to you under a choice of one of two
  10 * licenses.  You may choose to be licensed under the terms of the GNU
  11 * General Public License (GPL) Version 2, available from the file
  12 * COPYING in the main directory of this source tree, or the
  13 * OpenIB.org BSD license below:
  14 *
  15 *     Redistribution and use in source and binary forms, with or
  16 *     without modification, are permitted provided that the following
  17 *     conditions are met:
  18 *
  19 *      - Redistributions of source code must retain the above
  20 *        copyright notice, this list of conditions and the following
  21 *        disclaimer.
  22 *
  23 *      - Redistributions in binary form must reproduce the above
  24 *        copyright notice, this list of conditions and the following
  25 *        disclaimer in the documentation and/or other materials
  26 *        provided with the distribution.
  27 *
  28 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  29 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  30 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  31 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  32 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  33 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  34 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  35 * SOFTWARE.
  36 *
  37 */
  38#include <linux/dma-mapping.h>
  39#include <linux/err.h>
  40#include <linux/idr.h>
  41#include <linux/interrupt.h>
  42#include <linux/rbtree.h>
  43#include <linux/sched.h>
  44#include <linux/spinlock.h>
  45#include <linux/workqueue.h>
  46#include <linux/completion.h>
  47#include <linux/slab.h>
  48#include <linux/module.h>
  49#include <linux/sysctl.h>
  50
  51#include <rdma/iw_cm.h>
  52#include <rdma/ib_addr.h>
  53#include <rdma/iw_portmap.h>
  54#include <rdma/rdma_netlink.h>
  55
  56#include "iwcm.h"
  57
  58MODULE_AUTHOR("Tom Tucker");
  59MODULE_DESCRIPTION("iWARP CM");
  60MODULE_LICENSE("Dual BSD/GPL");
  61
  62static const char * const iwcm_rej_reason_strs[] = {
  63        [ECONNRESET]                    = "reset by remote host",
  64        [ECONNREFUSED]                  = "refused by remote application",
  65        [ETIMEDOUT]                     = "setup timeout",
  66};
  67
  68const char *__attribute_const__ iwcm_reject_msg(int reason)
  69{
  70        size_t index;
  71
  72        /* iWARP uses negative errnos */
  73        index = -reason;
  74
  75        if (index < ARRAY_SIZE(iwcm_rej_reason_strs) &&
  76            iwcm_rej_reason_strs[index])
  77                return iwcm_rej_reason_strs[index];
  78        else
  79                return "unrecognized reason";
  80}
  81EXPORT_SYMBOL(iwcm_reject_msg);
  82
  83static struct rdma_nl_cbs iwcm_nl_cb_table[RDMA_NL_IWPM_NUM_OPS] = {
  84        [RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb},
  85        [RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb},
  86        [RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb},
  87        [RDMA_NL_IWPM_REMOTE_INFO] = {.dump = iwpm_remote_info_cb},
  88        [RDMA_NL_IWPM_HANDLE_ERR] = {.dump = iwpm_mapping_error_cb},
  89        [RDMA_NL_IWPM_MAPINFO] = {.dump = iwpm_mapping_info_cb},
  90        [RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb},
  91        [RDMA_NL_IWPM_HELLO] = {.dump = iwpm_hello_cb}
  92};
  93
  94static struct workqueue_struct *iwcm_wq;
  95struct iwcm_work {
  96        struct work_struct work;
  97        struct iwcm_id_private *cm_id;
  98        struct list_head list;
  99        struct iw_cm_event event;
 100        struct list_head free_list;
 101};
 102
 103static unsigned int default_backlog = 256;
 104
 105static struct ctl_table_header *iwcm_ctl_table_hdr;
 106static struct ctl_table iwcm_ctl_table[] = {
 107        {
 108                .procname       = "default_backlog",
 109                .data           = &default_backlog,
 110                .maxlen         = sizeof(default_backlog),
 111                .mode           = 0644,
 112                .proc_handler   = proc_dointvec,
 113        },
 114        { }
 115};
 116
 117/*
 118 * The following services provide a mechanism for pre-allocating iwcm_work
 119 * elements.  The design pre-allocates them based on the cm_id type:
 120 *      LISTENING IDS:  Get enough elements preallocated to handle the
 121 *                      listen backlog.
 122 *      ACTIVE IDS:     4: CONNECT_REPLY, ESTABLISHED, DISCONNECT, CLOSE
 123 *      PASSIVE IDS:    3: ESTABLISHED, DISCONNECT, CLOSE
 124 *
 125 * Allocating them in connect and listen avoids having to deal
 126 * with allocation failures on the event upcall from the provider (which
 127 * is called in the interrupt context).
 128 *
 129 * One exception is when creating the cm_id for incoming connection requests.
 130 * There are two cases:
 131 * 1) in the event upcall, cm_event_handler(), for a listening cm_id.  If
 132 *    the backlog is exceeded, then no more connection request events will
 133 *    be processed.  cm_event_handler() returns -ENOMEM in this case.  It's up
 134 *    to the provider to reject the connection request.
 135 * 2) in the connection request workqueue handler, cm_conn_req_handler().
 136 *    If work elements cannot be allocated for the new connect request cm_id,
 137 *    then IWCM will call the provider reject method.  This is ok since
 138 *    cm_conn_req_handler() runs in the workqueue thread context.
 139 */
 140
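     /* Take a work element off the cm_id's free list, or NULL if empty. */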
 141static struct iwcm_work *get_work(struct iwcm_id_private *cm_id_priv)
 142{
 143        struct iwcm_work *work;
 144
 145        if (list_empty(&cm_id_priv->work_free_list))
 146                return NULL;
 147        work = list_entry(cm_id_priv->work_free_list.next, struct iwcm_work,
 148                          free_list);
 149        list_del_init(&work->free_list);
 150        return work;
 151}
 152
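     /* Return a work element to its owning cm_id's free list for reuse. */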
 153static void put_work(struct iwcm_work *work)
 154{
 155        list_add(&work->free_list, &work->cm_id->work_free_list);
 156}
 157
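     /* Free every work element still sitting on the cm_id's free list. */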
 158static void dealloc_work_entries(struct iwcm_id_private *cm_id_priv)
 159{
 160        struct list_head *e, *tmp;
 161
 162        list_for_each_safe(e, tmp, &cm_id_priv->work_free_list)
 163                kfree(list_entry(e, struct iwcm_work, free_list));
 164}
 165
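     /* Pre-allocate @count work elements and park them on the free list. */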
 166static int alloc_work_entries(struct iwcm_id_private *cm_id_priv, int count)
 167{
 168        struct iwcm_work *work;
 169
 170        BUG_ON(!list_empty(&cm_id_priv->work_free_list));
 171        while (count--) {
 172                work = kmalloc(sizeof(struct iwcm_work), GFP_KERNEL);
 173                if (!work) {
 174                        dealloc_work_entries(cm_id_priv);
 175                        return -ENOMEM;
 176                }
 177                work->cm_id = cm_id_priv;
 178                INIT_LIST_HEAD(&work->list);
 179                put_work(work);
 180        }
 181        return 0;
 182}
 183
 184/*
 185 * Save private data from incoming connection requests to
 186 * iw_cm_event, so the low-level driver doesn't have to. Adjust
 187 * the event ptr to point to the local copy.
 188 */
 189static int copy_private_data(struct iw_cm_event *event)
 190{
 191        void *p;
 192
 193        p = kmemdup(event->private_data, event->private_data_len, GFP_ATOMIC);
 194        if (!p)
 195                return -ENOMEM;
 196        event->private_data = p;
 197        return 0;
 198}
 199
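     /* Free the pre-allocated work elements and then the cm_id itself. */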
 200static void free_cm_id(struct iwcm_id_private *cm_id_priv)
 201{
 202        dealloc_work_entries(cm_id_priv);
 203        kfree(cm_id_priv);
 204}
 205
 206/*
 207 * Release a reference on cm_id. If the last reference is being
 208 * released, free the cm_id and return 1.
 209 */
 210static int iwcm_deref_id(struct iwcm_id_private *cm_id_priv)
 211{
 212        BUG_ON(atomic_read(&cm_id_priv->refcount) == 0);
 213        if (atomic_dec_and_test(&cm_id_priv->refcount)) {
 214                BUG_ON(!list_empty(&cm_id_priv->work_list));
 215                free_cm_id(cm_id_priv);
 216                return 1;
 217        }
 218
 219        return 0;
 220}
 221
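     /* add_ref/rem_ref are installed on every cm_id by iw_create_cm_id. */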
 222static void add_ref(struct iw_cm_id *cm_id)
 223{
 224        struct iwcm_id_private *cm_id_priv;
 225        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
 226        atomic_inc(&cm_id_priv->refcount);
 227}
 228
 229static void rem_ref(struct iw_cm_id *cm_id)
 230{
 231        struct iwcm_id_private *cm_id_priv;
 232
 233        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
 234
 235        (void)iwcm_deref_id(cm_id_priv);
 236}
 237
 238static int cm_event_handler(struct iw_cm_id *cm_id, struct iw_cm_event *event);
 239
 240struct iw_cm_id *iw_create_cm_id(struct ib_device *device,
 241                                 iw_cm_handler cm_handler,
 242                                 void *context)
 243{
 244        struct iwcm_id_private *cm_id_priv;
 245
 246        cm_id_priv = kzalloc(sizeof(*cm_id_priv), GFP_KERNEL);
 247        if (!cm_id_priv)
 248                return ERR_PTR(-ENOMEM);
 249
 250        cm_id_priv->state = IW_CM_STATE_IDLE;
 251        cm_id_priv->id.device = device;
 252        cm_id_priv->id.cm_handler = cm_handler;
 253        cm_id_priv->id.context = context;
 254        cm_id_priv->id.event_handler = cm_event_handler;
 255        cm_id_priv->id.add_ref = add_ref;
 256        cm_id_priv->id.rem_ref = rem_ref;
 257        spin_lock_init(&cm_id_priv->lock);
 258        atomic_set(&cm_id_priv->refcount, 1);
 259        init_waitqueue_head(&cm_id_priv->connect_wait);
 260        init_completion(&cm_id_priv->destroy_comp);
 261        INIT_LIST_HEAD(&cm_id_priv->work_list);
 262        INIT_LIST_HEAD(&cm_id_priv->work_free_list);
 263
 264        return &cm_id_priv->id;
 265}
 266EXPORT_SYMBOL(iw_create_cm_id);
 267
 268
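     /* Move the QP to the ERROR state, forcing an abortive close. */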
 269static int iwcm_modify_qp_err(struct ib_qp *qp)
 270{
 271        struct ib_qp_attr qp_attr;
 272
 273        if (!qp)
 274                return -EINVAL;
 275
 276        qp_attr.qp_state = IB_QPS_ERR;
 277        return ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
 278}
 279
 280/*
 281 * This is really the RDMAC CLOSING state. It is most similar to the
 282 * IB SQD QP state.
 283 */
 284static int iwcm_modify_qp_sqd(struct ib_qp *qp)
 285{
 286        struct ib_qp_attr qp_attr;
 287
 288        BUG_ON(qp == NULL);
 289        qp_attr.qp_state = IB_QPS_SQD;
 290        return ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
 291}
 292
 293/*
 294 * CM_ID <-- CLOSING
 295 *
 296 * Block if a passive or active connection is currently being processed. Then
 297 * process the event as follows:
 298 * - If we are ESTABLISHED, move to CLOSING and modify the QP state
 299 *   based on the abrupt flag
 300 * - If the connection is already in the CLOSING or IDLE state, the peer is
 301 *   disconnecting concurrently with us and we've already seen the
 302 *   DISCONNECT event -- ignore the request and return 0
 303 * - Disconnect on a listening endpoint returns -EINVAL
 304 */
 305int iw_cm_disconnect(struct iw_cm_id *cm_id, int abrupt)
 306{
 307        struct iwcm_id_private *cm_id_priv;
 308        unsigned long flags;
 309        int ret = 0;
 310        struct ib_qp *qp = NULL;
 311
 312        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
 313        /* Wait if we're currently in a connect or accept downcall */
 314        wait_event(cm_id_priv->connect_wait,
 315                   !test_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags));
 316
 317        spin_lock_irqsave(&cm_id_priv->lock, flags);
 318        switch (cm_id_priv->state) {
 319        case IW_CM_STATE_ESTABLISHED:
 320                cm_id_priv->state = IW_CM_STATE_CLOSING;
 321
 322                /* QP could be NULL for a user-mode client */
 323                if (cm_id_priv->qp)
 324                        qp = cm_id_priv->qp;
 325                else
 326                        ret = -EINVAL;
 327                break;
 328        case IW_CM_STATE_LISTEN:
 329                ret = -EINVAL;
 330                break;
 331        case IW_CM_STATE_CLOSING:
 332                /* remote peer closed first */
 333        case IW_CM_STATE_IDLE:
 334                /* accept or connect returned !0 */
 335                break;
 336        case IW_CM_STATE_CONN_RECV:
 337                /*
 338                 * App called disconnect before/without calling accept after
 339                 * the connect_request event was delivered.
 340                 */
 341                break;
 342        case IW_CM_STATE_CONN_SENT:
 343                /* Can only get here if wait above fails */
 344        default:
 345                BUG();
 346        }
 347        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 348
 349        if (qp) {
 350                if (abrupt)
 351                        ret = iwcm_modify_qp_err(qp);
 352                else
 353                        ret = iwcm_modify_qp_sqd(qp);
 354
 355                /*
 356                 * If both sides are disconnecting the QP could
 357                 * already be in ERR or SQD states
 358                 */
 359                ret = 0;
 360        }
 361
 362        return ret;
 363}
 364EXPORT_SYMBOL(iw_cm_disconnect);
 365
 366/*
 367 * CM_ID <-- DESTROYING
 368 *
 369 * Clean up all resources associated with the connection and release
 370 * the initial reference taken by iw_create_cm_id.
 371 */
 372static void destroy_cm_id(struct iw_cm_id *cm_id)
 373{
 374        struct iwcm_id_private *cm_id_priv;
 375        unsigned long flags;
 376
 377        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
 378        /*
 379         * Wait if we're currently in a connect or accept downcall. A
 380         * listening endpoint should never block here.
 381         */
 382        wait_event(cm_id_priv->connect_wait,
 383                   !test_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags));
 384
 385        /*
 386         * Since we're deleting the cm_id, drop any events that
 387         * might arrive before the last dereference.
 388         */
 389        set_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags);
 390
 391        spin_lock_irqsave(&cm_id_priv->lock, flags);
 392        switch (cm_id_priv->state) {
 393        case IW_CM_STATE_LISTEN:
 394                cm_id_priv->state = IW_CM_STATE_DESTROYING;
 395                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 396                /* destroy the listening endpoint */
 397                cm_id->device->ops.iw_destroy_listen(cm_id);
 398                spin_lock_irqsave(&cm_id_priv->lock, flags);
 399                break;
 400        case IW_CM_STATE_ESTABLISHED:
 401                cm_id_priv->state = IW_CM_STATE_DESTROYING;
 402                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 403                /* Abrupt close of the connection */
 404                (void)iwcm_modify_qp_err(cm_id_priv->qp);
 405                spin_lock_irqsave(&cm_id_priv->lock, flags);
 406                break;
 407        case IW_CM_STATE_IDLE:
 408        case IW_CM_STATE_CLOSING:
 409                cm_id_priv->state = IW_CM_STATE_DESTROYING;
 410                break;
 411        case IW_CM_STATE_CONN_RECV:
 412                /*
 413                 * App called destroy before/without calling accept after
 414                 * receiving the connection request event notification, or
 415                 * returned non-zero from the event callback function.
 416                 * In either case, must tell the provider to reject.
 417                 */
 418                cm_id_priv->state = IW_CM_STATE_DESTROYING;
 419                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 420                cm_id->device->ops.iw_reject(cm_id, NULL, 0);
 421                spin_lock_irqsave(&cm_id_priv->lock, flags);
 422                break;
 423        case IW_CM_STATE_CONN_SENT:
 424        case IW_CM_STATE_DESTROYING:
 425        default:
 426                BUG();
 427                break;
 428        }
 429        if (cm_id_priv->qp) {
 430                cm_id_priv->id.device->ops.iw_rem_ref(cm_id_priv->qp);
 431                cm_id_priv->qp = NULL;
 432        }
 433        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 434
 435        if (cm_id->mapped) {
 436                iwpm_remove_mapinfo(&cm_id->local_addr, &cm_id->m_local_addr);
 437                iwpm_remove_mapping(&cm_id->local_addr, RDMA_NL_IWCM);
 438        }
 439
 440        (void)iwcm_deref_id(cm_id_priv);
 441}
 442
 443/*
 444 * This function is only called by the application thread and cannot
 445 * be called by the event thread. The function drops the initial
 446 * reference taken by iw_create_cm_id; the cm_id object is freed once
 447 * the last reference is released.
 448 */
 449void iw_destroy_cm_id(struct iw_cm_id *cm_id)
 450{
 451        destroy_cm_id(cm_id);
 452}
 453EXPORT_SYMBOL(iw_destroy_cm_id);
 454
 455/**
 456 * iw_cm_check_wildcard - If the mapped IP address is a wildcard, use the original
 457 * @pm_addr: sockaddr containing the IP address to check for a wildcard
 458 * @cm_addr: sockaddr containing the actual IP address
 459 * @cm_outaddr: sockaddr in which to set the IP address, leaving the port as-is
 460 *
 461 * Checks pm_addr for a wildcard address and, if one is found, sets the
 462 * IP address in cm_outaddr to the actual address taken from cm_addr.
 463 */
 464static void iw_cm_check_wildcard(struct sockaddr_storage *pm_addr,
 465                                 struct sockaddr_storage *cm_addr,
 466                                 struct sockaddr_storage *cm_outaddr)
 467{
 468        if (pm_addr->ss_family == AF_INET) {
 469                struct sockaddr_in *pm4_addr = (struct sockaddr_in *)pm_addr;
 470
 471                if (pm4_addr->sin_addr.s_addr == htonl(INADDR_ANY)) {
 472                        struct sockaddr_in *cm4_addr =
 473                                (struct sockaddr_in *)cm_addr;
 474                        struct sockaddr_in *cm4_outaddr =
 475                                (struct sockaddr_in *)cm_outaddr;
 476
 477                        cm4_outaddr->sin_addr = cm4_addr->sin_addr;
 478                }
 479        } else {
 480                struct sockaddr_in6 *pm6_addr = (struct sockaddr_in6 *)pm_addr;
 481
 482                if (ipv6_addr_type(&pm6_addr->sin6_addr) == IPV6_ADDR_ANY) {
 483                        struct sockaddr_in6 *cm6_addr =
 484                                (struct sockaddr_in6 *)cm_addr;
 485                        struct sockaddr_in6 *cm6_outaddr =
 486                                (struct sockaddr_in6 *)cm_outaddr;
 487
 488                        cm6_outaddr->sin6_addr = cm6_addr->sin6_addr;
 489                }
 490        }
 491}
 492
 493/**
 494 * iw_cm_map - Use portmapper to map the ports
 495 * @cm_id: connection manager pointer
 496 * @active: Indicates the active side when true
 497 * returns nonzero for error only if iwpm_create_mapinfo() fails
 498 *
 499 * Tries to add a mapping for a port using the Portmapper. If
 500 * successful in mapping the IP/Port it will check the remote
 501 * mapped IP address for a wildcard IP address and replace the
 502 * zero IP address with the remote_addr.
 503 */
 504static int iw_cm_map(struct iw_cm_id *cm_id, bool active)
 505{
 506        const char *devname = dev_name(&cm_id->device->dev);
 507        const char *ifname = cm_id->device->iw_ifname;
 508        struct iwpm_dev_data pm_reg_msg = {};
 509        struct iwpm_sa_data pm_msg;
 510        int status;
 511
 512        if (strlen(devname) >= sizeof(pm_reg_msg.dev_name) ||
 513            strlen(ifname) >= sizeof(pm_reg_msg.if_name))
 514                return -EINVAL;
 515
 516        cm_id->m_local_addr = cm_id->local_addr;
 517        cm_id->m_remote_addr = cm_id->remote_addr;
 518
 519        strcpy(pm_reg_msg.dev_name, devname);
 520        strcpy(pm_reg_msg.if_name, ifname);
 521
 522        if (iwpm_register_pid(&pm_reg_msg, RDMA_NL_IWCM) ||
 523            !iwpm_valid_pid())
 524                return 0;
 525
 526        cm_id->mapped = true;
 527        pm_msg.loc_addr = cm_id->local_addr;
 528        pm_msg.rem_addr = cm_id->remote_addr;
 529        pm_msg.flags = (cm_id->device->iw_driver_flags & IW_F_NO_PORT_MAP) ?
 530                       IWPM_FLAGS_NO_PORT_MAP : 0;
 531        if (active)
 532                status = iwpm_add_and_query_mapping(&pm_msg,
 533                                                    RDMA_NL_IWCM);
 534        else
 535                status = iwpm_add_mapping(&pm_msg, RDMA_NL_IWCM);
 536
 537        if (!status) {
 538                cm_id->m_local_addr = pm_msg.mapped_loc_addr;
 539                if (active) {
 540                        cm_id->m_remote_addr = pm_msg.mapped_rem_addr;
 541                        iw_cm_check_wildcard(&pm_msg.mapped_rem_addr,
 542                                             &cm_id->remote_addr,
 543                                             &cm_id->m_remote_addr);
 544                }
 545        }
 546
 547        return iwpm_create_mapinfo(&cm_id->local_addr,
 548                                   &cm_id->m_local_addr,
 549                                   RDMA_NL_IWCM, pm_msg.flags);
 550}
 551
 552/*
 553 * CM_ID <-- LISTEN
 554 *
 555 * Start listening for connect requests. Generates one CONNECT_REQUEST
 556 * event for each inbound connect request.
 557 */
 558int iw_cm_listen(struct iw_cm_id *cm_id, int backlog)
 559{
 560        struct iwcm_id_private *cm_id_priv;
 561        unsigned long flags;
 562        int ret;
 563
 564        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
 565
 566        if (!backlog)
 567                backlog = default_backlog;
 568
 569        ret = alloc_work_entries(cm_id_priv, backlog);
 570        if (ret)
 571                return ret;
 572
 573        spin_lock_irqsave(&cm_id_priv->lock, flags);
 574        switch (cm_id_priv->state) {
 575        case IW_CM_STATE_IDLE:
 576                cm_id_priv->state = IW_CM_STATE_LISTEN;
 577                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 578                ret = iw_cm_map(cm_id, false);
 579                if (!ret)
 580                        ret = cm_id->device->ops.iw_create_listen(cm_id,
 581                                                                  backlog);
 582                if (ret)
 583                        cm_id_priv->state = IW_CM_STATE_IDLE;
 584                spin_lock_irqsave(&cm_id_priv->lock, flags);
 585                break;
 586        default:
 587                ret = -EINVAL;
 588        }
 589        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 590
 591        return ret;
 592}
 593EXPORT_SYMBOL(iw_cm_listen);
 594
 595/*
 596 * CM_ID <-- IDLE
 597 *
 598 * Rejects an inbound connection request. No events are generated.
 599 */
 600int iw_cm_reject(struct iw_cm_id *cm_id,
 601                 const void *private_data,
 602                 u8 private_data_len)
 603{
 604        struct iwcm_id_private *cm_id_priv;
 605        unsigned long flags;
 606        int ret;
 607
 608        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
 609        set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
 610
 611        spin_lock_irqsave(&cm_id_priv->lock, flags);
 612        if (cm_id_priv->state != IW_CM_STATE_CONN_RECV) {
 613                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 614                clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
 615                wake_up_all(&cm_id_priv->connect_wait);
 616                return -EINVAL;
 617        }
 618        cm_id_priv->state = IW_CM_STATE_IDLE;
 619        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 620
 621        ret = cm_id->device->ops.iw_reject(cm_id, private_data,
 622                                          private_data_len);
 623
 624        clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
 625        wake_up_all(&cm_id_priv->connect_wait);
 626
 627        return ret;
 628}
 629EXPORT_SYMBOL(iw_cm_reject);
 630
 631/*
 632 * CM_ID <-- ESTABLISHED
 633 *
 634 * Accepts an inbound connection request and generates an ESTABLISHED
 635 * event. Callers of iw_cm_disconnect and iw_destroy_cm_id will block
 636 * until the ESTABLISHED event is received from the provider.
 637 */
 638int iw_cm_accept(struct iw_cm_id *cm_id,
 639                 struct iw_cm_conn_param *iw_param)
 640{
 641        struct iwcm_id_private *cm_id_priv;
 642        struct ib_qp *qp;
 643        unsigned long flags;
 644        int ret;
 645
 646        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
 647        set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
 648
 649        spin_lock_irqsave(&cm_id_priv->lock, flags);
 650        if (cm_id_priv->state != IW_CM_STATE_CONN_RECV) {
 651                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 652                clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
 653                wake_up_all(&cm_id_priv->connect_wait);
 654                return -EINVAL;
 655        }
 656        /* Get the ib_qp given the QPN */
 657        qp = cm_id->device->ops.iw_get_qp(cm_id->device, iw_param->qpn);
 658        if (!qp) {
 659                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 660                clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
 661                wake_up_all(&cm_id_priv->connect_wait);
 662                return -EINVAL;
 663        }
 664        cm_id->device->ops.iw_add_ref(qp);
 665        cm_id_priv->qp = qp;
 666        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 667
 668        ret = cm_id->device->ops.iw_accept(cm_id, iw_param);
 669        if (ret) {
 670                /* An error on accept precludes provider events */
 671                BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV);
 672                cm_id_priv->state = IW_CM_STATE_IDLE;
 673                spin_lock_irqsave(&cm_id_priv->lock, flags);
 674                if (cm_id_priv->qp) {
 675                        cm_id->device->ops.iw_rem_ref(qp);
 676                        cm_id_priv->qp = NULL;
 677                }
 678                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 679                clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
 680                wake_up_all(&cm_id_priv->connect_wait);
 681        }
 682
 683        return ret;
 684}
 685EXPORT_SYMBOL(iw_cm_accept);
 686
 687/*
 688 * Active Side: CM_ID <-- CONN_SENT
 689 *
 690 * If successful, results in the generation of a CONNECT_REPLY
 691 * event. iw_cm_disconnect and iw_destroy_cm_id will block until the
 692 * CONNECT_REPLY event is received from the provider.
 693 */
 694int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
 695{
 696        struct iwcm_id_private *cm_id_priv;
 697        int ret;
 698        unsigned long flags;
 699        struct ib_qp *qp;
 700
 701        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
 702
 703        ret = alloc_work_entries(cm_id_priv, 4);
 704        if (ret)
 705                return ret;
 706
 707        set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
 708        spin_lock_irqsave(&cm_id_priv->lock, flags);
 709
 710        if (cm_id_priv->state != IW_CM_STATE_IDLE) {
 711                ret = -EINVAL;
 712                goto err;
 713        }
 714
 715        /* Get the ib_qp given the QPN */
 716        qp = cm_id->device->ops.iw_get_qp(cm_id->device, iw_param->qpn);
 717        if (!qp) {
 718                ret = -EINVAL;
 719                goto err;
 720        }
 721        cm_id->device->ops.iw_add_ref(qp);
 722        cm_id_priv->qp = qp;
 723        cm_id_priv->state = IW_CM_STATE_CONN_SENT;
 724        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 725
 726        ret = iw_cm_map(cm_id, true);
 727        if (!ret)
 728                ret = cm_id->device->ops.iw_connect(cm_id, iw_param);
 729        if (!ret)
 730                return 0;       /* success */
 731
 732        spin_lock_irqsave(&cm_id_priv->lock, flags);
 733        if (cm_id_priv->qp) {
 734                cm_id->device->ops.iw_rem_ref(qp);
 735                cm_id_priv->qp = NULL;
 736        }
 737        cm_id_priv->state = IW_CM_STATE_IDLE;
 738err:
 739        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 740        clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
 741        wake_up_all(&cm_id_priv->connect_wait);
 742        return ret;
 743}
 744EXPORT_SYMBOL(iw_cm_connect);
 745
 746/*
 747 * Passive Side: new CM_ID <-- CONN_RECV
 748 *
 749 * Handles an inbound connect request. The function creates a new
 750 * iw_cm_id to represent the new connection and inherits the client
 751 * callback function and other attributes from the listening parent.
 752 *
 753 * The work item contains a pointer to the listen_cm_id and the event. The
 754 * listen_cm_id contains the client cm_handler, context and
 755 * device. These are copied to the new cm_id when it is created. The event
 756 * contains the new four tuple.
 757 *
 758 * An error on the child should not affect the parent, so this
 759 * function does not return a value.
 760 */
 761static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
 762                                struct iw_cm_event *iw_event)
 763{
 764        unsigned long flags;
 765        struct iw_cm_id *cm_id;
 766        struct iwcm_id_private *cm_id_priv;
 767        int ret;
 768
 769        /*
 770         * The provider should never generate a connection request
 771         * event with a bad status.
 772         */
 773        BUG_ON(iw_event->status);
 774
 775        cm_id = iw_create_cm_id(listen_id_priv->id.device,
 776                                listen_id_priv->id.cm_handler,
 777                                listen_id_priv->id.context);
 778        /* If the cm_id could not be created, ignore the request */
 779        if (IS_ERR(cm_id))
 780                goto out;
 781
 782        cm_id->provider_data = iw_event->provider_data;
 783        cm_id->m_local_addr = iw_event->local_addr;
 784        cm_id->m_remote_addr = iw_event->remote_addr;
 785        cm_id->local_addr = listen_id_priv->id.local_addr;
 786
 787        ret = iwpm_get_remote_info(&listen_id_priv->id.m_local_addr,
 788                                   &iw_event->remote_addr,
 789                                   &cm_id->remote_addr,
 790                                   RDMA_NL_IWCM);
 791        if (ret) {
 792                cm_id->remote_addr = iw_event->remote_addr;
 793        } else {
 794                iw_cm_check_wildcard(&listen_id_priv->id.m_local_addr,
 795                                     &iw_event->local_addr,
 796                                     &cm_id->local_addr);
 797                iw_event->local_addr = cm_id->local_addr;
 798                iw_event->remote_addr = cm_id->remote_addr;
 799        }
 800
 801        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
 802        cm_id_priv->state = IW_CM_STATE_CONN_RECV;
 803
 804        /*
 805         * We could be destroying the listening id. If so, ignore this
 806         * upcall.
 807         */
 808        spin_lock_irqsave(&listen_id_priv->lock, flags);
 809        if (listen_id_priv->state != IW_CM_STATE_LISTEN) {
 810                spin_unlock_irqrestore(&listen_id_priv->lock, flags);
 811                iw_cm_reject(cm_id, NULL, 0);
 812                iw_destroy_cm_id(cm_id);
 813                goto out;
 814        }
 815        spin_unlock_irqrestore(&listen_id_priv->lock, flags);
 816
 817        ret = alloc_work_entries(cm_id_priv, 3);
 818        if (ret) {
 819                iw_cm_reject(cm_id, NULL, 0);
 820                iw_destroy_cm_id(cm_id);
 821                goto out;
 822        }
 823
 824        /* Call the client CM handler */
 825        ret = cm_id->cm_handler(cm_id, iw_event);
 826        if (ret) {
 827                iw_cm_reject(cm_id, NULL, 0);
 828                iw_destroy_cm_id(cm_id);
 829        }
 830
 831out:
 832        if (iw_event->private_data_len)
 833                kfree(iw_event->private_data);
 834}
 835
 836/*
 837 * Passive Side: CM_ID <-- ESTABLISHED
 838 *
 839 * The provider generated an ESTABLISHED event which means that
 840 * the MPA negotiation has completed successfully and we are now in MPA
 841 * FPDU mode.
 842 *
 843 * This event can only be received in the CONN_RECV state. If the
 844 * remote peer closed, the ESTABLISHED event would be received followed
 845 * by the CLOSE event. If the app closes, it will block until we wake
 846 * it up after processing this event.
 847 */
 848static int cm_conn_est_handler(struct iwcm_id_private *cm_id_priv,
 849                               struct iw_cm_event *iw_event)
 850{
 851        unsigned long flags;
 852        int ret;
 853
 854        spin_lock_irqsave(&cm_id_priv->lock, flags);
 855
 856        /*
 857         * We clear the CONNECT_WAIT bit here to allow the callback
 858         * function to call iw_cm_disconnect. Calling iw_destroy_cm_id
 859         * from a callback handler is not allowed.
 860         */
 861        clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
 862        BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV);
 863        cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
 864        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 865        ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
 866        wake_up_all(&cm_id_priv->connect_wait);
 867
 868        return ret;
 869}
 870
 871/*
 872 * Active Side: CM_ID <-- ESTABLISHED
 873 *
 874 * The app has called connect and is waiting for the established event to
 875 * post its requests to the server. This event will wake up anyone
 876 * blocked in iw_cm_disconnect or iw_destroy_cm_id.
 877 */
 878static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv,
 879                               struct iw_cm_event *iw_event)
 880{
 881        unsigned long flags;
 882        int ret;
 883
 884        spin_lock_irqsave(&cm_id_priv->lock, flags);
 885        /*
 886         * Clear the connect wait bit so a callback function calling
 887         * iw_cm_disconnect will not wait and deadlock this thread
 888         */
 889        clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
 890        BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT);
 891        if (iw_event->status == 0) {
 892                cm_id_priv->id.m_local_addr = iw_event->local_addr;
 893                cm_id_priv->id.m_remote_addr = iw_event->remote_addr;
 894                iw_event->local_addr = cm_id_priv->id.local_addr;
 895                iw_event->remote_addr = cm_id_priv->id.remote_addr;
 896                cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
 897        } else {
 898                /* REJECTED or RESET */
 899                cm_id_priv->id.device->ops.iw_rem_ref(cm_id_priv->qp);
 900                cm_id_priv->qp = NULL;
 901                cm_id_priv->state = IW_CM_STATE_IDLE;
 902        }
 903        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 904        ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
 905
 906        if (iw_event->private_data_len)
 907                kfree(iw_event->private_data);
 908
 909        /* Wake up waiters on connect complete */
 910        wake_up_all(&cm_id_priv->connect_wait);
 911
 912        return ret;
 913}
 914
 915/*
 916 * CM_ID <-- CLOSING
 917 *
 918 * If in the ESTABLISHED state, move to CLOSING.
 919 */
 920static void cm_disconnect_handler(struct iwcm_id_private *cm_id_priv,
 921                                  struct iw_cm_event *iw_event)
 922{
 923        unsigned long flags;
 924
 925        spin_lock_irqsave(&cm_id_priv->lock, flags);
 926        if (cm_id_priv->state == IW_CM_STATE_ESTABLISHED)
 927                cm_id_priv->state = IW_CM_STATE_CLOSING;
 928        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 929}
 930
 931/*
 932 * CM_ID <-- IDLE
 933 *
 934 * If in the ESTABLISHED or CLOSING states, the QP will have been
 935 * moved by the provider to the ERR state. Disassociate the CM_ID from
 936 * the QP, move to IDLE, and remove the 'connected' reference.
 937 *
 938 * If in some other state, the cm_id was destroyed asynchronously.
 939 * This is the last reference that will result in waking up
 940 * the app thread blocked in iw_destroy_cm_id.
 941 */
 942static int cm_close_handler(struct iwcm_id_private *cm_id_priv,
 943                                  struct iw_cm_event *iw_event)
 944{
 945        unsigned long flags;
 946        int ret = 0;
 947        spin_lock_irqsave(&cm_id_priv->lock, flags);
 948
 949        if (cm_id_priv->qp) {
 950                cm_id_priv->id.device->ops.iw_rem_ref(cm_id_priv->qp);
 951                cm_id_priv->qp = NULL;
 952        }
 953        switch (cm_id_priv->state) {
 954        case IW_CM_STATE_ESTABLISHED:
 955        case IW_CM_STATE_CLOSING:
 956                cm_id_priv->state = IW_CM_STATE_IDLE;
 957                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 958                ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
 959                spin_lock_irqsave(&cm_id_priv->lock, flags);
 960                break;
 961        case IW_CM_STATE_DESTROYING:
 962                break;
 963        default:
 964                BUG();
 965        }
 966        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 967
 968        return ret;
 969}
 970
 971static int process_event(struct iwcm_id_private *cm_id_priv,
 972                         struct iw_cm_event *iw_event)
 973{
 974        int ret = 0;
 975
 976        switch (iw_event->event) {
 977        case IW_CM_EVENT_CONNECT_REQUEST:
 978                cm_conn_req_handler(cm_id_priv, iw_event);
 979                break;
 980        case IW_CM_EVENT_CONNECT_REPLY:
 981                ret = cm_conn_rep_handler(cm_id_priv, iw_event);
 982                break;
 983        case IW_CM_EVENT_ESTABLISHED:
 984                ret = cm_conn_est_handler(cm_id_priv, iw_event);
 985                break;
 986        case IW_CM_EVENT_DISCONNECT:
 987                cm_disconnect_handler(cm_id_priv, iw_event);
 988                break;
 989        case IW_CM_EVENT_CLOSE:
 990                ret = cm_close_handler(cm_id_priv, iw_event);
 991                break;
 992        default:
 993                BUG();
 994        }
 995
 996        return ret;
 997}
 998
 999/*
1000 * Process events on the work_list for the cm_id. If the callback
1001 * function returns a non-zero value for an event, the cm_id is
1002 * destroyed here, from the workqueue context, via destroy_cm_id().
1003 * Each queued event holds a reference on the cm_id, so the object
1004 * cannot disappear while events are still pending; the final kfree
1005 * happens only after the last event has been processed and the last
1006 * reference has been dropped.
1007 */
1008static void cm_work_handler(struct work_struct *_work)
1009{
1010        struct iwcm_work *work = container_of(_work, struct iwcm_work, work);
1011        struct iw_cm_event levent;
1012        struct iwcm_id_private *cm_id_priv = work->cm_id;
1013        unsigned long flags;
1014        int empty;
1015        int ret = 0;
1016
1017        spin_lock_irqsave(&cm_id_priv->lock, flags);
1018        empty = list_empty(&cm_id_priv->work_list);
1019        while (!empty) {
1020                work = list_entry(cm_id_priv->work_list.next,
1021                                  struct iwcm_work, list);
1022                list_del_init(&work->list);
1023                empty = list_empty(&cm_id_priv->work_list);
1024                levent = work->event;
1025                put_work(work);
1026                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1027
1028                if (!test_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags)) {
1029                        ret = process_event(cm_id_priv, &levent);
1030                        if (ret)
1031                                destroy_cm_id(&cm_id_priv->id);
1032                } else
1033                        pr_debug("dropping event %d\n", levent.event);
1034                if (iwcm_deref_id(cm_id_priv))
1035                        return;
1036                if (empty)
1037                        return;
1038                spin_lock_irqsave(&cm_id_priv->lock, flags);
1039        }
1040        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1041}
1042
1043/*
1044 * This function is called in interrupt context. Schedule events on
1045 * the iwcm_wq thread to allow callback functions to downcall into
1046 * the CM and/or block.  Events are queued to a per-CM_ID
1047 * work_list. If this is the first event on the work_list, the work
1048 * element is also queued on the iwcm_wq thread.
1049 *
1050 * Each event holds a reference on the cm_id. Until the last posted
1051 * event has been delivered and processed, the cm_id cannot be
1052 * deleted.
1053 *
1054 * Returns:
1055 *            0 - the event was handled.
1056 *      -ENOMEM - the event was not handled due to lack of resources.
1057 */
1058static int cm_event_handler(struct iw_cm_id *cm_id,
1059                             struct iw_cm_event *iw_event)
1060{
1061        struct iwcm_work *work;
1062        struct iwcm_id_private *cm_id_priv;
1063        unsigned long flags;
1064        int ret = 0;
1065
1066        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
1067
1068        spin_lock_irqsave(&cm_id_priv->lock, flags);
1069        work = get_work(cm_id_priv);
1070        if (!work) {
1071                ret = -ENOMEM;
1072                goto out;
1073        }
1074
1075        INIT_WORK(&work->work, cm_work_handler);
1076        work->cm_id = cm_id_priv;
1077        work->event = *iw_event;
1078
1079        if ((work->event.event == IW_CM_EVENT_CONNECT_REQUEST ||
1080             work->event.event == IW_CM_EVENT_CONNECT_REPLY) &&
1081            work->event.private_data_len) {
1082                ret = copy_private_data(&work->event);
1083                if (ret) {
1084                        put_work(work);
1085                        goto out;
1086                }
1087        }
1088
1089        atomic_inc(&cm_id_priv->refcount);
1090        if (list_empty(&cm_id_priv->work_list)) {
1091                list_add_tail(&work->list, &cm_id_priv->work_list);
1092                queue_work(iwcm_wq, &work->work);
1093        } else
1094                list_add_tail(&work->list, &cm_id_priv->work_list);
1095out:
1096        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1097        return ret;
1098}
1099
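     /* INIT/RTR need the QP state plus remote read/write access flags. */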
1100static int iwcm_init_qp_init_attr(struct iwcm_id_private *cm_id_priv,
1101                                  struct ib_qp_attr *qp_attr,
1102                                  int *qp_attr_mask)
1103{
1104        unsigned long flags;
1105        int ret;
1106
1107        spin_lock_irqsave(&cm_id_priv->lock, flags);
1108        switch (cm_id_priv->state) {
1109        case IW_CM_STATE_IDLE:
1110        case IW_CM_STATE_CONN_SENT:
1111        case IW_CM_STATE_CONN_RECV:
1112        case IW_CM_STATE_ESTABLISHED:
1113                *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
1114                qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE |
1115                                           IB_ACCESS_REMOTE_READ;
1116                ret = 0;
1117                break;
1118        default:
1119                ret = -EINVAL;
1120                break;
1121        }
1122        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1123        return ret;
1124}
1125
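     /* The RTS transition needs no extra attributes; report an empty mask. */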
1126static int iwcm_init_qp_rts_attr(struct iwcm_id_private *cm_id_priv,
1127                                  struct ib_qp_attr *qp_attr,
1128                                  int *qp_attr_mask)
1129{
1130        unsigned long flags;
1131        int ret;
1132
1133        spin_lock_irqsave(&cm_id_priv->lock, flags);
1134        switch (cm_id_priv->state) {
1135        case IW_CM_STATE_IDLE:
1136        case IW_CM_STATE_CONN_SENT:
1137        case IW_CM_STATE_CONN_RECV:
1138        case IW_CM_STATE_ESTABLISHED:
1139                *qp_attr_mask = 0;
1140                ret = 0;
1141                break;
1142        default:
1143                ret = -EINVAL;
1144                break;
1145        }
1146        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1147        return ret;
1148}
1149
1150int iw_cm_init_qp_attr(struct iw_cm_id *cm_id,
1151                       struct ib_qp_attr *qp_attr,
1152                       int *qp_attr_mask)
1153{
1154        struct iwcm_id_private *cm_id_priv;
1155        int ret;
1156
1157        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
1158        switch (qp_attr->qp_state) {
1159        case IB_QPS_INIT:
1160        case IB_QPS_RTR:
1161                ret = iwcm_init_qp_init_attr(cm_id_priv,
1162                                             qp_attr, qp_attr_mask);
1163                break;
1164        case IB_QPS_RTS:
1165                ret = iwcm_init_qp_rts_attr(cm_id_priv,
1166                                            qp_attr, qp_attr_mask);
1167                break;
1168        default:
1169                ret = -EINVAL;
1170                break;
1171        }
1172        return ret;
1173}
1174EXPORT_SYMBOL(iw_cm_init_qp_attr);
1175
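     /* Module init: port mapper, netlink callbacks, event workqueue and sysctl. */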
1176static int __init iw_cm_init(void)
1177{
1178        int ret;
1179
1180        ret = iwpm_init(RDMA_NL_IWCM);
1181        if (ret)
1182                pr_err("iw_cm: couldn't init iwpm\n");
1183        else
1184                rdma_nl_register(RDMA_NL_IWCM, iwcm_nl_cb_table);
1185        iwcm_wq = alloc_ordered_workqueue("iw_cm_wq", 0);
1186        if (!iwcm_wq)
1187                return -ENOMEM;
1188
1189        iwcm_ctl_table_hdr = register_net_sysctl(&init_net, "net/iw_cm",
1190                                                 iwcm_ctl_table);
1191        if (!iwcm_ctl_table_hdr) {
1192                pr_err("iw_cm: couldn't register sysctl paths\n");
1193                destroy_workqueue(iwcm_wq);
1194                return -ENOMEM;
1195        }
1196
1197        return 0;
1198}
1199
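     /* Module exit: tear down sysctl, workqueue, netlink callbacks and port mapper. */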
1200static void __exit iw_cm_cleanup(void)
1201{
1202        unregister_net_sysctl_table(iwcm_ctl_table_hdr);
1203        destroy_workqueue(iwcm_wq);
1204        rdma_nl_unregister(RDMA_NL_IWCM);
1205        iwpm_exit(RDMA_NL_IWCM);
1206}
1207
1208MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_IWCM, 2);
1209
1210module_init(iw_cm_init);
1211module_exit(iw_cm_cleanup);
1212