linux/drivers/infiniband/core/ucma.c
/*
 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials
 *      provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/completion.h>
#include <linux/file.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/module.h>
#include <linux/nsproxy.h>

#include <linux/nospec.h>

#include <rdma/rdma_user_cm.h>
#include <rdma/ib_marshall.h>
#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/ib_addr.h>
#include <rdma/ib.h>
#include <rdma/ib_cm.h>
#include <rdma/rdma_netlink.h>
#include "core_priv.h"

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int max_backlog = 1024;

static struct ctl_table_header *ucma_ctl_table_hdr;
static struct ctl_table ucma_ctl_table[] = {
        {
                .procname       = "max_backlog",
                .data           = &max_backlog,
                .maxlen         = sizeof max_backlog,
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
        { }
};

struct ucma_file {
        struct mutex            mut;
        struct file             *filp;
        struct list_head        ctx_list;
        struct list_head        event_list;
        wait_queue_head_t       poll_wait;
        struct workqueue_struct *close_wq;
};

struct ucma_context {
        u32                     id;
        struct completion       comp;
        refcount_t              ref;
        int                     events_reported;
        int                     backlog;

        struct ucma_file        *file;
        struct rdma_cm_id       *cm_id;
        struct mutex            mutex;
        u64                     uid;

        struct list_head        list;
        struct list_head        mc_list;
        /* mark that the device is in the process of destroying its internal
         * HW resources; protected by the ctx_table lock
         */
        int                     closing;
        /* sync between removal event and id destroy, protected by file mut */
        int                     destroying;
        struct work_struct      close_work;
};

struct ucma_multicast {
        struct ucma_context     *ctx;
        u32                     id;
        int                     events_reported;

        u64                     uid;
        u8                      join_state;
        struct list_head        list;
        struct sockaddr_storage addr;
};

struct ucma_event {
        struct ucma_context     *ctx;
        struct ucma_multicast   *mc;
        struct list_head        list;
        struct rdma_cm_id       *cm_id;
        struct rdma_ucm_event_resp resp;
        struct work_struct      close_work;
};

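/*
 * ctx_table and multicast_table map the u32 ids handed out to userspace back
 * to their kernel objects. Ids are allocated with xa_alloc(), so lookups from
 * the write() handlers are plain xa_load() calls under the xarray lock.
 */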
static DEFINE_XARRAY_ALLOC(ctx_table);
static DEFINE_XARRAY_ALLOC(multicast_table);

static const struct file_operations ucma_fops;

static inline struct ucma_context *_ucma_find_context(int id,
                                                      struct ucma_file *file)
{
        struct ucma_context *ctx;

        ctx = xa_load(&ctx_table, id);
        if (!ctx)
                ctx = ERR_PTR(-ENOENT);
        else if (ctx->file != file || !ctx->cm_id)
                ctx = ERR_PTR(-EINVAL);
        return ctx;
}

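/*
 * Look up an id under the xarray lock and take a reference on the context,
 * refusing contexts that are already being torn down. Callers pair this with
 * ucma_put_ctx().
 */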
static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
{
        struct ucma_context *ctx;

        xa_lock(&ctx_table);
        ctx = _ucma_find_context(id, file);
        if (!IS_ERR(ctx)) {
                if (ctx->closing)
                        ctx = ERR_PTR(-EIO);
                else
                        refcount_inc(&ctx->ref);
        }
        xa_unlock(&ctx_table);
        return ctx;
}

static void ucma_put_ctx(struct ucma_context *ctx)
{
        if (refcount_dec_and_test(&ctx->ref))
                complete(&ctx->comp);
}

/*
 * Same as ucma_get_ctx but requires that ->cm_id->device is valid, eg that the
 * CM_ID is bound.
 */
static struct ucma_context *ucma_get_ctx_dev(struct ucma_file *file, int id)
{
        struct ucma_context *ctx = ucma_get_ctx(file, id);

        if (IS_ERR(ctx))
                return ctx;
        if (!ctx->cm_id->device) {
                ucma_put_ctx(ctx);
                return ERR_PTR(-EINVAL);
        }
        return ctx;
}

static void ucma_close_event_id(struct work_struct *work)
{
        struct ucma_event *uevent_close = container_of(work, struct ucma_event, close_work);

        rdma_destroy_id(uevent_close->cm_id);
        kfree(uevent_close);
}

static void ucma_close_id(struct work_struct *work)
{
        struct ucma_context *ctx = container_of(work, struct ucma_context, close_work);

        /* Once all inflight tasks are finished, we close all underlying
         * resources. The context is still alive until it is explicitly
         * destroyed by its creator.
         */
        ucma_put_ctx(ctx);
        wait_for_completion(&ctx->comp);
        /* No new events will be generated after destroying the id. */
        rdma_destroy_id(ctx->cm_id);
}

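/*
 * Allocate a new context, publish it in ctx_table and link it into the file's
 * ctx_list. Called with file->mut held. The caller is responsible for
 * attaching a cm_id; until then _ucma_find_context() rejects the id with
 * -EINVAL.
 */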
static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
{
        struct ucma_context *ctx;

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return NULL;

        INIT_WORK(&ctx->close_work, ucma_close_id);
        refcount_set(&ctx->ref, 1);
        init_completion(&ctx->comp);
        INIT_LIST_HEAD(&ctx->mc_list);
        ctx->file = file;
        mutex_init(&ctx->mutex);

        if (xa_alloc(&ctx_table, &ctx->id, ctx, xa_limit_32b, GFP_KERNEL))
                goto error;

        list_add_tail(&ctx->list, &file->ctx_list);
        return ctx;

error:
        kfree(ctx);
        return NULL;
}

static struct ucma_multicast *ucma_alloc_multicast(struct ucma_context *ctx)
{
        struct ucma_multicast *mc;

        mc = kzalloc(sizeof(*mc), GFP_KERNEL);
        if (!mc)
                return NULL;

        mc->ctx = ctx;
        if (xa_alloc(&multicast_table, &mc->id, NULL, xa_limit_32b, GFP_KERNEL))
                goto error;

        list_add_tail(&mc->list, &ctx->mc_list);
        return mc;

error:
        kfree(mc);
        return NULL;
}

static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst,
                                 struct rdma_conn_param *src)
{
        if (src->private_data_len)
                memcpy(dst->private_data, src->private_data,
                       src->private_data_len);
        dst->private_data_len = src->private_data_len;
        dst->responder_resources = src->responder_resources;
        dst->initiator_depth = src->initiator_depth;
        dst->flow_control = src->flow_control;
        dst->retry_count = src->retry_count;
        dst->rnr_retry_count = src->rnr_retry_count;
        dst->srq = src->srq;
        dst->qp_num = src->qp_num;
}

static void ucma_copy_ud_event(struct ib_device *device,
                               struct rdma_ucm_ud_param *dst,
                               struct rdma_ud_param *src)
{
        if (src->private_data_len)
                memcpy(dst->private_data, src->private_data,
                       src->private_data_len);
        dst->private_data_len = src->private_data_len;
        ib_copy_ah_attr_to_user(device, &dst->ah_attr, &src->ah_attr);
        dst->qp_num = src->qp_num;
        dst->qkey = src->qkey;
}

static void ucma_set_event_context(struct ucma_context *ctx,
                                   struct rdma_cm_event *event,
                                   struct ucma_event *uevent)
{
        uevent->ctx = ctx;
        switch (event->event) {
        case RDMA_CM_EVENT_MULTICAST_JOIN:
        case RDMA_CM_EVENT_MULTICAST_ERROR:
                uevent->mc = (struct ucma_multicast *)
                             event->param.ud.private_data;
                uevent->resp.uid = uevent->mc->uid;
                uevent->resp.id = uevent->mc->id;
                break;
        default:
                uevent->resp.uid = ctx->uid;
                uevent->resp.id = ctx->id;
                break;
        }
}

/* Called with file->mut locked for the relevant context. */
static void ucma_removal_event_handler(struct rdma_cm_id *cm_id)
{
        struct ucma_context *ctx = cm_id->context;
        struct ucma_event *con_req_eve;
        int event_found = 0;

        if (ctx->destroying)
                return;

        /* Only if the context still points to this cm_id does it own it and
         * can it be queued to be closed; otherwise the cm_id is an inflight
         * one that is part of this context's event list, pending to be
         * detached and reattached to its new context as part of
         * ucma_get_event, handled separately below.
         */
        if (ctx->cm_id == cm_id) {
                xa_lock(&ctx_table);
                ctx->closing = 1;
                xa_unlock(&ctx_table);
                queue_work(ctx->file->close_wq, &ctx->close_work);
                return;
        }

        list_for_each_entry(con_req_eve, &ctx->file->event_list, list) {
                if (con_req_eve->cm_id == cm_id &&
                    con_req_eve->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
                        list_del(&con_req_eve->list);
                        INIT_WORK(&con_req_eve->close_work, ucma_close_event_id);
                        queue_work(ctx->file->close_wq, &con_req_eve->close_work);
                        event_found = 1;
                        break;
                }
        }
        if (!event_found)
                pr_err("ucma_removal_event_handler: warning: connect request event wasn't found\n");
}

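/*
 * rdma_cm event handler: translate the event into a struct ucma_event, queue
 * it on the owning file's event_list and wake up pollers. Connect requests
 * are accounted against the listener's backlog, and device removal is
 * forwarded to ucma_removal_event_handler() so the underlying HW resources
 * can be released.
 */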
static int ucma_event_handler(struct rdma_cm_id *cm_id,
                              struct rdma_cm_event *event)
{
        struct ucma_event *uevent;
        struct ucma_context *ctx = cm_id->context;
        int ret = 0;

        uevent = kzalloc(sizeof(*uevent), GFP_KERNEL);
        if (!uevent)
                return event->event == RDMA_CM_EVENT_CONNECT_REQUEST;

        mutex_lock(&ctx->file->mut);
        uevent->cm_id = cm_id;
        ucma_set_event_context(ctx, event, uevent);
        uevent->resp.event = event->event;
        uevent->resp.status = event->status;
        if (cm_id->qp_type == IB_QPT_UD)
                ucma_copy_ud_event(cm_id->device, &uevent->resp.param.ud,
                                   &event->param.ud);
        else
                ucma_copy_conn_event(&uevent->resp.param.conn,
                                     &event->param.conn);

        uevent->resp.ece.vendor_id = event->ece.vendor_id;
        uevent->resp.ece.attr_mod = event->ece.attr_mod;

        if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
                if (!ctx->backlog) {
                        ret = -ENOMEM;
                        kfree(uevent);
                        goto out;
                }
                ctx->backlog--;
        } else if (!ctx->uid || ctx->cm_id != cm_id) {
                /*
                 * We ignore events for new connections until userspace has set
                 * their context.  This can only happen if an error occurs on a
                 * new connection before the user accepts it.  This is okay,
                 * since the accept will just fail later. However, we do need
                 * to release the underlying HW resources in case of a device
                 * removal event.
                 */
                if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
                        ucma_removal_event_handler(cm_id);

                kfree(uevent);
                goto out;
        }

        list_add_tail(&uevent->list, &ctx->file->event_list);
        wake_up_interruptible(&ctx->file->poll_wait);
        if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
                ucma_removal_event_handler(cm_id);
out:
        mutex_unlock(&ctx->file->mut);
        return ret;
}

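/*
 * Report the next pending event to userspace, blocking unless O_NONBLOCK is
 * set. A CONNECT_REQUEST event allocates a new context for the incoming id so
 * that userspace can later accept or reject it by id.
 */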
static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
                              int in_len, int out_len)
{
        struct ucma_context *ctx;
        struct rdma_ucm_get_event cmd;
        struct ucma_event *uevent;
        int ret = 0;

        /*
         * Old 32 bit user space does not send the 4 byte padding in the
         * reserved field. We don't care, allow it to keep working.
         */
        if (out_len < sizeof(uevent->resp) - sizeof(uevent->resp.reserved) -
                              sizeof(uevent->resp.ece))
                return -ENOSPC;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        mutex_lock(&file->mut);
        while (list_empty(&file->event_list)) {
                mutex_unlock(&file->mut);

                if (file->filp->f_flags & O_NONBLOCK)
                        return -EAGAIN;

                if (wait_event_interruptible(file->poll_wait,
                                             !list_empty(&file->event_list)))
                        return -ERESTARTSYS;

                mutex_lock(&file->mut);
        }

        uevent = list_entry(file->event_list.next, struct ucma_event, list);

        if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
                ctx = ucma_alloc_ctx(file);
                if (!ctx) {
                        ret = -ENOMEM;
                        goto done;
                }
                uevent->ctx->backlog++;
                ctx->cm_id = uevent->cm_id;
                ctx->cm_id->context = ctx;
                uevent->resp.id = ctx->id;
        }

        if (copy_to_user(u64_to_user_ptr(cmd.response),
                         &uevent->resp,
                         min_t(size_t, out_len, sizeof(uevent->resp)))) {
                ret = -EFAULT;
                goto done;
        }

        list_del(&uevent->list);
        uevent->ctx->events_reported++;
        if (uevent->mc)
                uevent->mc->events_reported++;
        kfree(uevent);
done:
        mutex_unlock(&file->mut);
        return ret;
}

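/* Map the RDMA port space of a new id to the QP type it implies. */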
static int ucma_get_qp_type(struct rdma_ucm_create_id *cmd, enum ib_qp_type *qp_type)
{
        switch (cmd->ps) {
        case RDMA_PS_TCP:
                *qp_type = IB_QPT_RC;
                return 0;
        case RDMA_PS_UDP:
        case RDMA_PS_IPOIB:
                *qp_type = IB_QPT_UD;
                return 0;
        case RDMA_PS_IB:
                *qp_type = cmd->qp_type;
                return 0;
        default:
                return -EINVAL;
        }
}

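/*
 * Create a new id on behalf of userspace: allocate a context, create the
 * underlying rdma_cm_id in the caller's network namespace and return the
 * context id to userspace.
 */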
static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
                              int in_len, int out_len)
{
        struct rdma_ucm_create_id cmd;
        struct rdma_ucm_create_id_resp resp;
        struct ucma_context *ctx;
        struct rdma_cm_id *cm_id;
        enum ib_qp_type qp_type;
        int ret;

        if (out_len < sizeof(resp))
                return -ENOSPC;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ret = ucma_get_qp_type(&cmd, &qp_type);
        if (ret)
                return ret;

        mutex_lock(&file->mut);
        ctx = ucma_alloc_ctx(file);
        mutex_unlock(&file->mut);
        if (!ctx)
                return -ENOMEM;

        ctx->uid = cmd.uid;
        cm_id = __rdma_create_id(current->nsproxy->net_ns,
                                 ucma_event_handler, ctx, cmd.ps, qp_type, NULL);
        if (IS_ERR(cm_id)) {
                ret = PTR_ERR(cm_id);
                goto err1;
        }

        resp.id = ctx->id;
        if (copy_to_user(u64_to_user_ptr(cmd.response),
                         &resp, sizeof(resp))) {
                ret = -EFAULT;
                goto err2;
        }

        ctx->cm_id = cm_id;
        return 0;

err2:
        rdma_destroy_id(cm_id);
err1:
        xa_erase(&ctx_table, ctx->id);
        mutex_lock(&file->mut);
        list_del(&ctx->list);
        mutex_unlock(&file->mut);
        kfree(ctx);
        return ret;
}

static void ucma_cleanup_multicast(struct ucma_context *ctx)
{
        struct ucma_multicast *mc, *tmp;

        mutex_lock(&ctx->file->mut);
        list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) {
                list_del(&mc->list);
                xa_erase(&multicast_table, mc->id);
                kfree(mc);
        }
        mutex_unlock(&ctx->file->mut);
}

static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
{
        struct ucma_event *uevent, *tmp;

        list_for_each_entry_safe(uevent, tmp, &mc->ctx->file->event_list, list) {
                if (uevent->mc != mc)
                        continue;

                list_del(&uevent->list);
                kfree(uevent);
        }
}

/*
 * ucma_free_ctx is called after the underlying rdma CM-ID is destroyed. At
 * this point, no new events will be reported from the hardware. However, we
 * still need to clean up the UCMA context for this ID. Specifically, there
 * might be events that have not yet been consumed by the user space software.
 * These might include pending connect requests which we have not completed
 * processing.  We cannot call rdma_destroy_id while holding the lock of the
 * context (file->mut), as it might cause a deadlock. We therefore extract all
 * relevant events from the context pending events list while holding the
 * mutex. After that we release them as needed.
 */
static int ucma_free_ctx(struct ucma_context *ctx)
{
        int events_reported;
        struct ucma_event *uevent, *tmp;
        LIST_HEAD(list);

        ucma_cleanup_multicast(ctx);

        /* Cleanup events not yet reported to the user. */
        mutex_lock(&ctx->file->mut);
        list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
                if (uevent->ctx == ctx)
                        list_move_tail(&uevent->list, &list);
        }
        list_del(&ctx->list);
        mutex_unlock(&ctx->file->mut);

        list_for_each_entry_safe(uevent, tmp, &list, list) {
                list_del(&uevent->list);
                if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
                        rdma_destroy_id(uevent->cm_id);
                kfree(uevent);
        }

        events_reported = ctx->events_reported;
        mutex_destroy(&ctx->mutex);
        kfree(ctx);
        return events_reported;
}

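/*
 * Tear down an id: remove it from ctx_table first so no new lookups can
 * succeed, flush any inflight close work, then destroy the cm_id and free the
 * context, returning the number of events reported to userspace.
 */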
static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
                               int in_len, int out_len)
{
        struct rdma_ucm_destroy_id cmd;
        struct rdma_ucm_destroy_id_resp resp;
        struct ucma_context *ctx;
        int ret = 0;

        if (out_len < sizeof(resp))
                return -ENOSPC;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        xa_lock(&ctx_table);
        ctx = _ucma_find_context(cmd.id, file);
        if (!IS_ERR(ctx))
                __xa_erase(&ctx_table, ctx->id);
        xa_unlock(&ctx_table);

        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        mutex_lock(&ctx->file->mut);
        ctx->destroying = 1;
        mutex_unlock(&ctx->file->mut);

        flush_workqueue(ctx->file->close_wq);
        /* At this point it's guaranteed that there is no inflight
         * closing task */
        xa_lock(&ctx_table);
        if (!ctx->closing) {
                xa_unlock(&ctx_table);
                ucma_put_ctx(ctx);
                wait_for_completion(&ctx->comp);
                rdma_destroy_id(ctx->cm_id);
        } else {
                xa_unlock(&ctx_table);
        }

        resp.events_reported = ucma_free_ctx(ctx);
        if (copy_to_user(u64_to_user_ptr(cmd.response),
                         &resp, sizeof(resp)))
                ret = -EFAULT;

        return ret;
}

static ssize_t ucma_bind_ip(struct ucma_file *file, const char __user *inbuf,
                              int in_len, int out_len)
{
        struct rdma_ucm_bind_ip cmd;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        if (!rdma_addr_size_in6(&cmd.addr))
                return -EINVAL;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        mutex_lock(&ctx->mutex);
        ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
        mutex_unlock(&ctx->mutex);

        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_bind(struct ucma_file *file, const char __user *inbuf,
                         int in_len, int out_len)
{
        struct rdma_ucm_bind cmd;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        if (cmd.reserved || !cmd.addr_size ||
            cmd.addr_size != rdma_addr_size_kss(&cmd.addr))
                return -EINVAL;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        mutex_lock(&ctx->mutex);
        ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
        mutex_unlock(&ctx->mutex);
        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_resolve_ip(struct ucma_file *file,
                               const char __user *inbuf,
                               int in_len, int out_len)
{
        struct rdma_ucm_resolve_ip cmd;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        if ((cmd.src_addr.sin6_family && !rdma_addr_size_in6(&cmd.src_addr)) ||
            !rdma_addr_size_in6(&cmd.dst_addr))
                return -EINVAL;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        mutex_lock(&ctx->mutex);
        ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
                                (struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
        mutex_unlock(&ctx->mutex);
        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_resolve_addr(struct ucma_file *file,
                                 const char __user *inbuf,
                                 int in_len, int out_len)
{
        struct rdma_ucm_resolve_addr cmd;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        if (cmd.reserved ||
            (cmd.src_size && (cmd.src_size != rdma_addr_size_kss(&cmd.src_addr))) ||
            !cmd.dst_size || (cmd.dst_size != rdma_addr_size_kss(&cmd.dst_addr)))
                return -EINVAL;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        mutex_lock(&ctx->mutex);
        ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
                                (struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
        mutex_unlock(&ctx->mutex);
        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_resolve_route(struct ucma_file *file,
                                  const char __user *inbuf,
                                  int in_len, int out_len)
{
        struct rdma_ucm_resolve_route cmd;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx_dev(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        mutex_lock(&ctx->mutex);
        ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms);
        mutex_unlock(&ctx->mutex);
        ucma_put_ctx(ctx);
        return ret;
}

static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
                               struct rdma_route *route)
{
        struct rdma_dev_addr *dev_addr;

        resp->num_paths = route->num_paths;
        switch (route->num_paths) {
        case 0:
                dev_addr = &route->addr.dev_addr;
                rdma_addr_get_dgid(dev_addr,
                                   (union ib_gid *) &resp->ib_route[0].dgid);
                rdma_addr_get_sgid(dev_addr,
                                   (union ib_gid *) &resp->ib_route[0].sgid);
                resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
                break;
        case 2:
                ib_copy_path_rec_to_user(&resp->ib_route[1],
                                         &route->path_rec[1]);
                fallthrough;
        case 1:
                ib_copy_path_rec_to_user(&resp->ib_route[0],
                                         &route->path_rec[0]);
                break;
        default:
                break;
        }
}

static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp,
                                 struct rdma_route *route)
{
        resp->num_paths = route->num_paths;
        switch (route->num_paths) {
        case 0:
                rdma_ip2gid((struct sockaddr *)&route->addr.dst_addr,
                            (union ib_gid *)&resp->ib_route[0].dgid);
                rdma_ip2gid((struct sockaddr *)&route->addr.src_addr,
                            (union ib_gid *)&resp->ib_route[0].sgid);
                resp->ib_route[0].pkey = cpu_to_be16(0xffff);
                break;
        case 2:
                ib_copy_path_rec_to_user(&resp->ib_route[1],
                                         &route->path_rec[1]);
                fallthrough;
        case 1:
                ib_copy_path_rec_to_user(&resp->ib_route[0],
                                         &route->path_rec[0]);
                break;
        default:
                break;
        }
}

static void ucma_copy_iw_route(struct rdma_ucm_query_route_resp *resp,
                               struct rdma_route *route)
{
        struct rdma_dev_addr *dev_addr;

        dev_addr = &route->addr.dev_addr;
        rdma_addr_get_dgid(dev_addr, (union ib_gid *) &resp->ib_route[0].dgid);
        rdma_addr_get_sgid(dev_addr, (union ib_gid *) &resp->ib_route[0].sgid);
}

static ssize_t ucma_query_route(struct ucma_file *file,
                                const char __user *inbuf,
                                int in_len, int out_len)
{
        struct rdma_ucm_query cmd;
        struct rdma_ucm_query_route_resp resp;
        struct ucma_context *ctx;
        struct sockaddr *addr;
        int ret = 0;

        if (out_len < offsetof(struct rdma_ucm_query_route_resp, ibdev_index))
                return -ENOSPC;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        mutex_lock(&ctx->mutex);
        memset(&resp, 0, sizeof resp);
        addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
        memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
                                     sizeof(struct sockaddr_in) :
                                     sizeof(struct sockaddr_in6));
        addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
        memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ?
                                     sizeof(struct sockaddr_in) :
                                     sizeof(struct sockaddr_in6));
        if (!ctx->cm_id->device)
                goto out;

        resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
        resp.ibdev_index = ctx->cm_id->device->index;
        resp.port_num = ctx->cm_id->port_num;

        if (rdma_cap_ib_sa(ctx->cm_id->device, ctx->cm_id->port_num))
                ucma_copy_ib_route(&resp, &ctx->cm_id->route);
        else if (rdma_protocol_roce(ctx->cm_id->device, ctx->cm_id->port_num))
                ucma_copy_iboe_route(&resp, &ctx->cm_id->route);
        else if (rdma_protocol_iwarp(ctx->cm_id->device, ctx->cm_id->port_num))
                ucma_copy_iw_route(&resp, &ctx->cm_id->route);

out:
        mutex_unlock(&ctx->mutex);
        if (copy_to_user(u64_to_user_ptr(cmd.response), &resp,
                         min_t(size_t, out_len, sizeof(resp))))
                ret = -EFAULT;

        ucma_put_ctx(ctx);
        return ret;
}

static void ucma_query_device_addr(struct rdma_cm_id *cm_id,
                                   struct rdma_ucm_query_addr_resp *resp)
{
        if (!cm_id->device)
                return;

        resp->node_guid = (__force __u64) cm_id->device->node_guid;
        resp->ibdev_index = cm_id->device->index;
        resp->port_num = cm_id->port_num;
        resp->pkey = (__force __u16) cpu_to_be16(
                     ib_addr_get_pkey(&cm_id->route.addr.dev_addr));
}

static ssize_t ucma_query_addr(struct ucma_context *ctx,
                               void __user *response, int out_len)
{
        struct rdma_ucm_query_addr_resp resp;
        struct sockaddr *addr;
        int ret = 0;

        if (out_len < offsetof(struct rdma_ucm_query_addr_resp, ibdev_index))
                return -ENOSPC;

        memset(&resp, 0, sizeof resp);

        addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
        resp.src_size = rdma_addr_size(addr);
        memcpy(&resp.src_addr, addr, resp.src_size);

        addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
        resp.dst_size = rdma_addr_size(addr);
        memcpy(&resp.dst_addr, addr, resp.dst_size);

        ucma_query_device_addr(ctx->cm_id, &resp);

        if (copy_to_user(response, &resp, min_t(size_t, out_len, sizeof(resp))))
                ret = -EFAULT;

        return ret;
}

static ssize_t ucma_query_path(struct ucma_context *ctx,
                               void __user *response, int out_len)
{
        struct rdma_ucm_query_path_resp *resp;
        int i, ret = 0;

        if (out_len < sizeof(*resp))
                return -ENOSPC;

        resp = kzalloc(out_len, GFP_KERNEL);
        if (!resp)
                return -ENOMEM;

        resp->num_paths = ctx->cm_id->route.num_paths;
        for (i = 0, out_len -= sizeof(*resp);
             i < resp->num_paths && out_len > sizeof(struct ib_path_rec_data);
             i++, out_len -= sizeof(struct ib_path_rec_data)) {
                struct sa_path_rec *rec = &ctx->cm_id->route.path_rec[i];

                resp->path_data[i].flags = IB_PATH_GMP | IB_PATH_PRIMARY |
                                           IB_PATH_BIDIRECTIONAL;
                if (rec->rec_type == SA_PATH_REC_TYPE_OPA) {
                        struct sa_path_rec ib;

                        sa_convert_path_opa_to_ib(&ib, rec);
                        ib_sa_pack_path(&ib, &resp->path_data[i].path_rec);
                } else {
                        ib_sa_pack_path(rec, &resp->path_data[i].path_rec);
                }
        }

        if (copy_to_user(response, resp, struct_size(resp, path_data, i)))
                ret = -EFAULT;

        kfree(resp);
        return ret;
}

static ssize_t ucma_query_gid(struct ucma_context *ctx,
                              void __user *response, int out_len)
{
        struct rdma_ucm_query_addr_resp resp;
        struct sockaddr_ib *addr;
        int ret = 0;

        if (out_len < offsetof(struct rdma_ucm_query_addr_resp, ibdev_index))
                return -ENOSPC;

        memset(&resp, 0, sizeof resp);

        ucma_query_device_addr(ctx->cm_id, &resp);

        addr = (struct sockaddr_ib *) &resp.src_addr;
        resp.src_size = sizeof(*addr);
        if (ctx->cm_id->route.addr.src_addr.ss_family == AF_IB) {
                memcpy(addr, &ctx->cm_id->route.addr.src_addr, resp.src_size);
        } else {
                addr->sib_family = AF_IB;
                addr->sib_pkey = (__force __be16) resp.pkey;
                rdma_read_gids(ctx->cm_id, (union ib_gid *)&addr->sib_addr,
                               NULL);
                addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *)
                                                    &ctx->cm_id->route.addr.src_addr);
        }

        addr = (struct sockaddr_ib *) &resp.dst_addr;
        resp.dst_size = sizeof(*addr);
        if (ctx->cm_id->route.addr.dst_addr.ss_family == AF_IB) {
                memcpy(addr, &ctx->cm_id->route.addr.dst_addr, resp.dst_size);
        } else {
                addr->sib_family = AF_IB;
                addr->sib_pkey = (__force __be16) resp.pkey;
                rdma_read_gids(ctx->cm_id, NULL,
                               (union ib_gid *)&addr->sib_addr);
                addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *)
                                                    &ctx->cm_id->route.addr.dst_addr);
        }

        if (copy_to_user(response, &resp, min_t(size_t, out_len, sizeof(resp))))
                ret = -EFAULT;

        return ret;
}

static ssize_t ucma_query(struct ucma_file *file,
                          const char __user *inbuf,
                          int in_len, int out_len)
{
        struct rdma_ucm_query cmd;
        struct ucma_context *ctx;
        void __user *response;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        response = u64_to_user_ptr(cmd.response);
        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        mutex_lock(&ctx->mutex);
        switch (cmd.option) {
        case RDMA_USER_CM_QUERY_ADDR:
                ret = ucma_query_addr(ctx, response, out_len);
                break;
        case RDMA_USER_CM_QUERY_PATH:
                ret = ucma_query_path(ctx, response, out_len);
                break;
        case RDMA_USER_CM_QUERY_GID:
                ret = ucma_query_gid(ctx, response, out_len);
                break;
        default:
                ret = -ENOSYS;
                break;
        }
        mutex_unlock(&ctx->mutex);

        ucma_put_ctx(ctx);
        return ret;
}

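/*
 * Convert userspace connection parameters to the kernel representation. The
 * QP number is masked to its 24 valid bits, and the qkey is only taken from
 * userspace for AF_IB addresses.
 */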
static void ucma_copy_conn_param(struct rdma_cm_id *id,
                                 struct rdma_conn_param *dst,
                                 struct rdma_ucm_conn_param *src)
{
        dst->private_data = src->private_data;
        dst->private_data_len = src->private_data_len;
        dst->responder_resources = src->responder_resources;
        dst->initiator_depth = src->initiator_depth;
        dst->flow_control = src->flow_control;
        dst->retry_count = src->retry_count;
        dst->rnr_retry_count = src->rnr_retry_count;
        dst->srq = src->srq;
        dst->qp_num = src->qp_num & 0xFFFFFF;
        dst->qkey = (id->route.addr.src_addr.ss_family == AF_IB) ? src->qkey : 0;
}

static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
                            int in_len, int out_len)
{
        struct rdma_conn_param conn_param;
        struct rdma_ucm_ece ece = {};
        struct rdma_ucm_connect cmd;
        struct ucma_context *ctx;
        size_t in_size;
        int ret;

        if (in_len < offsetofend(typeof(cmd), reserved))
                return -EINVAL;
        in_size = min_t(size_t, in_len, sizeof(cmd));
        if (copy_from_user(&cmd, inbuf, in_size))
                return -EFAULT;

        if (!cmd.conn_param.valid)
                return -EINVAL;

        ctx = ucma_get_ctx_dev(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
        if (offsetofend(typeof(cmd), ece) <= in_size) {
                ece.vendor_id = cmd.ece.vendor_id;
                ece.attr_mod = cmd.ece.attr_mod;
        }

        mutex_lock(&ctx->mutex);
        ret = rdma_connect_ece(ctx->cm_id, &conn_param, &ece);
        mutex_unlock(&ctx->mutex);
        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,
                           int in_len, int out_len)
{
        struct rdma_ucm_listen cmd;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ?
                       cmd.backlog : max_backlog;
        mutex_lock(&ctx->mutex);
        ret = rdma_listen(ctx->cm_id, ctx->backlog);
        mutex_unlock(&ctx->mutex);
        ucma_put_ctx(ctx);
        return ret;
}

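/*
 * Accept an incoming connection request. When connection parameters are
 * supplied, file->mut is held across the accept so that ctx->uid is
 * published before ucma_event_handler() can deliver events for the newly
 * accepted id (the handler ignores events while ctx->uid is zero).
 */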
static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
                           int in_len, int out_len)
{
        struct rdma_ucm_accept cmd;
        struct rdma_conn_param conn_param;
        struct rdma_ucm_ece ece = {};
        struct ucma_context *ctx;
        size_t in_size;
        int ret;

        if (in_len < offsetofend(typeof(cmd), reserved))
                return -EINVAL;
        in_size = min_t(size_t, in_len, sizeof(cmd));
        if (copy_from_user(&cmd, inbuf, in_size))
                return -EFAULT;

        ctx = ucma_get_ctx_dev(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        if (offsetofend(typeof(cmd), ece) <= in_size) {
                ece.vendor_id = cmd.ece.vendor_id;
                ece.attr_mod = cmd.ece.attr_mod;
        }

        if (cmd.conn_param.valid) {
                ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
                mutex_lock(&file->mut);
                mutex_lock(&ctx->mutex);
                ret = __rdma_accept_ece(ctx->cm_id, &conn_param, NULL, &ece);
                mutex_unlock(&ctx->mutex);
                if (!ret)
                        ctx->uid = cmd.uid;
                mutex_unlock(&file->mut);
        } else {
                mutex_lock(&ctx->mutex);
                ret = __rdma_accept_ece(ctx->cm_id, NULL, NULL, &ece);
                mutex_unlock(&ctx->mutex);
        }
        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
                           int in_len, int out_len)
{
        struct rdma_ucm_reject cmd;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        if (!cmd.reason)
                cmd.reason = IB_CM_REJ_CONSUMER_DEFINED;

        switch (cmd.reason) {
        case IB_CM_REJ_CONSUMER_DEFINED:
        case IB_CM_REJ_VENDOR_OPTION_NOT_SUPPORTED:
                break;
        default:
                return -EINVAL;
        }

        ctx = ucma_get_ctx_dev(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        mutex_lock(&ctx->mutex);
        ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len,
                          cmd.reason);
        mutex_unlock(&ctx->mutex);
        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf,
                               int in_len, int out_len)
{
        struct rdma_ucm_disconnect cmd;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx_dev(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        mutex_lock(&ctx->mutex);
        ret = rdma_disconnect(ctx->cm_id);
        mutex_unlock(&ctx->mutex);
        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_init_qp_attr(struct ucma_file *file,
                                 const char __user *inbuf,
                                 int in_len, int out_len)
{
        struct rdma_ucm_init_qp_attr cmd;
        struct ib_uverbs_qp_attr resp;
        struct ucma_context *ctx;
        struct ib_qp_attr qp_attr;
        int ret;

        if (out_len < sizeof(resp))
                return -ENOSPC;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        if (cmd.qp_state > IB_QPS_ERR)
                return -EINVAL;

        ctx = ucma_get_ctx_dev(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        resp.qp_attr_mask = 0;
        memset(&qp_attr, 0, sizeof qp_attr);
        qp_attr.qp_state = cmd.qp_state;
        mutex_lock(&ctx->mutex);
        ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
        mutex_unlock(&ctx->mutex);
        if (ret)
                goto out;

        ib_copy_qp_attr_to_user(ctx->cm_id->device, &resp, &qp_attr);
        if (copy_to_user(u64_to_user_ptr(cmd.response),
                         &resp, sizeof(resp)))
                ret = -EFAULT;

out:
        ucma_put_ctx(ctx);
        return ret;
}

static int ucma_set_option_id(struct ucma_context *ctx, int optname,
                              void *optval, size_t optlen)
{
        int ret = 0;

        switch (optname) {
        case RDMA_OPTION_ID_TOS:
                if (optlen != sizeof(u8)) {
                        ret = -EINVAL;
                        break;
                }
                rdma_set_service_type(ctx->cm_id, *((u8 *) optval));
                break;
        case RDMA_OPTION_ID_REUSEADDR:
                if (optlen != sizeof(int)) {
                        ret = -EINVAL;
                        break;
                }
                ret = rdma_set_reuseaddr(ctx->cm_id, *((int *) optval) ? 1 : 0);
                break;
        case RDMA_OPTION_ID_AFONLY:
                if (optlen != sizeof(int)) {
                        ret = -EINVAL;
                        break;
                }
                ret = rdma_set_afonly(ctx->cm_id, *((int *) optval) ? 1 : 0);
                break;
        case RDMA_OPTION_ID_ACK_TIMEOUT:
                if (optlen != sizeof(u8)) {
                        ret = -EINVAL;
                        break;
                }
                ret = rdma_set_ack_timeout(ctx->cm_id, *((u8 *)optval));
                break;
        default:
                ret = -ENOSYS;
        }

        return ret;
}

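/*
 * Install a userspace-supplied primary path on the cm_id, converting it to an
 * OPA path record when the port requires it, and synthesize a ROUTE_RESOLVED
 * event so userspace sees the normal resolution flow.
 */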
static int ucma_set_ib_path(struct ucma_context *ctx,
                            struct ib_path_rec_data *path_data, size_t optlen)
{
        struct sa_path_rec sa_path;
        struct rdma_cm_event event;
        int ret;

        if (optlen % sizeof(*path_data))
                return -EINVAL;

        for (; optlen; optlen -= sizeof(*path_data), path_data++) {
                if (path_data->flags == (IB_PATH_GMP | IB_PATH_PRIMARY |
                                         IB_PATH_BIDIRECTIONAL))
                        break;
        }

        if (!optlen)
                return -EINVAL;

        if (!ctx->cm_id->device)
                return -EINVAL;

        memset(&sa_path, 0, sizeof(sa_path));

        sa_path.rec_type = SA_PATH_REC_TYPE_IB;
        ib_sa_unpack_path(path_data->path_rec, &sa_path);

        if (rdma_cap_opa_ah(ctx->cm_id->device, ctx->cm_id->port_num)) {
                struct sa_path_rec opa;

                sa_convert_path_ib_to_opa(&opa, &sa_path);
                mutex_lock(&ctx->mutex);
                ret = rdma_set_ib_path(ctx->cm_id, &opa);
                mutex_unlock(&ctx->mutex);
        } else {
                mutex_lock(&ctx->mutex);
                ret = rdma_set_ib_path(ctx->cm_id, &sa_path);
                mutex_unlock(&ctx->mutex);
        }
        if (ret)
                return ret;

        memset(&event, 0, sizeof event);
        event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
        return ucma_event_handler(ctx->cm_id, &event);
}

static int ucma_set_option_ib(struct ucma_context *ctx, int optname,
                              void *optval, size_t optlen)
{
        int ret;

        switch (optname) {
        case RDMA_OPTION_IB_PATH:
                ret = ucma_set_ib_path(ctx, optval, optlen);
                break;
        default:
                ret = -ENOSYS;
        }

        return ret;
}

static int ucma_set_option_level(struct ucma_context *ctx, int level,
                                 int optname, void *optval, size_t optlen)
{
        int ret;

        switch (level) {
        case RDMA_OPTION_ID:
                mutex_lock(&ctx->mutex);
                ret = ucma_set_option_id(ctx, optname, optval, optlen);
                mutex_unlock(&ctx->mutex);
                break;
        case RDMA_OPTION_IB:
                ret = ucma_set_option_ib(ctx, optname, optval, optlen);
                break;
        default:
                ret = -ENOSYS;
        }

        return ret;
}

static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
                               int in_len, int out_len)
{
        struct rdma_ucm_set_option cmd;
        struct ucma_context *ctx;
        void *optval;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE))
                return -EINVAL;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        optval = memdup_user(u64_to_user_ptr(cmd.optval),
                             cmd.optlen);
        if (IS_ERR(optval)) {
                ret = PTR_ERR(optval);
                goto out;
        }

        ret = ucma_set_option_level(ctx, cmd.level, cmd.optname, optval,
                                    cmd.optlen);
        kfree(optval);

out:
        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
                           int in_len, int out_len)
{
        struct rdma_ucm_notify cmd;
        struct ucma_context *ctx;
        int ret = -EINVAL;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        mutex_lock(&ctx->mutex);
        if (ctx->cm_id->device)
                ret = rdma_notify(ctx->cm_id, (enum ib_event_type)cmd.event);
        mutex_unlock(&ctx->mutex);

        ucma_put_ctx(ctx);
        return ret;
}

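/*
 * Common multicast join path for the IB and IP variants: validate the address
 * and join state, allocate a ucma_multicast, join on the cm_id and publish
 * the multicast id to userspace only once the join has succeeded.
 */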
static ssize_t ucma_process_join(struct ucma_file *file,
                                 struct rdma_ucm_join_mcast *cmd, int out_len)
{
        struct rdma_ucm_create_id_resp resp;
        struct ucma_context *ctx;
        struct ucma_multicast *mc;
        struct sockaddr *addr;
        int ret;
        u8 join_state;

        if (out_len < sizeof(resp))
                return -ENOSPC;

        addr = (struct sockaddr *) &cmd->addr;
        if (cmd->addr_size != rdma_addr_size(addr))
                return -EINVAL;

        if (cmd->join_flags == RDMA_MC_JOIN_FLAG_FULLMEMBER)
                join_state = BIT(FULLMEMBER_JOIN);
        else if (cmd->join_flags == RDMA_MC_JOIN_FLAG_SENDONLY_FULLMEMBER)
                join_state = BIT(SENDONLY_FULLMEMBER_JOIN);
        else
                return -EINVAL;

        ctx = ucma_get_ctx_dev(file, cmd->id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        mutex_lock(&file->mut);
        mc = ucma_alloc_multicast(ctx);
        if (!mc) {
                ret = -ENOMEM;
                goto err1;
        }
        mc->join_state = join_state;
        mc->uid = cmd->uid;
        memcpy(&mc->addr, addr, cmd->addr_size);
        mutex_lock(&ctx->mutex);
        ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *)&mc->addr,
                                  join_state, mc);
        mutex_unlock(&ctx->mutex);
        if (ret)
                goto err2;

        resp.id = mc->id;
        if (copy_to_user(u64_to_user_ptr(cmd->response),
                         &resp, sizeof(resp))) {
                ret = -EFAULT;
                goto err3;
        }

        xa_store(&multicast_table, mc->id, mc, 0);

        mutex_unlock(&file->mut);
        ucma_put_ctx(ctx);
        return 0;

err3:
        rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr);
        ucma_cleanup_mc_events(mc);
err2:
        xa_erase(&multicast_table, mc->id);
        list_del(&mc->list);
        kfree(mc);
err1:
        mutex_unlock(&file->mut);
        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_join_ip_multicast(struct ucma_file *file,
                                      const char __user *inbuf,
                                      int in_len, int out_len)
{
        struct rdma_ucm_join_ip_mcast cmd;
        struct rdma_ucm_join_mcast join_cmd;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        join_cmd.response = cmd.response;
        join_cmd.uid = cmd.uid;
        join_cmd.id = cmd.id;
        join_cmd.addr_size = rdma_addr_size_in6(&cmd.addr);
        if (!join_cmd.addr_size)
                return -EINVAL;

        join_cmd.join_flags = RDMA_MC_JOIN_FLAG_FULLMEMBER;
        memcpy(&join_cmd.addr, &cmd.addr, join_cmd.addr_size);

        return ucma_process_join(file, &join_cmd, out_len);
}

static ssize_t ucma_join_multicast(struct ucma_file *file,
                                   const char __user *inbuf,
                                   int in_len, int out_len)
{
        struct rdma_ucm_join_mcast cmd;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        if (!rdma_addr_size_kss(&cmd.addr))
                return -EINVAL;

        return ucma_process_join(file, &cmd, out_len);
}

1565static ssize_t ucma_leave_multicast(struct ucma_file *file,
1566                                    const char __user *inbuf,
1567                                    int in_len, int out_len)
1568{
1569        struct rdma_ucm_destroy_id cmd;
1570        struct rdma_ucm_destroy_id_resp resp;
1571        struct ucma_multicast *mc;
1572        int ret = 0;
1573
1574        if (out_len < sizeof(resp))
1575                return -ENOSPC;
1576
1577        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
1578                return -EFAULT;
1579
1580        xa_lock(&multicast_table);
1581        mc = xa_load(&multicast_table, cmd.id);
1582        if (!mc)
1583                mc = ERR_PTR(-ENOENT);
1584        else if (mc->ctx->file != file)
1585                mc = ERR_PTR(-EINVAL);
1586        else if (!refcount_inc_not_zero(&mc->ctx->ref))
1587                mc = ERR_PTR(-ENXIO);
1588        else
1589                __xa_erase(&multicast_table, mc->id);
1590        xa_unlock(&multicast_table);
1591
1592        if (IS_ERR(mc)) {
1593                ret = PTR_ERR(mc);
1594                goto out;
1595        }
1596
1597        mutex_lock(&mc->ctx->mutex);
1598        rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
1599        mutex_unlock(&mc->ctx->mutex);
1600
1601        mutex_lock(&mc->ctx->file->mut);
1602        ucma_cleanup_mc_events(mc);
1603        list_del(&mc->list);
1604        mutex_unlock(&mc->ctx->file->mut);
1605
1606        ucma_put_ctx(mc->ctx);
1607        resp.events_reported = mc->events_reported;
1608        kfree(mc);
1609
1610        if (copy_to_user(u64_to_user_ptr(cmd.response),
1611                         &resp, sizeof(resp)))
1612                ret = -EFAULT;
1613out:
1614        return ret;
1615}
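
    /*
     * Note the pattern above: the lookup, the ownership check, the context
     * reference, and the erase all happen under a single xa_lock(), so a
     * concurrent leave or close cannot free the entry between the
     * xa_load() and the __xa_erase().  A minimal sketch of the same idiom,
     * with a hypothetical table and a hypothetical obj_get() that fails
     * once the refcount has already hit zero:
     *
     *	xa_lock(&table);
     *	obj = xa_load(&table, id);
     *	if (obj && obj_get(obj))
     *		__xa_erase(&table, id);
     *	else
     *		obj = NULL;
     *	xa_unlock(&table);
     */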
1616
1617static void ucma_lock_files(struct ucma_file *file1, struct ucma_file *file2)
1618{
1619        /* Acquire the mutexes in pointer order to prevent an ABBA deadlock. */
1620        if (file1 < file2) {
1621                mutex_lock(&file1->mut);
1622                mutex_lock_nested(&file2->mut, SINGLE_DEPTH_NESTING);
1623        } else {
1624                mutex_lock(&file2->mut);
1625                mutex_lock_nested(&file1->mut, SINGLE_DEPTH_NESTING);
1626        }
1627}
1628
1629static void ucma_unlock_files(struct ucma_file *file1, struct ucma_file *file2)
1630{
1631        if (file1 < file2) {
1632                mutex_unlock(&file2->mut);
1633                mutex_unlock(&file1->mut);
1634        } else {
1635                mutex_unlock(&file1->mut);
1636                mutex_unlock(&file2->mut);
1637        }
1638}
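
    /*
     * Without the address-ordered locking above, two migrations running in
     * opposite directions could deadlock on the classic ABBA pattern:
     *
     *	task 1: mutex_lock(&A->mut); mutex_lock(&B->mut);
     *	task 2: mutex_lock(&B->mut); mutex_lock(&A->mut);
     *
     * Taking the lower-addressed mutex first gives every task the same
     * global order; mutex_lock_nested() merely tells lockdep that taking a
     * second lock of the same class is intentional here.
     */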
1639
1640static void ucma_move_events(struct ucma_context *ctx, struct ucma_file *file)
1641{
1642        struct ucma_event *uevent, *tmp;
1643
1644        list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list)
1645                if (uevent->ctx == ctx)
1646                        list_move_tail(&uevent->list, &file->event_list);
1647}
1648
1649static ssize_t ucma_migrate_id(struct ucma_file *new_file,
1650                               const char __user *inbuf,
1651                               int in_len, int out_len)
1652{
1653        struct rdma_ucm_migrate_id cmd;
1654        struct rdma_ucm_migrate_resp resp;
1655        struct ucma_context *ctx;
1656        struct fd f;
1657        struct ucma_file *cur_file;
1658        int ret = 0;
1659
1660        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
1661                return -EFAULT;
1662
1663        /* Pin the fd so it cannot be closed while we use it. */
1664        f = fdget(cmd.fd);
1665        if (!f.file)
1666                return -ENOENT;
1667        if (f.file->f_op != &ucma_fops) {
1668                ret = -EINVAL;
1669                goto file_put;
1670        }
1671
1672        /* Validate current fd and prevent destruction of id. */
1673        ctx = ucma_get_ctx(f.file->private_data, cmd.id);
1674        if (IS_ERR(ctx)) {
1675                ret = PTR_ERR(ctx);
1676                goto file_put;
1677        }
1678
1679        cur_file = ctx->file;
1680        if (cur_file == new_file) {
1681                resp.events_reported = ctx->events_reported;
1682                goto response;
1683        }
1684
1685        /*
1686         * Migrate events between the fds, maintaining order and preventing
1687         * new events from being queued ahead of the existing ones.
1688         */
1689        ucma_lock_files(cur_file, new_file);
1690        xa_lock(&ctx_table);
1691
1692        list_move_tail(&ctx->list, &new_file->ctx_list);
1693        ucma_move_events(ctx, new_file);
1694        ctx->file = new_file;
1695        resp.events_reported = ctx->events_reported;
1696
1697        xa_unlock(&ctx_table);
1698        ucma_unlock_files(cur_file, new_file);
1699
1700response:
1701        if (copy_to_user(u64_to_user_ptr(cmd.response),
1702                         &resp, sizeof(resp)))
1703                ret = -EFAULT;
1704
1705        ucma_put_ctx(ctx);
1706file_put:
1707        fdput(f);
1708        return ret;
1709}
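
    /*
     * A sketch of the matching userspace call, with hypothetical old_fd,
     * new_fd and ctx_id values: the request is written to new_fd (the
     * destination file), while cmd.fd names the current owner.
     *
     *	struct rdma_ucm_migrate_resp resp;
     *	struct rdma_ucm_migrate_id cmd = {
     *		.response = (uintptr_t)&resp,
     *		.id       = ctx_id,
     *		.fd       = old_fd,
     *	};
     *
     * After a successful write(), resp.events_reported tells the caller
     * how many events were already delivered for this id on the old file.
     */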
1710
1711static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
1712                                   const char __user *inbuf,
1713                                   int in_len, int out_len) = {
1714        [RDMA_USER_CM_CMD_CREATE_ID]     = ucma_create_id,
1715        [RDMA_USER_CM_CMD_DESTROY_ID]    = ucma_destroy_id,
1716        [RDMA_USER_CM_CMD_BIND_IP]       = ucma_bind_ip,
1717        [RDMA_USER_CM_CMD_RESOLVE_IP]    = ucma_resolve_ip,
1718        [RDMA_USER_CM_CMD_RESOLVE_ROUTE] = ucma_resolve_route,
1719        [RDMA_USER_CM_CMD_QUERY_ROUTE]   = ucma_query_route,
1720        [RDMA_USER_CM_CMD_CONNECT]       = ucma_connect,
1721        [RDMA_USER_CM_CMD_LISTEN]        = ucma_listen,
1722        [RDMA_USER_CM_CMD_ACCEPT]        = ucma_accept,
1723        [RDMA_USER_CM_CMD_REJECT]        = ucma_reject,
1724        [RDMA_USER_CM_CMD_DISCONNECT]    = ucma_disconnect,
1725        [RDMA_USER_CM_CMD_INIT_QP_ATTR]  = ucma_init_qp_attr,
1726        [RDMA_USER_CM_CMD_GET_EVENT]     = ucma_get_event,
1727        [RDMA_USER_CM_CMD_GET_OPTION]    = NULL,
1728        [RDMA_USER_CM_CMD_SET_OPTION]    = ucma_set_option,
1729        [RDMA_USER_CM_CMD_NOTIFY]        = ucma_notify,
1730        [RDMA_USER_CM_CMD_JOIN_IP_MCAST] = ucma_join_ip_multicast,
1731        [RDMA_USER_CM_CMD_LEAVE_MCAST]   = ucma_leave_multicast,
1732        [RDMA_USER_CM_CMD_MIGRATE_ID]    = ucma_migrate_id,
1733        [RDMA_USER_CM_CMD_QUERY]         = ucma_query,
1734        [RDMA_USER_CM_CMD_BIND]          = ucma_bind,
1735        [RDMA_USER_CM_CMD_RESOLVE_ADDR]  = ucma_resolve_addr,
1736        [RDMA_USER_CM_CMD_JOIN_MCAST]    = ucma_join_multicast
1737};
1738
1739static ssize_t ucma_write(struct file *filp, const char __user *buf,
1740                          size_t len, loff_t *pos)
1741{
1742        struct ucma_file *file = filp->private_data;
1743        struct rdma_ucm_cmd_hdr hdr;
1744        ssize_t ret;
1745
1746        if (!ib_safe_file_access(filp)) {
1747                pr_err_once("ucma_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
1748                            task_tgid_vnr(current), current->comm);
1749                return -EACCES;
1750        }
1751
1752        if (len < sizeof(hdr))
1753                return -EINVAL;
1754
1755        if (copy_from_user(&hdr, buf, sizeof(hdr)))
1756                return -EFAULT;
1757
1758        if (hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
1759                return -EINVAL;
1760        hdr.cmd = array_index_nospec(hdr.cmd, ARRAY_SIZE(ucma_cmd_table));
1761
1762        if (hdr.in + sizeof(hdr) > len)
1763                return -EINVAL;
1764
1765        if (!ucma_cmd_table[hdr.cmd])
1766                return -ENOSYS;
1767
1768        ret = ucma_cmd_table[hdr.cmd](file, buf + sizeof(hdr), hdr.in, hdr.out);
1769        if (!ret)
1770                ret = len;
1771
1772        return ret;
1773}
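
    /*
     * Every request therefore uses the same framing: a struct
     * rdma_ucm_cmd_hdr whose cmd field selects the ucma_cmd_table slot and
     * whose in/out fields give the payload and response sizes, followed
     * immediately by the command struct; NULL slots such as
     * RDMA_USER_CM_CMD_GET_OPTION fail with -ENOSYS.  A minimal sketch of
     * a userspace caller (hypothetical helper, error handling elided):
     *
     *	static int ucma_cmd(int fd, __u32 op, void *payload,
     *			    __u16 in, __u16 out)
     *	{
     *		char buf[sizeof(struct rdma_ucm_cmd_hdr) + in];
     *		struct rdma_ucm_cmd_hdr *hdr = (void *)buf;
     *
     *		hdr->cmd = op;
     *		hdr->in  = in;
     *		hdr->out = out;
     *		memcpy(buf + sizeof(*hdr), payload, in);
     *		return write(fd, buf, sizeof(buf)) == sizeof(buf) ? 0 : -1;
     *	}
     */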
1774
1775static __poll_t ucma_poll(struct file *filp, struct poll_table_struct *wait)
1776{
1777        struct ucma_file *file = filp->private_data;
1778        __poll_t mask = 0;
1779
1780        poll_wait(filp, &file->poll_wait, wait);
1781
1782        if (!list_empty(&file->event_list))
1783                mask = EPOLLIN | EPOLLRDNORM;
1784
1785        return mask;
1786}
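
    /*
     * Userspace typically poll()s the file for POLLIN and then issues
     * RDMA_USER_CM_CMD_GET_EVENT to reap the queued event, e.g. (fd and
     * the wrapper are hypothetical):
     *
     *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
     *
     *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
     *		issue_get_event(fd);	// GET_EVENT via the framing above
     */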
1787
1788/*
1789 * ucma_open() does not need the BKL:
1790 *
1791 *  - no global state is referred to;
1792 *  - there is no ioctl method to race against;
1793 *  - no further module initialization is required for open to work
1794 *    after the device is registered.
1795 */
1796static int ucma_open(struct inode *inode, struct file *filp)
1797{
1798        struct ucma_file *file;
1799
1800        file = kmalloc(sizeof *file, GFP_KERNEL);
1801        if (!file)
1802                return -ENOMEM;
1803
1804        file->close_wq = alloc_ordered_workqueue("ucma_close_id",
1805                                                 WQ_MEM_RECLAIM);
1806        if (!file->close_wq) {
1807                kfree(file);
1808                return -ENOMEM;
1809        }
1810
1811        INIT_LIST_HEAD(&file->event_list);
1812        INIT_LIST_HEAD(&file->ctx_list);
1813        init_waitqueue_head(&file->poll_wait);
1814        mutex_init(&file->mut);
1815
1816        filp->private_data = file;
1817        file->filp = filp;
1818
1819        return stream_open(inode, filp);
1820}
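
    /*
     * stream_open() marks the descriptor as stream-like (non-seekable),
     * matching the no_llseek entry in ucma_fops below.
     */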
1821
1822static int ucma_close(struct inode *inode, struct file *filp)
1823{
1824        struct ucma_file *file = filp->private_data;
1825        struct ucma_context *ctx, *tmp;
1826
1827        mutex_lock(&file->mut);
1828        list_for_each_entry_safe(ctx, tmp, &file->ctx_list, list) {
1829                ctx->destroying = 1;
1830                mutex_unlock(&file->mut);
1831
1832                xa_erase(&ctx_table, ctx->id);
1833                flush_workqueue(file->close_wq);
1834        /* At this point, with ctx marked as destroying and the workqueue
1835         * flushed, we are safe from any in-flight handlers that might
1836         * queue another close work.
1837         */
1838                xa_lock(&ctx_table);
1839                if (!ctx->closing) {
1840                        xa_unlock(&ctx_table);
1841                        ucma_put_ctx(ctx);
1842                        wait_for_completion(&ctx->comp);
1843                        /* rdma_destroy_id() ensures that no event handlers
1844                         * are in flight for this id before releasing it.
1845                         */
1846                        rdma_destroy_id(ctx->cm_id);
1847                } else {
1848                        xa_unlock(&ctx_table);
1849                }
1850
1851                ucma_free_ctx(ctx);
1852                mutex_lock(&file->mut);
1853        }
1854        mutex_unlock(&file->mut);
1855        destroy_workqueue(file->close_wq);
1856        kfree(file);
1857        return 0;
1858}
1859
1860static const struct file_operations ucma_fops = {
1861        .owner   = THIS_MODULE,
1862        .open    = ucma_open,
1863        .release = ucma_close,
1864        .write   = ucma_write,
1865        .poll    = ucma_poll,
1866        .llseek  = no_llseek,
1867};
1868
1869static struct miscdevice ucma_misc = {
1870        .minor          = MISC_DYNAMIC_MINOR,
1871        .name           = "rdma_cm",
1872        .nodename       = "infiniband/rdma_cm",
1873        .mode           = 0666,
1874        .fops           = &ucma_fops,
1875};
1876
1877static int ucma_get_global_nl_info(struct ib_client_nl_info *res)
1878{
1879        res->abi = RDMA_USER_CM_ABI_VERSION;
1880        res->cdev = ucma_misc.this_device;
1881        return 0;
1882}
1883
1884static struct ib_client rdma_cma_client = {
1885        .name = "rdma_cm",
1886        .get_global_nl_info = ucma_get_global_nl_info,
1887};
1888MODULE_ALIAS_RDMA_CLIENT("rdma_cm");
1889
1890static ssize_t show_abi_version(struct device *dev,
1891                                struct device_attribute *attr,
1892                                char *buf)
1893{
1894        return sprintf(buf, "%d\n", RDMA_USER_CM_ABI_VERSION);
1895}
1896static DEVICE_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);
1897
1898static int __init ucma_init(void)
1899{
1900        int ret;
1901
1902        ret = misc_register(&ucma_misc);
1903        if (ret)
1904                return ret;
1905
1906        ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
1907        if (ret) {
1908                pr_err("rdma_ucm: couldn't create abi_version attr\n");
1909                goto err1;
1910        }
1911
1912        ucma_ctl_table_hdr = register_net_sysctl(&init_net, "net/rdma_ucm", ucma_ctl_table);
1913        if (!ucma_ctl_table_hdr) {
1914                pr_err("rdma_ucm: couldn't register sysctl paths\n");
1915                ret = -ENOMEM;
1916                goto err2;
1917        }
1918
1919        ret = ib_register_client(&rdma_cma_client);
1920        if (ret)
1921                goto err3;
1922
1923        return 0;
1924err3:
1925        unregister_net_sysctl_table(ucma_ctl_table_hdr);
1926err2:
1927        device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
1928err1:
1929        misc_deregister(&ucma_misc);
1930        return ret;
1931}
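
    /*
     * The error path above unwinds in reverse registration order;
     * ucma_cleanup() below tears everything down in the same order on
     * module unload.
     */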
1932
1933static void __exit ucma_cleanup(void)
1934{
1935        ib_unregister_client(&rdma_cma_client);
1936        unregister_net_sysctl_table(ucma_ctl_table_hdr);
1937        device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
1938        misc_deregister(&ucma_misc);
1939}
1940
1941module_init(ucma_init);
1942module_exit(ucma_cleanup);
1943