linux/drivers/infiniband/core/ucma.c
/*
 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials
 *      provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/completion.h>
#include <linux/file.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/module.h>
#include <linux/nsproxy.h>

#include <rdma/rdma_user_cm.h>
#include <rdma/ib_marshall.h>
#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/ib_addr.h>
#include <rdma/ib.h>

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int max_backlog = 1024;

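/*
 * max_backlog caps the backlog value that ucma_listen() passes to
 * rdma_listen(). It is exposed as a writable sysctl through the table
 * below; the table is registered during module init (not shown in this
 * excerpt), so the exact /proc/sys path depends on that registration.
 */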
static struct ctl_table_header *ucma_ctl_table_hdr;
static struct ctl_table ucma_ctl_table[] = {
        {
                .procname       = "max_backlog",
                .data           = &max_backlog,
                .maxlen         = sizeof max_backlog,
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
        { }
};

struct ucma_file {
        struct mutex            mut;
        struct file             *filp;
        struct list_head        ctx_list;
        struct list_head        event_list;
        wait_queue_head_t       poll_wait;
        struct workqueue_struct *close_wq;
};

struct ucma_context {
        int                     id;
        struct completion       comp;
        atomic_t                ref;
        int                     events_reported;
        int                     backlog;

        struct ucma_file        *file;
        struct rdma_cm_id       *cm_id;
        u64                     uid;

        struct list_head        list;
        struct list_head        mc_list;
        /* set when the device is in the process of destroying the internal
         * HW resources; protected by the global mut
         */
        int                     closing;
        /* sync between removal event and id destroy, protected by file mut */
        int                     destroying;
        struct work_struct      close_work;
};

struct ucma_multicast {
        struct ucma_context     *ctx;
        int                     id;
        int                     events_reported;

        u64                     uid;
        u8                      join_state;
        struct list_head        list;
        struct sockaddr_storage addr;
};

struct ucma_event {
        struct ucma_context     *ctx;
        struct ucma_multicast   *mc;
        struct list_head        list;
        struct rdma_cm_id       *cm_id;
        struct rdma_ucm_event_resp resp;
        struct work_struct      close_work;
};

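/*
 * The global mut serializes updates to the two IDR tables below and to the
 * per-context closing flag. ctx_idr maps the ids handed out to userspace
 * back to struct ucma_context; multicast_idr does the same for multicast
 * joins.
 */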
static DEFINE_MUTEX(mut);
static DEFINE_IDR(ctx_idr);
static DEFINE_IDR(multicast_idr);

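/*
 * Context lookup: _ucma_find_context() must be called with mut held and
 * checks that the id belongs to the calling file; ucma_get_ctx() takes a
 * reference on success and refuses contexts that are already being torn
 * down after a device removal (-EIO).
 */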
static inline struct ucma_context *_ucma_find_context(int id,
                                                      struct ucma_file *file)
{
        struct ucma_context *ctx;

        ctx = idr_find(&ctx_idr, id);
        if (!ctx)
                ctx = ERR_PTR(-ENOENT);
        else if (ctx->file != file)
                ctx = ERR_PTR(-EINVAL);
        return ctx;
}

static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
{
        struct ucma_context *ctx;

        mutex_lock(&mut);
        ctx = _ucma_find_context(id, file);
        if (!IS_ERR(ctx)) {
                if (ctx->closing)
                        ctx = ERR_PTR(-EIO);
                else
                        atomic_inc(&ctx->ref);
        }
        mutex_unlock(&mut);
        return ctx;
}

static void ucma_put_ctx(struct ucma_context *ctx)
{
        if (atomic_dec_and_test(&ctx->ref))
                complete(&ctx->comp);
}

static void ucma_close_event_id(struct work_struct *work)
{
        struct ucma_event *uevent_close = container_of(work, struct ucma_event, close_work);

        rdma_destroy_id(uevent_close->cm_id);
        kfree(uevent_close);
}

static void ucma_close_id(struct work_struct *work)
{
        struct ucma_context *ctx = container_of(work, struct ucma_context, close_work);

        /* Once all in-flight tasks are finished, we close all underlying
         * resources. The context stays alive until it is explicitly
         * destroyed by its creator.
         */
        ucma_put_ctx(ctx);
        wait_for_completion(&ctx->comp);
        /* No new events will be generated after destroying the id. */
        rdma_destroy_id(ctx->cm_id);
}

static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
{
        struct ucma_context *ctx;

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return NULL;

        INIT_WORK(&ctx->close_work, ucma_close_id);
        atomic_set(&ctx->ref, 1);
        init_completion(&ctx->comp);
        INIT_LIST_HEAD(&ctx->mc_list);
        ctx->file = file;

        mutex_lock(&mut);
        ctx->id = idr_alloc(&ctx_idr, ctx, 0, 0, GFP_KERNEL);
        mutex_unlock(&mut);
        if (ctx->id < 0)
                goto error;

        list_add_tail(&ctx->list, &file->ctx_list);
        return ctx;

error:
        kfree(ctx);
        return NULL;
}

static struct ucma_multicast *ucma_alloc_multicast(struct ucma_context *ctx)
{
        struct ucma_multicast *mc;

        mc = kzalloc(sizeof(*mc), GFP_KERNEL);
        if (!mc)
                return NULL;

        mutex_lock(&mut);
        mc->id = idr_alloc(&multicast_idr, mc, 0, 0, GFP_KERNEL);
        mutex_unlock(&mut);
        if (mc->id < 0)
                goto error;

        mc->ctx = ctx;
        list_add_tail(&mc->list, &ctx->mc_list);
        return mc;

error:
        kfree(mc);
        return NULL;
}

static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst,
                                 struct rdma_conn_param *src)
{
        if (src->private_data_len)
                memcpy(dst->private_data, src->private_data,
                       src->private_data_len);
        dst->private_data_len = src->private_data_len;
        dst->responder_resources = src->responder_resources;
        dst->initiator_depth = src->initiator_depth;
        dst->flow_control = src->flow_control;
        dst->retry_count = src->retry_count;
        dst->rnr_retry_count = src->rnr_retry_count;
        dst->srq = src->srq;
        dst->qp_num = src->qp_num;
}

static void ucma_copy_ud_event(struct rdma_ucm_ud_param *dst,
                               struct rdma_ud_param *src)
{
        if (src->private_data_len)
                memcpy(dst->private_data, src->private_data,
                       src->private_data_len);
        dst->private_data_len = src->private_data_len;
        ib_copy_ah_attr_to_user(&dst->ah_attr, &src->ah_attr);
        dst->qp_num = src->qp_num;
        dst->qkey = src->qkey;
}

static void ucma_set_event_context(struct ucma_context *ctx,
                                   struct rdma_cm_event *event,
                                   struct ucma_event *uevent)
{
        uevent->ctx = ctx;
        switch (event->event) {
        case RDMA_CM_EVENT_MULTICAST_JOIN:
        case RDMA_CM_EVENT_MULTICAST_ERROR:
                uevent->mc = (struct ucma_multicast *)
                             event->param.ud.private_data;
                uevent->resp.uid = uevent->mc->uid;
                uevent->resp.id = uevent->mc->id;
                break;
        default:
                uevent->resp.uid = ctx->uid;
                uevent->resp.id = ctx->id;
                break;
        }
}

/* Called with file->mut locked for the relevant context. */
static void ucma_removal_event_handler(struct rdma_cm_id *cm_id)
{
        struct ucma_context *ctx = cm_id->context;
        struct ucma_event *con_req_eve;
        int event_found = 0;

        if (ctx->destroying)
                return;

        /* The context owns the cm_id only if it still points at it; in that
         * case the context can be queued to be closed. Otherwise the cm_id
         * is an in-flight one sitting on this context's event list, waiting
         * to be detached and re-attached to a new context in ucma_get_event,
         * and it is handled separately below.
         */
        if (ctx->cm_id == cm_id) {
                mutex_lock(&mut);
                ctx->closing = 1;
                mutex_unlock(&mut);
                queue_work(ctx->file->close_wq, &ctx->close_work);
                return;
        }

        list_for_each_entry(con_req_eve, &ctx->file->event_list, list) {
                if (con_req_eve->cm_id == cm_id &&
                    con_req_eve->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
                        list_del(&con_req_eve->list);
                        INIT_WORK(&con_req_eve->close_work, ucma_close_event_id);
                        queue_work(ctx->file->close_wq, &con_req_eve->close_work);
                        event_found = 1;
                        break;
                }
        }
        if (!event_found)
                pr_err("ucma_removal_event_handler: warning: connect request event wasn't found\n");
}

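/*
 * rdma_cm event callback: translate the kernel event into a
 * rdma_ucm_event_resp, queue it on the owning file's event_list, and wake
 * any pollers. For a connect request, a nonzero return value tells the
 * rdma_cm core to destroy the newly created cm_id, which is why an
 * allocation failure returns nonzero only in that case.
 */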
static int ucma_event_handler(struct rdma_cm_id *cm_id,
                              struct rdma_cm_event *event)
{
        struct ucma_event *uevent;
        struct ucma_context *ctx = cm_id->context;
        int ret = 0;

        uevent = kzalloc(sizeof(*uevent), GFP_KERNEL);
        if (!uevent)
                return event->event == RDMA_CM_EVENT_CONNECT_REQUEST;

        mutex_lock(&ctx->file->mut);
        uevent->cm_id = cm_id;
        ucma_set_event_context(ctx, event, uevent);
        uevent->resp.event = event->event;
        uevent->resp.status = event->status;
        if (cm_id->qp_type == IB_QPT_UD)
                ucma_copy_ud_event(&uevent->resp.param.ud, &event->param.ud);
        else
                ucma_copy_conn_event(&uevent->resp.param.conn,
                                     &event->param.conn);

        if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
                if (!ctx->backlog) {
                        ret = -ENOMEM;
                        kfree(uevent);
                        goto out;
                }
                ctx->backlog--;
        } else if (!ctx->uid || ctx->cm_id != cm_id) {
                /*
                 * We ignore events for new connections until userspace has set
                 * their context.  This can only happen if an error occurs on a
                 * new connection before the user accepts it.  This is okay,
                 * since the accept will just fail later. However, we do need
                 * to release the underlying HW resources in case of a device
                 * removal event.
                 */
                if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
                        ucma_removal_event_handler(cm_id);

                kfree(uevent);
                goto out;
        }

        list_add_tail(&uevent->list, &ctx->file->event_list);
        wake_up_interruptible(&ctx->file->poll_wait);
        if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
                ucma_removal_event_handler(cm_id);
out:
        mutex_unlock(&ctx->file->mut);
        return ret;
}

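/*
 * Report one event to userspace, blocking unless O_NONBLOCK is set. A
 * connect-request event is special: a fresh ucma_context is allocated here
 * and bound to the incoming cm_id before the event is reported, so the new
 * connection has an id that userspace can act on.
 */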
static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
                              int in_len, int out_len)
{
        struct ucma_context *ctx;
        struct rdma_ucm_get_event cmd;
        struct ucma_event *uevent;
        int ret = 0;

        if (out_len < sizeof uevent->resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        mutex_lock(&file->mut);
        while (list_empty(&file->event_list)) {
                mutex_unlock(&file->mut);

                if (file->filp->f_flags & O_NONBLOCK)
                        return -EAGAIN;

                if (wait_event_interruptible(file->poll_wait,
                                             !list_empty(&file->event_list)))
                        return -ERESTARTSYS;

                mutex_lock(&file->mut);
        }

        uevent = list_entry(file->event_list.next, struct ucma_event, list);

        if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
                ctx = ucma_alloc_ctx(file);
                if (!ctx) {
                        ret = -ENOMEM;
                        goto done;
                }
                uevent->ctx->backlog++;
                ctx->cm_id = uevent->cm_id;
                ctx->cm_id->context = ctx;
                uevent->resp.id = ctx->id;
        }

        if (copy_to_user((void __user *)(unsigned long)cmd.response,
                         &uevent->resp, sizeof uevent->resp)) {
                ret = -EFAULT;
                goto done;
        }

        list_del(&uevent->list);
        uevent->ctx->events_reported++;
        if (uevent->mc)
                uevent->mc->events_reported++;
        kfree(uevent);
done:
        mutex_unlock(&file->mut);
        return ret;
}

static int ucma_get_qp_type(struct rdma_ucm_create_id *cmd, enum ib_qp_type *qp_type)
{
        switch (cmd->ps) {
        case RDMA_PS_TCP:
                *qp_type = IB_QPT_RC;
                return 0;
        case RDMA_PS_UDP:
        case RDMA_PS_IPOIB:
                *qp_type = IB_QPT_UD;
                return 0;
        case RDMA_PS_IB:
                *qp_type = cmd->qp_type;
                return 0;
        default:
                return -EINVAL;
        }
}

static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
                              int in_len, int out_len)
{
        struct rdma_ucm_create_id cmd;
        struct rdma_ucm_create_id_resp resp;
        struct ucma_context *ctx;
        enum ib_qp_type qp_type;
        int ret;

        if (out_len < sizeof(resp))
                return -ENOSPC;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ret = ucma_get_qp_type(&cmd, &qp_type);
        if (ret)
                return ret;

        mutex_lock(&file->mut);
        ctx = ucma_alloc_ctx(file);
        mutex_unlock(&file->mut);
        if (!ctx)
                return -ENOMEM;

        ctx->uid = cmd.uid;
        ctx->cm_id = rdma_create_id(current->nsproxy->net_ns,
                                    ucma_event_handler, ctx, cmd.ps, qp_type);
        if (IS_ERR(ctx->cm_id)) {
                ret = PTR_ERR(ctx->cm_id);
                goto err1;
        }

        resp.id = ctx->id;
        if (copy_to_user((void __user *)(unsigned long)cmd.response,
                         &resp, sizeof(resp))) {
                ret = -EFAULT;
                goto err2;
        }
        return 0;

err2:
        rdma_destroy_id(ctx->cm_id);
err1:
        mutex_lock(&mut);
        idr_remove(&ctx_idr, ctx->id);
        mutex_unlock(&mut);
        kfree(ctx);
        return ret;
}

static void ucma_cleanup_multicast(struct ucma_context *ctx)
{
        struct ucma_multicast *mc, *tmp;

        mutex_lock(&mut);
        list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) {
                list_del(&mc->list);
                idr_remove(&multicast_idr, mc->id);
                kfree(mc);
        }
        mutex_unlock(&mut);
}

static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
{
        struct ucma_event *uevent, *tmp;

        list_for_each_entry_safe(uevent, tmp, &mc->ctx->file->event_list, list) {
                if (uevent->mc != mc)
                        continue;

                list_del(&uevent->list);
                kfree(uevent);
        }
}

/*
 * ucma_free_ctx is called after the underlying rdma CM-ID is destroyed. At
 * this point, no new events will be reported from the hardware. However, we
 * still need to clean up the UCMA context for this ID. Specifically, there
 * might be events that have not yet been consumed by the user space software.
 * These might include pending connect requests which we have not completed
 * processing.  We cannot call rdma_destroy_id while holding the lock of the
 * context (file->mut), as it might cause a deadlock. We therefore extract all
 * relevant events from the context pending events list while holding the
 * mutex. After that we release them as needed.
 */
static int ucma_free_ctx(struct ucma_context *ctx)
{
        int events_reported;
        struct ucma_event *uevent, *tmp;
        LIST_HEAD(list);

        ucma_cleanup_multicast(ctx);

        /* Cleanup events not yet reported to the user. */
        mutex_lock(&ctx->file->mut);
        list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
                if (uevent->ctx == ctx)
                        list_move_tail(&uevent->list, &list);
        }
        list_del(&ctx->list);
        mutex_unlock(&ctx->file->mut);

        list_for_each_entry_safe(uevent, tmp, &list, list) {
                list_del(&uevent->list);
                if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
                        rdma_destroy_id(uevent->cm_id);
                kfree(uevent);
        }

        events_reported = ctx->events_reported;
        kfree(ctx);
        return events_reported;
}

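/*
 * Userspace destroy path: remove the id from ctx_idr so no new lookups can
 * succeed, flush the close workqueue to synchronize with a concurrent
 * device-removal close, then drop the initial reference, wait for in-flight
 * users via the completion, and finally destroy the cm_id and free the
 * context.
 */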
static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
                               int in_len, int out_len)
{
        struct rdma_ucm_destroy_id cmd;
        struct rdma_ucm_destroy_id_resp resp;
        struct ucma_context *ctx;
        int ret = 0;

        if (out_len < sizeof(resp))
                return -ENOSPC;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        mutex_lock(&mut);
        ctx = _ucma_find_context(cmd.id, file);
        if (!IS_ERR(ctx))
                idr_remove(&ctx_idr, ctx->id);
        mutex_unlock(&mut);

        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        mutex_lock(&ctx->file->mut);
        ctx->destroying = 1;
        mutex_unlock(&ctx->file->mut);

        flush_workqueue(ctx->file->close_wq);
        /*
         * At this point it is guaranteed that there is no in-flight closing
         * task.
         */
        mutex_lock(&mut);
        if (!ctx->closing) {
                mutex_unlock(&mut);
                ucma_put_ctx(ctx);
                wait_for_completion(&ctx->comp);
                rdma_destroy_id(ctx->cm_id);
        } else {
                mutex_unlock(&mut);
        }

        resp.events_reported = ucma_free_ctx(ctx);
        if (copy_to_user((void __user *)(unsigned long)cmd.response,
                         &resp, sizeof(resp)))
                ret = -EFAULT;

        return ret;
}

static ssize_t ucma_bind_ip(struct ucma_file *file, const char __user *inbuf,
                            int in_len, int out_len)
{
        struct rdma_ucm_bind_ip cmd;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        /* Reject a user-supplied address whose family cannot be sized. */
        if (!rdma_addr_size((struct sockaddr *) &cmd.addr))
                return -EINVAL;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_bind(struct ucma_file *file, const char __user *inbuf,
                         int in_len, int out_len)
{
        struct rdma_ucm_bind cmd;
        struct sockaddr *addr;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        addr = (struct sockaddr *) &cmd.addr;
        if (cmd.reserved || !cmd.addr_size || (cmd.addr_size != rdma_addr_size(addr)))
                return -EINVAL;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ret = rdma_bind_addr(ctx->cm_id, addr);
        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_resolve_ip(struct ucma_file *file,
                               const char __user *inbuf,
                               int in_len, int out_len)
{
        struct rdma_ucm_resolve_ip cmd;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        /* Both addresses must carry a family we can size. */
        if (!rdma_addr_size((struct sockaddr *) &cmd.src_addr) ||
            !rdma_addr_size((struct sockaddr *) &cmd.dst_addr))
                return -EINVAL;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
                                (struct sockaddr *) &cmd.dst_addr,
                                cmd.timeout_ms);
        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_resolve_addr(struct ucma_file *file,
                                 const char __user *inbuf,
                                 int in_len, int out_len)
{
        struct rdma_ucm_resolve_addr cmd;
        struct sockaddr *src, *dst;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        src = (struct sockaddr *) &cmd.src_addr;
        dst = (struct sockaddr *) &cmd.dst_addr;
        if (cmd.reserved || (cmd.src_size && (cmd.src_size != rdma_addr_size(src))) ||
            !cmd.dst_size || (cmd.dst_size != rdma_addr_size(dst)))
                return -EINVAL;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ret = rdma_resolve_addr(ctx->cm_id, src, dst, cmd.timeout_ms);
        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_resolve_route(struct ucma_file *file,
                                  const char __user *inbuf,
                                  int in_len, int out_len)
{
        struct rdma_ucm_resolve_route cmd;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms);
        ucma_put_ctx(ctx);
        return ret;
}

static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
                               struct rdma_route *route)
{
        struct rdma_dev_addr *dev_addr;

        resp->num_paths = route->num_paths;
        switch (route->num_paths) {
        case 0:
                dev_addr = &route->addr.dev_addr;
                rdma_addr_get_dgid(dev_addr,
                                   (union ib_gid *) &resp->ib_route[0].dgid);
                rdma_addr_get_sgid(dev_addr,
                                   (union ib_gid *) &resp->ib_route[0].sgid);
                resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
                break;
        case 2:
                ib_copy_path_rec_to_user(&resp->ib_route[1],
                                         &route->path_rec[1]);
                /* fall through */
        case 1:
                ib_copy_path_rec_to_user(&resp->ib_route[0],
                                         &route->path_rec[0]);
                break;
        default:
                break;
        }
}

static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp,
                                 struct rdma_route *route)
{
        resp->num_paths = route->num_paths;
        switch (route->num_paths) {
        case 0:
                rdma_ip2gid((struct sockaddr *) &route->addr.dst_addr,
                            (union ib_gid *) &resp->ib_route[0].dgid);
                rdma_ip2gid((struct sockaddr *) &route->addr.src_addr,
                            (union ib_gid *) &resp->ib_route[0].sgid);
                resp->ib_route[0].pkey = cpu_to_be16(0xffff);
                break;
        case 2:
                ib_copy_path_rec_to_user(&resp->ib_route[1],
                                         &route->path_rec[1]);
                /* fall through */
        case 1:
                ib_copy_path_rec_to_user(&resp->ib_route[0],
                                         &route->path_rec[0]);
                break;
        default:
                break;
        }
}

static void ucma_copy_iw_route(struct rdma_ucm_query_route_resp *resp,
                               struct rdma_route *route)
{
        struct rdma_dev_addr *dev_addr;

        dev_addr = &route->addr.dev_addr;
        rdma_addr_get_dgid(dev_addr, (union ib_gid *) &resp->ib_route[0].dgid);
        rdma_addr_get_sgid(dev_addr, (union ib_gid *) &resp->ib_route[0].sgid);
}

static ssize_t ucma_query_route(struct ucma_file *file,
                                const char __user *inbuf,
                                int in_len, int out_len)
{
        struct rdma_ucm_query cmd;
        struct rdma_ucm_query_route_resp resp;
        struct ucma_context *ctx;
        struct sockaddr *addr;
        int ret = 0;

        if (out_len < sizeof(resp))
                return -ENOSPC;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        memset(&resp, 0, sizeof resp);
        addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
        memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
                                     sizeof(struct sockaddr_in) :
                                     sizeof(struct sockaddr_in6));
        addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
        memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ?
                                     sizeof(struct sockaddr_in) :
                                     sizeof(struct sockaddr_in6));
        if (!ctx->cm_id->device)
                goto out;

        resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
        resp.port_num = ctx->cm_id->port_num;

        if (rdma_cap_ib_sa(ctx->cm_id->device, ctx->cm_id->port_num))
                ucma_copy_ib_route(&resp, &ctx->cm_id->route);
        else if (rdma_protocol_roce(ctx->cm_id->device, ctx->cm_id->port_num))
                ucma_copy_iboe_route(&resp, &ctx->cm_id->route);
        else if (rdma_protocol_iwarp(ctx->cm_id->device, ctx->cm_id->port_num))
                ucma_copy_iw_route(&resp, &ctx->cm_id->route);

out:
        if (copy_to_user((void __user *)(unsigned long)cmd.response,
                         &resp, sizeof(resp)))
                ret = -EFAULT;

        ucma_put_ctx(ctx);
        return ret;
}

static void ucma_query_device_addr(struct rdma_cm_id *cm_id,
                                   struct rdma_ucm_query_addr_resp *resp)
{
        if (!cm_id->device)
                return;

        resp->node_guid = (__force __u64) cm_id->device->node_guid;
        resp->port_num = cm_id->port_num;
        resp->pkey = (__force __u16) cpu_to_be16(
                     ib_addr_get_pkey(&cm_id->route.addr.dev_addr));
}

static ssize_t ucma_query_addr(struct ucma_context *ctx,
                               void __user *response, int out_len)
{
        struct rdma_ucm_query_addr_resp resp;
        struct sockaddr *addr;
        int ret = 0;

        if (out_len < sizeof(resp))
                return -ENOSPC;

        memset(&resp, 0, sizeof resp);

        addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
        resp.src_size = rdma_addr_size(addr);
        memcpy(&resp.src_addr, addr, resp.src_size);

        addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
        resp.dst_size = rdma_addr_size(addr);
        memcpy(&resp.dst_addr, addr, resp.dst_size);

        ucma_query_device_addr(ctx->cm_id, &resp);

        if (copy_to_user(response, &resp, sizeof(resp)))
                ret = -EFAULT;

        return ret;
}

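/*
 * Copy out as many packed path records as both the resolved route and the
 * user buffer can hold; resp->num_paths reports how many records the route
 * has, while the loop bound keeps the copy within out_len.
 */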
static ssize_t ucma_query_path(struct ucma_context *ctx,
                               void __user *response, int out_len)
{
        struct rdma_ucm_query_path_resp *resp;
        int i, ret = 0;

        if (out_len < sizeof(*resp))
                return -ENOSPC;

        resp = kzalloc(out_len, GFP_KERNEL);
        if (!resp)
                return -ENOMEM;

        resp->num_paths = ctx->cm_id->route.num_paths;
        for (i = 0, out_len -= sizeof(*resp);
             i < resp->num_paths && out_len > sizeof(struct ib_path_rec_data);
             i++, out_len -= sizeof(struct ib_path_rec_data)) {

                resp->path_data[i].flags = IB_PATH_GMP | IB_PATH_PRIMARY |
                                           IB_PATH_BIDIRECTIONAL;
                ib_sa_pack_path(&ctx->cm_id->route.path_rec[i],
                                &resp->path_data[i].path_rec);
        }

        if (copy_to_user(response, resp,
                         sizeof(*resp) + (i * sizeof(struct ib_path_rec_data))))
                ret = -EFAULT;

        kfree(resp);
        return ret;
}

static ssize_t ucma_query_gid(struct ucma_context *ctx,
                              void __user *response, int out_len)
{
        struct rdma_ucm_query_addr_resp resp;
        struct sockaddr_ib *addr;
        int ret = 0;

        if (out_len < sizeof(resp))
                return -ENOSPC;

        memset(&resp, 0, sizeof resp);

        ucma_query_device_addr(ctx->cm_id, &resp);

        addr = (struct sockaddr_ib *) &resp.src_addr;
        resp.src_size = sizeof(*addr);
        if (ctx->cm_id->route.addr.src_addr.ss_family == AF_IB) {
                memcpy(addr, &ctx->cm_id->route.addr.src_addr, resp.src_size);
        } else {
                addr->sib_family = AF_IB;
                addr->sib_pkey = (__force __be16) resp.pkey;
                rdma_addr_get_sgid(&ctx->cm_id->route.addr.dev_addr,
                                   (union ib_gid *) &addr->sib_addr);
                addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *)
                                                    &ctx->cm_id->route.addr.src_addr);
        }

        addr = (struct sockaddr_ib *) &resp.dst_addr;
        resp.dst_size = sizeof(*addr);
        if (ctx->cm_id->route.addr.dst_addr.ss_family == AF_IB) {
                memcpy(addr, &ctx->cm_id->route.addr.dst_addr, resp.dst_size);
        } else {
                addr->sib_family = AF_IB;
                addr->sib_pkey = (__force __be16) resp.pkey;
                rdma_addr_get_dgid(&ctx->cm_id->route.addr.dev_addr,
                                   (union ib_gid *) &addr->sib_addr);
                addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *)
                                                    &ctx->cm_id->route.addr.dst_addr);
        }

        if (copy_to_user(response, &resp, sizeof(resp)))
                ret = -EFAULT;

        return ret;
}

static ssize_t ucma_query(struct ucma_file *file,
                          const char __user *inbuf,
                          int in_len, int out_len)
{
        struct rdma_ucm_query cmd;
        struct ucma_context *ctx;
        void __user *response;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        response = (void __user *)(unsigned long) cmd.response;
        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        switch (cmd.option) {
        case RDMA_USER_CM_QUERY_ADDR:
                ret = ucma_query_addr(ctx, response, out_len);
                break;
        case RDMA_USER_CM_QUERY_PATH:
                ret = ucma_query_path(ctx, response, out_len);
                break;
        case RDMA_USER_CM_QUERY_GID:
                ret = ucma_query_gid(ctx, response, out_len);
                break;
        default:
                ret = -ENOSYS;
                break;
        }

        ucma_put_ctx(ctx);
        return ret;
}

static void ucma_copy_conn_param(struct rdma_cm_id *id,
                                 struct rdma_conn_param *dst,
                                 struct rdma_ucm_conn_param *src)
{
        dst->private_data = src->private_data;
        dst->private_data_len = src->private_data_len;
        dst->responder_resources = src->responder_resources;
        dst->initiator_depth = src->initiator_depth;
        dst->flow_control = src->flow_control;
        dst->retry_count = src->retry_count;
        dst->rnr_retry_count = src->rnr_retry_count;
        dst->srq = src->srq;
        dst->qp_num = src->qp_num;
        dst->qkey = (id->route.addr.src_addr.ss_family == AF_IB) ? src->qkey : 0;
}

static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
                            int in_len, int out_len)
{
        struct rdma_ucm_connect cmd;
        struct rdma_conn_param conn_param;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        if (!cmd.conn_param.valid)
                return -EINVAL;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
        ret = rdma_connect(ctx->cm_id, &conn_param);
        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,
                           int in_len, int out_len)
{
        struct rdma_ucm_listen cmd;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ?
                       cmd.backlog : max_backlog;
        ret = rdma_listen(ctx->cm_id, ctx->backlog);
        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
                           int in_len, int out_len)
{
        struct rdma_ucm_accept cmd;
        struct rdma_conn_param conn_param;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        if (cmd.conn_param.valid) {
                ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
                mutex_lock(&file->mut);
                ret = rdma_accept(ctx->cm_id, &conn_param);
                if (!ret)
                        ctx->uid = cmd.uid;
                mutex_unlock(&file->mut);
        } else
                ret = rdma_accept(ctx->cm_id, NULL);

        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
                           int in_len, int out_len)
{
        struct rdma_ucm_reject cmd;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len);
        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf,
                               int in_len, int out_len)
{
        struct rdma_ucm_disconnect cmd;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ret = rdma_disconnect(ctx->cm_id);
        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_init_qp_attr(struct ucma_file *file,
                                 const char __user *inbuf,
                                 int in_len, int out_len)
{
        struct rdma_ucm_init_qp_attr cmd;
        struct ib_uverbs_qp_attr resp;
        struct ucma_context *ctx;
        struct ib_qp_attr qp_attr;
        int ret;

        if (out_len < sizeof(resp))
                return -ENOSPC;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        resp.qp_attr_mask = 0;
        memset(&qp_attr, 0, sizeof qp_attr);
        qp_attr.qp_state = cmd.qp_state;
        ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
        if (ret)
                goto out;

        ib_copy_qp_attr_to_user(&resp, &qp_attr);
        if (copy_to_user((void __user *)(unsigned long)cmd.response,
                         &resp, sizeof(resp)))
                ret = -EFAULT;

out:
        ucma_put_ctx(ctx);
        return ret;
}

static int ucma_set_option_id(struct ucma_context *ctx, int optname,
                              void *optval, size_t optlen)
{
        int ret = 0;

        switch (optname) {
        case RDMA_OPTION_ID_TOS:
                if (optlen != sizeof(u8)) {
                        ret = -EINVAL;
                        break;
                }
                rdma_set_service_type(ctx->cm_id, *((u8 *) optval));
                break;
        case RDMA_OPTION_ID_REUSEADDR:
                if (optlen != sizeof(int)) {
                        ret = -EINVAL;
                        break;
                }
                ret = rdma_set_reuseaddr(ctx->cm_id, *((int *) optval) ? 1 : 0);
                break;
        case RDMA_OPTION_ID_AFONLY:
                if (optlen != sizeof(int)) {
                        ret = -EINVAL;
                        break;
                }
                ret = rdma_set_afonly(ctx->cm_id, *((int *) optval) ? 1 : 0);
                break;
        default:
                ret = -ENOSYS;
        }

        return ret;
}

static int ucma_set_ib_path(struct ucma_context *ctx,
                            struct ib_path_rec_data *path_data, size_t optlen)
{
        struct ib_sa_path_rec sa_path;
        struct rdma_cm_event event;
        int ret;

        if (optlen % sizeof(*path_data))
                return -EINVAL;

        for (; optlen; optlen -= sizeof(*path_data), path_data++) {
                if (path_data->flags == (IB_PATH_GMP | IB_PATH_PRIMARY |
                                         IB_PATH_BIDIRECTIONAL))
                        break;
        }

        if (!optlen)
                return -EINVAL;

        memset(&sa_path, 0, sizeof(sa_path));

        ib_sa_unpack_path(path_data->path_rec, &sa_path);
        ret = rdma_set_ib_paths(ctx->cm_id, &sa_path, 1);
        if (ret)
                return ret;

        memset(&event, 0, sizeof event);
        event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
        return ucma_event_handler(ctx->cm_id, &event);
}

static int ucma_set_option_ib(struct ucma_context *ctx, int optname,
                              void *optval, size_t optlen)
{
        int ret;

        switch (optname) {
        case RDMA_OPTION_IB_PATH:
                ret = ucma_set_ib_path(ctx, optval, optlen);
                break;
        default:
                ret = -ENOSYS;
        }

        return ret;
}

static int ucma_set_option_level(struct ucma_context *ctx, int level,
                                 int optname, void *optval, size_t optlen)
{
        int ret;

        switch (level) {
        case RDMA_OPTION_ID:
                ret = ucma_set_option_id(ctx, optname, optval, optlen);
                break;
        case RDMA_OPTION_IB:
                ret = ucma_set_option_ib(ctx, optname, optval, optlen);
                break;
        default:
                ret = -ENOSYS;
        }

        return ret;
}

static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
                               int in_len, int out_len)
{
        struct rdma_ucm_set_option cmd;
        struct ucma_context *ctx;
        void *optval;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        /* Bound the user-controlled allocation size. */
        if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE))
                return -EINVAL;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        optval = memdup_user((void __user *) (unsigned long) cmd.optval,
                             cmd.optlen);
        if (IS_ERR(optval)) {
                ret = PTR_ERR(optval);
                goto out;
        }

        ret = ucma_set_option_level(ctx, cmd.level, cmd.optname, optval,
                                    cmd.optlen);
        kfree(optval);

out:
        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
                           int in_len, int out_len)
{
        struct rdma_ucm_notify cmd;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ret = rdma_notify(ctx->cm_id, (enum ib_event_type) cmd.event);
        ucma_put_ctx(ctx);
        return ret;
}

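/*
 * Common join path for the IP-specific and generic multicast commands:
 * validate the address and join state, allocate a ucma_multicast tracked on
 * the context, and pass the mc as the private context to
 * rdma_join_multicast() so later events can be routed back to it (see
 * ucma_set_event_context()).
 */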
static ssize_t ucma_process_join(struct ucma_file *file,
                                 struct rdma_ucm_join_mcast *cmd, int out_len)
{
        struct rdma_ucm_create_id_resp resp;
        struct ucma_context *ctx;
        struct ucma_multicast *mc;
        struct sockaddr *addr;
        int ret;
        u8 join_state;

        if (out_len < sizeof(resp))
                return -ENOSPC;

        addr = (struct sockaddr *) &cmd->addr;
        if (!cmd->addr_size || (cmd->addr_size != rdma_addr_size(addr)))
                return -EINVAL;

        if (cmd->join_flags == RDMA_MC_JOIN_FLAG_FULLMEMBER)
                join_state = BIT(FULLMEMBER_JOIN);
        else if (cmd->join_flags == RDMA_MC_JOIN_FLAG_SENDONLY_FULLMEMBER)
                join_state = BIT(SENDONLY_FULLMEMBER_JOIN);
        else
                return -EINVAL;

        ctx = ucma_get_ctx(file, cmd->id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        mutex_lock(&file->mut);
        mc = ucma_alloc_multicast(ctx);
        if (!mc) {
                ret = -ENOMEM;
                goto err1;
        }
        mc->join_state = join_state;
        mc->uid = cmd->uid;
        memcpy(&mc->addr, addr, cmd->addr_size);
        ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr,
                                  join_state, mc);
        if (ret)
                goto err2;

        resp.id = mc->id;
        if (copy_to_user((void __user *)(unsigned long) cmd->response,
                         &resp, sizeof(resp))) {
                ret = -EFAULT;
                goto err3;
        }

        mutex_unlock(&file->mut);
        ucma_put_ctx(ctx);
        return 0;

err3:
        rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr);
        ucma_cleanup_mc_events(mc);
err2:
        mutex_lock(&mut);
        idr_remove(&multicast_idr, mc->id);
        mutex_unlock(&mut);
        list_del(&mc->list);
        kfree(mc);
err1:
        mutex_unlock(&file->mut);
        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_join_ip_multicast(struct ucma_file *file,
                                      const char __user *inbuf,
                                      int in_len, int out_len)
{
        struct rdma_ucm_join_ip_mcast cmd;
        struct rdma_ucm_join_mcast join_cmd;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        join_cmd.response = cmd.response;
        join_cmd.uid = cmd.uid;
        join_cmd.id = cmd.id;
        join_cmd.addr_size = rdma_addr_size((struct sockaddr *) &cmd.addr);
        if (!join_cmd.addr_size)
                return -EINVAL;

        join_cmd.join_flags = RDMA_MC_JOIN_FLAG_FULLMEMBER;
        memcpy(&join_cmd.addr, &cmd.addr, join_cmd.addr_size);

        return ucma_process_join(file, &join_cmd, out_len);
}

static ssize_t ucma_join_multicast(struct ucma_file *file,
                                   const char __user *inbuf,
                                   int in_len, int out_len)
{
        struct rdma_ucm_join_mcast cmd;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        return ucma_process_join(file, &cmd, out_len);
}

static ssize_t ucma_leave_multicast(struct ucma_file *file,
                                    const char __user *inbuf,
                                    int in_len, int out_len)
{
        struct rdma_ucm_destroy_id cmd;
        struct rdma_ucm_destroy_id_resp resp;
        struct ucma_multicast *mc;
        int ret = 0;

        if (out_len < sizeof(resp))
                return -ENOSPC;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        mutex_lock(&mut);
        mc = idr_find(&multicast_idr, cmd.id);
        if (!mc)
                mc = ERR_PTR(-ENOENT);
        else if (mc->ctx->file != file)
                mc = ERR_PTR(-EINVAL);
        else if (!atomic_inc_not_zero(&mc->ctx->ref))
                mc = ERR_PTR(-ENXIO);
        else
                idr_remove(&multicast_idr, mc->id);
        mutex_unlock(&mut);

        if (IS_ERR(mc)) {
                ret = PTR_ERR(mc);
                goto out;
        }

        rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
        mutex_lock(&mc->ctx->file->mut);
        ucma_cleanup_mc_events(mc);
        list_del(&mc->list);
        mutex_unlock(&mc->ctx->file->mut);

        ucma_put_ctx(mc->ctx);
        resp.events_reported = mc->events_reported;
        kfree(mc);

        if (copy_to_user((void __user *)(unsigned long)cmd.response,
                         &resp, sizeof(resp)))
                ret = -EFAULT;
out:
        return ret;
}

static void ucma_lock_files(struct ucma_file *file1, struct ucma_file *file2)
{
        /* Acquire the mutexes in pointer order to prevent deadlock. */
        if (file1 < file2) {
                mutex_lock(&file1->mut);
                mutex_lock_nested(&file2->mut, SINGLE_DEPTH_NESTING);
        } else {
                mutex_lock(&file2->mut);
                mutex_lock_nested(&file1->mut, SINGLE_DEPTH_NESTING);
        }
}

static void ucma_unlock_files(struct ucma_file *file1, struct ucma_file *file2)
{
        if (file1 < file2) {
                mutex_unlock(&file2->mut);
                mutex_unlock(&file1->mut);
        } else {
                mutex_unlock(&file1->mut);
                mutex_unlock(&file2->mut);
        }
}

static void ucma_move_events(struct ucma_context *ctx, struct ucma_file *file)
{
        struct ucma_event *uevent, *tmp;

        list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list)
                if (uevent->ctx == ctx)
                        list_move_tail(&uevent->list, &file->event_list);
}

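/*
 * Move an id, together with any events it has pending, from the file that
 * currently owns it to the calling file. Both files' mutexes are taken in
 * pointer order (see ucma_lock_files()) so that concurrent migrations
 * cannot deadlock.
 */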
static ssize_t ucma_migrate_id(struct ucma_file *new_file,
                               const char __user *inbuf,
                               int in_len, int out_len)
{
        struct rdma_ucm_migrate_id cmd;
        struct rdma_ucm_migrate_resp resp;
        struct ucma_context *ctx;
        struct fd f;
        struct ucma_file *cur_file;
        int ret = 0;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        /* Get current fd to protect against it being closed */
        f = fdget(cmd.fd);
        if (!f.file)
                return -ENOENT;

        /* Validate current fd and prevent destruction of id. */
        ctx = ucma_get_ctx(f.file->private_data, cmd.id);
        if (IS_ERR(ctx)) {
                ret = PTR_ERR(ctx);
                goto file_put;
        }

        cur_file = ctx->file;
        if (cur_file == new_file) {
                resp.events_reported = ctx->events_reported;
                goto response;
        }

        /*
         * Migrate events between fd's, maintaining order, and avoiding new
         * events being added before existing events.
         */
        ucma_lock_files(cur_file, new_file);
        mutex_lock(&mut);

        list_move_tail(&ctx->list, &new_file->ctx_list);
        ucma_move_events(ctx, new_file);
        ctx->file = new_file;
        resp.events_reported = ctx->events_reported;

        mutex_unlock(&mut);
        ucma_unlock_files(cur_file, new_file);

response:
        if (copy_to_user((void __user *)(unsigned long)cmd.response,
                         &resp, sizeof(resp)))
                ret = -EFAULT;

        ucma_put_ctx(ctx);
file_put:
        fdput(f);
        return ret;
}

static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
                                   const char __user *inbuf,
                                   int in_len, int out_len) = {
        [RDMA_USER_CM_CMD_CREATE_ID]     = ucma_create_id,
        [RDMA_USER_CM_CMD_DESTROY_ID]    = ucma_destroy_id,
        [RDMA_USER_CM_CMD_BIND_IP]       = ucma_bind_ip,
        [RDMA_USER_CM_CMD_RESOLVE_IP]    = ucma_resolve_ip,
        [RDMA_USER_CM_CMD_RESOLVE_ROUTE] = ucma_resolve_route,
        [RDMA_USER_CM_CMD_QUERY_ROUTE]   = ucma_query_route,
        [RDMA_USER_CM_CMD_CONNECT]       = ucma_connect,
        [RDMA_USER_CM_CMD_LISTEN]        = ucma_listen,
        [RDMA_USER_CM_CMD_ACCEPT]        = ucma_accept,
        [RDMA_USER_CM_CMD_REJECT]        = ucma_reject,
        [RDMA_USER_CM_CMD_DISCONNECT]    = ucma_disconnect,
        [RDMA_USER_CM_CMD_INIT_QP_ATTR]  = ucma_init_qp_attr,
        [RDMA_USER_CM_CMD_GET_EVENT]     = ucma_get_event,
        [RDMA_USER_CM_CMD_GET_OPTION]    = NULL,
        [RDMA_USER_CM_CMD_SET_OPTION]    = ucma_set_option,
        [RDMA_USER_CM_CMD_NOTIFY]        = ucma_notify,
        [RDMA_USER_CM_CMD_JOIN_IP_MCAST] = ucma_join_ip_multicast,
        [RDMA_USER_CM_CMD_LEAVE_MCAST]   = ucma_leave_multicast,
        [RDMA_USER_CM_CMD_MIGRATE_ID]    = ucma_migrate_id,
        [RDMA_USER_CM_CMD_QUERY]         = ucma_query,
        [RDMA_USER_CM_CMD_BIND]          = ucma_bind,
        [RDMA_USER_CM_CMD_RESOLVE_ADDR]  = ucma_resolve_addr,
        [RDMA_USER_CM_CMD_JOIN_MCAST]    = ucma_join_multicast
};
1579
1580static ssize_t ucma_write(struct file *filp, const char __user *buf,
1581                          size_t len, loff_t *pos)
1582{
1583        struct ucma_file *file = filp->private_data;
1584        struct rdma_ucm_cmd_hdr hdr;
1585        ssize_t ret;
1586
1587        if (!ib_safe_file_access(filp)) {
1588                pr_err_once("ucma_write: process %d (%s) changed security contexts after opening file descriptor; this is not allowed.\n",
1589                            task_tgid_vnr(current), current->comm);
1590                return -EACCES;
1591        }
1592
1593        if (len < sizeof(hdr))
1594                return -EINVAL;
1595
1596        if (copy_from_user(&hdr, buf, sizeof(hdr)))
1597                return -EFAULT;
1598
1599        if (hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
1600                return -EINVAL;
1601
1602        if (hdr.in + sizeof(hdr) > len)
1603                return -EINVAL;
1604
1605        if (!ucma_cmd_table[hdr.cmd])
1606                return -ENOSYS;
1607
1608        ret = ucma_cmd_table[hdr.cmd](file, buf + sizeof(hdr), hdr.in, hdr.out);
1609        if (!ret)
1610                ret = len;
1611
1612        return ret;
1613}
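/*
 * Illustrative sketch (an assumption, not part of this driver): every
 * request follows the wire format validated above, a struct
 * rdma_ucm_cmd_hdr immediately followed by hdr.in bytes of payload in
 * a single write(). A hypothetical CREATE_ID request, with "fd" and
 * "my_cookie" as placeholder variables, could look like:
 *
 *	struct rdma_ucm_create_id_resp resp;
 *	struct {
 *		struct rdma_ucm_cmd_hdr   hdr;
 *		struct rdma_ucm_create_id cmd;
 *	} msg = {
 *		.hdr = {
 *			.cmd = RDMA_USER_CM_CMD_CREATE_ID,
 *			.in  = sizeof(msg.cmd),
 *			.out = sizeof(resp),
 *		},
 *		.cmd = {
 *			.uid      = (uintptr_t)my_cookie,
 *			.response = (uintptr_t)&resp,
 *			.ps       = RDMA_PS_TCP,
 *			.qp_type  = IB_QPT_RC,
 *		},
 *	};
 *
 *	write(fd, &msg, sizeof(msg));
 *
 * A successful write() returns sizeof(msg); a handler failure is
 * returned as a negative errno.
 */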
1614
1615static unsigned int ucma_poll(struct file *filp, struct poll_table_struct *wait)
1616{
1617        struct ucma_file *file = filp->private_data;
1618        unsigned int mask = 0;
1619
1620        poll_wait(filp, &file->poll_wait, wait);
1621
1622        if (!list_empty(&file->event_list))
1623                mask = POLLIN | POLLRDNORM;
1624
1625        return mask;
1626}
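/*
 * Illustrative userspace sketch (an assumption): since POLLIN is
 * reported only while the event list is non-empty, a caller can block
 * in poll() and then fetch the pending event:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		get_event(fd);
 *
 * where get_event() is a hypothetical wrapper issuing
 * RDMA_USER_CM_CMD_GET_EVENT via write(fd, ...).
 */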
1627
1628/*
1629 * ucma_open() does not need the BKL:
1630 *
1631 *  - no global state is referred to;
1632 *  - there is no ioctl method to race against;
1633 *  - no further module initialization is required for open to work
1634 *    after the device is registered.
1635 */
1636static int ucma_open(struct inode *inode, struct file *filp)
1637{
1638        struct ucma_file *file;
1639
1640        file = kmalloc(sizeof *file, GFP_KERNEL);
1641        if (!file)
1642                return -ENOMEM;
1643
1644        file->close_wq = alloc_ordered_workqueue("ucma_close_id",
1645                                                 WQ_MEM_RECLAIM);
1646        if (!file->close_wq) {
1647                kfree(file);
1648                return -ENOMEM;
1649        }
1650
1651        INIT_LIST_HEAD(&file->event_list);
1652        INIT_LIST_HEAD(&file->ctx_list);
1653        init_waitqueue_head(&file->poll_wait);
1654        mutex_init(&file->mut);
1655
1656        filp->private_data = file;
1657        file->filp = filp;
1658
1659        return nonseekable_open(inode, filp);
1660}
1661
1662static int ucma_close(struct inode *inode, struct file *filp)
1663{
1664        struct ucma_file *file = filp->private_data;
1665        struct ucma_context *ctx, *tmp;
1666
1667        mutex_lock(&file->mut);
1668        list_for_each_entry_safe(ctx, tmp, &file->ctx_list, list) {
1669                ctx->destroying = 1;
1670                mutex_unlock(&file->mut);
1671
1672                mutex_lock(&mut);
1673                idr_remove(&ctx_idr, ctx->id);
1674                mutex_unlock(&mut);
1675
1676                flush_workqueue(file->close_wq);
1677                /* At this point, with ctx marked as destroying and the
1678                 * workqueue flushed, we are safe from any in-flight
1679                 * handlers that might queue another closing work item.
1680                 */
1681                mutex_lock(&mut);
1682                if (!ctx->closing) {
1683                        mutex_unlock(&mut);
1684                        /* rdma_destroy_id() ensures that no event handlers
1685                         * are in flight for this id before releasing it.
1686                         */
1687                        rdma_destroy_id(ctx->cm_id);
1688                } else {
1689                        mutex_unlock(&mut);
1690                }
1691
1692                ucma_free_ctx(ctx);
1693                mutex_lock(&file->mut);
1694        }
1695        mutex_unlock(&file->mut);
1696        destroy_workqueue(file->close_wq);
1697        kfree(file);
1698        return 0;
1699}
1700
1701static const struct file_operations ucma_fops = {
1702        .owner   = THIS_MODULE,
1703        .open    = ucma_open,
1704        .release = ucma_close,
1705        .write   = ucma_write,
1706        .poll    = ucma_poll,
1707        .llseek  = no_llseek,
1708};
1709
1710static struct miscdevice ucma_misc = {
1711        .minor          = MISC_DYNAMIC_MINOR,
1712        .name           = "rdma_cm",
1713        .nodename       = "infiniband/rdma_cm",
1714        .mode           = 0666,
1715        .fops           = &ucma_fops,
1716};
1717
1718static ssize_t show_abi_version(struct device *dev,
1719                                struct device_attribute *attr,
1720                                char *buf)
1721{
1722        return sprintf(buf, "%d\n", RDMA_USER_CM_ABI_VERSION);
1723}
1724static DEVICE_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);
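/*
 * Illustrative userspace sketch (an assumption about the standard
 * miscdevice sysfs layout, under which the attribute above appears as
 * /sys/class/misc/rdma_cm/abi_version): check ABI compatibility
 * before issuing commands:
 *
 *	FILE *f = fopen("/sys/class/misc/rdma_cm/abi_version", "r");
 *	int abi = -1;
 *
 *	if (f) {
 *		if (fscanf(f, "%d", &abi) != 1)
 *			abi = -1;
 *		fclose(f);
 *	}
 *	if (abi != RDMA_USER_CM_ABI_VERSION)
 *		return -1;
 */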
1725
1726static int __init ucma_init(void)
1727{
1728        int ret;
1729
1730        ret = misc_register(&ucma_misc);
1731        if (ret)
1732                return ret;
1733
1734        ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
1735        if (ret) {
1736                pr_err("rdma_ucm: couldn't create abi_version attr\n");
1737                goto err1;
1738        }
1739
1740        ucma_ctl_table_hdr = register_net_sysctl(&init_net, "net/rdma_ucm", ucma_ctl_table);
1741        if (!ucma_ctl_table_hdr) {
1742                pr_err("rdma_ucm: couldn't register sysctl paths\n");
1743                ret = -ENOMEM;
1744                goto err2;
1745        }
1746        return 0;
1747err2:
1748        device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
1749err1:
1750        misc_deregister(&ucma_misc);
1751        return ret;
1752}
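/*
 * Illustrative note (an assumption about standard sysctl path
 * construction): registering "net/rdma_ucm" against &init_net exposes
 * max_backlog at /proc/sys/net/rdma_ucm/max_backlog, so the listen
 * backlog cap can be raised at runtime, e.g. from userspace:
 *
 *	int fd = open("/proc/sys/net/rdma_ucm/max_backlog", O_WRONLY);
 *
 *	if (fd >= 0) {
 *		write(fd, "4096", 4);
 *		close(fd);
 *	}
 */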
1753
1754static void __exit ucma_cleanup(void)
1755{
1756        unregister_net_sysctl_table(ucma_ctl_table_hdr);
1757        device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
1758        misc_deregister(&ucma_misc);
1759        idr_destroy(&ctx_idr);
1760        idr_destroy(&multicast_idr);
1761}
1762
1763module_init(ucma_init);
1764module_exit(ucma_cleanup);
1765