linux/drivers/infiniband/core/uverbs_main.c
/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/cdev.h>
#include <linux/anon_inodes.h>
#include <linux/slab.h>

#include <asm/uaccess.h>

#include "uverbs.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand userspace verbs access");
MODULE_LICENSE("Dual BSD/GPL");

enum {
        IB_UVERBS_MAJOR       = 231,
        IB_UVERBS_BASE_MINOR  = 192,
        IB_UVERBS_MAX_DEVICES = 32
};

#define IB_UVERBS_BASE_DEV      MKDEV(IB_UVERBS_MAJOR, IB_UVERBS_BASE_MINOR)

static struct class *uverbs_class;

DEFINE_SPINLOCK(ib_uverbs_idr_lock);
DEFINE_IDR(ib_uverbs_pd_idr);
DEFINE_IDR(ib_uverbs_mr_idr);
DEFINE_IDR(ib_uverbs_mw_idr);
DEFINE_IDR(ib_uverbs_ah_idr);
DEFINE_IDR(ib_uverbs_cq_idr);
DEFINE_IDR(ib_uverbs_qp_idr);
DEFINE_IDR(ib_uverbs_srq_idr);
DEFINE_IDR(ib_uverbs_xrcd_idr);

static DEFINE_SPINLOCK(map_lock);
static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES);

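/*
 * Dispatch table for the commands received through write() on a uverbs
 * device file, indexed by the command field of struct ib_uverbs_cmd_hdr.
 */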
static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file,
                                     const char __user *buf, int in_len,
                                     int out_len) = {
        [IB_USER_VERBS_CMD_GET_CONTEXT]         = ib_uverbs_get_context,
        [IB_USER_VERBS_CMD_QUERY_DEVICE]        = ib_uverbs_query_device,
        [IB_USER_VERBS_CMD_QUERY_PORT]          = ib_uverbs_query_port,
        [IB_USER_VERBS_CMD_ALLOC_PD]            = ib_uverbs_alloc_pd,
        [IB_USER_VERBS_CMD_DEALLOC_PD]          = ib_uverbs_dealloc_pd,
        [IB_USER_VERBS_CMD_REG_MR]              = ib_uverbs_reg_mr,
        [IB_USER_VERBS_CMD_DEREG_MR]            = ib_uverbs_dereg_mr,
        [IB_USER_VERBS_CMD_ALLOC_MW]            = ib_uverbs_alloc_mw,
        [IB_USER_VERBS_CMD_DEALLOC_MW]          = ib_uverbs_dealloc_mw,
        [IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL] = ib_uverbs_create_comp_channel,
        [IB_USER_VERBS_CMD_CREATE_CQ]           = ib_uverbs_create_cq,
        [IB_USER_VERBS_CMD_RESIZE_CQ]           = ib_uverbs_resize_cq,
        [IB_USER_VERBS_CMD_POLL_CQ]             = ib_uverbs_poll_cq,
        [IB_USER_VERBS_CMD_REQ_NOTIFY_CQ]       = ib_uverbs_req_notify_cq,
        [IB_USER_VERBS_CMD_DESTROY_CQ]          = ib_uverbs_destroy_cq,
        [IB_USER_VERBS_CMD_CREATE_QP]           = ib_uverbs_create_qp,
        [IB_USER_VERBS_CMD_QUERY_QP]            = ib_uverbs_query_qp,
        [IB_USER_VERBS_CMD_MODIFY_QP]           = ib_uverbs_modify_qp,
        [IB_USER_VERBS_CMD_DESTROY_QP]          = ib_uverbs_destroy_qp,
        [IB_USER_VERBS_CMD_POST_SEND]           = ib_uverbs_post_send,
        [IB_USER_VERBS_CMD_POST_RECV]           = ib_uverbs_post_recv,
        [IB_USER_VERBS_CMD_POST_SRQ_RECV]       = ib_uverbs_post_srq_recv,
        [IB_USER_VERBS_CMD_CREATE_AH]           = ib_uverbs_create_ah,
        [IB_USER_VERBS_CMD_DESTROY_AH]          = ib_uverbs_destroy_ah,
        [IB_USER_VERBS_CMD_ATTACH_MCAST]        = ib_uverbs_attach_mcast,
        [IB_USER_VERBS_CMD_DETACH_MCAST]        = ib_uverbs_detach_mcast,
        [IB_USER_VERBS_CMD_CREATE_SRQ]          = ib_uverbs_create_srq,
        [IB_USER_VERBS_CMD_MODIFY_SRQ]          = ib_uverbs_modify_srq,
        [IB_USER_VERBS_CMD_QUERY_SRQ]           = ib_uverbs_query_srq,
        [IB_USER_VERBS_CMD_DESTROY_SRQ]         = ib_uverbs_destroy_srq,
        [IB_USER_VERBS_CMD_OPEN_XRCD]           = ib_uverbs_open_xrcd,
        [IB_USER_VERBS_CMD_CLOSE_XRCD]          = ib_uverbs_close_xrcd,
        [IB_USER_VERBS_CMD_CREATE_XSRQ]         = ib_uverbs_create_xsrq,
        [IB_USER_VERBS_CMD_OPEN_QP]             = ib_uverbs_open_qp
};

static void ib_uverbs_add_one(struct ib_device *device);
static void ib_uverbs_remove_one(struct ib_device *device);

static void ib_uverbs_release_dev(struct kref *ref)
{
        struct ib_uverbs_device *dev =
                container_of(ref, struct ib_uverbs_device, ref);

        complete(&dev->comp);
}

static void ib_uverbs_release_event_file(struct kref *ref)
{
        struct ib_uverbs_event_file *file =
                container_of(ref, struct ib_uverbs_event_file, ref);

        kfree(file);
}

void ib_uverbs_release_ucq(struct ib_uverbs_file *file,
                          struct ib_uverbs_event_file *ev_file,
                          struct ib_ucq_object *uobj)
{
        struct ib_uverbs_event *evt, *tmp;

        if (ev_file) {
                spin_lock_irq(&ev_file->lock);
                list_for_each_entry_safe(evt, tmp, &uobj->comp_list, obj_list) {
                        list_del(&evt->list);
                        kfree(evt);
                }
                spin_unlock_irq(&ev_file->lock);

                kref_put(&ev_file->ref, ib_uverbs_release_event_file);
        }

        spin_lock_irq(&file->async_file->lock);
        list_for_each_entry_safe(evt, tmp, &uobj->async_list, obj_list) {
                list_del(&evt->list);
                kfree(evt);
        }
        spin_unlock_irq(&file->async_file->lock);
}

void ib_uverbs_release_uevent(struct ib_uverbs_file *file,
                              struct ib_uevent_object *uobj)
{
        struct ib_uverbs_event *evt, *tmp;

        spin_lock_irq(&file->async_file->lock);
        list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) {
                list_del(&evt->list);
                kfree(evt);
        }
        spin_unlock_irq(&file->async_file->lock);
}

static void ib_uverbs_detach_umcast(struct ib_qp *qp,
                                    struct ib_uqp_object *uobj)
{
        struct ib_uverbs_mcast_entry *mcast, *tmp;

        list_for_each_entry_safe(mcast, tmp, &uobj->mcast_list, list) {
                ib_detach_mcast(qp, &mcast->gid, mcast->lid);
                list_del(&mcast->list);
                kfree(mcast);
        }
}

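/*
 * Release every object still attached to a ucontext when its file is
 * torn down.  Objects are destroyed in dependency order: AHs and MWs
 * first, then QPs, CQs, SRQs, MRs and XRCDs, and finally the PDs that
 * the other objects reference.
 */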
static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
                                      struct ib_ucontext *context)
{
        struct ib_uobject *uobj, *tmp;

        if (!context)
                return 0;

        context->closing = 1;

        list_for_each_entry_safe(uobj, tmp, &context->ah_list, list) {
                struct ib_ah *ah = uobj->object;

                idr_remove_uobj(&ib_uverbs_ah_idr, uobj);
                ib_destroy_ah(ah);
                kfree(uobj);
        }

        /* Remove MWs before QPs, in order to support type 2A MWs. */
        list_for_each_entry_safe(uobj, tmp, &context->mw_list, list) {
                struct ib_mw *mw = uobj->object;

                idr_remove_uobj(&ib_uverbs_mw_idr, uobj);
                ib_dealloc_mw(mw);
                kfree(uobj);
        }

        list_for_each_entry_safe(uobj, tmp, &context->qp_list, list) {
                struct ib_qp *qp = uobj->object;
                struct ib_uqp_object *uqp =
                        container_of(uobj, struct ib_uqp_object, uevent.uobject);

                idr_remove_uobj(&ib_uverbs_qp_idr, uobj);
                if (qp != qp->real_qp) {
                        ib_close_qp(qp);
                } else {
                        ib_uverbs_detach_umcast(qp, uqp);
                        ib_destroy_qp(qp);
                }
                ib_uverbs_release_uevent(file, &uqp->uevent);
                kfree(uqp);
        }

        list_for_each_entry_safe(uobj, tmp, &context->cq_list, list) {
                struct ib_cq *cq = uobj->object;
                struct ib_uverbs_event_file *ev_file = cq->cq_context;
                struct ib_ucq_object *ucq =
                        container_of(uobj, struct ib_ucq_object, uobject);

                idr_remove_uobj(&ib_uverbs_cq_idr, uobj);
                ib_destroy_cq(cq);
                ib_uverbs_release_ucq(file, ev_file, ucq);
                kfree(ucq);
        }

        list_for_each_entry_safe(uobj, tmp, &context->srq_list, list) {
                struct ib_srq *srq = uobj->object;
                struct ib_uevent_object *uevent =
                        container_of(uobj, struct ib_uevent_object, uobject);

                idr_remove_uobj(&ib_uverbs_srq_idr, uobj);
                ib_destroy_srq(srq);
                ib_uverbs_release_uevent(file, uevent);
                kfree(uevent);
        }

        list_for_each_entry_safe(uobj, tmp, &context->mr_list, list) {
                struct ib_mr *mr = uobj->object;

                idr_remove_uobj(&ib_uverbs_mr_idr, uobj);
                ib_dereg_mr(mr);
                kfree(uobj);
        }

        mutex_lock(&file->device->xrcd_tree_mutex);
        list_for_each_entry_safe(uobj, tmp, &context->xrcd_list, list) {
                struct ib_xrcd *xrcd = uobj->object;
                struct ib_uxrcd_object *uxrcd =
                        container_of(uobj, struct ib_uxrcd_object, uobject);

                idr_remove_uobj(&ib_uverbs_xrcd_idr, uobj);
                ib_uverbs_dealloc_xrcd(file->device, xrcd);
                kfree(uxrcd);
        }
        mutex_unlock(&file->device->xrcd_tree_mutex);

        list_for_each_entry_safe(uobj, tmp, &context->pd_list, list) {
                struct ib_pd *pd = uobj->object;

                idr_remove_uobj(&ib_uverbs_pd_idr, uobj);
                ib_dealloc_pd(pd);
                kfree(uobj);
        }

        return context->device->dealloc_ucontext(context);
}

static void ib_uverbs_release_file(struct kref *ref)
{
        struct ib_uverbs_file *file =
                container_of(ref, struct ib_uverbs_file, ref);

        module_put(file->device->ib_dev->owner);
        kref_put(&file->device->ref, ib_uverbs_release_dev);

        kfree(file);
}

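/*
 * read() on a completion or async event file blocks until an event is
 * queued (unless O_NONBLOCK is set), then copies a single event
 * descriptor to userspace.
 */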
static ssize_t ib_uverbs_event_read(struct file *filp, char __user *buf,
                                    size_t count, loff_t *pos)
{
        struct ib_uverbs_event_file *file = filp->private_data;
        struct ib_uverbs_event *event;
        int eventsz;
        int ret = 0;

        spin_lock_irq(&file->lock);

        while (list_empty(&file->event_list)) {
                spin_unlock_irq(&file->lock);

                if (filp->f_flags & O_NONBLOCK)
                        return -EAGAIN;

                if (wait_event_interruptible(file->poll_wait,
                                             !list_empty(&file->event_list)))
                        return -ERESTARTSYS;

                spin_lock_irq(&file->lock);
        }

        event = list_entry(file->event_list.next, struct ib_uverbs_event, list);

        if (file->is_async)
                eventsz = sizeof (struct ib_uverbs_async_event_desc);
        else
                eventsz = sizeof (struct ib_uverbs_comp_event_desc);

        if (eventsz > count) {
                ret   = -EINVAL;
                event = NULL;
        } else {
                list_del(file->event_list.next);
                if (event->counter) {
                        ++(*event->counter);
                        list_del(&event->obj_list);
                }
        }

        spin_unlock_irq(&file->lock);

        if (event) {
                if (copy_to_user(buf, event, eventsz))
                        ret = -EFAULT;
                else
                        ret = eventsz;
        }

        kfree(event);

        return ret;
}

static unsigned int ib_uverbs_event_poll(struct file *filp,
                                         struct poll_table_struct *wait)
{
        unsigned int pollflags = 0;
        struct ib_uverbs_event_file *file = filp->private_data;

        poll_wait(filp, &file->poll_wait, wait);

        spin_lock_irq(&file->lock);
        if (!list_empty(&file->event_list))
                pollflags = POLLIN | POLLRDNORM;
        spin_unlock_irq(&file->lock);

        return pollflags;
}

static int ib_uverbs_event_fasync(int fd, struct file *filp, int on)
{
        struct ib_uverbs_event_file *file = filp->private_data;

        return fasync_helper(fd, filp, on, &file->async_queue);
}

static int ib_uverbs_event_close(struct inode *inode, struct file *filp)
{
        struct ib_uverbs_event_file *file = filp->private_data;
        struct ib_uverbs_event *entry, *tmp;

        spin_lock_irq(&file->lock);
        file->is_closed = 1;
        list_for_each_entry_safe(entry, tmp, &file->event_list, list) {
                if (entry->counter)
                        list_del(&entry->obj_list);
                kfree(entry);
        }
        spin_unlock_irq(&file->lock);

        if (file->is_async) {
                ib_unregister_event_handler(&file->uverbs_file->event_handler);
                kref_put(&file->uverbs_file->ref, ib_uverbs_release_file);
        }
        kref_put(&file->ref, ib_uverbs_release_event_file);

        return 0;
}

static const struct file_operations uverbs_event_fops = {
        .owner   = THIS_MODULE,
        .read    = ib_uverbs_event_read,
        .poll    = ib_uverbs_event_poll,
        .release = ib_uverbs_event_close,
        .fasync  = ib_uverbs_event_fasync,
        .llseek  = no_llseek,
};

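/*
 * Completion callback registered on user CQs: queue a completion event
 * on the CQ's completion channel and wake up any readers or pollers.
 */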
void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context)
{
        struct ib_uverbs_event_file    *file = cq_context;
        struct ib_ucq_object           *uobj;
        struct ib_uverbs_event         *entry;
        unsigned long                   flags;

        if (!file)
                return;

        spin_lock_irqsave(&file->lock, flags);
        if (file->is_closed) {
                spin_unlock_irqrestore(&file->lock, flags);
                return;
        }

        entry = kmalloc(sizeof *entry, GFP_ATOMIC);
        if (!entry) {
                spin_unlock_irqrestore(&file->lock, flags);
                return;
        }

        uobj = container_of(cq->uobject, struct ib_ucq_object, uobject);

        entry->desc.comp.cq_handle = cq->uobject->user_handle;
        entry->counter             = &uobj->comp_events_reported;

        list_add_tail(&entry->list, &file->event_list);
        list_add_tail(&entry->obj_list, &uobj->comp_list);
        spin_unlock_irqrestore(&file->lock, flags);

        wake_up_interruptible(&file->poll_wait);
        kill_fasync(&file->async_queue, SIGIO, POLL_IN);
}

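/*
 * Queue an asynchronous event (CQ, QP, SRQ or port event) on the file's
 * async event file and notify any waiters.
 */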
static void ib_uverbs_async_handler(struct ib_uverbs_file *file,
                                    __u64 element, __u64 event,
                                    struct list_head *obj_list,
                                    u32 *counter)
{
        struct ib_uverbs_event *entry;
        unsigned long flags;

        spin_lock_irqsave(&file->async_file->lock, flags);
        if (file->async_file->is_closed) {
                spin_unlock_irqrestore(&file->async_file->lock, flags);
                return;
        }

        entry = kmalloc(sizeof *entry, GFP_ATOMIC);
        if (!entry) {
                spin_unlock_irqrestore(&file->async_file->lock, flags);
                return;
        }

        entry->desc.async.element    = element;
        entry->desc.async.event_type = event;
        entry->counter               = counter;

        list_add_tail(&entry->list, &file->async_file->event_list);
        if (obj_list)
                list_add_tail(&entry->obj_list, obj_list);
        spin_unlock_irqrestore(&file->async_file->lock, flags);

        wake_up_interruptible(&file->async_file->poll_wait);
        kill_fasync(&file->async_file->async_queue, SIGIO, POLL_IN);
}

void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr)
{
        struct ib_ucq_object *uobj = container_of(event->element.cq->uobject,
                                                  struct ib_ucq_object, uobject);

        ib_uverbs_async_handler(uobj->uverbs_file, uobj->uobject.user_handle,
                                event->event, &uobj->async_list,
                                &uobj->async_events_reported);
}

void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr)
{
        struct ib_uevent_object *uobj;

        uobj = container_of(event->element.qp->uobject,
                            struct ib_uevent_object, uobject);

        ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
                                event->event, &uobj->event_list,
                                &uobj->events_reported);
}

void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr)
{
        struct ib_uevent_object *uobj;

        uobj = container_of(event->element.srq->uobject,
                            struct ib_uevent_object, uobject);

        ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
                                event->event, &uobj->event_list,
                                &uobj->events_reported);
}

void ib_uverbs_event_handler(struct ib_event_handler *handler,
                             struct ib_event *event)
{
        struct ib_uverbs_file *file =
                container_of(handler, struct ib_uverbs_file, event_handler);

        ib_uverbs_async_handler(file, event->element.port_num, event->event,
                                NULL, NULL);
}

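/*
 * Allocate the anonymous-inode file backing a completion channel
 * (is_async == 0) or the asynchronous event stream (is_async != 0).
 */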
struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file,
                                        int is_async)
{
        struct ib_uverbs_event_file *ev_file;
        struct file *filp;

        ev_file = kmalloc(sizeof *ev_file, GFP_KERNEL);
        if (!ev_file)
                return ERR_PTR(-ENOMEM);

        kref_init(&ev_file->ref);
        spin_lock_init(&ev_file->lock);
        INIT_LIST_HEAD(&ev_file->event_list);
        init_waitqueue_head(&ev_file->poll_wait);
        ev_file->uverbs_file = uverbs_file;
        ev_file->async_queue = NULL;
        ev_file->is_async    = is_async;
        ev_file->is_closed   = 0;

        filp = anon_inode_getfile("[infinibandevent]", &uverbs_event_fops,
                                  ev_file, O_RDONLY);
        if (IS_ERR(filp))
                kfree(ev_file);

        return filp;
}

/*
 * Look up a completion event file by FD.  If lookup is successful,
 * takes a ref to the event file struct that it returns; if
 * unsuccessful, returns NULL.
 */
struct ib_uverbs_event_file *ib_uverbs_lookup_comp_file(int fd)
{
        struct ib_uverbs_event_file *ev_file = NULL;
        struct fd f = fdget(fd);

        if (!f.file)
                return NULL;

        if (f.file->f_op != &uverbs_event_fops)
                goto out;

        ev_file = f.file->private_data;
        if (ev_file->is_async) {
                ev_file = NULL;
                goto out;
        }

        kref_get(&ev_file->ref);

out:
        fdput(f);
        return ev_file;
}

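/*
 * Each verbs command is written as a struct ib_uverbs_cmd_hdr followed
 * by the command payload; validate the header and hand the buffer to
 * the matching entry in uverbs_cmd_table.
 */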
static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
                             size_t count, loff_t *pos)
{
        struct ib_uverbs_file *file = filp->private_data;
        struct ib_uverbs_cmd_hdr hdr;

        if (count < sizeof hdr)
                return -EINVAL;

        if (copy_from_user(&hdr, buf, sizeof hdr))
                return -EFAULT;

        if (hdr.in_words * 4 != count)
                return -EINVAL;

        if (hdr.command >= ARRAY_SIZE(uverbs_cmd_table) ||
            !uverbs_cmd_table[hdr.command])
                return -EINVAL;

        if (!file->ucontext &&
            hdr.command != IB_USER_VERBS_CMD_GET_CONTEXT)
                return -EINVAL;

        if (!(file->device->ib_dev->uverbs_cmd_mask & (1ull << hdr.command)))
                return -ENOSYS;

        return uverbs_cmd_table[hdr.command](file, buf + sizeof hdr,
                                             hdr.in_words * 4, hdr.out_words * 4);
}

static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct ib_uverbs_file *file = filp->private_data;

        if (!file->ucontext)
                return -ENODEV;
        else
                return file->device->ib_dev->mmap(file->ucontext, vma);
}

/*
 * ib_uverbs_open() does not need the BKL:
 *
 *  - the ib_uverbs_device structures are properly reference counted and
 *    everything else is purely local to the file being created, so
 *    races against other open calls are not a problem;
 *  - there is no ioctl method to race against;
 *  - the open method will either fail immediately with -ENXIO, or all
 *    the required initialization will be done.
 */
static int ib_uverbs_open(struct inode *inode, struct file *filp)
{
        struct ib_uverbs_device *dev;
        struct ib_uverbs_file *file;
        int ret;

        dev = container_of(inode->i_cdev, struct ib_uverbs_device, cdev);
        if (dev)
                kref_get(&dev->ref);
        else
                return -ENXIO;

        if (!try_module_get(dev->ib_dev->owner)) {
                ret = -ENODEV;
                goto err;
        }

        file = kmalloc(sizeof *file, GFP_KERNEL);
        if (!file) {
                ret = -ENOMEM;
                goto err_module;
        }

        file->device     = dev;
        file->ucontext   = NULL;
        file->async_file = NULL;
        kref_init(&file->ref);
        mutex_init(&file->mutex);

        filp->private_data = file;

        return nonseekable_open(inode, filp);

err_module:
        module_put(dev->ib_dev->owner);

err:
        kref_put(&dev->ref, ib_uverbs_release_dev);
        return ret;
}

static int ib_uverbs_close(struct inode *inode, struct file *filp)
{
        struct ib_uverbs_file *file = filp->private_data;

        ib_uverbs_cleanup_ucontext(file, file->ucontext);

        if (file->async_file)
                kref_put(&file->async_file->ref, ib_uverbs_release_event_file);

        kref_put(&file->ref, ib_uverbs_release_file);

        return 0;
}

static const struct file_operations uverbs_fops = {
        .owner   = THIS_MODULE,
        .write   = ib_uverbs_write,
        .open    = ib_uverbs_open,
        .release = ib_uverbs_close,
        .llseek  = no_llseek,
};

static const struct file_operations uverbs_mmap_fops = {
        .owner   = THIS_MODULE,
        .write   = ib_uverbs_write,
        .mmap    = ib_uverbs_mmap,
        .open    = ib_uverbs_open,
        .release = ib_uverbs_close,
        .llseek  = no_llseek,
};

static struct ib_client uverbs_client = {
        .name   = "uverbs",
        .add    = ib_uverbs_add_one,
        .remove = ib_uverbs_remove_one
};

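/* Per-device sysfs attributes: underlying IB device name and uverbs ABI version. */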
static ssize_t show_ibdev(struct device *device, struct device_attribute *attr,
                          char *buf)
{
        struct ib_uverbs_device *dev = dev_get_drvdata(device);

        if (!dev)
                return -ENODEV;

        return sprintf(buf, "%s\n", dev->ib_dev->name);
}
static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);

static ssize_t show_dev_abi_version(struct device *device,
                                    struct device_attribute *attr, char *buf)
{
        struct ib_uverbs_device *dev = dev_get_drvdata(device);

        if (!dev)
                return -ENODEV;

        return sprintf(buf, "%d\n", dev->ib_dev->uverbs_abi_ver);
}
static DEVICE_ATTR(abi_version, S_IRUGO, show_dev_abi_version, NULL);

static CLASS_ATTR_STRING(abi_version, S_IRUGO,
                         __stringify(IB_USER_VERBS_ABI_VERSION));

static dev_t overflow_maj;
static DECLARE_BITMAP(overflow_map, IB_UVERBS_MAX_DEVICES);

/*
 * If we have more than IB_UVERBS_MAX_DEVICES, dynamically overflow by
 * requesting a new major number and doubling the number of max devices we
 * support. It's stupid, but simple.
 */
static int find_overflow_devnum(void)
{
        int ret;

        if (!overflow_maj) {
                ret = alloc_chrdev_region(&overflow_maj, 0, IB_UVERBS_MAX_DEVICES,
                                          "infiniband_verbs");
                if (ret) {
                        printk(KERN_ERR "user_verbs: couldn't register dynamic device number\n");
                        return ret;
                }
        }

        ret = find_first_zero_bit(overflow_map, IB_UVERBS_MAX_DEVICES);
        if (ret >= IB_UVERBS_MAX_DEVICES)
                return -1;

        return ret;
}

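/*
 * ib_client add callback: create the uverbs%d character device and sysfs
 * entries for a newly registered IB device that supports userspace verbs
 * (i.e. provides alloc_ucontext).
 */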
static void ib_uverbs_add_one(struct ib_device *device)
{
        int devnum;
        dev_t base;
        struct ib_uverbs_device *uverbs_dev;

        if (!device->alloc_ucontext)
                return;

        uverbs_dev = kzalloc(sizeof *uverbs_dev, GFP_KERNEL);
        if (!uverbs_dev)
                return;

        kref_init(&uverbs_dev->ref);
        init_completion(&uverbs_dev->comp);
        uverbs_dev->xrcd_tree = RB_ROOT;
        mutex_init(&uverbs_dev->xrcd_tree_mutex);

        spin_lock(&map_lock);
        devnum = find_first_zero_bit(dev_map, IB_UVERBS_MAX_DEVICES);
        if (devnum >= IB_UVERBS_MAX_DEVICES) {
                spin_unlock(&map_lock);
                devnum = find_overflow_devnum();
                if (devnum < 0)
                        goto err;

                spin_lock(&map_lock);
                uverbs_dev->devnum = devnum + IB_UVERBS_MAX_DEVICES;
                base = devnum + overflow_maj;
                set_bit(devnum, overflow_map);
        } else {
                uverbs_dev->devnum = devnum;
                base = devnum + IB_UVERBS_BASE_DEV;
                set_bit(devnum, dev_map);
        }
        spin_unlock(&map_lock);

        uverbs_dev->ib_dev           = device;
        uverbs_dev->num_comp_vectors = device->num_comp_vectors;

        cdev_init(&uverbs_dev->cdev, NULL);
        uverbs_dev->cdev.owner = THIS_MODULE;
        uverbs_dev->cdev.ops = device->mmap ? &uverbs_mmap_fops : &uverbs_fops;
        kobject_set_name(&uverbs_dev->cdev.kobj, "uverbs%d", uverbs_dev->devnum);
        if (cdev_add(&uverbs_dev->cdev, base, 1))
                goto err_cdev;

        uverbs_dev->dev = device_create(uverbs_class, device->dma_device,
                                        uverbs_dev->cdev.dev, uverbs_dev,
                                        "uverbs%d", uverbs_dev->devnum);
        if (IS_ERR(uverbs_dev->dev))
                goto err_cdev;

        if (device_create_file(uverbs_dev->dev, &dev_attr_ibdev))
                goto err_class;
        if (device_create_file(uverbs_dev->dev, &dev_attr_abi_version))
                goto err_class;

        ib_set_client_data(device, &uverbs_client, uverbs_dev);

        return;

err_class:
        device_destroy(uverbs_class, uverbs_dev->cdev.dev);

err_cdev:
        cdev_del(&uverbs_dev->cdev);
        if (uverbs_dev->devnum < IB_UVERBS_MAX_DEVICES)
                clear_bit(devnum, dev_map);
        else
                clear_bit(devnum, overflow_map);

err:
        kref_put(&uverbs_dev->ref, ib_uverbs_release_dev);
        wait_for_completion(&uverbs_dev->comp);
        kfree(uverbs_dev);
        return;
}

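/*
 * ib_client remove callback: tear down the character device and sysfs
 * entries, then wait for all open files to drop their references before
 * freeing the ib_uverbs_device.
 */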
static void ib_uverbs_remove_one(struct ib_device *device)
{
        struct ib_uverbs_device *uverbs_dev = ib_get_client_data(device, &uverbs_client);

        if (!uverbs_dev)
                return;

        dev_set_drvdata(uverbs_dev->dev, NULL);
        device_destroy(uverbs_class, uverbs_dev->cdev.dev);
        cdev_del(&uverbs_dev->cdev);

        if (uverbs_dev->devnum < IB_UVERBS_MAX_DEVICES)
                clear_bit(uverbs_dev->devnum, dev_map);
        else
                clear_bit(uverbs_dev->devnum - IB_UVERBS_MAX_DEVICES, overflow_map);

        kref_put(&uverbs_dev->ref, ib_uverbs_release_dev);
        wait_for_completion(&uverbs_dev->comp);
        kfree(uverbs_dev);
}

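/* Place device nodes under /dev/infiniband/ and make them world accessible. */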
static char *uverbs_devnode(struct device *dev, umode_t *mode)
{
        if (mode)
                *mode = 0666;
        return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
}

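/*
 * Module init: reserve the static char device region, create the
 * infiniband_verbs class and register as an IB client so that
 * ib_uverbs_add_one() is called for each IB device.
 */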
static int __init ib_uverbs_init(void)
{
        int ret;

        ret = register_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES,
                                     "infiniband_verbs");
        if (ret) {
                printk(KERN_ERR "user_verbs: couldn't register device number\n");
                goto out;
        }

        uverbs_class = class_create(THIS_MODULE, "infiniband_verbs");
        if (IS_ERR(uverbs_class)) {
                ret = PTR_ERR(uverbs_class);
                printk(KERN_ERR "user_verbs: couldn't create class infiniband_verbs\n");
                goto out_chrdev;
        }

        uverbs_class->devnode = uverbs_devnode;

        ret = class_create_file(uverbs_class, &class_attr_abi_version.attr);
        if (ret) {
                printk(KERN_ERR "user_verbs: couldn't create abi_version attribute\n");
                goto out_class;
        }

        ret = ib_register_client(&uverbs_client);
        if (ret) {
                printk(KERN_ERR "user_verbs: couldn't register client\n");
                goto out_class;
        }

        return 0;

out_class:
        class_destroy(uverbs_class);

out_chrdev:
        unregister_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES);

out:
        return ret;
}

static void __exit ib_uverbs_cleanup(void)
{
        ib_unregister_client(&uverbs_client);
        class_destroy(uverbs_class);
        unregister_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES);
        if (overflow_maj)
                unregister_chrdev_region(overflow_maj, IB_UVERBS_MAX_DEVICES);
        idr_destroy(&ib_uverbs_pd_idr);
        idr_destroy(&ib_uverbs_mr_idr);
        idr_destroy(&ib_uverbs_mw_idr);
        idr_destroy(&ib_uverbs_ah_idr);
        idr_destroy(&ib_uverbs_cq_idr);
        idr_destroy(&ib_uverbs_qp_idr);
        idr_destroy(&ib_uverbs_srq_idr);
}

module_init(ib_uverbs_init);
module_exit(ib_uverbs_cleanup);