linux/drivers/infiniband/core/rdma_core.c
/*
 * Copyright (c) 2016, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/file.h>
#include <linux/anon_inodes.h>
#include <linux/sched/mm.h>
#include <rdma/ib_verbs.h>
#include <rdma/uverbs_types.h>
#include <linux/rcupdate.h>
#include <rdma/uverbs_ioctl.h>
#include <rdma/rdma_user_ioctl.h>
#include "uverbs.h"
#include "core_priv.h"
#include "rdma_core.h"

static void uverbs_uobject_free(struct kref *ref)
{
        kfree_rcu(container_of(ref, struct ib_uobject, ref), rcu);
}

/*
 * uverbs_uobject_put() is called to indicate that we no longer need this
 * uobject. When the reference count drops to zero, the uobject is freed.
 * For example, this is used when attaching a completion channel to a CQ.
 */
void uverbs_uobject_put(struct ib_uobject *uobject)
{
        kref_put(&uobject->ref, uverbs_uobject_free);
}
EXPORT_SYMBOL(uverbs_uobject_put);

static int uverbs_try_lock_object(struct ib_uobject *uobj,
                                  enum rdma_lookup_mode mode)
{
        /*
         * When a shared access is required, we use a positive counter. Each
         * shared access request checks that the value != -1 and increments
         * it. Exclusive access is required for operations like write or
         * destroy. In exclusive access mode, we check that the counter is
         * zero (nobody claimed this object) and we set it to -1. Releasing
         * a shared access lock is done simply by decreasing the counter. As
         * for exclusive access locks, since only a single one of them is
         * allowed concurrently, setting the counter to zero is enough for
         * releasing this lock.
         */
        switch (mode) {
        case UVERBS_LOOKUP_READ:
                return atomic_fetch_add_unless(&uobj->usecnt, 1, -1) == -1 ?
                        -EBUSY : 0;
        case UVERBS_LOOKUP_WRITE:
                /* lock is exclusive */
                return atomic_cmpxchg(&uobj->usecnt, 0, -1) == 0 ? 0 : -EBUSY;
        case UVERBS_LOOKUP_DESTROY:
                return 0;
        }
        return 0;
}

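/*
 * Worked example of the usecnt protocol above (a sketch, not a real caller;
 * the annotated values are the atomic counter):
 *
 *      atomic_set(&uobj->usecnt, 0);
 *      uverbs_try_lock_object(uobj, UVERBS_LOOKUP_READ);   // 0 -> 1, ok
 *      uverbs_try_lock_object(uobj, UVERBS_LOOKUP_READ);   // 1 -> 2, ok
 *      uverbs_try_lock_object(uobj, UVERBS_LOOKUP_WRITE);  // -EBUSY, readers
 *      atomic_dec(&uobj->usecnt);                          // 2 -> 1, put
 *      atomic_dec(&uobj->usecnt);                          // 1 -> 0, put
 *      uverbs_try_lock_object(uobj, UVERBS_LOOKUP_WRITE);  // 0 -> -1, ok
 */
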
static void assert_uverbs_usecnt(struct ib_uobject *uobj,
                                 enum rdma_lookup_mode mode)
{
#ifdef CONFIG_LOCKDEP
        switch (mode) {
        case UVERBS_LOOKUP_READ:
                WARN_ON(atomic_read(&uobj->usecnt) <= 0);
                break;
        case UVERBS_LOOKUP_WRITE:
                WARN_ON(atomic_read(&uobj->usecnt) != -1);
                break;
        case UVERBS_LOOKUP_DESTROY:
                break;
        }
#endif
}

/*
 * This must be called with the hw_destroy_rwsem locked for read or write,
 * also the uobject itself must be locked for write.
 *
 * Upon return the HW object is guaranteed to be destroyed.
 *
 * For RDMA_REMOVE_ABORT, the hw_destroy_rwsem is not required to be held,
 * however the type's alloc_commit function cannot have been called and the
 * uobject cannot be on the uobjects list.
 *
 * For RDMA_REMOVE_DESTROY the caller should be holding a kref (eg via
 * rdma_lookup_get_uobject) and the object is left in a state where the caller
 * needs to call rdma_lookup_put_uobject.
 *
 * For all other destroy modes this function internally unlocks the uobject
 * and consumes the kref on the uobj.
 */
static int uverbs_destroy_uobject(struct ib_uobject *uobj,
                                  enum rdma_remove_reason reason,
                                  struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_file *ufile = attrs->ufile;
        unsigned long flags;
        int ret;

        lockdep_assert_held(&ufile->hw_destroy_rwsem);
        assert_uverbs_usecnt(uobj, UVERBS_LOOKUP_WRITE);

        if (reason == RDMA_REMOVE_ABORT) {
                WARN_ON(!list_empty(&uobj->list));
                WARN_ON(!uobj->context);
                uobj->uapi_object->type_class->alloc_abort(uobj);
        } else if (uobj->object) {
                ret = uobj->uapi_object->type_class->destroy_hw(uobj, reason,
                                                                attrs);
                if (ret)
                        /* Nothing to be done, the ucontext will clean it up */
                        return ret;

                uobj->object = NULL;
        }

        uobj->context = NULL;

        /*
         * For DESTROY the usecnt is not changed, the caller is expected to
         * manage it via uobj_put_destroy(). Only DESTROY can remove the IDR
         * handle.
         */
        if (reason != RDMA_REMOVE_DESTROY)
                atomic_set(&uobj->usecnt, 0);
        else
                uobj->uapi_object->type_class->remove_handle(uobj);

        if (!list_empty(&uobj->list)) {
                spin_lock_irqsave(&ufile->uobjects_lock, flags);
                list_del_init(&uobj->list);
                spin_unlock_irqrestore(&ufile->uobjects_lock, flags);

                /*
                 * Pairs with the get in rdma_alloc_commit_uobject(), could
                 * destroy uobj.
                 */
                uverbs_uobject_put(uobj);
        }

        /*
         * When aborting, the kref remains owned by the core code and is not
         * transferred into the type. Pairs with the get in alloc_uobj.
         */
        if (reason == RDMA_REMOVE_ABORT)
                uverbs_uobject_put(uobj);

        return 0;
}

/*
 * This calls uverbs_destroy_uobject() using the RDMA_REMOVE_DESTROY
 * sequence. It should only be used from command callbacks. On success the
 * caller must pair this with uobj_put_destroy(). This version requires the
 * caller to have already obtained an LOOKUP_DESTROY uobject kref.
 */
int uobj_destroy(struct ib_uobject *uobj, struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_file *ufile = attrs->ufile;
        int ret;

        down_read(&ufile->hw_destroy_rwsem);

        /*
         * Once the uobject is destroyed by RDMA_REMOVE_DESTROY then it is left
         * write locked as the callers put it back with UVERBS_LOOKUP_DESTROY.
         * This is because any other concurrent thread can still see the object
         * in the xarray due to RCU. Leaving it locked ensures nothing else will
         * touch it.
         */
        ret = uverbs_try_lock_object(uobj, UVERBS_LOOKUP_WRITE);
        if (ret)
                goto out_unlock;

        ret = uverbs_destroy_uobject(uobj, RDMA_REMOVE_DESTROY, attrs);
        if (ret) {
                atomic_set(&uobj->usecnt, 0);
                goto out_unlock;
        }

out_unlock:
        up_read(&ufile->hw_destroy_rwsem);
        return ret;
}

/*
 * uobj_get_destroy destroys the HW object and returns a handle to the uobj
 * with a NULL object pointer. The caller must pair this with
 * uobj_put_destroy().
 */
struct ib_uobject *__uobj_get_destroy(const struct uverbs_api_object *obj,
                                      u32 id, struct uverbs_attr_bundle *attrs)
{
        struct ib_uobject *uobj;
        int ret;

        uobj = rdma_lookup_get_uobject(obj, attrs->ufile, id,
                                       UVERBS_LOOKUP_DESTROY, attrs);
        if (IS_ERR(uobj))
                return uobj;

        ret = uobj_destroy(uobj, attrs);
        if (ret) {
                rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_DESTROY);
                return ERR_PTR(ret);
        }

        return uobj;
}

/*
 * Does both uobj_get_destroy() and uobj_put_destroy().  Returns 0 on success
 * (negative errno on failure). For use by callers that do not need the uobj.
 */
int __uobj_perform_destroy(const struct uverbs_api_object *obj, u32 id,
                           struct uverbs_attr_bundle *attrs)
{
        struct ib_uobject *uobj;

        uobj = __uobj_get_destroy(obj, id, attrs);
        if (IS_ERR(uobj))
                return PTR_ERR(uobj);
        uobj_put_destroy(uobj);
        return 0;
}

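/*
 * A hedged usage sketch: command handlers normally reach this through the
 * uobj_perform_destroy() wrapper macro declared alongside these helpers
 * (cq_handle is an illustrative name for the user-supplied handle):
 *
 *      ret = uobj_perform_destroy(UVERBS_OBJECT_CQ, cq_handle, attrs);
 *      if (ret)
 *              return ret;
 */
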
/* alloc_uobj must be undone by uverbs_destroy_uobject() */
static struct ib_uobject *alloc_uobj(struct uverbs_attr_bundle *attrs,
                                     const struct uverbs_api_object *obj)
{
        struct ib_uverbs_file *ufile = attrs->ufile;
        struct ib_uobject *uobj;

        if (!attrs->context) {
                struct ib_ucontext *ucontext =
                        ib_uverbs_get_ucontext_file(ufile);

                if (IS_ERR(ucontext))
                        return ERR_CAST(ucontext);
                attrs->context = ucontext;
        }

        uobj = kzalloc(obj->type_attrs->obj_size, GFP_KERNEL);
        if (!uobj)
                return ERR_PTR(-ENOMEM);
        /*
         * user_handle should be filled by the handler; the object is added
         * to the list in the commit stage.
         */
        uobj->ufile = ufile;
        uobj->context = attrs->context;
        INIT_LIST_HEAD(&uobj->list);
        uobj->uapi_object = obj;
        /*
         * Allocated objects start out as write locked to deny any other
         * syscalls from accessing them until they are committed. See
         * rdma_alloc_commit_uobject
         */
        atomic_set(&uobj->usecnt, -1);
        kref_init(&uobj->ref);

        return uobj;
}

static int idr_add_uobj(struct ib_uobject *uobj)
{
        /*
         * We start with allocating an idr pointing to NULL. This represents an
         * object which isn't initialized yet. We'll replace it later on with
         * the real object once we commit.
         */
        return xa_alloc(&uobj->ufile->idr, &uobj->id, NULL, xa_limit_32b,
                        GFP_KERNEL);
}

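/*
 * Sketch of the reserve-then-publish pattern used here (an illustration of
 * the pairing between the xa_alloc() above and the xa_store() in
 * alloc_commit_idr_uobject(), not additional functionality):
 *
 *      xa_alloc(&ufile->idr, &id, NULL, ...);  // reserve; lookups see NULL
 *      ...build and initialize the object...
 *      xa_store(&ufile->idr, id, uobj, ...);   // publish; lookups succeed
 */
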
/* Returns the ib_uobject or an error. The caller should check for IS_ERR. */
static struct ib_uobject *
lookup_get_idr_uobject(const struct uverbs_api_object *obj,
                       struct ib_uverbs_file *ufile, s64 id,
                       enum rdma_lookup_mode mode)
{
        struct ib_uobject *uobj;

        if (id < 0 || id > ULONG_MAX)
                return ERR_PTR(-EINVAL);

        rcu_read_lock();
        /*
         * The xa_load is guaranteed to return a pointer to something that
         * isn't freed yet, or NULL, as the free after xa_erase goes through
         * kfree_rcu(). However the object may still have been released and
         * kfree() could be called at any time.
         */
        uobj = xa_load(&ufile->idr, id);
        if (!uobj || !kref_get_unless_zero(&uobj->ref))
                uobj = ERR_PTR(-ENOENT);
        rcu_read_unlock();
        return uobj;
}

static struct ib_uobject *
lookup_get_fd_uobject(const struct uverbs_api_object *obj,
                      struct ib_uverbs_file *ufile, s64 id,
                      enum rdma_lookup_mode mode)
{
        const struct uverbs_obj_fd_type *fd_type;
        struct file *f;
        struct ib_uobject *uobject;
        int fdno = id;

        if (fdno != id)
                return ERR_PTR(-EINVAL);

        if (mode != UVERBS_LOOKUP_READ)
                return ERR_PTR(-EOPNOTSUPP);

        if (!obj->type_attrs)
                return ERR_PTR(-EIO);
        fd_type =
                container_of(obj->type_attrs, struct uverbs_obj_fd_type, type);

        f = fget(fdno);
        if (!f)
                return ERR_PTR(-EBADF);

        uobject = f->private_data;
        /*
         * fget(fdno) ensures we are not currently running
         * uverbs_uobject_fd_release(), and the caller is expected to ensure
         * that release is never done while a call to lookup is possible.
         */
        if (f->f_op != fd_type->fops || uobject->ufile != ufile) {
                fput(f);
                return ERR_PTR(-EBADF);
        }

        uverbs_uobject_get(uobject);
        return uobject;
}

struct ib_uobject *rdma_lookup_get_uobject(const struct uverbs_api_object *obj,
                                           struct ib_uverbs_file *ufile, s64 id,
                                           enum rdma_lookup_mode mode,
                                           struct uverbs_attr_bundle *attrs)
{
        struct ib_uobject *uobj;
        int ret;

        if (obj == ERR_PTR(-ENOMSG)) {
                /* must be UVERBS_IDR_ANY_OBJECT, see uapi_get_object() */
                uobj = lookup_get_idr_uobject(NULL, ufile, id, mode);
                if (IS_ERR(uobj))
                        return uobj;
        } else {
                if (IS_ERR(obj))
                        return ERR_PTR(-EINVAL);

                uobj = obj->type_class->lookup_get(obj, ufile, id, mode);
                if (IS_ERR(uobj))
                        return uobj;

                if (uobj->uapi_object != obj) {
                        ret = -EINVAL;
                        goto free;
                }
        }

        /*
         * If we have been disassociated, block every command except for
         * DESTROY based commands.
         */
        if (mode != UVERBS_LOOKUP_DESTROY &&
            !srcu_dereference(ufile->device->ib_dev,
                              &ufile->device->disassociate_srcu)) {
                ret = -EIO;
                goto free;
        }

        ret = uverbs_try_lock_object(uobj, mode);
        if (ret)
                goto free;
        if (attrs)
                attrs->context = uobj->context;

        return uobj;
free:
        uobj->uapi_object->type_class->lookup_put(uobj, mode);
        uverbs_uobject_put(uobj);
        return ERR_PTR(ret);
}

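/*
 * Typical get/put pairing, as a hedged caller sketch (obj, id and attrs
 * stand in for handler state):
 *
 *      uobj = rdma_lookup_get_uobject(obj, attrs->ufile, id,
 *                                     UVERBS_LOOKUP_READ, attrs);
 *      if (IS_ERR(uobj))
 *              return PTR_ERR(uobj);
 *      ...read uobj->object...
 *      rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_READ);
 */
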
static struct ib_uobject *
alloc_begin_idr_uobject(const struct uverbs_api_object *obj,
                        struct uverbs_attr_bundle *attrs)
{
        int ret;
        struct ib_uobject *uobj;

        uobj = alloc_uobj(attrs, obj);
        if (IS_ERR(uobj))
                return uobj;

        ret = idr_add_uobj(uobj);
        if (ret)
                goto uobj_put;

        ret = ib_rdmacg_try_charge(&uobj->cg_obj, uobj->context->device,
                                   RDMACG_RESOURCE_HCA_OBJECT);
        if (ret)
                goto remove;

        return uobj;

remove:
        xa_erase(&attrs->ufile->idr, uobj->id);
uobj_put:
        uverbs_uobject_put(uobj);
        return ERR_PTR(ret);
}

static struct ib_uobject *
alloc_begin_fd_uobject(const struct uverbs_api_object *obj,
                       struct uverbs_attr_bundle *attrs)
{
        const struct uverbs_obj_fd_type *fd_type;
        int new_fd;
        struct ib_uobject *uobj, *ret;
        struct file *filp;

        uobj = alloc_uobj(attrs, obj);
        if (IS_ERR(uobj))
                return uobj;

        fd_type =
                container_of(obj->type_attrs, struct uverbs_obj_fd_type, type);
        if (WARN_ON(fd_type->fops->release != &uverbs_uobject_fd_release &&
                    fd_type->fops->release != &uverbs_async_event_release)) {
                ret = ERR_PTR(-EINVAL);
                goto err_fd;
        }

        new_fd = get_unused_fd_flags(O_CLOEXEC);
        if (new_fd < 0) {
                ret = ERR_PTR(new_fd);
                goto err_fd;
        }

        /* Note that uverbs_uobject_fd_release() is called during abort */
        filp = anon_inode_getfile(fd_type->name, fd_type->fops, NULL,
                                  fd_type->flags);
        if (IS_ERR(filp)) {
                ret = ERR_CAST(filp);
                goto err_getfile;
        }
        uobj->object = filp;

        uobj->id = new_fd;
        return uobj;

err_getfile:
        put_unused_fd(new_fd);
err_fd:
        uverbs_uobject_put(uobj);
        return ret;
}

struct ib_uobject *rdma_alloc_begin_uobject(const struct uverbs_api_object *obj,
                                            struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_file *ufile = attrs->ufile;
        struct ib_uobject *ret;

        if (IS_ERR(obj))
                return ERR_PTR(-EINVAL);

        /*
         * The hw_destroy_rwsem is held across the entire object creation and
         * released during rdma_alloc_commit_uobject or
         * rdma_alloc_abort_uobject
         */
        if (!down_read_trylock(&ufile->hw_destroy_rwsem))
                return ERR_PTR(-EIO);

        ret = obj->type_class->alloc_begin(obj, attrs);
        if (IS_ERR(ret)) {
                up_read(&ufile->hw_destroy_rwsem);
                return ret;
        }
        return ret;
}

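/*
 * The full allocation lifecycle, as a hedged caller sketch
 * (create_hw_object is an illustrative placeholder for the driver call
 * that produces the HW object):
 *
 *      uobj = rdma_alloc_begin_uobject(obj, attrs);
 *      if (IS_ERR(uobj))
 *              return PTR_ERR(uobj);
 *      uobj->object = create_hw_object(...);
 *      if (IS_ERR(uobj->object)) {
 *              rdma_alloc_abort_uobject(uobj, attrs, false);
 *              return PTR_ERR(uobj->object);
 *      }
 *      rdma_alloc_commit_uobject(uobj, attrs);
 */
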
static void alloc_abort_idr_uobject(struct ib_uobject *uobj)
{
        ib_rdmacg_uncharge(&uobj->cg_obj, uobj->context->device,
                           RDMACG_RESOURCE_HCA_OBJECT);

        xa_erase(&uobj->ufile->idr, uobj->id);
}

static int __must_check destroy_hw_idr_uobject(struct ib_uobject *uobj,
                                               enum rdma_remove_reason why,
                                               struct uverbs_attr_bundle *attrs)
{
        const struct uverbs_obj_idr_type *idr_type =
                container_of(uobj->uapi_object->type_attrs,
                             struct uverbs_obj_idr_type, type);
        int ret = idr_type->destroy_object(uobj, why, attrs);

        if (ret)
                return ret;

        if (why == RDMA_REMOVE_ABORT)
                return 0;

        ib_rdmacg_uncharge(&uobj->cg_obj, uobj->context->device,
                           RDMACG_RESOURCE_HCA_OBJECT);

        return 0;
}

static void remove_handle_idr_uobject(struct ib_uobject *uobj)
{
        xa_erase(&uobj->ufile->idr, uobj->id);
        /* Matches the kref in alloc_commit_idr_uobject */
        uverbs_uobject_put(uobj);
}

static void alloc_abort_fd_uobject(struct ib_uobject *uobj)
{
        struct file *filp = uobj->object;

        fput(filp);
        put_unused_fd(uobj->id);
}

static int __must_check destroy_hw_fd_uobject(struct ib_uobject *uobj,
                                              enum rdma_remove_reason why,
                                              struct uverbs_attr_bundle *attrs)
{
        const struct uverbs_obj_fd_type *fd_type = container_of(
                uobj->uapi_object->type_attrs, struct uverbs_obj_fd_type, type);

        fd_type->destroy_object(uobj, why);
        return 0;
}

static void remove_handle_fd_uobject(struct ib_uobject *uobj)
{
}

static void alloc_commit_idr_uobject(struct ib_uobject *uobj)
{
        struct ib_uverbs_file *ufile = uobj->ufile;
        void *old;

        /*
         * We already allocated this IDR with a NULL object, so
         * this shouldn't fail.
         *
         * NOTE: Storing the uobj transfers our kref on uobj to the XArray.
         * It will be put by remove_handle_idr_uobject()
         */
        old = xa_store(&ufile->idr, uobj->id, uobj, GFP_KERNEL);
        WARN_ON(old != NULL);
}

static void swap_idr_uobjects(struct ib_uobject *obj_old,
                             struct ib_uobject *obj_new)
{
        struct ib_uverbs_file *ufile = obj_old->ufile;
        void *old;

        /*
         * New must be an object that has been allocated but not yet
         * committed; this moves the pre-committed state to obj_old, and new
         * still must be committed.
         */
        old = xa_cmpxchg(&ufile->idr, obj_old->id, obj_old, XA_ZERO_ENTRY,
                         GFP_KERNEL);
        if (WARN_ON(old != obj_old))
                return;

        swap(obj_old->id, obj_new->id);

        old = xa_cmpxchg(&ufile->idr, obj_old->id, NULL, obj_old, GFP_KERNEL);
        WARN_ON(old != NULL);
}

static void alloc_commit_fd_uobject(struct ib_uobject *uobj)
{
        int fd = uobj->id;
        struct file *filp = uobj->object;

        /* Matching put will be done in uverbs_uobject_fd_release() */
        kref_get(&uobj->ufile->ref);

        /* This shouldn't be used anymore. Use the file object instead */
        uobj->id = 0;

        /*
         * NOTE: Once we install the file we lose ownership of our kref on
         * uobj. It will be put by uverbs_uobject_fd_release()
         */
        filp->private_data = uobj;
        fd_install(fd, filp);
}

/*
 * In all cases rdma_alloc_commit_uobject() consumes the kref to uobj and the
 * caller can no longer assume uobj is valid. If this function fails it
 * destroys the uobject, including the attached HW object.
 */
void rdma_alloc_commit_uobject(struct ib_uobject *uobj,
                               struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_file *ufile = attrs->ufile;

        /* kref is held so long as the uobj is on the uobj list. */
        uverbs_uobject_get(uobj);
        spin_lock_irq(&ufile->uobjects_lock);
        list_add(&uobj->list, &ufile->uobjects);
        spin_unlock_irq(&ufile->uobjects_lock);

        /* matches atomic_set(-1) in alloc_uobj */
        atomic_set(&uobj->usecnt, 0);

        /* alloc_commit consumes the uobj kref */
        uobj->uapi_object->type_class->alloc_commit(uobj);

        /* Matches the down_read in rdma_alloc_begin_uobject */
        up_read(&ufile->hw_destroy_rwsem);
}

/*
 * new_uobj will be assigned to the handle currently used by to_uobj, and
 * to_uobj will be destroyed.
 *
 * Upon return the caller must do:
 *    rdma_alloc_commit_uobject(new_uobj)
 *    uobj_put_destroy(to_uobj)
 *
 * to_uobj must have a write get but the put mode switches to destroy once
 * this is called.
 */
void rdma_assign_uobject(struct ib_uobject *to_uobj, struct ib_uobject *new_uobj,
                        struct uverbs_attr_bundle *attrs)
{
        assert_uverbs_usecnt(new_uobj, UVERBS_LOOKUP_WRITE);

        if (WARN_ON(to_uobj->uapi_object != new_uobj->uapi_object ||
                    !to_uobj->uapi_object->type_class->swap_uobjects))
                return;

        to_uobj->uapi_object->type_class->swap_uobjects(to_uobj, new_uobj);

        /*
         * If this fails then the uobject is still completely valid (though with
         * a new ID) and we leak it until context close.
         */
        uverbs_destroy_uobject(to_uobj, RDMA_REMOVE_DESTROY, attrs);
}

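/*
 * Hedged sketch of the handle-replace pattern this enables, following the
 * pairing required by the comment above (error handling elided):
 *
 *      new_uobj = rdma_alloc_begin_uobject(obj, attrs);
 *      ...create the replacement HW object on new_uobj...
 *      rdma_assign_uobject(to_uobj, new_uobj, attrs);
 *      rdma_alloc_commit_uobject(new_uobj, attrs);
 *      uobj_put_destroy(to_uobj);
 */
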
/*
 * This consumes the kref for uobj. It is up to the caller to unwind the HW
 * object and anything else connected to uobj before calling this.
 */
void rdma_alloc_abort_uobject(struct ib_uobject *uobj,
                              struct uverbs_attr_bundle *attrs,
                              bool hw_obj_valid)
{
        struct ib_uverbs_file *ufile = uobj->ufile;
        int ret;

        if (hw_obj_valid) {
                ret = uobj->uapi_object->type_class->destroy_hw(
                        uobj, RDMA_REMOVE_ABORT, attrs);
                /*
                 * If the driver couldn't destroy the object then go ahead and
                 * commit it. Leaking objects that can't be destroyed is only
                 * done during FD close after the driver has a few more tries to
                 * destroy it.
                 */
                if (WARN_ON(ret))
                        return rdma_alloc_commit_uobject(uobj, attrs);
        }

        uverbs_destroy_uobject(uobj, RDMA_REMOVE_ABORT, attrs);

        /* Matches the down_read in rdma_alloc_begin_uobject */
        up_read(&ufile->hw_destroy_rwsem);
}

static void lookup_put_idr_uobject(struct ib_uobject *uobj,
                                   enum rdma_lookup_mode mode)
{
}

static void lookup_put_fd_uobject(struct ib_uobject *uobj,
                                  enum rdma_lookup_mode mode)
{
        struct file *filp = uobj->object;

        WARN_ON(mode != UVERBS_LOOKUP_READ);
        /*
         * This indirectly calls uverbs_uobject_fd_release() and frees the
         * object
         */
        fput(filp);
}

void rdma_lookup_put_uobject(struct ib_uobject *uobj,
                             enum rdma_lookup_mode mode)
{
        assert_uverbs_usecnt(uobj, mode);
        /*
         * In order to unlock an object, either decrease its usecnt for
         * read access or zero it in case of exclusive access. See
         * uverbs_try_lock_object for locking scheme information.
         */
        switch (mode) {
        case UVERBS_LOOKUP_READ:
                atomic_dec(&uobj->usecnt);
                break;
        case UVERBS_LOOKUP_WRITE:
                atomic_set(&uobj->usecnt, 0);
                break;
        case UVERBS_LOOKUP_DESTROY:
                break;
        }

        uobj->uapi_object->type_class->lookup_put(uobj, mode);
        /* Pairs with the kref obtained by type->lookup_get */
        uverbs_uobject_put(uobj);
}

void setup_ufile_idr_uobject(struct ib_uverbs_file *ufile)
{
        xa_init_flags(&ufile->idr, XA_FLAGS_ALLOC);
}

void release_ufile_idr_uobject(struct ib_uverbs_file *ufile)
{
        struct ib_uobject *entry;
        unsigned long id;

        /*
         * At this point uverbs_cleanup_ufile() is guaranteed to have run, and
         * there are no HW objects left, however the xarray is still populated
         * with anything that has not been cleaned up by userspace. Since the
         * kref on ufile is 0, nothing is allowed to call lookup_get.
         *
         * This is an optimized equivalent to remove_handle_idr_uobject
         */
        xa_for_each(&ufile->idr, id, entry) {
                WARN_ON(entry->object);
                uverbs_uobject_put(entry);
        }

        xa_destroy(&ufile->idr);
}

const struct uverbs_obj_type_class uverbs_idr_class = {
        .alloc_begin = alloc_begin_idr_uobject,
        .lookup_get = lookup_get_idr_uobject,
        .alloc_commit = alloc_commit_idr_uobject,
        .alloc_abort = alloc_abort_idr_uobject,
        .lookup_put = lookup_put_idr_uobject,
        .destroy_hw = destroy_hw_idr_uobject,
        .remove_handle = remove_handle_idr_uobject,
        .swap_uobjects = swap_idr_uobjects,
};
EXPORT_SYMBOL(uverbs_idr_class);

/*
 * Users of UVERBS_TYPE_ALLOC_FD should set this function as the struct
 * file_operations release method.
 */
int uverbs_uobject_fd_release(struct inode *inode, struct file *filp)
{
        struct ib_uverbs_file *ufile;
        struct ib_uobject *uobj;

        /*
         * This can only happen if the fput came from alloc_abort_fd_uobject()
         */
        if (!filp->private_data)
                return 0;
        uobj = filp->private_data;
        ufile = uobj->ufile;

        if (down_read_trylock(&ufile->hw_destroy_rwsem)) {
                struct uverbs_attr_bundle attrs = {
                        .context = uobj->context,
                        .ufile = ufile,
                };

                /*
                 * lookup_get_fd_uobject holds the kref on the struct file any
                 * time a FD uobj is locked, which prevents this release
                 * method from being invoked. Meaning we can always get the
                 * write lock here, or we have a kernel bug.
                 */
                WARN_ON(uverbs_try_lock_object(uobj, UVERBS_LOOKUP_WRITE));
                uverbs_destroy_uobject(uobj, RDMA_REMOVE_CLOSE, &attrs);
                up_read(&ufile->hw_destroy_rwsem);
        }

        /* Matches the get in alloc_commit_fd_uobject() */
        kref_put(&ufile->ref, ib_uverbs_release_file);

        /* Pairs with filp->private_data in alloc_commit_fd_uobject */
        uverbs_uobject_put(uobj);
        return 0;
}
EXPORT_SYMBOL(uverbs_uobject_fd_release);

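/*
 * A minimal wiring sketch (the fops name and the read/poll handlers are
 * illustrative placeholders, not symbols defined in this file):
 *
 *      static const struct file_operations uverbs_example_fops = {
 *              .owner   = THIS_MODULE,
 *              .read    = example_read,
 *              .poll    = example_poll,
 *              .release = uverbs_uobject_fd_release,
 *      };
 */
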
/*
 * Drop the ucontext off the ufile and completely disconnect it from the
 * ib_device
 */
static void ufile_destroy_ucontext(struct ib_uverbs_file *ufile,
                                   enum rdma_remove_reason reason)
{
        struct ib_ucontext *ucontext = ufile->ucontext;
        struct ib_device *ib_dev = ucontext->device;

        /*
         * If we are closing the FD then the user mmap VMAs must have
         * already been destroyed as they hold on to the filep, otherwise
         * they need to be zap'd.
         */
        if (reason == RDMA_REMOVE_DRIVER_REMOVE) {
                uverbs_user_mmap_disassociate(ufile);
                if (ib_dev->ops.disassociate_ucontext)
                        ib_dev->ops.disassociate_ucontext(ucontext);
        }

        ib_rdmacg_uncharge(&ucontext->cg_obj, ib_dev,
                           RDMACG_RESOURCE_HCA_HANDLE);

        rdma_restrack_del(&ucontext->res);

        ib_dev->ops.dealloc_ucontext(ucontext);
        WARN_ON(!xa_empty(&ucontext->mmap_xa));
        kfree(ucontext);

        ufile->ucontext = NULL;
}

static int __uverbs_cleanup_ufile(struct ib_uverbs_file *ufile,
                                  enum rdma_remove_reason reason)
{
        struct ib_uobject *obj, *next_obj;
        int ret = -EINVAL;
        struct uverbs_attr_bundle attrs = { .ufile = ufile };

        /*
         * This shouldn't run while executing other commands on this
         * context. Thus, the only thing we should take care of is
         * someone releasing a FD while traversing this list. The FD could be
         * closed and released from the _release fop of this FD.
         * In order to mitigate this, we add a lock.
         * We take and release the lock per traversal in order to give
         * other threads (which might still use the FDs) a chance to run.
         */
        list_for_each_entry_safe(obj, next_obj, &ufile->uobjects, list) {
                attrs.context = obj->context;
                /*
                 * If we hit this WARN_ON, that means we are
                 * racing with a lookup_get.
                 */
                WARN_ON(uverbs_try_lock_object(obj, UVERBS_LOOKUP_WRITE));
                if (reason == RDMA_REMOVE_DRIVER_FAILURE)
                        obj->object = NULL;
                if (!uverbs_destroy_uobject(obj, reason, &attrs))
                        ret = 0;
                else
                        atomic_set(&obj->usecnt, 0);
        }

        if (reason == RDMA_REMOVE_DRIVER_FAILURE) {
                WARN_ON(!list_empty(&ufile->uobjects));
                return 0;
        }
        return ret;
}

/*
 * Destroy the ucontext and every uobject associated with it.
 *
 * This is internally locked and can be called in parallel from multiple
 * contexts.
 */
void uverbs_destroy_ufile_hw(struct ib_uverbs_file *ufile,
                             enum rdma_remove_reason reason)
{
        down_write(&ufile->hw_destroy_rwsem);

        /*
         * If a ucontext was never created then we can't have any uobjects to
         * cleanup, nothing to do.
         */
        if (!ufile->ucontext)
                goto done;

        while (!list_empty(&ufile->uobjects) &&
               !__uverbs_cleanup_ufile(ufile, reason)) {
        }

        if (WARN_ON(!list_empty(&ufile->uobjects)))
                __uverbs_cleanup_ufile(ufile, RDMA_REMOVE_DRIVER_FAILURE);
        ufile_destroy_ucontext(ufile, reason);

done:
        up_write(&ufile->hw_destroy_rwsem);
}

const struct uverbs_obj_type_class uverbs_fd_class = {
        .alloc_begin = alloc_begin_fd_uobject,
        .lookup_get = lookup_get_fd_uobject,
        .alloc_commit = alloc_commit_fd_uobject,
        .alloc_abort = alloc_abort_fd_uobject,
        .lookup_put = lookup_put_fd_uobject,
        .destroy_hw = destroy_hw_fd_uobject,
        .remove_handle = remove_handle_fd_uobject,
};
EXPORT_SYMBOL(uverbs_fd_class);

struct ib_uobject *
uverbs_get_uobject_from_file(u16 object_id, enum uverbs_obj_access access,
                             s64 id, struct uverbs_attr_bundle *attrs)
{
        const struct uverbs_api_object *obj =
                uapi_get_object(attrs->ufile->device->uapi, object_id);

        switch (access) {
        case UVERBS_ACCESS_READ:
                return rdma_lookup_get_uobject(obj, attrs->ufile, id,
                                               UVERBS_LOOKUP_READ, attrs);
        case UVERBS_ACCESS_DESTROY:
                /* Actual destruction is done inside uverbs_handle_method */
                return rdma_lookup_get_uobject(obj, attrs->ufile, id,
                                               UVERBS_LOOKUP_DESTROY, attrs);
        case UVERBS_ACCESS_WRITE:
                return rdma_lookup_get_uobject(obj, attrs->ufile, id,
                                               UVERBS_LOOKUP_WRITE, attrs);
        case UVERBS_ACCESS_NEW:
                return rdma_alloc_begin_uobject(obj, attrs);
        default:
                WARN_ON(true);
                return ERR_PTR(-EOPNOTSUPP);
        }
}

void uverbs_finalize_object(struct ib_uobject *uobj,
                            enum uverbs_obj_access access, bool hw_obj_valid,
                            bool commit, struct uverbs_attr_bundle *attrs)
{
        /*
         * refcounts should be handled at the object level and not at the
         * uobject level. Refcounts of the objects themselves are done in
         * handlers.
         */

        switch (access) {
        case UVERBS_ACCESS_READ:
                rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_READ);
                break;
        case UVERBS_ACCESS_WRITE:
                rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_WRITE);
                break;
        case UVERBS_ACCESS_DESTROY:
                if (uobj)
                        rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_DESTROY);
                break;
        case UVERBS_ACCESS_NEW:
                if (commit)
                        rdma_alloc_commit_uobject(uobj, attrs);
                else
                        rdma_alloc_abort_uobject(uobj, attrs, hw_obj_valid);
                break;
        default:
                WARN_ON(true);
        }
}