linux/drivers/gpu/drm/vmwgfx/ttm_object.c
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2009-2013 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 *
 * While no substantial code is shared, the prime code is inspired by
 * drm_prime.c, with
 * Authors:
 *      Dave Airlie <airlied@redhat.com>
 *      Rob Clark <rob.clark@linaro.org>
 */
/** @file ttm_object.c
 *
 * Base- and reference object implementation for the various
 * ttm objects. Implements reference counting, minimal security checks
 * and release on file close.
 */


/**
 * struct ttm_object_file
 *
 * @tdev: Pointer to the ttm_object_device.
 *
 * @lock: Lock that protects the ref_list list and the
 * ref_hash hash tables.
 *
 * @ref_list: List of ttm_ref_objects to be destroyed at
 * file release.
 *
 * @ref_hash: Hash tables of ref objects, one per ttm_ref_type,
 * for fast lookup of ref objects given a base object.
 *
 * @refcount: Reference count that allows the structure to stay around
 * until the last base object referring to it has been released.
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_module.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include "ttm_object.h"

struct ttm_object_file {
        struct ttm_object_device *tdev;
        spinlock_t lock;
        struct list_head ref_list;
        struct drm_open_hash ref_hash[TTM_REF_NUM];
        struct kref refcount;
};

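/*
 * A minimal usage sketch for the per-file data: a driver would typically
 * create one ttm_object_file per open file descriptor and tear it down on
 * file close. Everything except the ttm_object_file_*() calls below
 * (the surrounding structures, names and the hash order) is an assumption
 * for illustration, not something defined in this file.
 *
 *      static int hypothetical_driver_open(struct ttm_object_device *tdev,
 *                                          struct hypothetical_fpriv *fpriv)
 *      {
 *              fpriv->tfile = ttm_object_file_init(tdev, 10);
 *              return fpriv->tfile ? 0 : -ENOMEM;
 *      }
 *
 *      static void hypothetical_driver_close(struct hypothetical_fpriv *fpriv)
 *      {
 *              ttm_object_file_release(&fpriv->tfile);
 *      }
 */
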
/**
 * struct ttm_object_device
 *
 * @object_lock: lock that protects the object_hash hash table.
 *
 * @object_hash: hash table for fast lookup of object global names.
 *
 * @object_count: Per device object count.
 *
 * @mem_glob: Pointer to the memory accounting object used for ref object
 * and dma-buf accounting.
 *
 * @ops: The dma-buf ops used for exported prime objects. A copy of the
 * driver-provided ops with the release method replaced by
 * ttm_prime_dmabuf_release().
 *
 * @dmabuf_release: The driver-provided dma-buf release method, called
 * before our own cleanup in ttm_prime_dmabuf_release().
 *
 * @dma_buf_size: Accounted size of an exported dma_buf and its file.
 *
 * @idr: Idr mapping object handles to base objects.
 *
 * This is the per-device data structure needed for ttm object management.
 */

struct ttm_object_device {
        spinlock_t object_lock;
        struct drm_open_hash object_hash;
        atomic_t object_count;
        struct ttm_mem_global *mem_glob;
        struct dma_buf_ops ops;
        void (*dmabuf_release)(struct dma_buf *dma_buf);
        size_t dma_buf_size;
        struct idr idr;
};

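/*
 * A minimal sketch of the per-device lifecycle, assuming a hypothetical
 * driver that supplies its own dma_buf_ops and ttm_mem_global; everything
 * except the ttm_object_device_*() calls is illustrative only.
 *
 *      struct ttm_object_device *tdev;
 *
 *      tdev = ttm_object_device_init(mem_glob, 12, &hypothetical_dmabuf_ops);
 *      if (unlikely(tdev == NULL))
 *              return -ENOMEM;
 *      ...
 *      ttm_object_device_release(&tdev);       (at driver unload)
 */
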
/**
 * struct ttm_ref_object
 *
 * @hash: Hash entry for the per-file object reference hash.
 *
 * @head: List entry for the per-file list of ref-objects.
 *
 * @kref: Ref count.
 *
 * @obj: Base object this ref object is referencing.
 *
 * @ref_type: Type of ref object.
 *
 * This is similar to an idr object, but it also has a hash table entry
 * that allows lookup with a pointer to the referenced object as a key. In
 * that way, one can easily detect whether a base object is referenced by
 * a particular ttm_object_file. It also carries a ref count to avoid creating
 * multiple ref objects if a ttm_object_file references the same base
 * object more than once.
 */

struct ttm_ref_object {
        struct rcu_head rcu_head;
        struct drm_hash_item hash;
        struct list_head head;
        struct kref kref;
        enum ttm_ref_type ref_type;
        struct ttm_base_object *obj;
        struct ttm_object_file *tfile;
};

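/*
 * Sketch of how a ref object is typically taken and dropped on behalf of a
 * caller, assuming a base object looked up elsewhere; the surrounding
 * context is hypothetical and error handling is abbreviated.
 *
 *      bool existed;
 *      int ret;
 *
 *      ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, &existed, false);
 *      if (ret)
 *              return ret;
 *      ...
 *      ttm_ref_object_base_unref(tfile, base->handle, TTM_REF_USAGE);
 */
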
static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf);

static inline struct ttm_object_file *
ttm_object_file_ref(struct ttm_object_file *tfile)
{
        kref_get(&tfile->refcount);
        return tfile;
}

static void ttm_object_file_destroy(struct kref *kref)
{
        struct ttm_object_file *tfile =
                container_of(kref, struct ttm_object_file, refcount);

        kfree(tfile);
}


static inline void ttm_object_file_unref(struct ttm_object_file **p_tfile)
{
        struct ttm_object_file *tfile = *p_tfile;

        *p_tfile = NULL;
        kref_put(&tfile->refcount, ttm_object_file_destroy);
}


int ttm_base_object_init(struct ttm_object_file *tfile,
                         struct ttm_base_object *base,
                         bool shareable,
                         enum ttm_object_type object_type,
                         void (*refcount_release) (struct ttm_base_object **),
                         void (*ref_obj_release) (struct ttm_base_object *,
                                                  enum ttm_ref_type ref_type))
{
        struct ttm_object_device *tdev = tfile->tdev;
        int ret;

        base->shareable = shareable;
        base->tfile = ttm_object_file_ref(tfile);
        base->refcount_release = refcount_release;
        base->ref_obj_release = ref_obj_release;
        base->object_type = object_type;
        kref_init(&base->refcount);
        idr_preload(GFP_KERNEL);
        spin_lock(&tdev->object_lock);
        ret = idr_alloc(&tdev->idr, base, 0, 0, GFP_NOWAIT);
        spin_unlock(&tdev->object_lock);
        idr_preload_end();
        if (ret < 0)
                return ret;

        base->handle = ret;
        ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
        if (unlikely(ret != 0))
                goto out_err1;

        ttm_base_object_unref(&base);

        return 0;
out_err1:
        spin_lock(&tdev->object_lock);
        idr_remove(&tdev->idr, base->handle);
        spin_unlock(&tdev->object_lock);
        return ret;
}

static void ttm_release_base(struct kref *kref)
{
        struct ttm_base_object *base =
            container_of(kref, struct ttm_base_object, refcount);
        struct ttm_object_device *tdev = base->tfile->tdev;

        spin_lock(&tdev->object_lock);
        idr_remove(&tdev->idr, base->handle);
        spin_unlock(&tdev->object_lock);

        /*
         * Note: We don't use synchronize_rcu() here because it's far
         * too slow. It's up to the user to free the object using
         * call_rcu() or ttm_base_object_kfree().
         */

        ttm_object_file_unref(&base->tfile);
        if (base->refcount_release)
                base->refcount_release(&base);
}

void ttm_base_object_unref(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;

        *p_base = NULL;

        kref_put(&base->refcount, ttm_release_base);
}

/**
 * ttm_base_object_noref_lookup - look up a base object without reference
 * @tfile: The struct ttm_object_file the object is registered with.
 * @key: The object handle.
 *
 * This function looks up a ttm base object and returns a pointer to it
 * without refcounting the pointer. The returned pointer is only valid
 * until ttm_base_object_noref_release() is called, and the object
 * pointed to by the returned pointer may be doomed. Any persistent usage
 * of the object requires a refcount to be taken using kref_get_unless_zero().
 * Iff this function returns successfully it needs to be paired with
 * ttm_base_object_noref_release(), and no sleeping or scheduling functions
 * may be called in between these two function calls.
 *
 * Return: A pointer to the object if successful or NULL otherwise.
 */
struct ttm_base_object *
ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint32_t key)
{
        struct drm_hash_item *hash;
        struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
        int ret;

        rcu_read_lock();
        ret = drm_ht_find_item_rcu(ht, key, &hash);
        if (ret) {
                rcu_read_unlock();
                return NULL;
        }

        __release(RCU);
        return drm_hash_entry(hash, struct ttm_ref_object, hash)->obj;
}
EXPORT_SYMBOL(ttm_base_object_noref_lookup);

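/*
 * The noref lookup is meant for short, atomic sections. A sketch of the
 * intended pairing, based on the description above; the handle, the error
 * code and the surrounding caller are assumptions:
 *
 *      struct ttm_base_object *base;
 *
 *      base = ttm_base_object_noref_lookup(tfile, handle);
 *      if (!base)
 *              return -ESRCH;
 *      (use base here; no sleeping or scheduling calls in this window, and
 *       take kref_get_unless_zero(&base->refcount) for any persistent use)
 *      ttm_base_object_noref_release();
 */
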
struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
                                               uint32_t key)
{
        struct ttm_base_object *base = NULL;
        struct drm_hash_item *hash;
        struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
        int ret;

        rcu_read_lock();
        ret = drm_ht_find_item_rcu(ht, key, &hash);

        if (likely(ret == 0)) {
                base = drm_hash_entry(hash, struct ttm_ref_object, hash)->obj;
                if (!kref_get_unless_zero(&base->refcount))
                        base = NULL;
        }
        rcu_read_unlock();

        return base;
}

struct ttm_base_object *
ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key)
{
        struct ttm_base_object *base;

        rcu_read_lock();
        base = idr_find(&tdev->idr, key);

        if (base && !kref_get_unless_zero(&base->refcount))
                base = NULL;
        rcu_read_unlock();

        return base;
}

/**
 * ttm_ref_object_exists - Check whether a caller has a valid ref object on
 * (has opened) a base object.
 *
 * @tfile: Pointer to a struct ttm_object_file identifying the caller.
 * @base: Pointer to a struct base object.
 *
 * Checks whether the caller identified by @tfile has put a valid USAGE
 * reference object on the base object identified by @base.
 */
bool ttm_ref_object_exists(struct ttm_object_file *tfile,
                           struct ttm_base_object *base)
{
        struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
        struct drm_hash_item *hash;
        struct ttm_ref_object *ref;

        rcu_read_lock();
        if (unlikely(drm_ht_find_item_rcu(ht, base->handle, &hash) != 0))
                goto out_false;

        /*
         * Verify that the ref object is really pointing to our base object.
         * Our base object could actually be dead, and the ref object pointing
         * to another base object with the same handle.
         */
        ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
        if (unlikely(base != ref->obj))
                goto out_false;

        /*
         * Verify that the ref->obj pointer was actually valid!
         */
        rmb();
        if (unlikely(kref_read(&ref->kref) == 0))
                goto out_false;

        rcu_read_unlock();
        return true;

 out_false:
        rcu_read_unlock();
        return false;
}

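/*
 * A short sketch of the intended use: verifying, before acting on a base
 * object passed in by handle, that the calling file has actually opened
 * it. The surrounding ioctl context is a hypothetical example.
 *
 *      if (!ttm_ref_object_exists(tfile, base))
 *              return -EPERM;
 */
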
int ttm_ref_object_add(struct ttm_object_file *tfile,
                       struct ttm_base_object *base,
                       enum ttm_ref_type ref_type, bool *existed,
                       bool require_existed)
{
        struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
        struct ttm_ref_object *ref;
        struct drm_hash_item *hash;
        struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
        struct ttm_operation_ctx ctx = {
                .interruptible = false,
                .no_wait_gpu = false
        };
        int ret = -EINVAL;

        if (base->tfile != tfile && !base->shareable)
                return -EPERM;

        if (existed != NULL)
                *existed = true;

        /*
         * Retry loop: if a racing thread inserts or tears down a ref object
         * for the same base object between our lookup and the hash
         * insertion, drm_ht_insert_item_rcu() returns -EINVAL and we redo
         * the lookup.
         */
        while (ret == -EINVAL) {
                rcu_read_lock();
                ret = drm_ht_find_item_rcu(ht, base->handle, &hash);

                if (ret == 0) {
                        ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
                        if (kref_get_unless_zero(&ref->kref)) {
                                rcu_read_unlock();
                                break;
                        }
                }

                rcu_read_unlock();
                if (require_existed)
                        return -EPERM;

                ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
                                           &ctx);
                if (unlikely(ret != 0))
                        return ret;
                ref = kmalloc(sizeof(*ref), GFP_KERNEL);
                if (unlikely(ref == NULL)) {
                        ttm_mem_global_free(mem_glob, sizeof(*ref));
                        return -ENOMEM;
                }

                ref->hash.key = base->handle;
                ref->obj = base;
                ref->tfile = tfile;
                ref->ref_type = ref_type;
                kref_init(&ref->kref);

                spin_lock(&tfile->lock);
                ret = drm_ht_insert_item_rcu(ht, &ref->hash);

                if (likely(ret == 0)) {
                        list_add_tail(&ref->head, &tfile->ref_list);
                        kref_get(&base->refcount);
                        spin_unlock(&tfile->lock);
                        if (existed != NULL)
                                *existed = false;
                        break;
                }

                spin_unlock(&tfile->lock);
                BUG_ON(ret != -EINVAL);

                ttm_mem_global_free(mem_glob, sizeof(*ref));
                kfree(ref);
        }

        return ret;
}

static void __releases(tfile->lock) __acquires(tfile->lock)
ttm_ref_object_release(struct kref *kref)
{
        struct ttm_ref_object *ref =
            container_of(kref, struct ttm_ref_object, kref);
        struct ttm_base_object *base = ref->obj;
        struct ttm_object_file *tfile = ref->tfile;
        struct drm_open_hash *ht;
        struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;

        ht = &tfile->ref_hash[ref->ref_type];
        (void)drm_ht_remove_item_rcu(ht, &ref->hash);
        list_del(&ref->head);
        spin_unlock(&tfile->lock);

        if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
                base->ref_obj_release(base, ref->ref_type);

        ttm_base_object_unref(&ref->obj);
        ttm_mem_global_free(mem_glob, sizeof(*ref));
        kfree_rcu(ref, rcu_head);
        spin_lock(&tfile->lock);
}

int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
                              unsigned long key, enum ttm_ref_type ref_type)
{
        struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
        struct ttm_ref_object *ref;
        struct drm_hash_item *hash;
        int ret;

        spin_lock(&tfile->lock);
        ret = drm_ht_find_item(ht, key, &hash);
        if (unlikely(ret != 0)) {
                spin_unlock(&tfile->lock);
                return -EINVAL;
        }
        ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
        kref_put(&ref->kref, ttm_ref_object_release);
        spin_unlock(&tfile->lock);
        return 0;
}

void ttm_object_file_release(struct ttm_object_file **p_tfile)
{
        struct ttm_ref_object *ref;
        struct list_head *list;
        unsigned int i;
        struct ttm_object_file *tfile = *p_tfile;

        *p_tfile = NULL;
        spin_lock(&tfile->lock);

        /*
         * Since we release the lock within the loop, we have to
         * restart it from the beginning each time.
         */

        while (!list_empty(&tfile->ref_list)) {
                list = tfile->ref_list.next;
                ref = list_entry(list, struct ttm_ref_object, head);
                ttm_ref_object_release(&ref->kref);
        }

        spin_unlock(&tfile->lock);
        for (i = 0; i < TTM_REF_NUM; ++i)
                drm_ht_remove(&tfile->ref_hash[i]);

        ttm_object_file_unref(&tfile);
}

struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
                                             unsigned int hash_order)
{
        struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
        unsigned int i;
        unsigned int j = 0;
        int ret;

        if (unlikely(tfile == NULL))
                return NULL;

        spin_lock_init(&tfile->lock);
        tfile->tdev = tdev;
        kref_init(&tfile->refcount);
        INIT_LIST_HEAD(&tfile->ref_list);

        for (i = 0; i < TTM_REF_NUM; ++i) {
                ret = drm_ht_create(&tfile->ref_hash[i], hash_order);
                if (ret) {
                        j = i;
                        goto out_err;
                }
        }

        return tfile;
out_err:
        for (i = 0; i < j; ++i)
                drm_ht_remove(&tfile->ref_hash[i]);

        kfree(tfile);

        return NULL;
}

struct ttm_object_device *
ttm_object_device_init(struct ttm_mem_global *mem_glob,
                       unsigned int hash_order,
                       const struct dma_buf_ops *ops)
{
        struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
        int ret;

        if (unlikely(tdev == NULL))
                return NULL;

        tdev->mem_glob = mem_glob;
        spin_lock_init(&tdev->object_lock);
        atomic_set(&tdev->object_count, 0);
        ret = drm_ht_create(&tdev->object_hash, hash_order);
        if (ret != 0)
                goto out_no_object_hash;

        idr_init(&tdev->idr);
        tdev->ops = *ops;
        tdev->dmabuf_release = tdev->ops.release;
        tdev->ops.release = ttm_prime_dmabuf_release;
        tdev->dma_buf_size = ttm_round_pot(sizeof(struct dma_buf)) +
                ttm_round_pot(sizeof(struct file));
        return tdev;

out_no_object_hash:
        kfree(tdev);
        return NULL;
}

void ttm_object_device_release(struct ttm_object_device **p_tdev)
{
        struct ttm_object_device *tdev = *p_tdev;

        *p_tdev = NULL;

        WARN_ON_ONCE(!idr_is_empty(&tdev->idr));
        idr_destroy(&tdev->idr);
        drm_ht_remove(&tdev->object_hash);

        kfree(tdev);
}

/**
 * get_dma_buf_unless_doomed - get a dma_buf reference if possible.
 *
 * @dmabuf: Non-refcounted pointer to a struct dma_buf.
 *
 * Obtain a file reference from a lookup structure that doesn't refcount
 * the file, but synchronizes with its release method to make sure it has
 * not been freed yet. See for example kref_get_unless_zero documentation.
 * Returns true if refcounting succeeds, false otherwise.
 *
 * Nobody really wants this as a public API yet, so let it mature here
 * for some time...
 */
static bool __must_check get_dma_buf_unless_doomed(struct dma_buf *dmabuf)
{
        return atomic_long_inc_not_zero(&dmabuf->file->f_count) != 0L;
}

/**
 * ttm_prime_refcount_release - refcount release method for a prime object.
 *
 * @p_base: Pointer to ttm_base_object pointer.
 *
 * This is a wrapper that calls the refcount_release function of the
 * underlying object. At the same time it cleans up the prime object.
 * This function is called when all references to the base object we
 * derive from are gone.
 */
static void ttm_prime_refcount_release(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;
        struct ttm_prime_object *prime;

        *p_base = NULL;
        prime = container_of(base, struct ttm_prime_object, base);
        BUG_ON(prime->dma_buf != NULL);
        mutex_destroy(&prime->mutex);
        if (prime->refcount_release)
                prime->refcount_release(&base);
}

/**
 * ttm_prime_dmabuf_release - Release method for the dma-bufs we export
 *
 * @dma_buf: The dma-buf being released.
 *
 * This function first calls the dma_buf release method the driver
 * provides. Then it cleans up our dma_buf pointer used for lookup,
 * and finally releases the reference the dma_buf has on our base
 * object.
 */
static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf)
{
        struct ttm_prime_object *prime =
                (struct ttm_prime_object *) dma_buf->priv;
        struct ttm_base_object *base = &prime->base;
        struct ttm_object_device *tdev = base->tfile->tdev;

        if (tdev->dmabuf_release)
                tdev->dmabuf_release(dma_buf);
        mutex_lock(&prime->mutex);
        if (prime->dma_buf == dma_buf)
                prime->dma_buf = NULL;
        mutex_unlock(&prime->mutex);
        ttm_mem_global_free(tdev->mem_glob, tdev->dma_buf_size);
        ttm_base_object_unref(&base);
}

/**
 * ttm_prime_fd_to_handle - Get a base object handle from a prime fd
 *
 * @tfile: A struct ttm_object_file identifying the caller.
 * @fd: The prime / dmabuf fd.
 * @handle: The returned handle.
 *
 * This function returns a handle to an object that previously exported
 * a dma-buf. Note that we don't handle imports yet, because we simply
 * have no consumers of that implementation.
 */
int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
                           int fd, u32 *handle)
{
        struct ttm_object_device *tdev = tfile->tdev;
        struct dma_buf *dma_buf;
        struct ttm_prime_object *prime;
        struct ttm_base_object *base;
        int ret;

        dma_buf = dma_buf_get(fd);
        if (IS_ERR(dma_buf))
                return PTR_ERR(dma_buf);

        if (dma_buf->ops != &tdev->ops) {
                dma_buf_put(dma_buf);
                return -ENOSYS;
        }

        prime = (struct ttm_prime_object *) dma_buf->priv;
        base = &prime->base;
        *handle = base->handle;
        ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);

        dma_buf_put(dma_buf);

        return ret;
}

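/*
 * A sketch of how a driver's prime-import ioctl path would typically call
 * into the function above; the wrapper name and arguments are assumptions
 * for illustration only.
 *
 *      static int hypothetical_prime_fd_to_handle(struct ttm_object_file *tfile,
 *                                                 int fd, u32 *handle)
 *      {
 *              return ttm_prime_fd_to_handle(tfile, fd, handle);
 *      }
 */
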
/**
 * ttm_prime_handle_to_fd - Return a dma_buf fd from a ttm prime object
 *
 * @tfile: Struct ttm_object_file identifying the caller.
 * @handle: Handle to the object we're exporting from.
 * @flags: flags for dma-buf creation. We just pass them on.
 * @prime_fd: The returned file descriptor.
 *
 */
int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
                           uint32_t handle, uint32_t flags,
                           int *prime_fd)
{
        struct ttm_object_device *tdev = tfile->tdev;
        struct ttm_base_object *base;
        struct dma_buf *dma_buf;
        struct ttm_prime_object *prime;
        int ret;

        base = ttm_base_object_lookup(tfile, handle);
        if (unlikely(base == NULL ||
                     base->object_type != ttm_prime_type)) {
                ret = -ENOENT;
                goto out_unref;
        }

        prime = container_of(base, struct ttm_prime_object, base);
        if (unlikely(!base->shareable)) {
                ret = -EPERM;
                goto out_unref;
        }

        ret = mutex_lock_interruptible(&prime->mutex);
        if (unlikely(ret != 0)) {
                ret = -ERESTARTSYS;
                goto out_unref;
        }

        dma_buf = prime->dma_buf;
        if (!dma_buf || !get_dma_buf_unless_doomed(dma_buf)) {
                DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
                struct ttm_operation_ctx ctx = {
                        .interruptible = true,
                        .no_wait_gpu = false
                };
                exp_info.ops = &tdev->ops;
                exp_info.size = prime->size;
                exp_info.flags = flags;
                exp_info.priv = prime;

                /*
                 * Need to create a new dma_buf, with memory accounting.
                 */
                ret = ttm_mem_global_alloc(tdev->mem_glob, tdev->dma_buf_size,
                                           &ctx);
                if (unlikely(ret != 0)) {
                        mutex_unlock(&prime->mutex);
                        goto out_unref;
                }

                dma_buf = dma_buf_export(&exp_info);
                if (IS_ERR(dma_buf)) {
                        ret = PTR_ERR(dma_buf);
                        ttm_mem_global_free(tdev->mem_glob,
                                            tdev->dma_buf_size);
                        mutex_unlock(&prime->mutex);
                        goto out_unref;
                }

                /*
                 * dma_buf has taken the base object reference
                 */
                base = NULL;
                prime->dma_buf = dma_buf;
        }
        mutex_unlock(&prime->mutex);

        ret = dma_buf_fd(dma_buf, flags);
        if (ret >= 0) {
                *prime_fd = ret;
                ret = 0;
        } else
                dma_buf_put(dma_buf);

out_unref:
        if (base)
                ttm_base_object_unref(&base);
        return ret;
}

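/*
 * The export direction mirrors the import above. A sketch, assuming a
 * hypothetical caller that already holds a handle to a shareable prime
 * object; the flags are forwarded unchanged to the dma-buf layer
 * (e.g. O_CLOEXEC):
 *
 *      int fd;
 *      int ret;
 *
 *      ret = ttm_prime_handle_to_fd(tfile, handle, flags, &fd);
 *      if (ret)
 *              return ret;
 */
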
/**
 * ttm_prime_object_init - Initialize a ttm_prime_object
 *
 * @tfile: struct ttm_object_file identifying the caller
 * @size: The size of the dma_bufs we export.
 * @prime: The object to be initialized.
 * @shareable: See ttm_base_object_init
 * @type: See ttm_base_object_init
 * @refcount_release: See ttm_base_object_init
 * @ref_obj_release: See ttm_base_object_init
 *
 * Initializes an object which is compatible with the drm_prime model
 * for data sharing between processes and devices.
 */
int ttm_prime_object_init(struct ttm_object_file *tfile, size_t size,
                          struct ttm_prime_object *prime, bool shareable,
                          enum ttm_object_type type,
                          void (*refcount_release) (struct ttm_base_object **),
                          void (*ref_obj_release) (struct ttm_base_object *,
                                                   enum ttm_ref_type ref_type))
{
        mutex_init(&prime->mutex);
        prime->size = PAGE_ALIGN(size);
        prime->real_type = type;
        prime->dma_buf = NULL;
        prime->refcount_release = refcount_release;
        return ttm_base_object_init(tfile, &prime->base, shareable,
                                    ttm_prime_type,
                                    ttm_prime_refcount_release,
                                    ref_obj_release);
}

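/*
 * A sketch of how a driver object compatible with prime sharing would
 * typically be set up, with struct ttm_prime_object embedded so that base
 * and prime lookups resolve to the driver object. The structure, the
 * driver-defined object type and the release callback are hypothetical.
 *
 *      struct hypothetical_bo {
 *              struct ttm_prime_object prime;
 *              ... driver-specific members ...
 *      };
 *
 *      ret = ttm_prime_object_init(tfile, size, &hbo->prime, true,
 *                                  hypothetical_bo_type,
 *                                  &hypothetical_bo_refcount_release, NULL);
 */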