linux/drivers/gpu/drm/ttm/ttm_object.c
/**************************************************************************
 *
 * Copyright (c) 2009-2013 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 *
 * While no substantial code is shared, the prime code is inspired by
 * drm_prime.c, with
 * Authors:
 *      Dave Airlie <airlied@redhat.com>
 *      Rob Clark <rob.clark@linaro.org>
 */
/** @file ttm_object.c
 *
 * Base- and reference object implementation for the various
 * ttm objects. Implements reference counting, minimal security checks
 * and release on file close.
 */


/**
 * struct ttm_object_file
 *
 * @tdev: Pointer to the ttm_object_device.
 *
 * @lock: Lock that protects the ref_list list and the
 * ref_hash hash tables.
 *
 * @ref_list: List of ttm_ref_objects to be destroyed at
 * file release.
 *
 * @ref_hash: Hash tables of ref objects, one per ttm_ref_type,
 * for fast lookup of ref objects given a base object.
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_module.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/atomic.h>

struct ttm_object_file {
        struct ttm_object_device *tdev;
        spinlock_t lock;
        struct list_head ref_list;
        struct drm_open_hash ref_hash[TTM_REF_NUM];
        struct kref refcount;
};

/**
 * struct ttm_object_device
 *
 * @object_lock: lock that protects the object_hash hash table.
 *
 * @object_hash: hash table for fast lookup of object global names.
 *
 * @object_count: Per device object count.
 *
 * This is the per-device data structure needed for ttm object management.
 */

struct ttm_object_device {
        spinlock_t object_lock;
        struct drm_open_hash object_hash;
        atomic_t object_count;
        struct ttm_mem_global *mem_glob;
        struct dma_buf_ops ops;
        void (*dmabuf_release)(struct dma_buf *dma_buf);
        size_t dma_buf_size;
};

/**
 * struct ttm_ref_object
 *
 * @hash: Hash entry for the per-file object reference hash.
 *
 * @head: List entry for the per-file list of ref-objects.
 *
 * @kref: Ref count.
 *
 * @obj: Base object this ref object is referencing.
 *
 * @ref_type: Type of ref object.
 *
 * This is similar to an idr object, but it also has a hash table entry
 * that allows lookup with a pointer to the referenced object as a key. In
 * that way, one can easily detect whether a base object is referenced by
 * a particular ttm_object_file. It also carries a ref count to avoid creating
 * multiple ref objects if a ttm_object_file references the same base
 * object more than once.
 */

struct ttm_ref_object {
        struct rcu_head rcu_head;
        struct drm_hash_item hash;
        struct list_head head;
        struct kref kref;
        enum ttm_ref_type ref_type;
        struct ttm_base_object *obj;
        struct ttm_object_file *tfile;
};

static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf);

static inline struct ttm_object_file *
ttm_object_file_ref(struct ttm_object_file *tfile)
{
        kref_get(&tfile->refcount);
        return tfile;
}

static void ttm_object_file_destroy(struct kref *kref)
{
        struct ttm_object_file *tfile =
                container_of(kref, struct ttm_object_file, refcount);

        kfree(tfile);
}


static inline void ttm_object_file_unref(struct ttm_object_file **p_tfile)
{
        struct ttm_object_file *tfile = *p_tfile;

        *p_tfile = NULL;
        kref_put(&tfile->refcount, ttm_object_file_destroy);
}


int ttm_base_object_init(struct ttm_object_file *tfile,
                         struct ttm_base_object *base,
                         bool shareable,
                         enum ttm_object_type object_type,
                         void (*refcount_release) (struct ttm_base_object **),
                         void (*ref_obj_release) (struct ttm_base_object *,
                                                  enum ttm_ref_type ref_type))
{
        struct ttm_object_device *tdev = tfile->tdev;
        int ret;

        base->shareable = shareable;
        base->tfile = ttm_object_file_ref(tfile);
        base->refcount_release = refcount_release;
        base->ref_obj_release = ref_obj_release;
        base->object_type = object_type;
        kref_init(&base->refcount);
        spin_lock(&tdev->object_lock);
        ret = drm_ht_just_insert_please_rcu(&tdev->object_hash,
                                            &base->hash,
                                            (unsigned long)base, 31, 0, 0);
        spin_unlock(&tdev->object_lock);
        if (unlikely(ret != 0))
                goto out_err0;

        ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
        if (unlikely(ret != 0))
                goto out_err1;

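        /*
         * Drop the reference from kref_init() above: the base object is
         * now kept alive solely by the TTM_REF_USAGE ref object added for
         * @tfile, and is destroyed once that ref (and any other refs) are
         * released, typically on handle release or file close.
         */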
        ttm_base_object_unref(&base);

        return 0;
out_err1:
        spin_lock(&tdev->object_lock);
        (void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash);
        spin_unlock(&tdev->object_lock);
out_err0:
        return ret;
}
EXPORT_SYMBOL(ttm_base_object_init);
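/*
 * Typical use: a driver embeds a struct ttm_base_object in its own object
 * and initializes it when the object is created. A minimal sketch (the
 * struct my_obj wrapper, my_obj_refcount_release() and my_obj_ref_release()
 * are hypothetical driver code, not part of this file):
 *
 *      struct my_obj {
 *              struct ttm_base_object base;
 *              ...
 *      };
 *
 *      ret = ttm_base_object_init(tfile, &obj->base, true, ttm_buffer_type,
 *                                 &my_obj_refcount_release,
 *                                 &my_obj_ref_release);
 *      if (unlikely(ret != 0))
 *              goto err_free;
 *
 * The handle handed back to user space is obj->base.hash.key.
 */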

static void ttm_release_base(struct kref *kref)
{
        struct ttm_base_object *base =
            container_of(kref, struct ttm_base_object, refcount);
        struct ttm_object_device *tdev = base->tfile->tdev;

        spin_lock(&tdev->object_lock);
        (void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash);
        spin_unlock(&tdev->object_lock);

        /*
         * Note: We don't use synchronize_rcu() here because it's far
         * too slow. It's up to the user to free the object using
         * call_rcu() or ttm_base_object_kfree().
         */

        ttm_object_file_unref(&base->tfile);
        if (base->refcount_release)
                base->refcount_release(&base);
}

void ttm_base_object_unref(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;

        *p_base = NULL;

        kref_put(&base->refcount, ttm_release_base);
}
EXPORT_SYMBOL(ttm_base_object_unref);

struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
                                               uint32_t key)
{
        struct ttm_base_object *base = NULL;
        struct drm_hash_item *hash;
        struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
        int ret;

        rcu_read_lock();
        ret = drm_ht_find_item_rcu(ht, key, &hash);

        if (likely(ret == 0)) {
                base = drm_hash_entry(hash, struct ttm_ref_object, hash)->obj;
                if (!kref_get_unless_zero(&base->refcount))
                        base = NULL;
        }
        rcu_read_unlock();

        return base;
}
EXPORT_SYMBOL(ttm_base_object_lookup);
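/*
 * A lookup is typically done from a driver ioctl that receives a handle
 * from user space. A minimal sketch (arg->handle is a hypothetical ioctl
 * argument, not part of this file):
 *
 *      base = ttm_base_object_lookup(tfile, arg->handle);
 *      if (unlikely(base == NULL))
 *              return -EINVAL;
 *
 *      ... use the object ...
 *
 *      ttm_base_object_unref(&base);
 *
 * The object is only guaranteed to stay around for as long as the caller
 * holds the reference taken by the lookup.
 */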

struct ttm_base_object *
ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key)
{
        struct ttm_base_object *base = NULL;
        struct drm_hash_item *hash;
        struct drm_open_hash *ht = &tdev->object_hash;
        int ret;

        rcu_read_lock();
        ret = drm_ht_find_item_rcu(ht, key, &hash);

        if (likely(ret == 0)) {
                base = drm_hash_entry(hash, struct ttm_base_object, hash);
                if (!kref_get_unless_zero(&base->refcount))
                        base = NULL;
        }
        rcu_read_unlock();

        return base;
}
EXPORT_SYMBOL(ttm_base_object_lookup_for_ref);

/**
 * ttm_ref_object_exists - Check whether a caller holds a valid ref object
 * on (has opened) a base object.
 *
 * @tfile: Pointer to a struct ttm_object_file identifying the caller.
 * @base: Pointer to a struct base object.
 *
 * Checks whether the caller identified by @tfile has put a valid USAGE
 * reference object on the base object identified by @base.
 */
bool ttm_ref_object_exists(struct ttm_object_file *tfile,
                           struct ttm_base_object *base)
{
        struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
        struct drm_hash_item *hash;
        struct ttm_ref_object *ref;

        rcu_read_lock();
        if (unlikely(drm_ht_find_item_rcu(ht, base->hash.key, &hash) != 0))
                goto out_false;

        /*
         * Verify that the ref object is really pointing to our base object.
         * Our base object could actually be dead, and the ref object pointing
         * to another base object with the same handle.
         */
        ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
        if (unlikely(base != ref->obj))
                goto out_false;

        /*
         * Verify that the ref->obj pointer was actually valid!
         */
        rmb();
        if (unlikely(atomic_read(&ref->kref.refcount) == 0))
                goto out_false;

        rcu_read_unlock();
        return true;

 out_false:
        rcu_read_unlock();
        return false;
}
EXPORT_SYMBOL(ttm_ref_object_exists);

int ttm_ref_object_add(struct ttm_object_file *tfile,
                       struct ttm_base_object *base,
                       enum ttm_ref_type ref_type, bool *existed)
{
        struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
        struct ttm_ref_object *ref;
        struct drm_hash_item *hash;
        struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
        int ret = -EINVAL;

        if (base->tfile != tfile && !base->shareable)
                return -EPERM;

        if (existed != NULL)
                *existed = true;

        while (ret == -EINVAL) {
                rcu_read_lock();
                ret = drm_ht_find_item_rcu(ht, base->hash.key, &hash);

                if (ret == 0) {
                        ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
                        if (kref_get_unless_zero(&ref->kref)) {
                                rcu_read_unlock();
                                break;
                        }
                }

                rcu_read_unlock();
                ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
                                           false, false);
                if (unlikely(ret != 0))
                        return ret;
                ref = kmalloc(sizeof(*ref), GFP_KERNEL);
                if (unlikely(ref == NULL)) {
                        ttm_mem_global_free(mem_glob, sizeof(*ref));
                        return -ENOMEM;
                }

                ref->hash.key = base->hash.key;
                ref->obj = base;
                ref->tfile = tfile;
                ref->ref_type = ref_type;
                kref_init(&ref->kref);

                spin_lock(&tfile->lock);
                ret = drm_ht_insert_item_rcu(ht, &ref->hash);

                if (likely(ret == 0)) {
                        list_add_tail(&ref->head, &tfile->ref_list);
                        kref_get(&base->refcount);
                        spin_unlock(&tfile->lock);
                        if (existed != NULL)
                                *existed = false;
                        break;
                }

                spin_unlock(&tfile->lock);
                BUG_ON(ret != -EINVAL);

                ttm_mem_global_free(mem_glob, sizeof(*ref));
                kfree(ref);
        }

        return ret;
}
EXPORT_SYMBOL(ttm_ref_object_add);

static void ttm_ref_object_release(struct kref *kref)
{
        struct ttm_ref_object *ref =
            container_of(kref, struct ttm_ref_object, kref);
        struct ttm_base_object *base = ref->obj;
        struct ttm_object_file *tfile = ref->tfile;
        struct drm_open_hash *ht;
        struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;

        ht = &tfile->ref_hash[ref->ref_type];
        (void)drm_ht_remove_item_rcu(ht, &ref->hash);
        list_del(&ref->head);
        spin_unlock(&tfile->lock);

        if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
                base->ref_obj_release(base, ref->ref_type);

        ttm_base_object_unref(&ref->obj);
        ttm_mem_global_free(mem_glob, sizeof(*ref));
        kfree_rcu(ref, rcu_head);
        spin_lock(&tfile->lock);
}

int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
                              unsigned long key, enum ttm_ref_type ref_type)
{
        struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
        struct ttm_ref_object *ref;
        struct drm_hash_item *hash;
        int ret;

        spin_lock(&tfile->lock);
        ret = drm_ht_find_item(ht, key, &hash);
        if (unlikely(ret != 0)) {
                spin_unlock(&tfile->lock);
                return -EINVAL;
        }
        ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
        kref_put(&ref->kref, ttm_ref_object_release);
        spin_unlock(&tfile->lock);
        return 0;
}
EXPORT_SYMBOL(ttm_ref_object_base_unref);
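/*
 * Ref objects are how an open file pins base objects. Referencing an object
 * exported by another client, and later dropping that reference, might look
 * roughly like this. A minimal sketch; tdev, arg->handle and the surrounding
 * ioctl plumbing are hypothetical driver code, not part of this file:
 *
 *      bool existed;
 *
 *      base = ttm_base_object_lookup_for_ref(tdev, arg->handle);
 *      if (unlikely(base == NULL))
 *              return -EINVAL;
 *
 *      ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, &existed);
 *      ttm_base_object_unref(&base);
 *
 *      ...
 *
 *      ttm_ref_object_base_unref(tfile, arg->handle, TTM_REF_USAGE);
 *
 * Any ref objects still held when the file is closed are cleaned up by
 * ttm_object_file_release() below.
 */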

void ttm_object_file_release(struct ttm_object_file **p_tfile)
{
        struct ttm_ref_object *ref;
        struct list_head *list;
        unsigned int i;
        struct ttm_object_file *tfile = *p_tfile;

        *p_tfile = NULL;
        spin_lock(&tfile->lock);

        /*
         * Since we release the lock within the loop, we have to
         * restart it from the beginning each time.
         */

        while (!list_empty(&tfile->ref_list)) {
                list = tfile->ref_list.next;
                ref = list_entry(list, struct ttm_ref_object, head);
                ttm_ref_object_release(&ref->kref);
        }

        for (i = 0; i < TTM_REF_NUM; ++i)
                drm_ht_remove(&tfile->ref_hash[i]);

        spin_unlock(&tfile->lock);
        ttm_object_file_unref(&tfile);
}
EXPORT_SYMBOL(ttm_object_file_release);

struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
                                             unsigned int hash_order)
{
        struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
        unsigned int i;
        unsigned int j = 0;
        int ret;

        if (unlikely(tfile == NULL))
                return NULL;

        spin_lock_init(&tfile->lock);
        tfile->tdev = tdev;
        kref_init(&tfile->refcount);
        INIT_LIST_HEAD(&tfile->ref_list);

        for (i = 0; i < TTM_REF_NUM; ++i) {
                ret = drm_ht_create(&tfile->ref_hash[i], hash_order);
                if (ret) {
                        j = i;
                        goto out_err;
                }
        }

        return tfile;
out_err:
        for (i = 0; i < j; ++i)
                drm_ht_remove(&tfile->ref_hash[i]);

        kfree(tfile);

        return NULL;
}
EXPORT_SYMBOL(ttm_object_file_init);
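/*
 * A ttm_object_file is the per-open-file bookkeeping, so the natural place
 * to create and destroy it is the driver's open and postclose hooks. A
 * minimal sketch, assuming a hypothetical per-file struct my_fpriv, a
 * driver-wide tdev and an example hash_order (none of which are part of
 * this file):
 *
 *      static int my_driver_open(struct drm_device *dev, struct drm_file *file)
 *      {
 *              struct my_fpriv *fpriv = ...;
 *
 *              fpriv->tfile = ttm_object_file_init(tdev, 10);
 *              if (unlikely(fpriv->tfile == NULL))
 *                      return -ENOMEM;
 *              ...
 *      }
 *
 *      static void my_driver_postclose(struct drm_device *dev, struct drm_file *file)
 *      {
 *              struct my_fpriv *fpriv = file->driver_priv;
 *
 *              ttm_object_file_release(&fpriv->tfile);
 *              ...
 *      }
 */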

struct ttm_object_device *
ttm_object_device_init(struct ttm_mem_global *mem_glob,
                       unsigned int hash_order,
                       const struct dma_buf_ops *ops)
{
        struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
        int ret;

        if (unlikely(tdev == NULL))
                return NULL;

        tdev->mem_glob = mem_glob;
        spin_lock_init(&tdev->object_lock);
        atomic_set(&tdev->object_count, 0);
        ret = drm_ht_create(&tdev->object_hash, hash_order);
        if (ret != 0)
                goto out_no_object_hash;

        tdev->ops = *ops;
        tdev->dmabuf_release = tdev->ops.release;
        tdev->ops.release = ttm_prime_dmabuf_release;
        tdev->dma_buf_size = ttm_round_pot(sizeof(struct dma_buf)) +
                ttm_round_pot(sizeof(struct file));
        return tdev;

out_no_object_hash:
        kfree(tdev);
        return NULL;
}
EXPORT_SYMBOL(ttm_object_device_init);
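/*
 * The device-wide object machinery is set up once at driver load, together
 * with the dma_buf_ops the driver wants its exported dma-bufs to use. A
 * minimal sketch (my_prime_dmabuf_ops, dev_priv and the hash_order value
 * are hypothetical driver code, not part of this file):
 *
 *      static const struct dma_buf_ops my_prime_dmabuf_ops = {
 *              ...
 *      };
 *
 *      dev_priv->tdev = ttm_object_device_init(mem_glob, 12,
 *                                              &my_prime_dmabuf_ops);
 *      if (unlikely(dev_priv->tdev == NULL))
 *              return -ENOMEM;
 *
 * Note that the ops are copied and the release callback is wrapped by
 * ttm_prime_dmabuf_release(), so that this file can clear the cached
 * dma_buf pointer and drop the base object reference when an exported
 * dma-buf dies.
 */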

void ttm_object_device_release(struct ttm_object_device **p_tdev)
{
        struct ttm_object_device *tdev = *p_tdev;

        *p_tdev = NULL;

        spin_lock(&tdev->object_lock);
        drm_ht_remove(&tdev->object_hash);
        spin_unlock(&tdev->object_lock);

        kfree(tdev);
}
EXPORT_SYMBOL(ttm_object_device_release);

/**
 * get_dma_buf_unless_doomed - get a dma_buf reference if possible.
 *
 * @dmabuf: Non-refcounted pointer to a struct dma_buf.
 *
 * Obtain a file reference from a lookup structure that doesn't refcount
 * the file, but synchronizes with its release method to make sure it has
 * not been freed yet. See for example kref_get_unless_zero documentation.
 * Returns true if refcounting succeeds, false otherwise.
 *
 * Nobody really wants this as a public API yet, so let it mature here
 * for some time...
 */
static bool __must_check get_dma_buf_unless_doomed(struct dma_buf *dmabuf)
{
        return atomic_long_inc_not_zero(&dmabuf->file->f_count) != 0L;
}

/**
 * ttm_prime_refcount_release - refcount release method for a prime object.
 *
 * @p_base: Pointer to ttm_base_object pointer.
 *
 * This is a wrapper that calls the refcount_release function of the
 * underlying object. At the same time it cleans up the prime object.
 * This function is called when all references to the base object we
 * derive from are gone.
 */
static void ttm_prime_refcount_release(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;
        struct ttm_prime_object *prime;

        *p_base = NULL;
        prime = container_of(base, struct ttm_prime_object, base);
        BUG_ON(prime->dma_buf != NULL);
        mutex_destroy(&prime->mutex);
        if (prime->refcount_release)
                prime->refcount_release(&base);
}

/**
 * ttm_prime_dmabuf_release - Release method for the dma-bufs we export
 *
 * @dma_buf: The dma-buf being released.
 *
 * This function first calls the dma_buf release method the driver
 * provides. Then it cleans up our dma_buf pointer used for lookup,
 * and finally releases the reference the dma_buf has on our base
 * object.
 */
static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf)
{
        struct ttm_prime_object *prime =
                (struct ttm_prime_object *) dma_buf->priv;
        struct ttm_base_object *base = &prime->base;
        struct ttm_object_device *tdev = base->tfile->tdev;

        if (tdev->dmabuf_release)
                tdev->dmabuf_release(dma_buf);
        mutex_lock(&prime->mutex);
        if (prime->dma_buf == dma_buf)
                prime->dma_buf = NULL;
        mutex_unlock(&prime->mutex);
        ttm_mem_global_free(tdev->mem_glob, tdev->dma_buf_size);
        ttm_base_object_unref(&base);
}

/**
 * ttm_prime_fd_to_handle - Get a base object handle from a prime fd
 *
 * @tfile: A struct ttm_object_file identifying the caller.
 * @fd: The prime / dmabuf fd.
 * @handle: The returned handle.
 *
 * This function returns a handle to an object that previously exported
 * a dma-buf. Note that we don't handle imports yet, because we simply
 * have no consumers of that implementation.
 */
int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
                           int fd, u32 *handle)
{
        struct ttm_object_device *tdev = tfile->tdev;
        struct dma_buf *dma_buf;
        struct ttm_prime_object *prime;
        struct ttm_base_object *base;
        int ret;

        dma_buf = dma_buf_get(fd);
        if (IS_ERR(dma_buf))
                return PTR_ERR(dma_buf);

        if (dma_buf->ops != &tdev->ops) {
                /* Not one of our dma-bufs; drop the reference taken above. */
                dma_buf_put(dma_buf);
                return -ENOSYS;
        }

        prime = (struct ttm_prime_object *) dma_buf->priv;
        base = &prime->base;
        *handle = base->hash.key;
        ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);

        dma_buf_put(dma_buf);

        return ret;
}
EXPORT_SYMBOL_GPL(ttm_prime_fd_to_handle);
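/*
 * This is what backs a driver's PRIME fd-to-handle ioctl. A minimal sketch
 * of such a handler (args and the surrounding ioctl plumbing are
 * hypothetical driver code, not part of this file):
 *
 *      u32 handle;
 *      int ret;
 *
 *      ret = ttm_prime_fd_to_handle(tfile, args->fd, &handle);
 *      if (unlikely(ret != 0))
 *              return ret;
 *
 *      args->handle = handle;
 *      return 0;
 */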

/**
 * ttm_prime_handle_to_fd - Return a dma_buf fd from a ttm prime object
 *
 * @tfile: Struct ttm_object_file identifying the caller.
 * @handle: Handle to the object we're exporting from.
 * @flags: flags for dma-buf creation. We just pass them on.
 * @prime_fd: The returned file descriptor.
 *
 */
int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
                           uint32_t handle, uint32_t flags,
                           int *prime_fd)
{
        struct ttm_object_device *tdev = tfile->tdev;
        struct ttm_base_object *base;
        struct dma_buf *dma_buf;
        struct ttm_prime_object *prime;
        int ret;

        base = ttm_base_object_lookup(tfile, handle);
        if (unlikely(base == NULL ||
                     base->object_type != ttm_prime_type)) {
                ret = -ENOENT;
                goto out_unref;
        }

        prime = container_of(base, struct ttm_prime_object, base);
        if (unlikely(!base->shareable)) {
                ret = -EPERM;
                goto out_unref;
        }

        ret = mutex_lock_interruptible(&prime->mutex);
        if (unlikely(ret != 0)) {
                ret = -ERESTARTSYS;
                goto out_unref;
        }

        dma_buf = prime->dma_buf;
        if (!dma_buf || !get_dma_buf_unless_doomed(dma_buf)) {
                DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

                exp_info.ops = &tdev->ops;
                exp_info.size = prime->size;
                exp_info.flags = flags;
                exp_info.priv = prime;

                /*
                 * Need to create a new dma_buf, with memory accounting.
                 */
                ret = ttm_mem_global_alloc(tdev->mem_glob, tdev->dma_buf_size,
                                           false, true);
                if (unlikely(ret != 0)) {
                        mutex_unlock(&prime->mutex);
                        goto out_unref;
                }

                dma_buf = dma_buf_export(&exp_info);
                if (IS_ERR(dma_buf)) {
                        ret = PTR_ERR(dma_buf);
                        ttm_mem_global_free(tdev->mem_glob,
                                            tdev->dma_buf_size);
                        mutex_unlock(&prime->mutex);
                        goto out_unref;
                }

                /*
                 * dma_buf has taken the base object reference
                 */
                base = NULL;
                prime->dma_buf = dma_buf;
        }
        mutex_unlock(&prime->mutex);

        ret = dma_buf_fd(dma_buf, flags);
        if (ret >= 0) {
                *prime_fd = ret;
                ret = 0;
        } else
                dma_buf_put(dma_buf);

out_unref:
        if (base)
                ttm_base_object_unref(&base);
        return ret;
}
EXPORT_SYMBOL_GPL(ttm_prime_handle_to_fd);
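/*
 * The counterpart of the above: a driver's PRIME handle-to-fd ioctl maps
 * almost directly onto this helper. A minimal sketch (args is a
 * hypothetical ioctl argument, not part of this file):
 *
 *      int fd;
 *      int ret;
 *
 *      ret = ttm_prime_handle_to_fd(tfile, args->handle, args->flags, &fd);
 *      if (unlikely(ret != 0))
 *              return ret;
 *
 *      args->fd = fd;
 *      return 0;
 */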

/**
 * ttm_prime_object_init - Initialize a ttm_prime_object
 *
 * @tfile: struct ttm_object_file identifying the caller
 * @size: The size of the dma_bufs we export.
 * @prime: The object to be initialized.
 * @shareable: See ttm_base_object_init
 * @type: See ttm_base_object_init
 * @refcount_release: See ttm_base_object_init
 * @ref_obj_release: See ttm_base_object_init
 *
 * Initializes an object which is compatible with the drm_prime model
 * for data sharing between processes and devices.
 */
int ttm_prime_object_init(struct ttm_object_file *tfile, size_t size,
                          struct ttm_prime_object *prime, bool shareable,
                          enum ttm_object_type type,
                          void (*refcount_release) (struct ttm_base_object **),
                          void (*ref_obj_release) (struct ttm_base_object *,
                                                   enum ttm_ref_type ref_type))
{
        mutex_init(&prime->mutex);
        prime->size = PAGE_ALIGN(size);
        prime->real_type = type;
        prime->dma_buf = NULL;
        prime->refcount_release = refcount_release;
        return ttm_base_object_init(tfile, &prime->base, shareable,
                                    ttm_prime_type,
                                    ttm_prime_refcount_release,
                                    ref_obj_release);
}
EXPORT_SYMBOL(ttm_prime_object_init);
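/*
 * Drivers that want their buffer objects to be PRIME-exportable embed a
 * struct ttm_prime_object instead of a bare ttm_base_object: the base
 * object is registered as ttm_prime_type while the driver's real type is
 * kept in prime->real_type. A minimal sketch (struct my_bo and
 * my_bo_release() are hypothetical driver code, not part of this file):
 *
 *      struct my_bo {
 *              struct ttm_prime_object prime;
 *              ...
 *      };
 *
 *      ret = ttm_prime_object_init(tfile, bo_size, &bo->prime, true,
 *                                  ttm_buffer_type, &my_bo_release, NULL);
 *
 * A driver's handle lookup can then use container_of() on the embedded
 * base object and check prime->real_type to recover its own object.
 */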