linux/drivers/gpu/drm/ttm/ttm_object.c
/**************************************************************************
 *
 * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
/** @file ttm_object.c
 *
 * Base- and reference object implementation for the various
 * ttm objects. Implements reference counting, minimal security checks
 * and release on file close.
 */

/**
 * struct ttm_object_file
 *
 * @tdev: Pointer to the ttm_object_device.
 *
 * @lock: Lock that protects the ref_list list and the
 * ref_hash hash tables.
 *
 * @ref_list: List of ttm_ref_objects to be destroyed at
 * file release.
 *
 * @ref_hash: Hash tables of ref objects, one per ttm_ref_type,
 * for fast lookup of ref objects given a base object.
 *
 * @refcount: Reference count of this object file. Each base object created
 * through the file holds a reference, and the initial reference is dropped
 * at file release.
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_module.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/atomic.h>

struct ttm_object_file {
        struct ttm_object_device *tdev;
        rwlock_t lock;
        struct list_head ref_list;
        struct drm_open_hash ref_hash[TTM_REF_NUM];
        struct kref refcount;
};

/**
 * struct ttm_object_device
 *
 * @object_lock: lock that protects the object_hash hash table.
 *
 * @object_hash: hash table for fast lookup of object global names.
 *
 * @object_count: Per device object count.
 *
 * @mem_glob: Pointer to the global memory accounting object used to
 * account for the memory occupied by ref objects.
 *
 * This is the per-device data structure needed for ttm object management.
 */

struct ttm_object_device {
        spinlock_t object_lock;
        struct drm_open_hash object_hash;
        atomic_t object_count;
        struct ttm_mem_global *mem_glob;
};

/**
 * struct ttm_ref_object
 *
 * @hash: Hash entry for the per-file object reference hash.
 *
 * @head: List entry for the per-file list of ref-objects.
 *
 * @kref: Ref count.
 *
 * @obj: Base object this ref object is referencing.
 *
 * @ref_type: Type of ref object.
 *
 * This is similar to an idr object, but it also has a hash table entry
 * that allows lookup with a pointer to the referenced object as a key. In
 * that way, one can easily detect whether a base object is referenced by
 * a particular ttm_object_file. It also carries a ref count to avoid creating
 * multiple ref objects if a ttm_object_file references the same base
 * object more than once.
 */

struct ttm_ref_object {
        struct drm_hash_item hash;
        struct list_head head;
        struct kref kref;
        enum ttm_ref_type ref_type;
        struct ttm_base_object *obj;
        struct ttm_object_file *tfile;
};

static inline struct ttm_object_file *
ttm_object_file_ref(struct ttm_object_file *tfile)
{
        kref_get(&tfile->refcount);
        return tfile;
}

static void ttm_object_file_destroy(struct kref *kref)
{
        struct ttm_object_file *tfile =
                container_of(kref, struct ttm_object_file, refcount);

        kfree(tfile);
}


static inline void ttm_object_file_unref(struct ttm_object_file **p_tfile)
{
        struct ttm_object_file *tfile = *p_tfile;

        *p_tfile = NULL;
        kref_put(&tfile->refcount, ttm_object_file_destroy);
}


int ttm_base_object_init(struct ttm_object_file *tfile,
                         struct ttm_base_object *base,
                         bool shareable,
                         enum ttm_object_type object_type,
                         void (*refcount_release) (struct ttm_base_object **),
                         void (*ref_obj_release) (struct ttm_base_object *,
                                                  enum ttm_ref_type ref_type))
{
        struct ttm_object_device *tdev = tfile->tdev;
        int ret;

        base->shareable = shareable;
        base->tfile = ttm_object_file_ref(tfile);
        base->refcount_release = refcount_release;
        base->ref_obj_release = ref_obj_release;
        base->object_type = object_type;
        kref_init(&base->refcount);
        spin_lock(&tdev->object_lock);
        ret = drm_ht_just_insert_please_rcu(&tdev->object_hash,
                                            &base->hash,
                                            (unsigned long)base, 31, 0, 0);
        spin_unlock(&tdev->object_lock);
        if (unlikely(ret != 0))
                goto out_err0;

        ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
        if (unlikely(ret != 0))
                goto out_err1;

        ttm_base_object_unref(&base);

        return 0;
out_err1:
        spin_lock(&tdev->object_lock);
        (void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash);
        spin_unlock(&tdev->object_lock);
out_err0:
        return ret;
}
EXPORT_SYMBOL(ttm_base_object_init);
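
/*
 * Example (illustrative sketch only, not part of this file): a driver
 * typically embeds struct ttm_base_object in its own object and frees the
 * containing structure from the refcount_release callback. The names
 * my_driver_object, my_object_release and my_object_type below are
 * hypothetical. Since ttm_release_base() does not call synchronize_rcu(),
 * the callback should defer the actual free with kfree_rcu(), call_rcu()
 * or ttm_base_object_kfree().
 *
 *	struct my_driver_object {
 *		struct ttm_base_object base;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void my_object_release(struct ttm_base_object **p_base)
 *	{
 *		struct ttm_base_object *base = *p_base;
 *		struct my_driver_object *obj =
 *			container_of(base, struct my_driver_object, base);
 *
 *		*p_base = NULL;
 *		kfree_rcu(obj, rcu);
 *	}
 *
 *	ret = ttm_base_object_init(tfile, &obj->base, true, my_object_type,
 *				   &my_object_release, NULL);
 */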

static void ttm_release_base(struct kref *kref)
{
        struct ttm_base_object *base =
            container_of(kref, struct ttm_base_object, refcount);
        struct ttm_object_device *tdev = base->tfile->tdev;

        spin_lock(&tdev->object_lock);
        (void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash);
        spin_unlock(&tdev->object_lock);

        /*
         * Note: We don't use synchronize_rcu() here because it's far
         * too slow. It's up to the user to free the object using
         * call_rcu() or ttm_base_object_kfree().
         */

        if (base->refcount_release) {
                ttm_object_file_unref(&base->tfile);
                base->refcount_release(&base);
        }
}

void ttm_base_object_unref(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;

        *p_base = NULL;

        kref_put(&base->refcount, ttm_release_base);
}
EXPORT_SYMBOL(ttm_base_object_unref);

struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
                                               uint32_t key)
{
        struct ttm_object_device *tdev = tfile->tdev;
        struct ttm_base_object *base;
        struct drm_hash_item *hash;
        int ret;

        rcu_read_lock();
        ret = drm_ht_find_item_rcu(&tdev->object_hash, key, &hash);

        if (likely(ret == 0)) {
                base = drm_hash_entry(hash, struct ttm_base_object, hash);
                ret = kref_get_unless_zero(&base->refcount) ? 0 : -EINVAL;
        }
        rcu_read_unlock();

        if (unlikely(ret != 0))
                return NULL;

        if (tfile != base->tfile && !base->shareable) {
                pr_err("Attempted access of non-shareable object\n");
                ttm_base_object_unref(&base);
                return NULL;
        }

        return base;
}
EXPORT_SYMBOL(ttm_base_object_lookup);
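
/*
 * Example (illustrative sketch only): resolving a user-space handle in an
 * ioctl path. The names handle and my_object_type are hypothetical; the
 * key is the value assigned to base->hash.key at init time. A successful
 * lookup returns a reference that the caller must drop again with
 * ttm_base_object_unref() once it is done with the object.
 *
 *	struct ttm_base_object *base;
 *
 *	base = ttm_base_object_lookup(tfile, handle);
 *	if (unlikely(base == NULL))
 *		return -EINVAL;
 *
 *	if (unlikely(base->object_type != my_object_type)) {
 *		ttm_base_object_unref(&base);
 *		return -EINVAL;
 *	}
 *
 *	ttm_base_object_unref(&base);
 */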

int ttm_ref_object_add(struct ttm_object_file *tfile,
                       struct ttm_base_object *base,
                       enum ttm_ref_type ref_type, bool *existed)
{
        struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
        struct ttm_ref_object *ref;
        struct drm_hash_item *hash;
        struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
        int ret = -EINVAL;

        if (existed != NULL)
                *existed = true;

        while (ret == -EINVAL) {
                read_lock(&tfile->lock);
                ret = drm_ht_find_item(ht, base->hash.key, &hash);

                if (ret == 0) {
                        ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
                        kref_get(&ref->kref);
                        read_unlock(&tfile->lock);
                        break;
                }

                read_unlock(&tfile->lock);
                ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
                                           false, false);
                if (unlikely(ret != 0))
                        return ret;
                ref = kmalloc(sizeof(*ref), GFP_KERNEL);
                if (unlikely(ref == NULL)) {
                        ttm_mem_global_free(mem_glob, sizeof(*ref));
                        return -ENOMEM;
                }

                ref->hash.key = base->hash.key;
                ref->obj = base;
                ref->tfile = tfile;
                ref->ref_type = ref_type;
                kref_init(&ref->kref);

                write_lock(&tfile->lock);
                ret = drm_ht_insert_item(ht, &ref->hash);

                if (likely(ret == 0)) {
                        list_add_tail(&ref->head, &tfile->ref_list);
                        kref_get(&base->refcount);
                        write_unlock(&tfile->lock);
                        if (existed != NULL)
                                *existed = false;
                        break;
                }

                write_unlock(&tfile->lock);
                BUG_ON(ret != -EINVAL);

                ttm_mem_global_free(mem_glob, sizeof(*ref));
                kfree(ref);
        }

        return ret;
}
EXPORT_SYMBOL(ttm_ref_object_add);
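
/*
 * Example (illustrative sketch only): taking an extra per-file usage
 * reference on an existing base object, e.g. when handing out a handle to
 * another client. The existed flag reports whether this tfile already held
 * a TTM_REF_USAGE reference. Each successful add is balanced either by a
 * ttm_ref_object_base_unref() with the same key and type, or by the
 * cleanup performed in ttm_object_file_release().
 *
 *	bool existed;
 *	int ret;
 *
 *	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, &existed);
 *	if (unlikely(ret != 0))
 *		return ret;
 *
 *	ret = ttm_ref_object_base_unref(tfile, base->hash.key, TTM_REF_USAGE);
 */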

static void ttm_ref_object_release(struct kref *kref)
{
        struct ttm_ref_object *ref =
            container_of(kref, struct ttm_ref_object, kref);
        struct ttm_base_object *base = ref->obj;
        struct ttm_object_file *tfile = ref->tfile;
        struct drm_open_hash *ht;
        struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;

        ht = &tfile->ref_hash[ref->ref_type];
        (void)drm_ht_remove_item(ht, &ref->hash);
        list_del(&ref->head);
        write_unlock(&tfile->lock);

        if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
                base->ref_obj_release(base, ref->ref_type);

        ttm_base_object_unref(&ref->obj);
        ttm_mem_global_free(mem_glob, sizeof(*ref));
        kfree(ref);
        write_lock(&tfile->lock);
}

int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
                              unsigned long key, enum ttm_ref_type ref_type)
{
        struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
        struct ttm_ref_object *ref;
        struct drm_hash_item *hash;
        int ret;

        write_lock(&tfile->lock);
        ret = drm_ht_find_item(ht, key, &hash);
        if (unlikely(ret != 0)) {
                write_unlock(&tfile->lock);
                return -EINVAL;
        }
        ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
        kref_put(&ref->kref, ttm_ref_object_release);
        write_unlock(&tfile->lock);
        return 0;
}
EXPORT_SYMBOL(ttm_ref_object_base_unref);

void ttm_object_file_release(struct ttm_object_file **p_tfile)
{
        struct ttm_ref_object *ref;
        struct list_head *list;
        unsigned int i;
        struct ttm_object_file *tfile = *p_tfile;

        *p_tfile = NULL;
        write_lock(&tfile->lock);

        /*
         * Since we release the lock within the loop, we have to
         * restart it from the beginning each time.
         */

        while (!list_empty(&tfile->ref_list)) {
                list = tfile->ref_list.next;
                ref = list_entry(list, struct ttm_ref_object, head);
                ttm_ref_object_release(&ref->kref);
        }

        for (i = 0; i < TTM_REF_NUM; ++i)
                drm_ht_remove(&tfile->ref_hash[i]);

        write_unlock(&tfile->lock);
        ttm_object_file_unref(&tfile);
}
EXPORT_SYMBOL(ttm_object_file_release);

struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
                                             unsigned int hash_order)
{
        struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
        unsigned int i;
        unsigned int j = 0;
        int ret;

        if (unlikely(tfile == NULL))
                return NULL;

        rwlock_init(&tfile->lock);
        tfile->tdev = tdev;
        kref_init(&tfile->refcount);
        INIT_LIST_HEAD(&tfile->ref_list);

        for (i = 0; i < TTM_REF_NUM; ++i) {
                ret = drm_ht_create(&tfile->ref_hash[i], hash_order);
                if (ret) {
                        j = i;
                        goto out_err;
                }
        }

        return tfile;
out_err:
        for (i = 0; i < j; ++i)
                drm_ht_remove(&tfile->ref_hash[i]);

        kfree(tfile);

        return NULL;
}
EXPORT_SYMBOL(ttm_object_file_init);

struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global
                                                 *mem_glob,
                                                 unsigned int hash_order)
{
        struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
        int ret;

        if (unlikely(tdev == NULL))
                return NULL;

        tdev->mem_glob = mem_glob;
        spin_lock_init(&tdev->object_lock);
        atomic_set(&tdev->object_count, 0);
        ret = drm_ht_create(&tdev->object_hash, hash_order);

        if (likely(ret == 0))
                return tdev;

        kfree(tdev);
        return NULL;
}
EXPORT_SYMBOL(ttm_object_device_init);
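
/*
 * Example (illustrative sketch only): typical setup and teardown order for
 * a driver using this API. One ttm_object_device is created at driver load
 * time and one ttm_object_file per open file descriptor; the hash_order
 * value of 10 and the mem_glob pointer are placeholders.
 *
 *	On driver load:		tdev = ttm_object_device_init(mem_glob, 10);
 *	On file open:		tfile = ttm_object_file_init(tdev, 10);
 *	On file close:		ttm_object_file_release(&tfile);
 *	On driver unload:	ttm_object_device_release(&tdev);
 *
 * ttm_object_file_release() drops any ref objects still held by the file,
 * which in turn releases the base objects they reference.
 */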

void ttm_object_device_release(struct ttm_object_device **p_tdev)
{
        struct ttm_object_device *tdev = *p_tdev;

        *p_tdev = NULL;

        spin_lock(&tdev->object_lock);
        drm_ht_remove(&tdev->object_hash);
        spin_unlock(&tdev->object_lock);

        kfree(tdev);
}
EXPORT_SYMBOL(ttm_object_device_release);