linux/drivers/gpu/drm/drm_gem.c
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <drm/drmP.h>

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and the general API provides only generic operations:
 * allocating objects, reading/writing data with the CPU, and freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls.  However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file.  However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls.  The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

/*
 * We make up offsets for buffer objects so we can recognize them at
 * mmap time.
 */

/* pgoff in mmap is an unsigned long, so we need to make sure that
 * the faked up offset will fit
 */

#if BITS_PER_LONG == 64
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
#else
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#endif
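
/*
 * Worked example: with 4 KiB pages (PAGE_SHIFT == 12) on a 64-bit machine,
 * DRM_FILE_PAGE_OFFSET_START is (0xFFFFFFFF >> 12) + 1 == 0x100000 pages,
 * so fake offsets start at byte offset 4 GiB, presumably to stay clear of
 * anything a 32-bit legacy map offset could produce, and
 * DRM_FILE_PAGE_OFFSET_SIZE gives 16 times that range, roughly 64 GiB of
 * offset space to parcel out to buffer objects.
 */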

/**
 * Initialize the GEM device fields
 */

int
drm_gem_init(struct drm_device *dev)
{
        struct drm_gem_mm *mm;

        spin_lock_init(&dev->object_name_lock);
        idr_init(&dev->object_name_idr);

        mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
        if (!mm) {
                DRM_ERROR("out of memory\n");
                return -ENOMEM;
        }

        dev->mm_private = mm;

        if (drm_ht_create(&mm->offset_hash, 12)) {
                kfree(mm);
                return -ENOMEM;
        }

        drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
                    DRM_FILE_PAGE_OFFSET_SIZE);

        return 0;
}

void
drm_gem_destroy(struct drm_device *dev)
{
        struct drm_gem_mm *mm = dev->mm_private;

        drm_mm_takedown(&mm->offset_manager);
        drm_ht_remove(&mm->offset_hash);
        kfree(mm);
        dev->mm_private = NULL;
}

/**
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
                        struct drm_gem_object *obj, size_t size)
{
        BUG_ON((size & (PAGE_SIZE - 1)) != 0);

        obj->dev = dev;
        obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
        if (IS_ERR(obj->filp))
                return PTR_ERR(obj->filp);

        kref_init(&obj->refcount);
        atomic_set(&obj->handle_count, 0);
        obj->size = size;

        return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);
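
/*
 * Example: drivers usually embed struct drm_gem_object in their own buffer
 * type and let drm_gem_object_init() set up the shmem backing store.  A
 * minimal sketch, with hypothetical "foo" names; note that size must
 * already be page-aligned (see the BUG_ON above):
 *
 *      struct foo_bo {
 *              struct drm_gem_object base;
 *              struct sg_table *pages;
 *      };
 *
 *      struct foo_bo *foo_bo_create(struct drm_device *dev, size_t size)
 *      {
 *              struct foo_bo *bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *
 *              if (!bo)
 *                      return ERR_PTR(-ENOMEM);
 *              if (drm_gem_object_init(dev, &bo->base, size)) {
 *                      kfree(bo);
 *                      return ERR_PTR(-ENOMEM);
 *              }
 *              return bo;
 *      }
 */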

/**
 * Initialize an already allocated GEM object of the specified size with
 * no GEM-provided backing store.  Instead the caller is responsible for
 * backing the object and handling it.
 */
int drm_gem_private_object_init(struct drm_device *dev,
                        struct drm_gem_object *obj, size_t size)
{
        BUG_ON((size & (PAGE_SIZE - 1)) != 0);

        obj->dev = dev;
        obj->filp = NULL;

        kref_init(&obj->refcount);
        atomic_set(&obj->handle_count, 0);
        obj->size = size;

        return 0;
}
EXPORT_SYMBOL(drm_gem_private_object_init);
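
/*
 * Example: a private object is the natural fit for imported dma-bufs, where
 * the pages come from the exporter rather than from shmfs.  A sketch of a
 * hypothetical import helper (the "foo" name is not part of this file):
 *
 *      struct drm_gem_object *foo_gem_prime_import(struct drm_device *dev,
 *                                                  struct dma_buf *dma_buf)
 *      {
 *              struct drm_gem_object *obj;
 *
 *              obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *              if (!obj)
 *                      return ERR_PTR(-ENOMEM);
 *              if (drm_gem_private_object_init(dev, obj,
 *                                              PAGE_ALIGN(dma_buf->size))) {
 *                      kfree(obj);
 *                      return ERR_PTR(-ENOMEM);
 *              }
 *              // Attaching and mapping the dma-buf is the driver's job.
 *              return obj;
 *      }
 */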

/**
 * Allocate a GEM object of the specified size with shmfs backing store
 */
struct drm_gem_object *
drm_gem_object_alloc(struct drm_device *dev, size_t size)
{
        struct drm_gem_object *obj;

        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
        if (!obj)
                goto free;

        if (drm_gem_object_init(dev, obj, size) != 0)
                goto free;

        if (dev->driver->gem_init_object != NULL &&
            dev->driver->gem_init_object(obj) != 0) {
                goto fput;
        }
        return obj;
fput:
        /* Release the shmem file taken in drm_gem_object_init(). */
        fput(obj->filp);
free:
        kfree(obj);
        return NULL;
}
EXPORT_SYMBOL(drm_gem_object_alloc);

static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
        if (obj->import_attach) {
                drm_prime_remove_buf_handle(&filp->prime,
                                obj->import_attach->dmabuf);
        }
        if (obj->export_dma_buf) {
                drm_prime_remove_buf_handle(&filp->prime,
                                obj->export_dma_buf);
        }
}

/**
 * Removes the given handle's mapping to this object from the filp's
 * handle table.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
        struct drm_device *dev;
        struct drm_gem_object *obj;

        /* This is gross. The idr system doesn't let us try a delete and
         * return an error code.  It just spews if you fail at deleting.
         * So, we have to grab a lock around finding the object and then
         * doing the delete on it and dropping the refcount, or the user
         * could race us to double-decrement the refcount and cause a
         * use-after-free later.  Given the frequency of our handle lookups,
         * we may want to use ida for number allocation and a hash table
         * for the pointers, anyway.
         */
        spin_lock(&filp->table_lock);

        /* Check if we currently have a reference on the object */
        obj = idr_find(&filp->object_idr, handle);
        if (obj == NULL) {
                spin_unlock(&filp->table_lock);
                return -EINVAL;
        }
        dev = obj->dev;

        /* Release reference and decrement refcount. */
        idr_remove(&filp->object_idr, handle);
        spin_unlock(&filp->table_lock);

        drm_gem_remove_prime_handles(obj, filp);

        if (dev->driver->gem_close_object)
                dev->driver->gem_close_object(obj, filp);
        drm_gem_object_handle_unreference_unlocked(obj);

        return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * Create a handle for this object. This adds a handle reference
 * to the object, which includes a regular reference count. Callers
 * will likely want to drop their own reference to the object afterwards.
 */
int
drm_gem_handle_create(struct drm_file *file_priv,
                       struct drm_gem_object *obj,
                       u32 *handlep)
{
        struct drm_device *dev = obj->dev;
        int ret;

        /*
         * Get the user-visible handle using idr.  Preload and perform
         * allocation under our spinlock.
         */
        idr_preload(GFP_KERNEL);
        spin_lock(&file_priv->table_lock);

        ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);

        spin_unlock(&file_priv->table_lock);
        idr_preload_end();
        if (ret < 0)
                return ret;
        *handlep = ret;

        drm_gem_object_handle_reference(obj);

        if (dev->driver->gem_open_object) {
                ret = dev->driver->gem_open_object(obj, file_priv);
                if (ret) {
                        drm_gem_handle_delete(file_priv, *handlep);
                        return ret;
                }
        }

        return 0;
}
EXPORT_SYMBOL(drm_gem_handle_create);
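
/*
 * Example: a driver "create" ioctl typically pairs drm_gem_handle_create()
 * with a reference drop, leaving the new handle as the sole reference.  A
 * minimal sketch (the "foo" names are hypothetical, not part of this file):
 *
 *      int foo_gem_create(struct drm_device *dev, size_t size,
 *                         struct drm_file *file_priv, u32 *handle_p)
 *      {
 *              struct drm_gem_object *obj;
 *              int ret;
 *
 *              obj = drm_gem_object_alloc(dev, PAGE_ALIGN(size));
 *              if (obj == NULL)
 *                      return -ENOMEM;
 *
 *              ret = drm_gem_handle_create(file_priv, obj, handle_p);
 *              // Drop the allocation reference; on success the handle
 *              // now keeps the object alive.
 *              drm_gem_object_unreference_unlocked(obj);
 *              return ret;
 *      }
 */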


/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct drm_gem_mm *mm = dev->mm_private;
        struct drm_map_list *list = &obj->map_list;

        drm_ht_remove_item(&mm->offset_hash, &list->hash);
        drm_mm_put_block(list->file_offset_node);
        kfree(list->map);
        list->map = NULL;
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
int
drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct drm_gem_mm *mm = dev->mm_private;
        struct drm_map_list *list;
        struct drm_local_map *map;
        int ret;

        /* Set the object up for mmap'ing */
        list = &obj->map_list;
        list->map = kzalloc(sizeof(struct drm_local_map), GFP_KERNEL);
        if (!list->map)
                return -ENOMEM;

        map = list->map;
        map->type = _DRM_GEM;
        map->size = obj->size;
        map->handle = obj;

        /* Get a DRM GEM mmap offset allocated... */
        list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
                        obj->size / PAGE_SIZE, 0, false);

        if (!list->file_offset_node) {
                DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
                ret = -ENOSPC;
                goto out_free_list;
        }

        list->file_offset_node = drm_mm_get_block(list->file_offset_node,
                        obj->size / PAGE_SIZE, 0);
        if (!list->file_offset_node) {
                ret = -ENOMEM;
                goto out_free_list;
        }

        list->hash.key = list->file_offset_node->start;
        ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
        if (ret) {
                DRM_ERROR("failed to add to map hash\n");
                goto out_free_mm;
        }

        return 0;

out_free_mm:
        drm_mm_put_block(list->file_offset_node);
out_free_list:
        kfree(list->map);
        list->map = NULL;

        return ret;
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
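
/*
 * Example: a driver's "get mmap offset" path typically creates the fake
 * offset on demand and reports hash.key, scaled back to bytes, as the
 * offset userspace should pass to mmap(2).  A sketch with a hypothetical
 * "foo" helper:
 *
 *      int foo_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset_p)
 *      {
 *              int ret = 0;
 *
 *              if (!obj->map_list.map)
 *                      ret = drm_gem_create_mmap_offset(obj);
 *              if (ret == 0)
 *                      *offset_p = (u64)obj->map_list.hash.key << PAGE_SHIFT;
 *              return ret;
 *      }
 */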

/** Returns a reference to the object named by the handle. */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
                      u32 handle)
{
        struct drm_gem_object *obj;

        spin_lock(&filp->table_lock);

        /* Check if we currently have a reference on the object */
        obj = idr_find(&filp->object_idr, handle);
        if (obj == NULL) {
                spin_unlock(&filp->table_lock);
                return NULL;
        }

        drm_gem_object_reference(obj);

        spin_unlock(&filp->table_lock);

        return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);
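
/*
 * Example: the usual pattern in a driver ioctl is to take a reference via
 * drm_gem_object_lookup(), operate on the object, then drop that reference.
 * A sketch with hypothetical "foo" names (drm_foo_busy and foo_bo_is_busy
 * are illustrative, not part of any real uapi):
 *
 *      int foo_gem_busy_ioctl(struct drm_device *dev, void *data,
 *                             struct drm_file *file_priv)
 *      {
 *              struct drm_foo_busy *args = data;
 *              struct drm_gem_object *obj;
 *
 *              obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 *              if (obj == NULL)
 *                      return -ENOENT;
 *
 *              args->busy = foo_bo_is_busy(obj);
 *              drm_gem_object_unreference_unlocked(obj);
 *              return 0;
 *      }
 */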

/**
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        struct drm_gem_close *args = data;

        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;

        return drm_gem_handle_delete(file_priv, args->handle);
}

/**
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        struct drm_gem_flink *args = data;
        struct drm_gem_object *obj;
        int ret;

        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
                return -ENOENT;

        idr_preload(GFP_KERNEL);
        spin_lock(&dev->object_name_lock);
        if (!obj->name) {
                ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
                if (ret < 0)
                        goto err;

                obj->name = ret;

                /* Allocate a reference for the name table.  */
                drm_gem_object_reference(obj);
        }

        args->name = (uint64_t) obj->name;
        ret = 0;

err:
        spin_unlock(&dev->object_name_lock);
        idr_preload_end();
        drm_gem_object_unreference_unlocked(obj);
        return ret;
}

/**
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
                   struct drm_file *file_priv)
{
        struct drm_gem_open *args = data;
        struct drm_gem_object *obj;
        int ret;
        u32 handle;

        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;

        spin_lock(&dev->object_name_lock);
        obj = idr_find(&dev->object_name_idr, (int) args->name);
        if (obj)
                drm_gem_object_reference(obj);
        spin_unlock(&dev->object_name_lock);
        if (!obj)
                return -ENOENT;

        ret = drm_gem_handle_create(file_priv, obj, &handle);
        drm_gem_object_unreference_unlocked(obj);
        if (ret)
                return ret;

        args->handle = handle;
        args->size = obj->size;

        return 0;
}
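
/*
 * Example: the flink/open pair as seen from userspace.  Process A names a
 * buffer, passes the 32-bit name to process B out of band, and B turns it
 * back into a handle of its own (a sketch using the uapi in <drm/drm.h>;
 * error handling omitted):
 *
 *      struct drm_gem_flink flink = { .handle = handle };
 *      ioctl(fd_a, DRM_IOCTL_GEM_FLINK, &flink);
 *      // ... send flink.name to process B ...
 *
 *      struct drm_gem_open open_args = { .name = name_from_a };
 *      ioctl(fd_b, DRM_IOCTL_GEM_OPEN, &open_args);
 *      // open_args.handle and open_args.size are now valid in process B.
 */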

/**
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
        idr_init(&file_private->object_idr);
        spin_lock_init(&file_private->table_lock);
}

/**
 * Called once per open handle at device close time, to drop the file's
 * handle reference on each object.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
        struct drm_file *file_priv = data;
        struct drm_gem_object *obj = ptr;
        struct drm_device *dev = obj->dev;

        drm_gem_remove_prime_handles(obj, file_priv);

        if (dev->driver->gem_close_object)
                dev->driver->gem_close_object(obj, file_priv);

        drm_gem_object_handle_unreference_unlocked(obj);

        return 0;
}

/**
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
        idr_for_each(&file_private->object_idr,
                     &drm_gem_object_release_handle, file_private);
        idr_destroy(&file_private->object_idr);
}

void
drm_gem_object_release(struct drm_gem_object *obj)
{
        if (obj->filp)
                fput(obj->filp);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * Called after the last reference to the object has been lost.
 * Must be called holding struct_mutex.
 *
 * Frees the object
 */
void
drm_gem_object_free(struct kref *kref)
{
        struct drm_gem_object *obj =
                container_of(kref, struct drm_gem_object, refcount);
        struct drm_device *dev = obj->dev;

        BUG_ON(!mutex_is_locked(&dev->struct_mutex));

        if (dev->driver->gem_free_object != NULL)
                dev->driver->gem_free_object(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);

static void drm_gem_object_ref_bug(struct kref *list_kref)
{
        BUG();
}

/**
 * Called after the last handle to the object has been closed
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory
 */
void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;

        /* Remove any name for this object */
        spin_lock(&dev->object_name_lock);
        if (obj->name) {
                idr_remove(&dev->object_name_idr, obj->name);
                obj->name = 0;
                spin_unlock(&dev->object_name_lock);
                /*
                 * The object name held a reference to this object, drop
                 * that now.
                 *
                 * This cannot be the last reference, since the handle
                 * holds one too.
                 */
                kref_put(&obj->refcount, drm_gem_object_ref_bug);
        } else
                spin_unlock(&dev->object_name_lock);
}
EXPORT_SYMBOL(drm_gem_object_handle_free);

void drm_gem_vm_open(struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = vma->vm_private_data;

        drm_gem_object_reference(obj);

        mutex_lock(&obj->dev->struct_mutex);
        drm_vm_open_locked(obj->dev, vma);
        mutex_unlock(&obj->dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_open);

void drm_gem_vm_close(struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = vma->vm_private_data;
        struct drm_device *dev = obj->dev;

        mutex_lock(&dev->struct_mutex);
        drm_vm_close_locked(dev, vma);
        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_close);
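
/*
 * Example: drivers hook these helpers into their gem_vm_ops alongside a
 * driver fault handler, so every mapping holds a reference on the object
 * for its whole lifetime.  A sketch with a hypothetical fault handler:
 *
 *      static const struct vm_operations_struct foo_gem_vm_ops = {
 *              .fault = foo_gem_fault,         // driver-specific
 *              .open = drm_gem_vm_open,
 *              .close = drm_gem_vm_close,
 *      };
 *
 * and in the driver structure:
 *
 *      .gem_vm_ops = &foo_gem_vm_ops,
 */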

/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
 * provided by the driver. Depending on their requirements, drivers can either
 * provide a fault handler in their gem_vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation, when
 * the GEM object is not looked up based on its fake offset. To implement the
 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * NOTE: This function has to be protected with dev->struct_mutex
 *
 * Return 0 on success or -EINVAL if the object size is smaller than the VMA
 * size, or if no gem_vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
                     struct vm_area_struct *vma)
{
        struct drm_device *dev = obj->dev;

        lockdep_assert_held(&dev->struct_mutex);

        /* Check for valid size. */
        if (obj_size < vma->vm_end - vma->vm_start)
                return -EINVAL;

        if (!dev->driver->gem_vm_ops)
                return -EINVAL;

        vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_ops = dev->driver->gem_vm_ops;
        vma->vm_private_data = obj;
        vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

        /* Take a ref for this mapping of the object, so that the fault
         * handler can dereference the mmap offset's pointer to the object.
         * This reference is cleaned up by the corresponding vm_close
         * (which should happen whether the vma was created by this call, or
         * by a vm_open due to mremap or partial unmap or whatever).
         */
        drm_gem_object_reference(obj);

        drm_vm_open_locked(dev, vma);
        return 0;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);
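
/*
 * Example: a dma-buf exporter's mmap hook is the intended caller outside of
 * drm_gem_mmap().  A minimal sketch, assuming the exporter stored the GEM
 * object in dma_buf->priv (the "foo" name is hypothetical):
 *
 *      static int foo_gem_dmabuf_mmap(struct dma_buf *dma_buf,
 *                                     struct vm_area_struct *vma)
 *      {
 *              struct drm_gem_object *obj = dma_buf->priv;
 *              struct drm_device *dev = obj->dev;
 *              int ret;
 *
 *              mutex_lock(&dev->struct_mutex);
 *              ret = drm_gem_mmap_obj(obj, obj->size, vma);
 *              mutex_unlock(&dev->struct_mutex);
 *              return ret;
 *      }
 */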

/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_gem_mm *mm = dev->mm_private;
        struct drm_local_map *map = NULL;
        struct drm_hash_item *hash;
        int ret = 0;

        if (drm_device_is_unplugged(dev))
                return -ENODEV;

        mutex_lock(&dev->struct_mutex);

        if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
                mutex_unlock(&dev->struct_mutex);
                return drm_mmap(filp, vma);
        }

        map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
        if (!map ||
            ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
                ret = -EPERM;
                goto out_unlock;
        }

        ret = drm_gem_mmap_obj(map->handle, map->size, vma);

out_unlock:
        mutex_unlock(&dev->struct_mutex);

        return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);
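
/*
 * Example: drivers reach this routine by pointing their file_operations
 * .mmap hook at it.  A sketch of the usual wiring (the "foo" name is
 * hypothetical):
 *
 *      static const struct file_operations foo_driver_fops = {
 *              .owner = THIS_MODULE,
 *              .open = drm_open,
 *              .release = drm_release,
 *              .unlocked_ioctl = drm_ioctl,
 *              .mmap = drm_gem_mmap,
 *              .poll = drm_poll,
 *              .read = drm_read,
 *      };
 */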