linux/drivers/gpu/drm/drm_gem.c
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/drm_gem.h>
#include "drm_internal.h"

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the cpu, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls.  However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file.  However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls.  The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

/*
 * We make up offsets for buffer objects so we can recognize them at
 * mmap time.
 */

/* pgoff in mmap is an unsigned long, so we need to make sure that
 * the faked up offset will fit
 */

#if BITS_PER_LONG == 64
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
#else
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#endif
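
/*
 * Worked example (informative only): with 4 KiB pages (PAGE_SHIFT == 12) on
 * a 64-bit kernel, DRM_FILE_PAGE_OFFSET_START is (0xFFFFFFFF >> 12) + 1 =
 * 0x100000 pages, i.e. a byte offset of 4 GiB, just above anything a 32-bit
 * pgoff could produce, and DRM_FILE_PAGE_OFFSET_SIZE spans 16 times that
 * many pages (roughly 64 GiB of offset space). That is how a faked-up GEM
 * offset can be told apart from an ordinary file offset at mmap time.
 */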

/**
 * drm_gem_init - Initialize the GEM device fields
 * @dev: drm_device structure to initialize
 */
int
drm_gem_init(struct drm_device *dev)
{
	struct drm_vma_offset_manager *vma_offset_manager;

	mutex_init(&dev->object_name_lock);
	idr_init(&dev->object_name_idr);

	vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
	if (!vma_offset_manager) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->vma_offset_manager = vma_offset_manager;
	drm_vma_offset_manager_init(vma_offset_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);

	return 0;
}

void
drm_gem_destroy(struct drm_device *dev)
{

	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
	kfree(dev->vma_offset_manager);
	dev->vma_offset_manager = NULL;
}

/**
 * drm_gem_object_init - initialize an allocated shmem-backed GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	struct file *filp;

	drm_gem_private_object_init(dev, obj, size);

	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);
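
/*
 * Example: a minimal sketch of how a driver might allocate a shmem-backed
 * object with drm_gem_object_init(). "struct foo_bo" and foo_bo_create()
 * are hypothetical driver names, not part of this file; size is aligned
 * because drm_gem_private_object_init() BUG_ONs on unaligned sizes:
 *
 *	struct foo_bo {
 *		struct drm_gem_object base;
 *		struct page **pages;
 *	};
 *
 *	static struct foo_bo *foo_bo_create(struct drm_device *dev,
 *					    size_t size)
 *	{
 *		struct foo_bo *bo;
 *		int ret;
 *
 *		bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *		if (!bo)
 *			return ERR_PTR(-ENOMEM);
 *
 *		ret = drm_gem_object_init(dev, &bo->base, PAGE_ALIGN(size));
 *		if (ret) {
 *			kfree(bo);
 *			return ERR_PTR(ret);
 *		}
 *		return bo;
 *	}
 */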

/**
 * drm_gem_private_object_init - initialize an allocated private GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = NULL;

	kref_init(&obj->refcount);
	obj->handle_count = 0;
	obj->size = size;
	drm_vma_node_reset(&obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);

static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
	/*
	 * Note: obj->dma_buf can't disappear as long as we still hold a
	 * handle reference in obj->handle_count.
	 */
	mutex_lock(&filp->prime.lock);
	if (obj->dma_buf) {
		drm_prime_remove_buf_handle_locked(&filp->prime,
						   obj->dma_buf);
	}
	mutex_unlock(&filp->prime.lock);
}

/**
 * drm_gem_object_handle_free - release resources bound to userspace handles
 * @obj: GEM object to clean up.
 *
 * Called after the last handle to the object has been closed
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
	}
}

static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
	/* Unbreak the reference cycle if we have an exported dma_buf. */
	if (obj->dma_buf) {
		dma_buf_put(obj->dma_buf);
		obj->dma_buf = NULL;
	}
}

static void
drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
{
	if (WARN_ON(obj->handle_count == 0))
		return;

	/*
	 * Must bump handle count first as this may be the last
	 * ref, in which case the object would disappear before we
	 * checked for a name
	 */

	mutex_lock(&obj->dev->object_name_lock);
	if (--obj->handle_count == 0) {
		drm_gem_object_handle_free(obj);
		drm_gem_object_exported_dma_buf_free(obj);
	}
	mutex_unlock(&obj->dev->object_name_lock);

	drm_gem_object_unreference_unlocked(obj);
}

/**
 * drm_gem_handle_delete - deletes the given file-private handle
 * @filp: drm file-private structure to use for the handle look up
 * @handle: userspace handle to delete
 *
 * Removes the GEM handle from the @filp lookup table and if this is the last
 * handle also cleans up linked resources like GEM names.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_device *dev;
	struct drm_gem_object *obj;

	/* This is gross. The idr system doesn't let us try a delete and
	 * return an error code.  It just spews if you fail at deleting.
	 * So, we have to grab a lock around finding the object and then
	 * doing the delete on it and dropping the refcount, or the user
	 * could race us to double-decrement the refcount and cause a
	 * use-after-free later.  Given the frequency of our handle lookups,
	 * we may want to use ida for number allocation and a hash table
	 * for the pointers, anyway.
	 */
	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return -EINVAL;
	}
	dev = obj->dev;

	/* Release reference and decrement refcount. */
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_gem_remove_prime_handles(obj, filp);
	drm_vma_node_revoke(&obj->vma_node, filp->filp);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, filp);
	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
 * @file: drm file-private structure to remove the dumb handle from
 * @dev: corresponding drm_device
 * @handle: the dumb handle to remove
 *
 * This implements the ->dumb_destroy kms driver callback for drivers which use
 * gem to manage their backing storage.
 */
int drm_gem_dumb_destroy(struct drm_file *file,
			 struct drm_device *dev,
			 uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}
EXPORT_SYMBOL(drm_gem_dumb_destroy);

/**
 * drm_gem_handle_create_tail - internal function to create a handle
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * This expects the dev->object_name_lock to be held already and will drop it
 * before returning. Used to avoid races in establishing new handles when
 * importing an object from either a flink name or a dma-buf.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
			   struct drm_gem_object *obj,
			   u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->object_name_lock));

	/*
	 * Get the user-visible handle using idr.  Preload and perform
	 * allocation under our spinlock.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->table_lock);

	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
	drm_gem_object_reference(obj);
	obj->handle_count++;
	spin_unlock(&file_priv->table_lock);
	idr_preload_end();
	mutex_unlock(&dev->object_name_lock);
	if (ret < 0) {
		drm_gem_object_handle_unreference_unlocked(obj);
		return ret;
	}
	*handlep = ret;

	ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp);
	if (ret) {
		drm_gem_handle_delete(file_priv, *handlep);
		return ret;
	}

	if (dev->driver->gem_open_object) {
		ret = dev->driver->gem_open_object(obj, file_priv);
		if (ret) {
			drm_gem_handle_delete(file_priv, *handlep);
			return ret;
		}
	}

	return 0;
}

/**
 * drm_gem_handle_create - create a gem handle for an object
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * Create a handle for this object. This adds a handle reference
 * to the object, which includes a regular reference count. Callers
 * will likely want to dereference the object afterwards.
 */
int drm_gem_handle_create(struct drm_file *file_priv,
			  struct drm_gem_object *obj,
			  u32 *handlep)
{
	mutex_lock(&obj->dev->object_name_lock);

	return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);
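
/*
 * Example: a minimal sketch of the usual handle-creation pattern in a
 * driver's buffer-create ioctl. Because the new handle takes its own
 * reference, the caller drops its local reference once the handle exists.
 * foo_create_ioctl, its args struct, and foo_bo_create() (sketched above)
 * are hypothetical:
 *
 *	static int foo_create_ioctl(struct drm_device *dev, void *data,
 *				    struct drm_file *file_priv)
 *	{
 *		struct foo_create *args = data;
 *		struct foo_bo *bo;
 *		u32 handle;
 *		int ret;
 *
 *		bo = foo_bo_create(dev, args->size);
 *		if (IS_ERR(bo))
 *			return PTR_ERR(bo);
 *
 *		ret = drm_gem_handle_create(file_priv, &bo->base, &handle);
 *		drm_gem_object_unreference_unlocked(&bo->base);
 *		if (ret)
 *			return ret;
 *
 *		args->handle = handle;
 *		return 0;
 *	}
 */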

/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (ie. obj->size).  Otherwise
 * just use drm_gem_create_mmap_offset().
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
	struct drm_device *dev = obj->dev;

	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
				  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
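
/*
 * Example: a minimal sketch of a driver's ->dumb_map_offset callback built
 * on drm_gem_create_mmap_offset(). drm_vma_node_offset_addr() (from
 * drm_vma_manager.h) converts the allocated node into the byte offset that
 * userspace passes to mmap(2); foo_dumb_map_offset is a hypothetical name:
 *
 *	static int foo_dumb_map_offset(struct drm_file *file_priv,
 *				       struct drm_device *dev,
 *				       uint32_t handle, uint64_t *offset)
 *	{
 *		struct drm_gem_object *obj;
 *		int ret;
 *
 *		obj = drm_gem_object_lookup(dev, file_priv, handle);
 *		if (!obj)
 *			return -ENOENT;
 *
 *		ret = drm_gem_create_mmap_offset(obj);
 *		if (ret == 0)
 *			*offset = drm_vma_node_offset_addr(&obj->vma_node);
 *
 *		drm_gem_object_unreference_unlocked(obj);
 *		return ret;
 *	}
 */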

/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 *
 * This reads the page-array of the shmem-backing storage of the given gem
 * object. An array of pages is returned. If a page is not allocated or
 * swapped-out, this will allocate/swap-in the required pages. Note that the
 * whole object is covered by the page-array and pinned in memory.
 *
 * Use drm_gem_put_pages() to release the array and unpin all pages.
 *
 * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
 * If you require other GFP-masks, you have to do those allocations yourself.
 *
 * Note that you are not allowed to change gfp-zones during runtime. That is,
 * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
 * set during initialization. If you have special zone constraints, set them
 * after drm_gem_init_object() via mapping_set_gfp_mask(). shmem-core takes care
 * to keep pages in the required zone during swap-in.
 */
struct page **drm_gem_get_pages(struct drm_gem_object *obj)
{
	struct address_space *mapping;
	struct page *p, **pages;
	int i, npages;

	/* This is the shared memory object that backs the GEM resource */
	mapping = file_inode(obj->filp)->i_mapping;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (pages == NULL)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < npages; i++) {
		p = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(p))
			goto fail;
		pages[i] = p;

		/* Make sure shmem keeps __GFP_DMA32 allocated pages in the
		 * correct region during swapin. Note that this requires
		 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
		 * so shmem can relocate pages during swapin if required.
		 */
		BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
				(page_to_pfn(p) >= 0x00100000UL));
	}

	return pages;

fail:
	while (i--)
		page_cache_release(pages[i]);

	drm_free_large(pages);
	return ERR_CAST(p);
}
EXPORT_SYMBOL(drm_gem_get_pages);

/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		bool dirty, bool accessed)
{
	int i, npages;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	for (i = 0; i < npages; i++) {
		if (dirty)
			set_page_dirty(pages[i]);

		if (accessed)
			mark_page_accessed(pages[i]);

		/* Undo the reference we took when populating the table */
		page_cache_release(pages[i]);
	}

	drm_free_large(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);
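
/*
 * Example: a minimal sketch of the pin/unpin pattern around
 * drm_gem_get_pages(). The foo_bo struct (with its pages field) and the
 * foo_bo_pin()/foo_bo_unpin() names are hypothetical; the point is that
 * every successful drm_gem_get_pages() must be balanced by
 * drm_gem_put_pages():
 *
 *	static int foo_bo_pin(struct foo_bo *bo)
 *	{
 *		struct page **pages;
 *
 *		pages = drm_gem_get_pages(&bo->base);
 *		if (IS_ERR(pages))
 *			return PTR_ERR(pages);
 *
 *		bo->pages = pages;
 *		return 0;
 *	}
 *
 *	static void foo_bo_unpin(struct foo_bo *bo)
 *	{
 *		drm_gem_put_pages(&bo->base, bo->pages, true, false);
 *		bo->pages = NULL;
 *	}
 */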

/** Returns a reference to the object named by the handle. */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
		      u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return NULL;
	}

	drm_gem_object_reference(obj);

	spin_unlock(&filp->table_lock);

	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);
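
/*
 * Example: a minimal lookup/use/unreference sketch for an ioctl handler.
 * The reference returned by drm_gem_object_lookup() must be dropped when
 * done; foo_op_ioctl, its args struct, and foo_do_something() are
 * hypothetical:
 *
 *	static int foo_op_ioctl(struct drm_device *dev, void *data,
 *				struct drm_file *file_priv)
 *	{
 *		struct foo_op *args = data;
 *		struct drm_gem_object *obj;
 *		int ret;
 *
 *		obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 *		if (!obj)
 *			return -ENOENT;
 *
 *		ret = foo_do_something(obj);
 *
 *		drm_gem_object_unreference_unlocked(obj);
 *		return ret;
 *	}
 */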

/**
 * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -ENODEV;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}

/**
 * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	mutex_lock(&dev->object_name_lock);
	idr_preload(GFP_KERNEL);
	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		ret = -ENOENT;
		goto err;
	}

	if (!obj->name) {
		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
		if (ret < 0)
			goto err;

		obj->name = ret;
	}

	args->name = (uint64_t) obj->name;
	ret = 0;

err:
	idr_preload_end();
	mutex_unlock(&dev->object_name_lock);
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}

/**
 * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -ENODEV;

	mutex_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj) {
		drm_gem_object_reference(obj);
	} else {
		mutex_unlock(&dev->object_name_lock);
		return -ENOENT;
	}

	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		return ret;

	args->handle = handle;
	args->size = obj->size;

	return 0;
}

/**
 * drm_gem_open - initializes GEM file-private structures at devnode open time
 * @dev: drm_device which is being opened by userspace
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init(&file_private->object_idr);
	spin_lock_init(&file_private->table_lock);
}

/*
 * Called at device close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;
	struct drm_device *dev = obj->dev;

	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_gem_remove_prime_handles(obj, file_priv);
	drm_vma_node_revoke(&obj->vma_node, file_priv->filp);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, file_priv);

	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}

/**
 * drm_gem_release - release file-private GEM resources
 * @dev: drm_device which is being closed by userspace
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);
	idr_destroy(&file_private->object_idr);
}

void
drm_gem_object_release(struct drm_gem_object *obj)
{
	WARN_ON(obj->dma_buf);

	if (obj->filp)
		fput(obj->filp);

	drm_gem_free_mmap_offset(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * drm_gem_object_free - free a GEM object
 * @kref: kref of the object to free
 *
 * Called after the last reference to the object has been lost.
 * Must be called holding struct_mutex.
 *
 * Frees the object
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj =
		container_of(kref, struct drm_gem_object, refcount);
	struct drm_device *dev = obj->dev;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	if (dev->driver->gem_free_object != NULL)
		dev->driver->gem_free_object(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);
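
/*
 * Example: a minimal sketch of a driver's ->gem_free_object callback. It
 * releases the core GEM state (shmem file, mmap offset) with
 * drm_gem_object_release() and then frees the driver wrapper;
 * foo_gem_free_object and struct foo_bo are hypothetical:
 *
 *	static void foo_gem_free_object(struct drm_gem_object *obj)
 *	{
 *		struct foo_bo *bo = container_of(obj, struct foo_bo, base);
 *
 *		drm_gem_object_release(obj);
 *		kfree(bo);
 *	}
 */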

void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_reference(obj);
}
EXPORT_SYMBOL(drm_gem_vm_open);

void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_unreference_unlocked(obj);
}
EXPORT_SYMBOL(drm_gem_vm_close);
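
/*
 * Example: how a driver typically wires these helpers into its gem_vm_ops,
 * so that VMA duplication (fork, mremap, partial unmap) and teardown keep
 * the object refcount balanced. foo_gem_fault is a hypothetical driver
 * fault handler:
 *
 *	static const struct vm_operations_struct foo_gem_vm_ops = {
 *		.fault = foo_gem_fault,
 *		.open = drm_gem_vm_open,
 *		.close = drm_gem_vm_close,
 *	};
 */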

/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
 * provided by the driver. Depending on their requirements, drivers can either
 * provide a fault handler in their gem_vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation, when
 * the GEM object is not looked up based on its fake offset. To implement the
 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
 * callers must verify access restrictions before calling this helper.
 *
 * Return 0 on success or -EINVAL if the object size is smaller than the VMA
 * size, or if no gem_vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma)
{
	struct drm_device *dev = obj->dev;

	/* Check for valid size. */
	if (obj_size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!dev->driver->gem_vm_ops)
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = dev->driver->gem_vm_ops;
	vma->vm_private_data = obj;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_reference(obj);

	return 0;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);
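
/*
 * Example: a minimal sketch of a dma-buf ->mmap implementation built on
 * drm_gem_mmap_obj(), for the case where the object is reached through the
 * dma-buf rather than its fake offset. It assumes the exporter stored the
 * GEM object in dma_buf->priv (as the DRM PRIME helpers do) and that access
 * checks are the caller's responsibility; foo_gem_dmabuf_mmap is a
 * hypothetical name:
 *
 *	static int foo_gem_dmabuf_mmap(struct dma_buf *dma_buf,
 *				       struct vm_area_struct *vma)
 *	{
 *		struct drm_gem_object *obj = dma_buf->priv;
 *
 *		return drm_gem_mmap_obj(obj, obj->size, vma);
 *	}
 */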

/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will fail
 * with EACCES. Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_object *obj = NULL;
	struct drm_vma_offset_node *node;
	int ret;

	if (drm_device_is_unplugged(dev))
		return -ENODEV;

	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  vma->vm_pgoff,
						  vma_pages(vma));
	if (likely(node)) {
		obj = container_of(node, struct drm_gem_object, vma_node);
		/*
		 * When the object is being freed, after it hits 0-refcnt it
		 * proceeds to tear down the object. In the process it will
		 * attempt to remove the VMA offset and so acquire this
		 * mgr->vm_lock.  Therefore if we find an object with a 0-refcnt
		 * that matches our range, we know it is in the process of being
		 * destroyed and will be freed as soon as we release the lock -
		 * so we have to check for the 0-refcnted object and treat it as
		 * invalid.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj)
		return -EINVAL;

	if (!drm_vma_node_is_allowed(node, filp)) {
		drm_gem_object_unreference_unlocked(obj);
		return -EACCES;
	}

	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
			       vma);

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);
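
/*
 * Example: how drm_gem_mmap() is typically hooked up in a driver's
 * file_operations, so that userspace mmap(2) calls carrying a fake offset
 * are routed through the lookup above. The surrounding handlers are the
 * usual DRM core entry points; foo_driver_fops is a hypothetical name:
 *
 *	static const struct file_operations foo_driver_fops = {
 *		.owner = THIS_MODULE,
 *		.open = drm_open,
 *		.release = drm_release,
 *		.unlocked_ioctl = drm_ioctl,
 *		.mmap = drm_gem_mmap,
 *		.poll = drm_poll,
 *		.read = drm_read,
 *	};
 */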