linux/drivers/gpu/drm/drm_file.c
   1/*
   2 * \author Rickard E. (Rik) Faith <faith@valinux.com>
   3 * \author Daryll Strauss <daryll@valinux.com>
   4 * \author Gareth Hughes <gareth@valinux.com>
   5 */
   6
   7/*
   8 * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
   9 *
  10 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
  11 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
  12 * All Rights Reserved.
  13 *
  14 * Permission is hereby granted, free of charge, to any person obtaining a
  15 * copy of this software and associated documentation files (the "Software"),
  16 * to deal in the Software without restriction, including without limitation
  17 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  18 * and/or sell copies of the Software, and to permit persons to whom the
  19 * Software is furnished to do so, subject to the following conditions:
  20 *
  21 * The above copyright notice and this permission notice (including the next
  22 * paragraph) shall be included in all copies or substantial portions of the
  23 * Software.
  24 *
  25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  26 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  27 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  28 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
  29 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  30 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  31 * OTHER DEALINGS IN THE SOFTWARE.
  32 */
  33
  34#include <linux/anon_inodes.h>
  35#include <linux/dma-fence.h>
  36#include <linux/file.h>
  37#include <linux/module.h>
  38#include <linux/pci.h>
  39#include <linux/poll.h>
  40#include <linux/slab.h>
  41
  42#include <drm/drm_client.h>
  43#include <drm/drm_drv.h>
  44#include <drm/drm_file.h>
  45#include <drm/drm_print.h>
  46
  47#include "drm_crtc_internal.h"
  48#include "drm_internal.h"
  49#include "drm_legacy.h"
  50
  51#if defined(CONFIG_MMU) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
  52#include <uapi/asm/mman.h>
  53#include <drm/drm_vma_manager.h>
  54#endif
  55
  56/* from BKL pushdown */
  57DEFINE_MUTEX(drm_global_mutex);
  58
  59bool drm_dev_needs_global_mutex(struct drm_device *dev)
  60{
  61        /*
  62         * Legacy drivers rely on all kinds of BKL locking semantics, don't
  63         * bother. They also still need BKL locking for their ioctls, so better
  64         * safe than sorry.
  65         */
  66        if (drm_core_check_feature(dev, DRIVER_LEGACY))
  67                return true;
  68
  69        /*
  70         * The deprecated ->load callback must be called after the driver is
  71         * already registered. This means such drivers rely on the BKL to make
  72         * sure an open can't proceed until the driver is actually fully set up.
  73         * Similar hilarity holds for the unload callback.
  74         */
  75        if (dev->driver->load || dev->driver->unload)
  76                return true;
  77
  78        /*
  79         * Drivers with the lastclose callback assume that it's synchronized
  80         * against concurrent opens, which again needs the BKL. The proper fix
  81         * is to use the drm_client infrastructure with proper locking for each
  82         * client.
  83         */
  84        if (dev->driver->lastclose)
  85                return true;
  86
  87        return false;
  88}
  89
  90/**
  91 * DOC: file operations
  92 *
  93 * Drivers must define the file operations structure that forms the DRM
  94 * userspace API entry point, even though most of those operations are
  95 * implemented in the DRM core. The resulting &struct file_operations must be
  96 * stored in the &drm_driver.fops field. The mandatory functions are drm_open(),
   97 * drm_read(), drm_ioctl() and drm_compat_ioctl() if CONFIG_COMPAT is enabled.
   98 * Note that drm_compat_ioctl() will be NULL if CONFIG_COMPAT=n, so there's no
  99 * need to sprinkle #ifdef into the code. Drivers which implement private ioctls
 100 * that require 32/64 bit compatibility support must provide their own
 101 * &file_operations.compat_ioctl handler that processes private ioctls and calls
 102 * drm_compat_ioctl() for core ioctls.
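 *
 * A possible shape for such a handler, sketched here with a hypothetical
 * example_handle_compat_driver_ioctl() helper, is::
 *
 *     static long example_compat_ioctl(struct file *filp, unsigned int cmd,
 *                                      unsigned long arg)
 *     {
 *             // Driver-private ioctl numbers start at DRM_COMMAND_BASE.
 *             if (_IOC_NR(cmd) >= DRM_COMMAND_BASE)
 *                     return example_handle_compat_driver_ioctl(filp, cmd, arg);
 *
 *             return drm_compat_ioctl(filp, cmd, arg);
 *     }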
 103 *
 104 * In addition drm_read() and drm_poll() provide support for DRM events. DRM
 105 * events are a generic and extensible means to send asynchronous events to
  106 * userspace through the file descriptor. The KMS API uses them to send vblank
  107 * and page flip completion events, but drivers can also use them for their
  108 * own needs, e.g. to signal completion of rendering.
 109 *
 110 * For the driver-side event interface see drm_event_reserve_init() and
 111 * drm_send_event() as the main starting points.
 112 *
 113 * The memory mapping implementation will vary depending on how the driver
 114 * manages memory. Legacy drivers will use the deprecated drm_legacy_mmap()
  115 * function; modern drivers should use one of the provided memory-manager
 116 * specific implementations. For GEM-based drivers this is drm_gem_mmap().
 117 *
 118 * No other file operations are supported by the DRM userspace API. Overall the
 119 * following is an example &file_operations structure::
 120 *
  121 *     static const struct file_operations example_drm_fops = {
 122 *             .owner = THIS_MODULE,
 123 *             .open = drm_open,
 124 *             .release = drm_release,
 125 *             .unlocked_ioctl = drm_ioctl,
 126 *             .compat_ioctl = drm_compat_ioctl, // NULL if CONFIG_COMPAT=n
 127 *             .poll = drm_poll,
 128 *             .read = drm_read,
 129 *             .llseek = no_llseek,
 130 *             .mmap = drm_gem_mmap,
 131 *     };
 132 *
 133 * For plain GEM based drivers there is the DEFINE_DRM_GEM_FOPS() macro, and for
 134 * CMA based drivers there is the DEFINE_DRM_GEM_CMA_FOPS() macro to make this
 135 * simpler.
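 *
 * With DEFINE_DRM_GEM_FOPS() the example above shrinks to a one-liner plus the
 * &drm_driver hook-up (a sketch, the driver name is made up)::
 *
 *     DEFINE_DRM_GEM_FOPS(example_drm_fops);
 *
 *     static const struct drm_driver example_driver = {
 *             ...
 *             .fops = &example_drm_fops,
 *     };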
 136 *
 137 * The driver's &file_operations must be stored in &drm_driver.fops.
 138 *
 139 * For driver-private IOCTL handling see the more detailed discussion in
 140 * :ref:`IOCTL support in the userland interfaces chapter<drm_driver_ioctl>`.
 141 */
 142
 143/**
 144 * drm_file_alloc - allocate file context
 145 * @minor: minor to allocate on
 146 *
 147 * This allocates a new DRM file context. It is not linked into any context and
 148 * can be used by the caller freely. Note that the context keeps a pointer to
 149 * @minor, so it must be freed before @minor is.
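 *
 * A minimal in-kernel user could look roughly like this (a sketch, error
 * handling trimmed)::
 *
 *     struct drm_file *file = drm_file_alloc(dev->primary);
 *
 *     if (IS_ERR(file))
 *             return PTR_ERR(file);
 *     // ... use the file context ...
 *     drm_file_free(file);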
 150 *
 151 * RETURNS:
 152 * Pointer to newly allocated context, ERR_PTR on failure.
 153 */
 154struct drm_file *drm_file_alloc(struct drm_minor *minor)
 155{
 156        struct drm_device *dev = minor->dev;
 157        struct drm_file *file;
 158        int ret;
 159
 160        file = kzalloc(sizeof(*file), GFP_KERNEL);
 161        if (!file)
 162                return ERR_PTR(-ENOMEM);
 163
 164        file->pid = get_pid(task_pid(current));
 165        file->minor = minor;
 166
 167        /* for compatibility root is always authenticated */
 168        file->authenticated = capable(CAP_SYS_ADMIN);
 169
 170        INIT_LIST_HEAD(&file->lhead);
 171        INIT_LIST_HEAD(&file->fbs);
 172        mutex_init(&file->fbs_lock);
 173        INIT_LIST_HEAD(&file->blobs);
 174        INIT_LIST_HEAD(&file->pending_event_list);
 175        INIT_LIST_HEAD(&file->event_list);
 176        init_waitqueue_head(&file->event_wait);
 177        file->event_space = 4096; /* set aside 4k for event buffer */
 178
 179        spin_lock_init(&file->master_lookup_lock);
 180        mutex_init(&file->event_read_lock);
 181
 182        if (drm_core_check_feature(dev, DRIVER_GEM))
 183                drm_gem_open(dev, file);
 184
 185        if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
 186                drm_syncobj_open(file);
 187
 188        drm_prime_init_file_private(&file->prime);
 189
 190        if (dev->driver->open) {
 191                ret = dev->driver->open(dev, file);
 192                if (ret < 0)
 193                        goto out_prime_destroy;
 194        }
 195
 196        return file;
 197
 198out_prime_destroy:
 199        drm_prime_destroy_file_private(&file->prime);
 200        if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
 201                drm_syncobj_release(file);
 202        if (drm_core_check_feature(dev, DRIVER_GEM))
 203                drm_gem_release(dev, file);
 204        put_pid(file->pid);
 205        kfree(file);
 206
 207        return ERR_PTR(ret);
 208}
 209
 210static void drm_events_release(struct drm_file *file_priv)
 211{
 212        struct drm_device *dev = file_priv->minor->dev;
 213        struct drm_pending_event *e, *et;
 214        unsigned long flags;
 215
 216        spin_lock_irqsave(&dev->event_lock, flags);
 217
 218        /* Unlink pending events */
 219        list_for_each_entry_safe(e, et, &file_priv->pending_event_list,
 220                                 pending_link) {
 221                list_del(&e->pending_link);
 222                e->file_priv = NULL;
 223        }
 224
 225        /* Remove unconsumed events */
 226        list_for_each_entry_safe(e, et, &file_priv->event_list, link) {
 227                list_del(&e->link);
 228                kfree(e);
 229        }
 230
 231        spin_unlock_irqrestore(&dev->event_lock, flags);
 232}
 233
 234/**
 235 * drm_file_free - free file context
 236 * @file: context to free, or NULL
 237 *
 238 * This destroys and deallocates a DRM file context previously allocated via
 239 * drm_file_alloc(). The caller must make sure to unlink it from any contexts
 240 * before calling this.
 241 *
 242 * If NULL is passed, this is a no-op.
 243 */
 244void drm_file_free(struct drm_file *file)
 245{
 246        struct drm_device *dev;
 247
 248        if (!file)
 249                return;
 250
 251        dev = file->minor->dev;
 252
 253        DRM_DEBUG("comm=\"%s\", pid=%d, dev=0x%lx, open_count=%d\n",
 254                  current->comm, task_pid_nr(current),
 255                  (long)old_encode_dev(file->minor->kdev->devt),
 256                  atomic_read(&dev->open_count));
 257
 258#ifdef CONFIG_DRM_LEGACY
 259        if (drm_core_check_feature(dev, DRIVER_LEGACY) &&
 260            dev->driver->preclose)
 261                dev->driver->preclose(dev, file);
 262#endif
 263
 264        if (drm_core_check_feature(dev, DRIVER_LEGACY))
 265                drm_legacy_lock_release(dev, file->filp);
 266
 267        if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
 268                drm_legacy_reclaim_buffers(dev, file);
 269
 270        drm_events_release(file);
 271
 272        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 273                drm_fb_release(file);
 274                drm_property_destroy_user_blobs(dev, file);
 275        }
 276
 277        if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
 278                drm_syncobj_release(file);
 279
 280        if (drm_core_check_feature(dev, DRIVER_GEM))
 281                drm_gem_release(dev, file);
 282
 283        drm_legacy_ctxbitmap_flush(dev, file);
 284
 285        if (drm_is_primary_client(file))
 286                drm_master_release(file);
 287
 288        if (dev->driver->postclose)
 289                dev->driver->postclose(dev, file);
 290
 291        drm_prime_destroy_file_private(&file->prime);
 292
 293        WARN_ON(!list_empty(&file->event_list));
 294
 295        put_pid(file->pid);
 296        kfree(file);
 297}
 298
 299static void drm_close_helper(struct file *filp)
 300{
 301        struct drm_file *file_priv = filp->private_data;
 302        struct drm_device *dev = file_priv->minor->dev;
 303
 304        mutex_lock(&dev->filelist_mutex);
 305        list_del(&file_priv->lhead);
 306        mutex_unlock(&dev->filelist_mutex);
 307
 308        drm_file_free(file_priv);
 309}
 310
 311/*
 312 * Check whether DRI will run on this CPU.
 313 *
 314 * \return non-zero if the DRI will run on this CPU, or zero otherwise.
 315 */
 316static int drm_cpu_valid(void)
 317{
 318#if defined(__sparc__) && !defined(__sparc_v9__)
 319        return 0;               /* No cmpxchg before v9 sparc. */
 320#endif
 321        return 1;
 322}
 323
 324/*
 325 * Called whenever a process opens a drm node
 326 *
 327 * \param filp file pointer.
 328 * \param minor acquired minor-object.
 329 * \return zero on success or a negative number on failure.
 330 *
  331 * Creates and initializes a drm_file structure for the file private data in \p
  332 * filp and adds it to the doubly linked list in \p dev.
 333 */
 334static int drm_open_helper(struct file *filp, struct drm_minor *minor)
 335{
 336        struct drm_device *dev = minor->dev;
 337        struct drm_file *priv;
 338        int ret;
 339
 340        if (filp->f_flags & O_EXCL)
 341                return -EBUSY;  /* No exclusive opens */
 342        if (!drm_cpu_valid())
 343                return -EINVAL;
 344        if (dev->switch_power_state != DRM_SWITCH_POWER_ON &&
 345            dev->switch_power_state != DRM_SWITCH_POWER_DYNAMIC_OFF)
 346                return -EINVAL;
 347
 348        DRM_DEBUG("comm=\"%s\", pid=%d, minor=%d\n", current->comm,
 349                  task_pid_nr(current), minor->index);
 350
 351        priv = drm_file_alloc(minor);
 352        if (IS_ERR(priv))
 353                return PTR_ERR(priv);
 354
 355        if (drm_is_primary_client(priv)) {
 356                ret = drm_master_open(priv);
 357                if (ret) {
 358                        drm_file_free(priv);
 359                        return ret;
 360                }
 361        }
 362
 363        filp->private_data = priv;
 364        filp->f_mode |= FMODE_UNSIGNED_OFFSET;
 365        priv->filp = filp;
 366
 367        mutex_lock(&dev->filelist_mutex);
 368        list_add(&priv->lhead, &dev->filelist);
 369        mutex_unlock(&dev->filelist_mutex);
 370
 371#ifdef CONFIG_DRM_LEGACY
 372#ifdef __alpha__
 373        /*
 374         * Default the hose
 375         */
 376        if (!dev->hose) {
 377                struct pci_dev *pci_dev;
 378
 379                pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL);
 380                if (pci_dev) {
 381                        dev->hose = pci_dev->sysdata;
 382                        pci_dev_put(pci_dev);
 383                }
 384                if (!dev->hose) {
 385                        struct pci_bus *b = list_entry(pci_root_buses.next,
 386                                struct pci_bus, node);
 387                        if (b)
 388                                dev->hose = b->sysdata;
 389                }
 390        }
 391#endif
 392#endif
 393
 394        return 0;
 395}
 396
 397/**
 398 * drm_open - open method for DRM file
 399 * @inode: device inode
 400 * @filp: file pointer.
 401 *
 402 * This function must be used by drivers as their &file_operations.open method.
 403 * It looks up the correct DRM device and instantiates all the per-file
 404 * resources for it. It also calls the &drm_driver.open driver callback.
 405 *
 406 * RETURNS:
 407 *
 408 * 0 on success or negative errno value on failure.
 409 */
 410int drm_open(struct inode *inode, struct file *filp)
 411{
 412        struct drm_device *dev;
 413        struct drm_minor *minor;
 414        int retcode;
 415        int need_setup = 0;
 416
 417        minor = drm_minor_acquire(iminor(inode));
 418        if (IS_ERR(minor))
 419                return PTR_ERR(minor);
 420
 421        dev = minor->dev;
 422        if (drm_dev_needs_global_mutex(dev))
 423                mutex_lock(&drm_global_mutex);
 424
 425        if (!atomic_fetch_inc(&dev->open_count))
 426                need_setup = 1;
 427
 428        /* share address_space across all char-devs of a single device */
 429        filp->f_mapping = dev->anon_inode->i_mapping;
 430
 431        retcode = drm_open_helper(filp, minor);
 432        if (retcode)
 433                goto err_undo;
 434        if (need_setup) {
 435                retcode = drm_legacy_setup(dev);
 436                if (retcode) {
 437                        drm_close_helper(filp);
 438                        goto err_undo;
 439                }
 440        }
 441
 442        if (drm_dev_needs_global_mutex(dev))
 443                mutex_unlock(&drm_global_mutex);
 444
 445        return 0;
 446
 447err_undo:
 448        atomic_dec(&dev->open_count);
 449        if (drm_dev_needs_global_mutex(dev))
 450                mutex_unlock(&drm_global_mutex);
 451        drm_minor_release(minor);
 452        return retcode;
 453}
 454EXPORT_SYMBOL(drm_open);
 455
 456void drm_lastclose(struct drm_device * dev)
 457{
 458        DRM_DEBUG("\n");
 459
 460        if (dev->driver->lastclose)
 461                dev->driver->lastclose(dev);
 462        DRM_DEBUG("driver lastclose completed\n");
 463
 464        if (drm_core_check_feature(dev, DRIVER_LEGACY))
 465                drm_legacy_dev_reinit(dev);
 466
 467        drm_client_dev_restore(dev);
 468}
 469
 470/**
 471 * drm_release - release method for DRM file
 472 * @inode: device inode
 473 * @filp: file pointer.
 474 *
 475 * This function must be used by drivers as their &file_operations.release
  476 * method. It frees any resources associated with the open file and calls the
  477 * &drm_driver.postclose driver callback. If this is the last open file for the
  478 * DRM device, it also calls the &drm_driver.lastclose driver callback.
 479 *
 480 * RETURNS:
 481 *
 482 * Always succeeds and returns 0.
 483 */
 484int drm_release(struct inode *inode, struct file *filp)
 485{
 486        struct drm_file *file_priv = filp->private_data;
 487        struct drm_minor *minor = file_priv->minor;
 488        struct drm_device *dev = minor->dev;
 489
 490        if (drm_dev_needs_global_mutex(dev))
 491                mutex_lock(&drm_global_mutex);
 492
 493        DRM_DEBUG("open_count = %d\n", atomic_read(&dev->open_count));
 494
 495        drm_close_helper(filp);
 496
 497        if (atomic_dec_and_test(&dev->open_count))
 498                drm_lastclose(dev);
 499
 500        if (drm_dev_needs_global_mutex(dev))
 501                mutex_unlock(&drm_global_mutex);
 502
 503        drm_minor_release(minor);
 504
 505        return 0;
 506}
 507EXPORT_SYMBOL(drm_release);
 508
 509/**
 510 * drm_release_noglobal - release method for DRM file
 511 * @inode: device inode
 512 * @filp: file pointer.
 513 *
  514 * This function may be used by drivers as their &file_operations.release
  515 * method. It frees any resources associated with the open file before taking
  516 * the drm_global_mutex and then calls the &drm_driver.postclose driver
  517 * callback. If this is the last open file for the DRM device, it also calls
  518 * the &drm_driver.lastclose driver callback.
 519 *
 520 * RETURNS:
 521 *
 522 * Always succeeds and returns 0.
 523 */
 524int drm_release_noglobal(struct inode *inode, struct file *filp)
 525{
 526        struct drm_file *file_priv = filp->private_data;
 527        struct drm_minor *minor = file_priv->minor;
 528        struct drm_device *dev = minor->dev;
 529
 530        drm_close_helper(filp);
 531
 532        if (atomic_dec_and_mutex_lock(&dev->open_count, &drm_global_mutex)) {
 533                drm_lastclose(dev);
 534                mutex_unlock(&drm_global_mutex);
 535        }
 536
 537        drm_minor_release(minor);
 538
 539        return 0;
 540}
 541EXPORT_SYMBOL(drm_release_noglobal);
 542
 543/**
 544 * drm_read - read method for DRM file
 545 * @filp: file pointer
 546 * @buffer: userspace destination pointer for the read
 547 * @count: count in bytes to read
 548 * @offset: offset to read
 549 *
 550 * This function must be used by drivers as their &file_operations.read
 551 * method if they use DRM events for asynchronous signalling to userspace.
  552 * Since events are used by the KMS API for vblank and page flip completion, this
  553 * means all modern display drivers must use it.
 554 *
  555 * @offset is ignored, DRM events are read like a pipe. Therefore drivers also
  556 * must set &file_operations.llseek to no_llseek. Polling support is
  557 * provided by drm_poll().
 558 *
 559 * This function will only ever read a full event. Therefore userspace must
 560 * supply a big enough buffer to fit any event to ensure forward progress. Since
  561 * the maximum event space is currently 4K, it's recommended to just use that for
 562 * safety.
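 *
 * From the userspace side the read loop could look roughly like this (a
 * sketch, not kernel code)::
 *
 *     char buf[4096];
 *     ssize_t len = read(fd, buf, sizeof(buf));
 *     ssize_t off = 0;
 *
 *     while (off < len) {
 *             struct drm_event *e = (struct drm_event *)&buf[off];
 *
 *             // e->type identifies the event, e.g. DRM_EVENT_FLIP_COMPLETE
 *             off += e->length;
 *     }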
 563 *
 564 * RETURNS:
 565 *
 566 * Number of bytes read (always aligned to full events, and can be 0) or a
 567 * negative error code on failure.
 568 */
 569ssize_t drm_read(struct file *filp, char __user *buffer,
 570                 size_t count, loff_t *offset)
 571{
 572        struct drm_file *file_priv = filp->private_data;
 573        struct drm_device *dev = file_priv->minor->dev;
 574        ssize_t ret;
 575
 576        ret = mutex_lock_interruptible(&file_priv->event_read_lock);
 577        if (ret)
 578                return ret;
 579
 580        for (;;) {
 581                struct drm_pending_event *e = NULL;
 582
 583                spin_lock_irq(&dev->event_lock);
 584                if (!list_empty(&file_priv->event_list)) {
 585                        e = list_first_entry(&file_priv->event_list,
 586                                        struct drm_pending_event, link);
 587                        file_priv->event_space += e->event->length;
 588                        list_del(&e->link);
 589                }
 590                spin_unlock_irq(&dev->event_lock);
 591
 592                if (e == NULL) {
 593                        if (ret)
 594                                break;
 595
 596                        if (filp->f_flags & O_NONBLOCK) {
 597                                ret = -EAGAIN;
 598                                break;
 599                        }
 600
 601                        mutex_unlock(&file_priv->event_read_lock);
 602                        ret = wait_event_interruptible(file_priv->event_wait,
 603                                                       !list_empty(&file_priv->event_list));
 604                        if (ret >= 0)
 605                                ret = mutex_lock_interruptible(&file_priv->event_read_lock);
 606                        if (ret)
 607                                return ret;
 608                } else {
 609                        unsigned length = e->event->length;
 610
 611                        if (length > count - ret) {
 612put_back_event:
 613                                spin_lock_irq(&dev->event_lock);
 614                                file_priv->event_space -= length;
 615                                list_add(&e->link, &file_priv->event_list);
 616                                spin_unlock_irq(&dev->event_lock);
 617                                wake_up_interruptible_poll(&file_priv->event_wait,
 618                                        EPOLLIN | EPOLLRDNORM);
 619                                break;
 620                        }
 621
 622                        if (copy_to_user(buffer + ret, e->event, length)) {
 623                                if (ret == 0)
 624                                        ret = -EFAULT;
 625                                goto put_back_event;
 626                        }
 627
 628                        ret += length;
 629                        kfree(e);
 630                }
 631        }
 632        mutex_unlock(&file_priv->event_read_lock);
 633
 634        return ret;
 635}
 636EXPORT_SYMBOL(drm_read);
 637
 638/**
 639 * drm_poll - poll method for DRM file
 640 * @filp: file pointer
 641 * @wait: poll waiter table
 642 *
  643 * This function must be used by drivers as their &file_operations.poll method
  644 * if they use DRM events for asynchronous signalling to userspace. Since
  645 * events are used by the KMS API for vblank and page flip completion, this means
  646 * all modern display drivers must use it.
 647 *
 648 * See also drm_read().
 649 *
 650 * RETURNS:
 651 *
 652 * Mask of POLL flags indicating the current status of the file.
 653 */
 654__poll_t drm_poll(struct file *filp, struct poll_table_struct *wait)
 655{
 656        struct drm_file *file_priv = filp->private_data;
 657        __poll_t mask = 0;
 658
 659        poll_wait(filp, &file_priv->event_wait, wait);
 660
 661        if (!list_empty(&file_priv->event_list))
 662                mask |= EPOLLIN | EPOLLRDNORM;
 663
 664        return mask;
 665}
 666EXPORT_SYMBOL(drm_poll);
 667
 668/**
 669 * drm_event_reserve_init_locked - init a DRM event and reserve space for it
 670 * @dev: DRM device
 671 * @file_priv: DRM file private data
 672 * @p: tracking structure for the pending event
 673 * @e: actual event data to deliver to userspace
 674 *
 675 * This function prepares the passed in event for eventual delivery. If the event
 676 * doesn't get delivered (because the IOCTL fails later on, before queuing up
  677 * anything) then the event must be cancelled and freed using
 678 * drm_event_cancel_free(). Successfully initialized events should be sent out
 679 * using drm_send_event() or drm_send_event_locked() to signal completion of the
 680 * asynchronous event to userspace.
 681 *
  682 * If callers embed @p into a larger structure, it must be allocated with
  683 * kmalloc and @p must be the first member element.
 684 *
 685 * This is the locked version of drm_event_reserve_init() for callers which
 686 * already hold &drm_device.event_lock.
 687 *
 688 * RETURNS:
 689 *
 690 * 0 on success or a negative error code on failure.
 691 */
 692int drm_event_reserve_init_locked(struct drm_device *dev,
 693                                  struct drm_file *file_priv,
 694                                  struct drm_pending_event *p,
 695                                  struct drm_event *e)
 696{
 697        if (file_priv->event_space < e->length)
 698                return -ENOMEM;
 699
 700        file_priv->event_space -= e->length;
 701
 702        p->event = e;
 703        list_add(&p->pending_link, &file_priv->pending_event_list);
 704        p->file_priv = file_priv;
 705
 706        return 0;
 707}
 708EXPORT_SYMBOL(drm_event_reserve_init_locked);
 709
 710/**
 711 * drm_event_reserve_init - init a DRM event and reserve space for it
 712 * @dev: DRM device
 713 * @file_priv: DRM file private data
 714 * @p: tracking structure for the pending event
 715 * @e: actual event data to deliver to userspace
 716 *
 717 * This function prepares the passed in event for eventual delivery. If the event
 718 * doesn't get delivered (because the IOCTL fails later on, before queuing up
  719 * anything) then the event must be cancelled and freed using
 720 * drm_event_cancel_free(). Successfully initialized events should be sent out
 721 * using drm_send_event() or drm_send_event_locked() to signal completion of the
 722 * asynchronous event to userspace.
 723 *
  724 * If callers embed @p into a larger structure, it must be allocated with
  725 * kmalloc and @p must be the first member element.
 726 *
 727 * Callers which already hold &drm_device.event_lock should use
 728 * drm_event_reserve_init_locked() instead.
 729 *
 730 * RETURNS:
 731 *
 732 * 0 on success or a negative error code on failure.
 733 */
 734int drm_event_reserve_init(struct drm_device *dev,
 735                           struct drm_file *file_priv,
 736                           struct drm_pending_event *p,
 737                           struct drm_event *e)
 738{
 739        unsigned long flags;
 740        int ret;
 741
 742        spin_lock_irqsave(&dev->event_lock, flags);
 743        ret = drm_event_reserve_init_locked(dev, file_priv, p, e);
 744        spin_unlock_irqrestore(&dev->event_lock, flags);
 745
 746        return ret;
 747}
 748EXPORT_SYMBOL(drm_event_reserve_init);
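
/*
 * Typical driver-side usage follows this pattern; the struct and surrounding
 * code below are hypothetical, real drivers use e.g. struct
 * drm_pending_vblank_event:
 *
 *     struct example_event {
 *             struct drm_pending_event base;  // must be the first member
 *             struct drm_event_vblank data;   // uAPI payload sent to userspace
 *     };
 *
 *     struct example_event *ev = kzalloc(sizeof(*ev), GFP_KERNEL);
 *     int ret;
 *
 *     if (!ev)
 *             return -ENOMEM;
 *
 *     ev->data.base.type = DRM_EVENT_VBLANK;
 *     ev->data.base.length = sizeof(ev->data);
 *
 *     ret = drm_event_reserve_init(dev, file_priv, &ev->base, &ev->data.base);
 *     if (ret) {
 *             kfree(ev);
 *             return ret;
 *     }
 *
 *     // If the nonblocking operation later fails before being queued:
 *     //     drm_event_cancel_free(dev, &ev->base);
 *     // When the asynchronous work completes:
 *     //     drm_send_event(dev, &ev->base);
 */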
 749
 750/**
 751 * drm_event_cancel_free - free a DRM event and release its space
 752 * @dev: DRM device
 753 * @p: tracking structure for the pending event
 754 *
 755 * This function frees the event @p initialized with drm_event_reserve_init()
 756 * and releases any allocated space. It is used to cancel an event when the
 757 * nonblocking operation could not be submitted and needed to be aborted.
 758 */
 759void drm_event_cancel_free(struct drm_device *dev,
 760                           struct drm_pending_event *p)
 761{
 762        unsigned long flags;
 763
 764        spin_lock_irqsave(&dev->event_lock, flags);
 765        if (p->file_priv) {
 766                p->file_priv->event_space += p->event->length;
 767                list_del(&p->pending_link);
 768        }
 769        spin_unlock_irqrestore(&dev->event_lock, flags);
 770
 771        if (p->fence)
 772                dma_fence_put(p->fence);
 773
 774        kfree(p);
 775}
 776EXPORT_SYMBOL(drm_event_cancel_free);
 777
 778static void drm_send_event_helper(struct drm_device *dev,
 779                           struct drm_pending_event *e, ktime_t timestamp)
 780{
 781        assert_spin_locked(&dev->event_lock);
 782
 783        if (e->completion) {
 784                complete_all(e->completion);
 785                e->completion_release(e->completion);
 786                e->completion = NULL;
 787        }
 788
 789        if (e->fence) {
 790                if (timestamp)
 791                        dma_fence_signal_timestamp(e->fence, timestamp);
 792                else
 793                        dma_fence_signal(e->fence);
 794                dma_fence_put(e->fence);
 795        }
 796
 797        if (!e->file_priv) {
 798                kfree(e);
 799                return;
 800        }
 801
 802        list_del(&e->pending_link);
 803        list_add_tail(&e->link,
 804                      &e->file_priv->event_list);
 805        wake_up_interruptible_poll(&e->file_priv->event_wait,
 806                EPOLLIN | EPOLLRDNORM);
 807}
 808
 809/**
 810 * drm_send_event_timestamp_locked - send DRM event to file descriptor
 811 * @dev: DRM device
 812 * @e: DRM event to deliver
 813 * @timestamp: timestamp to set for the fence event in kernel's CLOCK_MONOTONIC
 814 * time domain
 815 *
 816 * This function sends the event @e, initialized with drm_event_reserve_init(),
 817 * to its associated userspace DRM file. Callers must already hold
 818 * &drm_device.event_lock.
 819 *
 820 * Note that the core will take care of unlinking and disarming events when the
 821 * corresponding DRM file is closed. Drivers need not worry about whether the
 822 * DRM file for this event still exists and can call this function upon
 823 * completion of the asynchronous work unconditionally.
 824 */
 825void drm_send_event_timestamp_locked(struct drm_device *dev,
 826                                     struct drm_pending_event *e, ktime_t timestamp)
 827{
 828        drm_send_event_helper(dev, e, timestamp);
 829}
 830EXPORT_SYMBOL(drm_send_event_timestamp_locked);
 831
 832/**
 833 * drm_send_event_locked - send DRM event to file descriptor
 834 * @dev: DRM device
 835 * @e: DRM event to deliver
 836 *
 837 * This function sends the event @e, initialized with drm_event_reserve_init(),
 838 * to its associated userspace DRM file. Callers must already hold
 839 * &drm_device.event_lock, see drm_send_event() for the unlocked version.
 840 *
 841 * Note that the core will take care of unlinking and disarming events when the
 842 * corresponding DRM file is closed. Drivers need not worry about whether the
 843 * DRM file for this event still exists and can call this function upon
 844 * completion of the asynchronous work unconditionally.
 845 */
 846void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
 847{
 848        drm_send_event_helper(dev, e, 0);
 849}
 850EXPORT_SYMBOL(drm_send_event_locked);
 851
 852/**
 853 * drm_send_event - send DRM event to file descriptor
 854 * @dev: DRM device
 855 * @e: DRM event to deliver
 856 *
 857 * This function sends the event @e, initialized with drm_event_reserve_init(),
 858 * to its associated userspace DRM file. This function acquires
 859 * &drm_device.event_lock, see drm_send_event_locked() for callers which already
 860 * hold this lock.
 861 *
 862 * Note that the core will take care of unlinking and disarming events when the
 863 * corresponding DRM file is closed. Drivers need not worry about whether the
 864 * DRM file for this event still exists and can call this function upon
 865 * completion of the asynchronous work unconditionally.
 866 */
 867void drm_send_event(struct drm_device *dev, struct drm_pending_event *e)
 868{
 869        unsigned long irqflags;
 870
 871        spin_lock_irqsave(&dev->event_lock, irqflags);
 872        drm_send_event_helper(dev, e, 0);
 873        spin_unlock_irqrestore(&dev->event_lock, irqflags);
 874}
 875EXPORT_SYMBOL(drm_send_event);
 876
 877/**
 878 * mock_drm_getfile - Create a new struct file for the drm device
 879 * @minor: drm minor to wrap (e.g. #drm_device.primary)
 880 * @flags: file creation mode (O_RDWR etc)
 881 *
  882 * This creates a new struct file that wraps a DRM file context around a
  883 * DRM minor. This mimics userspace opening e.g. /dev/dri/card0, but without
  884 * involving userspace. The struct file may be operated on using its f_op
  885 * (the drm_device.driver.fops) to mimic userspace operations, or be supplied
  886 * to userspace-facing functions as an internal/anonymous client.
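 *
 * A self-test might use it roughly as follows (a sketch)::
 *
 *     struct file *file = mock_drm_getfile(dev->primary, O_RDWR);
 *
 *     if (IS_ERR(file))
 *             return PTR_ERR(file);
 *     // ... exercise file->f_op or file->private_data ...
 *     fput(file);  // drops the reference and releases the wrapped drm_file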
 887 *
 888 * RETURNS:
 889 * Pointer to newly created struct file, ERR_PTR on failure.
 890 */
 891struct file *mock_drm_getfile(struct drm_minor *minor, unsigned int flags)
 892{
 893        struct drm_device *dev = minor->dev;
 894        struct drm_file *priv;
 895        struct file *file;
 896
 897        priv = drm_file_alloc(minor);
 898        if (IS_ERR(priv))
 899                return ERR_CAST(priv);
 900
 901        file = anon_inode_getfile("drm", dev->driver->fops, priv, flags);
 902        if (IS_ERR(file)) {
 903                drm_file_free(priv);
 904                return file;
 905        }
 906
 907        /* Everyone shares a single global address space */
 908        file->f_mapping = dev->anon_inode->i_mapping;
 909
 910        drm_dev_get(dev);
 911        priv->filp = file;
 912
 913        return file;
 914}
 915EXPORT_SYMBOL_FOR_TESTS_ONLY(mock_drm_getfile);
 916
 917#ifdef CONFIG_MMU
 918#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 919/*
 920 * drm_addr_inflate() attempts to construct an aligned area by inflating
 921 * the area size and skipping the unaligned start of the area.
  922 * Adapted from shmem_get_unmapped_area().
 923 */
 924static unsigned long drm_addr_inflate(unsigned long addr,
 925                                      unsigned long len,
 926                                      unsigned long pgoff,
 927                                      unsigned long flags,
 928                                      unsigned long huge_size)
 929{
 930        unsigned long offset, inflated_len;
 931        unsigned long inflated_addr;
 932        unsigned long inflated_offset;
 933
 934        offset = (pgoff << PAGE_SHIFT) & (huge_size - 1);
 935        if (offset && offset + len < 2 * huge_size)
 936                return addr;
 937        if ((addr & (huge_size - 1)) == offset)
 938                return addr;
 939
 940        inflated_len = len + huge_size - PAGE_SIZE;
 941        if (inflated_len > TASK_SIZE)
 942                return addr;
 943        if (inflated_len < len)
 944                return addr;
 945
 946        inflated_addr = current->mm->get_unmapped_area(NULL, 0, inflated_len,
 947                                                       0, flags);
 948        if (IS_ERR_VALUE(inflated_addr))
 949                return addr;
 950        if (inflated_addr & ~PAGE_MASK)
 951                return addr;
 952
 953        inflated_offset = inflated_addr & (huge_size - 1);
 954        inflated_addr += offset - inflated_offset;
 955        if (inflated_offset > offset)
 956                inflated_addr += huge_size;
 957
 958        if (inflated_addr > TASK_SIZE - len)
 959                return addr;
 960
 961        return inflated_addr;
 962}
 963
 964/**
 965 * drm_get_unmapped_area() - Get an unused user-space virtual memory area
 966 * suitable for huge page table entries.
 967 * @file: The struct file representing the address space being mmap()'d.
 968 * @uaddr: Start address suggested by user-space.
 969 * @len: Length of the area.
 970 * @pgoff: The page offset into the address space.
 971 * @flags: mmap flags
 972 * @mgr: The address space manager used by the drm driver. This argument can
 973 * probably be removed at some point when all drivers use the same
 974 * address space manager.
 975 *
 976 * This function attempts to find an unused user-space virtual memory area
 977 * that can accommodate the size we want to map, and that is properly
 978 * aligned to facilitate huge page table entries matching actual
 979 * huge pages or huge page aligned memory in buffer objects. Buffer objects
 980 * are assumed to start at huge page boundary pfns (io memory) or be
 981 * populated by huge pages aligned to the start of the buffer object
 982 * (system- or coherent memory). Adapted from shmem_get_unmapped_area.
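 *
 * A driver would typically wrap this in its &file_operations.get_unmapped_area
 * hook, roughly as follows (a sketch, the function name is made up)::
 *
 *     static unsigned long example_get_unmapped_area(struct file *file,
 *                     unsigned long uaddr, unsigned long len,
 *                     unsigned long pgoff, unsigned long flags)
 *     {
 *             struct drm_file *file_priv = file->private_data;
 *             struct drm_device *dev = file_priv->minor->dev;
 *
 *             return drm_get_unmapped_area(file, uaddr, len, pgoff, flags,
 *                                          dev->vma_offset_manager);
 *     }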
 983 *
 984 * Return: aligned user-space address.
 985 */
 986unsigned long drm_get_unmapped_area(struct file *file,
 987                                    unsigned long uaddr, unsigned long len,
 988                                    unsigned long pgoff, unsigned long flags,
 989                                    struct drm_vma_offset_manager *mgr)
 990{
 991        unsigned long addr;
 992        unsigned long inflated_addr;
 993        struct drm_vma_offset_node *node;
 994
 995        if (len > TASK_SIZE)
 996                return -ENOMEM;
 997
 998        /*
  999         * @pgoff is the file page-offset, the huge page boundaries of
 1000         * which typically align with physical address huge page boundaries.
1001         * That's not true for DRM, however, where physical address huge
1002         * page boundaries instead are aligned with the offset from
1003         * buffer object start. So adjust @pgoff to be the offset from
1004         * buffer object start.
1005         */
1006        drm_vma_offset_lock_lookup(mgr);
1007        node = drm_vma_offset_lookup_locked(mgr, pgoff, 1);
1008        if (node)
1009                pgoff -= node->vm_node.start;
1010        drm_vma_offset_unlock_lookup(mgr);
1011
1012        addr = current->mm->get_unmapped_area(file, uaddr, len, pgoff, flags);
1013        if (IS_ERR_VALUE(addr))
1014                return addr;
1015        if (addr & ~PAGE_MASK)
1016                return addr;
1017        if (addr > TASK_SIZE - len)
1018                return addr;
1019
1020        if (len < HPAGE_PMD_SIZE)
1021                return addr;
1022        if (flags & MAP_FIXED)
1023                return addr;
1024        /*
1025         * Our priority is to support MAP_SHARED mapped hugely;
1026         * and support MAP_PRIVATE mapped hugely too, until it is COWed.
1027         * But if caller specified an address hint, respect that as before.
1028         */
1029        if (uaddr)
1030                return addr;
1031
1032        inflated_addr = drm_addr_inflate(addr, len, pgoff, flags,
1033                                         HPAGE_PMD_SIZE);
1034
1035        if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) &&
1036            len >= HPAGE_PUD_SIZE)
1037                inflated_addr = drm_addr_inflate(inflated_addr, len, pgoff,
1038                                                 flags, HPAGE_PUD_SIZE);
1039        return inflated_addr;
1040}
1041#else /* CONFIG_TRANSPARENT_HUGEPAGE */
1042unsigned long drm_get_unmapped_area(struct file *file,
1043                                    unsigned long uaddr, unsigned long len,
1044                                    unsigned long pgoff, unsigned long flags,
1045                                    struct drm_vma_offset_manager *mgr)
1046{
1047        return current->mm->get_unmapped_area(file, uaddr, len, pgoff, flags);
1048}
1049#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1050EXPORT_SYMBOL_GPL(drm_get_unmapped_area);
1051#endif /* CONFIG_MMU */
1052