linux/drivers/gpu/drm/drm_fops.c
/**
 * \file drm_fops.c
 * File operations for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Daryll Strauss <daryll@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#include <linux/poll.h>
#include <linux/slab.h>

/* from BKL pushdown: note that nothing else serializes idr_find() */
DEFINE_MUTEX(drm_global_mutex);
EXPORT_SYMBOL(drm_global_mutex);

static int drm_open_helper(struct inode *inode, struct file *filp,
                           struct drm_device * dev);

static int drm_setup(struct drm_device * dev)
{
        int i;
        int ret;

        if (dev->driver->firstopen) {
                ret = dev->driver->firstopen(dev);
                if (ret != 0)
                        return ret;
        }

        atomic_set(&dev->ioctl_count, 0);
        atomic_set(&dev->vma_count, 0);

        if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
            !drm_core_check_feature(dev, DRIVER_MODESET)) {
                dev->buf_use = 0;
                atomic_set(&dev->buf_alloc, 0);

                i = drm_dma_setup(dev);
                if (i < 0)
                        return i;
        }

        for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
                atomic_set(&dev->counts[i], 0);

        dev->sigdata.lock = NULL;

        dev->queue_count = 0;
        dev->queue_reserved = 0;
        dev->queue_slots = 0;
        dev->queuelist = NULL;
        dev->context_flag = 0;
        dev->interrupt_flag = 0;
        dev->dma_flag = 0;
        dev->last_context = 0;
        dev->last_switch = 0;
        dev->last_checked = 0;
        init_waitqueue_head(&dev->context_wait);
        dev->if_version = 0;

        dev->ctx_start = 0;
        dev->lck_start = 0;

        dev->buf_async = NULL;
        init_waitqueue_head(&dev->buf_readers);
        init_waitqueue_head(&dev->buf_writers);

        DRM_DEBUG("\n");

        /*
         * The kernel's context could be created here, but is now created
         * in drm_dma_enqueue.  This is more resource-efficient for
         * hardware that does not do DMA, but may mean that
         * drm_select_queue fails between the time the interrupt is
         * initialized and the time the queues are initialized.
         */

        return 0;
}

/**
 * Open file.
 *
 * \param inode device inode
 * \param filp file pointer.
 * \return zero on success or a negative number on failure.
 *
 * Searches for the DRM device with the matching minor number, calls
 * drm_open_helper(), and increments the device open count.  If the open
 * count was previously zero, i.e. this is the first time the device is
 * opened, drm_setup() is called.
 */
int drm_open(struct inode *inode, struct file *filp)
{
        struct drm_device *dev = NULL;
        int minor_id = iminor(inode);
        struct drm_minor *minor;
        int retcode = 0;

        minor = idr_find(&drm_minors_idr, minor_id);
        if (!minor)
                return -ENODEV;

        if (!(dev = minor->dev))
                return -ENODEV;

        retcode = drm_open_helper(inode, filp, dev);
        if (!retcode) {
                atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
                if (!dev->open_count++)
                        retcode = drm_setup(dev);
        }
        if (!retcode) {
                mutex_lock(&dev->struct_mutex);
                if (minor->type == DRM_MINOR_LEGACY) {
                        if (dev->dev_mapping == NULL)
                                dev->dev_mapping = inode->i_mapping;
                        else if (dev->dev_mapping != inode->i_mapping)
                                retcode = -ENODEV;
                }
                mutex_unlock(&dev->struct_mutex);
        }

        return retcode;
}
EXPORT_SYMBOL(drm_open);

/**
 * File \c open operation.
 *
 * \param inode device inode.
 * \param filp file pointer.
 *
 * Installs the file operations of the driver that owns this minor number
 * into \p filp and calls its \c open method.  If that fails, the original
 * file operations are restored.
 */
int drm_stub_open(struct inode *inode, struct file *filp)
{
        struct drm_device *dev = NULL;
        struct drm_minor *minor;
        int minor_id = iminor(inode);
        int err = -ENODEV;
        const struct file_operations *old_fops;

        DRM_DEBUG("\n");

        mutex_lock(&drm_global_mutex);
        minor = idr_find(&drm_minors_idr, minor_id);
        if (!minor)
                goto out;

        if (!(dev = minor->dev))
                goto out;

        old_fops = filp->f_op;
        filp->f_op = fops_get(&dev->driver->fops);
        if (filp->f_op == NULL) {
                filp->f_op = old_fops;
                goto out;
        }
        if (filp->f_op->open && (err = filp->f_op->open(inode, filp))) {
                fops_put(filp->f_op);
                filp->f_op = fops_get(old_fops);
        }
        fops_put(old_fops);

out:
        mutex_unlock(&drm_global_mutex);
        return err;
}

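/*
 * For reference, drm_stub_open() hands the file off to dev->driver->fops,
 * which drivers typically point at the helpers in this file.  A sketch of
 * such a table follows; the structure name and the exact set of callbacks
 * are illustrative only and vary per driver:
 *
 *      static const struct file_operations example_drm_fops = {
 *              .owner          = THIS_MODULE,
 *              .open           = drm_open,
 *              .release        = drm_release,
 *              .unlocked_ioctl = drm_ioctl,
 *              .mmap           = drm_mmap,
 *              .poll           = drm_poll,
 *              .read           = drm_read,
 *              .fasync         = drm_fasync,
 *              .llseek         = noop_llseek,
 *      };
 */
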
/**
 * Check whether DRI will run on this CPU.
 *
 * \return non-zero if the DRI will run on this CPU, or zero otherwise.
 */
static int drm_cpu_valid(void)
{
#if defined(__i386__)
        if (boot_cpu_data.x86 == 3)
                return 0;       /* No cmpxchg on a 386 */
#endif
#if defined(__sparc__) && !defined(__sparc_v9__)
        return 0;               /* No cmpxchg before v9 sparc. */
#endif
        return 1;
}

/**
 * Called whenever a process opens /dev/drm.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param dev device.
 * \return zero on success or a negative number on failure.
 *
 * Creates and initializes a drm_file structure for the file private data in
 * \p filp and adds it to the doubly-linked list in \p dev.
 */
static int drm_open_helper(struct inode *inode, struct file *filp,
                           struct drm_device * dev)
{
        int minor_id = iminor(inode);
        struct drm_file *priv;
        int ret;

        if (filp->f_flags & O_EXCL)
                return -EBUSY;  /* No exclusive opens */
        if (!drm_cpu_valid())
                return -EINVAL;
        if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
                return -EINVAL;

        DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor_id);

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        filp->private_data = priv;
        priv->filp = filp;
        priv->uid = current_euid();
        priv->pid = task_pid_nr(current);
        priv->minor = idr_find(&drm_minors_idr, minor_id);
        priv->ioctl_count = 0;
        /* for compatibility root is always authenticated */
        priv->authenticated = capable(CAP_SYS_ADMIN);
        priv->lock_count = 0;

        INIT_LIST_HEAD(&priv->lhead);
        INIT_LIST_HEAD(&priv->fbs);
        INIT_LIST_HEAD(&priv->event_list);
        init_waitqueue_head(&priv->event_wait);
        priv->event_space = 4096; /* set aside 4k for event buffer */

        if (dev->driver->driver_features & DRIVER_GEM)
                drm_gem_open(dev, priv);

        if (dev->driver->open) {
                ret = dev->driver->open(dev, priv);
                if (ret < 0)
                        goto out_free;
        }

        /* if there is no current master make this fd it */
        mutex_lock(&dev->struct_mutex);
        if (!priv->minor->master) {
                /* create a new master */
                priv->minor->master = drm_master_create(priv->minor);
                if (!priv->minor->master) {
                        mutex_unlock(&dev->struct_mutex);
                        ret = -ENOMEM;
                        goto out_free;
                }

                priv->is_master = 1;
                /* take another reference for the copy in the local file priv */
                priv->master = drm_master_get(priv->minor->master);

                priv->authenticated = 1;

                mutex_unlock(&dev->struct_mutex);
                if (dev->driver->master_create) {
                        ret = dev->driver->master_create(dev, priv->master);
                        if (ret) {
                                mutex_lock(&dev->struct_mutex);
                                /* drop both references if this fails */
                                drm_master_put(&priv->minor->master);
                                drm_master_put(&priv->master);
                                mutex_unlock(&dev->struct_mutex);
                                goto out_free;
                        }
                }
                mutex_lock(&dev->struct_mutex);
                if (dev->driver->master_set) {
                        ret = dev->driver->master_set(dev, priv, true);
                        if (ret) {
                                /* drop both references if this fails */
                                drm_master_put(&priv->minor->master);
                                drm_master_put(&priv->master);
                                mutex_unlock(&dev->struct_mutex);
                                goto out_free;
                        }
                }
                mutex_unlock(&dev->struct_mutex);
        } else {
                /* get a reference to the master */
                priv->master = drm_master_get(priv->minor->master);
                mutex_unlock(&dev->struct_mutex);
        }

        mutex_lock(&dev->struct_mutex);
        list_add(&priv->lhead, &dev->filelist);
        mutex_unlock(&dev->struct_mutex);

#ifdef __alpha__
        /*
         * Default the hose
         */
        if (!dev->hose) {
                struct pci_dev *pci_dev;
                pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL);
                if (pci_dev) {
                        dev->hose = pci_dev->sysdata;
                        pci_dev_put(pci_dev);
                }
                if (!dev->hose) {
                        struct pci_bus *b = pci_bus_b(pci_root_buses.next);
                        if (b)
                                dev->hose = b->sysdata;
                }
        }
#endif

        return 0;
      out_free:
        kfree(priv);
        filp->private_data = NULL;
        return ret;
}

/** No-op. */
int drm_fasync(int fd, struct file *filp, int on)
{
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev = priv->minor->dev;

        DRM_DEBUG("fd = %d, device = 0x%lx\n", fd,
                  (long)old_encode_dev(priv->minor->device));
        return fasync_helper(fd, filp, on, &dev->buf_async);
}
EXPORT_SYMBOL(drm_fasync);

/*
 * Reclaim locked buffers; note that this may be a bad idea if the current
 * context doesn't have the hw lock...
 */
static void drm_reclaim_locked_buffers(struct drm_device *dev, struct file *f)
{
        struct drm_file *file_priv = f->private_data;

        if (drm_i_have_hw_lock(dev, file_priv)) {
                dev->driver->reclaim_buffers_locked(dev, file_priv);
        } else {
                unsigned long _end = jiffies + 3 * DRM_HZ;
                int locked = 0;

                drm_idlelock_take(&file_priv->master->lock);

                /*
                 * Wait for a while.
                 */
                do {
                        spin_lock_bh(&file_priv->master->lock.spinlock);
                        locked = file_priv->master->lock.idle_has_lock;
                        spin_unlock_bh(&file_priv->master->lock.spinlock);
                        if (locked)
                                break;
                        schedule();
                } while (!time_after_eq(jiffies, _end));

                if (!locked) {
                        DRM_ERROR("reclaim_buffers_locked() deadlock. Please rework this\n"
                                  "\tdriver to use reclaim_buffers_idlelocked() instead.\n"
                                  "\tI will go on reclaiming the buffers anyway.\n");
                }

                dev->driver->reclaim_buffers_locked(dev, file_priv);
                drm_idlelock_release(&file_priv->master->lock);
        }
}

static void drm_master_release(struct drm_device *dev, struct file *filp)
{
        struct drm_file *file_priv = filp->private_data;

        if (dev->driver->reclaim_buffers_locked &&
            file_priv->master->lock.hw_lock)
                drm_reclaim_locked_buffers(dev, filp);

        if (dev->driver->reclaim_buffers_idlelocked &&
            file_priv->master->lock.hw_lock) {
                drm_idlelock_take(&file_priv->master->lock);
                dev->driver->reclaim_buffers_idlelocked(dev, file_priv);
                drm_idlelock_release(&file_priv->master->lock);
        }

        if (drm_i_have_hw_lock(dev, file_priv)) {
                DRM_DEBUG("File %p released, freeing lock for context %d\n",
                          filp, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
                drm_lock_free(&file_priv->master->lock,
                              _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
        }

        if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
            !dev->driver->reclaim_buffers_locked) {
                dev->driver->reclaim_buffers(dev, file_priv);
        }
}

static void drm_events_release(struct drm_file *file_priv)
{
        struct drm_device *dev = file_priv->minor->dev;
        struct drm_pending_event *e, *et;
        struct drm_pending_vblank_event *v, *vt;
        unsigned long flags;

        spin_lock_irqsave(&dev->event_lock, flags);

        /* Remove pending flips */
        list_for_each_entry_safe(v, vt, &dev->vblank_event_list, base.link)
                if (v->base.file_priv == file_priv) {
                        list_del(&v->base.link);
                        drm_vblank_put(dev, v->pipe);
                        v->base.destroy(&v->base);
                }

        /* Remove unconsumed events */
        list_for_each_entry_safe(e, et, &file_priv->event_list, link)
                e->destroy(e);

        spin_unlock_irqrestore(&dev->event_lock, flags);
}

/**
 * Release file.
 *
 * \param inode device inode
 * \param file_priv DRM file private.
 * \return zero on success or a negative number on failure.
 *
 * If the hardware lock is held it is freed, and taken again for the kernel
 * context, since that is necessary to reclaim buffers.  Unlinks the file
 * private data from its list and frees it.  Decrements the open count and,
 * if it reaches zero, calls drm_lastclose().
 */
int drm_release(struct inode *inode, struct file *filp)
{
        struct drm_file *file_priv = filp->private_data;
        struct drm_device *dev = file_priv->minor->dev;
        int retcode = 0;

        mutex_lock(&drm_global_mutex);

        DRM_DEBUG("open_count = %d\n", dev->open_count);

        if (dev->driver->preclose)
                dev->driver->preclose(dev, file_priv);

        /* ========================================================
         * Begin inline drm_release
         */

        DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
                  task_pid_nr(current),
                  (long)old_encode_dev(file_priv->minor->device),
                  dev->open_count);

        /* if the master has gone away we can't do anything with the lock */
        if (file_priv->minor->master)
                drm_master_release(dev, filp);

        drm_events_release(file_priv);

        if (dev->driver->driver_features & DRIVER_GEM)
                drm_gem_release(dev, file_priv);

        if (dev->driver->driver_features & DRIVER_MODESET)
                drm_fb_release(file_priv);

        mutex_lock(&dev->ctxlist_mutex);
        if (!list_empty(&dev->ctxlist)) {
                struct drm_ctx_list *pos, *n;

                list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
                        if (pos->tag == file_priv &&
                            pos->handle != DRM_KERNEL_CONTEXT) {
                                if (dev->driver->context_dtor)
                                        dev->driver->context_dtor(dev,
                                                                  pos->handle);

                                drm_ctxbitmap_free(dev, pos->handle);

                                list_del(&pos->head);
                                kfree(pos);
                                --dev->ctx_count;
                        }
                }
        }
        mutex_unlock(&dev->ctxlist_mutex);

        mutex_lock(&dev->struct_mutex);

        if (file_priv->is_master) {
                struct drm_master *master = file_priv->master;
                struct drm_file *temp;
                list_for_each_entry(temp, &dev->filelist, lhead) {
                        if ((temp->master == file_priv->master) &&
                            (temp != file_priv))
                                temp->authenticated = 0;
                }

                /*
                 * Since the master is disappearing, so is the
                 * possibility to lock.
                 */

                if (master->lock.hw_lock) {
                        if (dev->sigdata.lock == master->lock.hw_lock)
                                dev->sigdata.lock = NULL;
                        master->lock.hw_lock = NULL;
                        master->lock.file_priv = NULL;
                        wake_up_interruptible_all(&master->lock.lock_queue);
                }

                if (file_priv->minor->master == file_priv->master) {
                        /* drop the reference held by the minor */
                        if (dev->driver->master_drop)
                                dev->driver->master_drop(dev, file_priv, true);
                        drm_master_put(&file_priv->minor->master);
                }
        }

        /* drop the reference held by the file priv */
        drm_master_put(&file_priv->master);
        file_priv->is_master = 0;
        list_del(&file_priv->lhead);
        mutex_unlock(&dev->struct_mutex);

        if (dev->driver->postclose)
                dev->driver->postclose(dev, file_priv);
        kfree(file_priv);

        /* ========================================================
         * End inline drm_release
         */

        atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
        if (!--dev->open_count) {
                if (atomic_read(&dev->ioctl_count)) {
                        DRM_ERROR("Device busy: %d\n",
                                  atomic_read(&dev->ioctl_count));
                        retcode = -EBUSY;
                } else
                        retcode = drm_lastclose(dev);
        }
        mutex_unlock(&drm_global_mutex);

        return retcode;
}
EXPORT_SYMBOL(drm_release);

static bool
drm_dequeue_event(struct drm_file *file_priv,
                  size_t total, size_t max, struct drm_pending_event **out)
{
        struct drm_device *dev = file_priv->minor->dev;
        struct drm_pending_event *e;
        unsigned long flags;
        bool ret = false;

        spin_lock_irqsave(&dev->event_lock, flags);

        *out = NULL;
        if (list_empty(&file_priv->event_list))
                goto out;
        e = list_first_entry(&file_priv->event_list,
                             struct drm_pending_event, link);
        if (e->event->length + total > max)
                goto out;

        file_priv->event_space += e->event->length;
        list_del(&e->link);
        *out = e;
        ret = true;

out:
        spin_unlock_irqrestore(&dev->event_lock, flags);
        return ret;
}

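/**
 * Read pending events from the file descriptor.
 *
 * \param filp file pointer.
 * \param buffer user buffer to copy the events into.
 * \param count size of \p buffer in bytes.
 * \param offset unused.
 * \return number of bytes copied, or a negative error code.
 *
 * Sleeps until at least one event is queued on this file (or until a signal
 * is received), then copies out as many whole struct drm_event records as
 * fit in \p count bytes, destroying each event after it has been copied.
 */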
ssize_t drm_read(struct file *filp, char __user *buffer,
                 size_t count, loff_t *offset)
{
        struct drm_file *file_priv = filp->private_data;
        struct drm_pending_event *e;
        size_t total;
        ssize_t ret;

        ret = wait_event_interruptible(file_priv->event_wait,
                                       !list_empty(&file_priv->event_list));
        if (ret < 0)
                return ret;

        total = 0;
        while (drm_dequeue_event(file_priv, total, count, &e)) {
                if (copy_to_user(buffer + total,
                                 e->event, e->event->length)) {
                        total = -EFAULT;
                        break;
                }

                total += e->event->length;
                e->destroy(e);
        }

        return total;
}
EXPORT_SYMBOL(drm_read);

unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait)
{
        struct drm_file *file_priv = filp->private_data;
        unsigned int mask = 0;

        poll_wait(filp, &file_priv->event_wait, wait);

        if (!list_empty(&file_priv->event_list))
                mask |= POLLIN | POLLRDNORM;

        return mask;
}
EXPORT_SYMBOL(drm_poll);

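/*
 * Userspace consumes the event stream exposed by drm_poll() and drm_read()
 * by polling the DRM file descriptor and parsing whole struct drm_event
 * records out of each read.  The sketch below is only an illustration of
 * that pattern (userspace code, not part of this file); the device path,
 * buffer size and the handle_vblank() helper are made up for the example:
 *
 *      #include <fcntl.h>
 *      #include <poll.h>
 *      #include <unistd.h>
 *      #include <drm/drm.h>
 *
 *      int fd = open("/dev/dri/card0", O_RDWR);
 *      struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *      char buf[1024];
 *
 *      while (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
 *              ssize_t len = read(fd, buf, sizeof(buf));
 *              size_t i = 0;
 *
 *              while (len > 0 && i < (size_t)len) {
 *                      struct drm_event *ev = (struct drm_event *)&buf[i];
 *
 *                      if (ev->type == DRM_EVENT_VBLANK)
 *                              handle_vblank((struct drm_event_vblank *)ev);
 *                      i += ev->length;
 *              }
 *      }
 */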