linux/drivers/gpu/drm/msm/msm_drv.c
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "msm_drv.h"
#include "msm_gpu.h"

static void msm_fb_output_poll_changed(struct drm_device *dev)
{
        struct msm_drm_private *priv = dev->dev_private;
        if (priv->fbdev)
                drm_fb_helper_hotplug_event(priv->fbdev);
}

static const struct drm_mode_config_funcs mode_config_funcs = {
        .fb_create = msm_framebuffer_create,
        .output_poll_changed = msm_fb_output_poll_changed,
};

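/*
 * Per-domain IOMMU fault handler: currently just logs the faulting IOVA
 * and fault flags.
 */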
static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev,
                unsigned long iova, int flags, void *arg)
{
        DBG("*** fault: iova=%08lx, flags=%d", iova, flags);
        return 0;
}

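/*
 * Register an IOMMU domain with the driver: stash it in the per-device
 * iommus[] table, install the fault handler, and return the slot index
 * (or -EINVAL if the table is already full).
 */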
int msm_register_iommu(struct drm_device *dev, struct iommu_domain *iommu)
{
        struct msm_drm_private *priv = dev->dev_private;
        int idx = priv->num_iommus++;

        if (WARN_ON(idx >= ARRAY_SIZE(priv->iommus)))
                return -EINVAL;

        priv->iommus[idx] = iommu;

        iommu_set_fault_handler(iommu, msm_fault_handler, dev);

        /* need to iommu_attach_device() somewhere??  on resume?? */

        return idx;
}

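/*
 * Attach the IOMMU domain to each of the named MSM IOMMU context devices.
 * Contexts that can't be looked up are silently skipped; a failed attach
 * aborts and returns the error.
 */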
int msm_iommu_attach(struct drm_device *dev, struct iommu_domain *iommu,
                const char **names, int cnt)
{
        int i, ret;

        for (i = 0; i < cnt; i++) {
                /* TODO maybe some day msm iommu won't require this hack: */
                struct device *msm_iommu_get_ctx(const char *ctx_name);
                struct device *ctx = msm_iommu_get_ctx(names[i]);
                if (!ctx)
                        continue;
                ret = iommu_attach_device(iommu, ctx);
                if (ret) {
                        dev_warn(dev->dev, "could not attach iommu to %s", names[i]);
                        return ret;
                }
        }
        return 0;
}

#ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
static bool reglog = false;
MODULE_PARM_DESC(reglog, "Enable register read/write logging");
module_param(reglog, bool, 0600);
#else
#define reglog 0
#endif

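/*
 * Map a platform MEM resource (looked up by name, or the first one if
 * name is NULL) with devm_ioremap_nocache().  Returns an ERR_PTR() on
 * failure, so callers should check with IS_ERR().
 */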
void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
                const char *dbgname)
{
        struct resource *res;
        unsigned long size;
        void __iomem *ptr;

        if (name)
                res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
        else
                res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

        if (!res) {
                dev_err(&pdev->dev, "failed to get memory resource: %s\n", name);
                return ERR_PTR(-EINVAL);
        }

        size = resource_size(res);

        ptr = devm_ioremap_nocache(&pdev->dev, res->start, size);
        if (!ptr) {
                dev_err(&pdev->dev, "failed to ioremap: %s\n", name);
                return ERR_PTR(-ENOMEM);
        }

        if (reglog)
                printk(KERN_DEBUG "IO:region %s %08x %08lx\n", dbgname, (u32)ptr, size);

        return ptr;
}

void msm_writel(u32 data, void __iomem *addr)
{
        if (reglog)
                printk(KERN_DEBUG "IO:W %08x %08x\n", (u32)addr, data);
        writel(data, addr);
}

u32 msm_readl(const void __iomem *addr)
{
        u32 val = readl(addr);
        if (reglog)
                printk(KERN_ERR "IO:R %08x %08x\n", (u32)addr, val);
        return val;
}

/*
 * DRM operations:
 */

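/*
 * Driver unload: tear down KMS helpers, mode config and vblank state,
 * uninstall the IRQ with the device powered via runtime PM, flush and
 * destroy the driver workqueue, then destroy the kms/gpu objects and
 * free the private data.
 */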
static int msm_unload(struct drm_device *dev)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_kms *kms = priv->kms;
        struct msm_gpu *gpu = priv->gpu;

        drm_kms_helper_poll_fini(dev);
        drm_mode_config_cleanup(dev);
        drm_vblank_cleanup(dev);

        pm_runtime_get_sync(dev->dev);
        drm_irq_uninstall(dev);
        pm_runtime_put_sync(dev->dev);

        flush_workqueue(priv->wq);
        destroy_workqueue(priv->wq);

        if (kms) {
                pm_runtime_disable(dev->dev);
                kms->funcs->destroy(kms);
        }

        if (gpu) {
                mutex_lock(&dev->struct_mutex);
                gpu->funcs->pm_suspend(gpu);
                gpu->funcs->destroy(gpu);
                mutex_unlock(&dev->struct_mutex);
        }

        dev->dev_private = NULL;

        kfree(priv);

        return 0;
}

static int msm_load(struct drm_device *dev, unsigned long flags)
{
        struct platform_device *pdev = dev->platformdev;
        struct msm_drm_private *priv;
        struct msm_kms *kms;
        int ret;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv) {
                dev_err(dev->dev, "failed to allocate private data\n");
                return -ENOMEM;
        }

        dev->dev_private = priv;

        priv->wq = alloc_ordered_workqueue("msm", 0);
        init_waitqueue_head(&priv->fence_event);

        INIT_LIST_HEAD(&priv->inactive_list);
        INIT_LIST_HEAD(&priv->fence_cbs);

        drm_mode_config_init(dev);

        kms = mdp4_kms_init(dev);
        if (IS_ERR(kms)) {
                /*
                 * NOTE: once we have GPU support, having no kms should not
                 * be considered fatal.. ideally we would still support gpu
                 * and (for example) use dmabuf/prime to share buffers with
                 * imx drm driver on iMX5
                 */
                dev_err(dev->dev, "failed to load kms\n");
                ret = PTR_ERR(kms);
                goto fail;
        }

        priv->kms = kms;

        if (kms) {
                pm_runtime_enable(dev->dev);
                ret = kms->funcs->hw_init(kms);
                if (ret) {
                        dev_err(dev->dev, "kms hw init failed: %d\n", ret);
                        goto fail;
                }
        }

        dev->mode_config.min_width = 0;
        dev->mode_config.min_height = 0;
        dev->mode_config.max_width = 2048;
        dev->mode_config.max_height = 2048;
        dev->mode_config.funcs = &mode_config_funcs;

        ret = drm_vblank_init(dev, 1);
        if (ret < 0) {
                dev_err(dev->dev, "failed to initialize vblank\n");
                goto fail;
        }

        pm_runtime_get_sync(dev->dev);
        ret = drm_irq_install(dev);
        pm_runtime_put_sync(dev->dev);
        if (ret < 0) {
                dev_err(dev->dev, "failed to install IRQ handler\n");
                goto fail;
        }

        platform_set_drvdata(pdev, dev);

#ifdef CONFIG_DRM_MSM_FBDEV
        priv->fbdev = msm_fbdev_init(dev);
#endif

        drm_kms_helper_poll_init(dev);

        return 0;

fail:
        msm_unload(dev);
        return ret;
}

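/*
 * Lazily bring up the a3xx GPU; called from open() so GPU firmware isn't
 * needed until userspace actually uses the device.  Failure to load or
 * hw_init the GPU is not fatal, priv->gpu just stays NULL.
 */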
static void load_gpu(struct drm_device *dev)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_gpu *gpu;

        if (priv->gpu)
                return;

        mutex_lock(&dev->struct_mutex);
        gpu = a3xx_gpu_init(dev);
        if (IS_ERR(gpu)) {
                dev_warn(dev->dev, "failed to load a3xx gpu\n");
                gpu = NULL;
                /* not fatal */
        }
        mutex_unlock(&dev->struct_mutex);

        if (gpu) {
                int ret;
                gpu->funcs->pm_resume(gpu);
                ret = gpu->funcs->hw_init(gpu);
                if (ret) {
                        dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
                        gpu->funcs->destroy(gpu);
                        gpu = NULL;
                }
        }

        priv->gpu = gpu;
}

static int msm_open(struct drm_device *dev, struct drm_file *file)
{
        struct msm_file_private *ctx;

        /* For now, load gpu on open.. to avoid the requirement of having
         * firmware in the initrd.
         */
        load_gpu(dev);

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;

        file->driver_priv = ctx;

        return 0;
}

static void msm_preclose(struct drm_device *dev, struct drm_file *file)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_file_private *ctx = file->driver_priv;
        struct msm_kms *kms = priv->kms;

        if (kms)
                kms->funcs->preclose(kms, file);

        mutex_lock(&dev->struct_mutex);
        if (ctx == priv->lastctx)
                priv->lastctx = NULL;
        mutex_unlock(&dev->struct_mutex);

        kfree(ctx);
}

static void msm_lastclose(struct drm_device *dev)
{
        struct msm_drm_private *priv = dev->dev_private;
        if (priv->fbdev) {
                drm_modeset_lock_all(dev);
                drm_fb_helper_restore_fbdev_mode(priv->fbdev);
                drm_modeset_unlock_all(dev);
        }
}

static irqreturn_t msm_irq(DRM_IRQ_ARGS)
{
        struct drm_device *dev = arg;
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_kms *kms = priv->kms;
        BUG_ON(!kms);
        return kms->funcs->irq(kms);
}

static void msm_irq_preinstall(struct drm_device *dev)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_kms *kms = priv->kms;
        BUG_ON(!kms);
        kms->funcs->irq_preinstall(kms);
}

static int msm_irq_postinstall(struct drm_device *dev)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_kms *kms = priv->kms;
        BUG_ON(!kms);
        return kms->funcs->irq_postinstall(kms);
}

static void msm_irq_uninstall(struct drm_device *dev)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_kms *kms = priv->kms;
        BUG_ON(!kms);
        kms->funcs->irq_uninstall(kms);
}

static int msm_enable_vblank(struct drm_device *dev, int crtc_id)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_kms *kms = priv->kms;
        if (!kms)
                return -ENXIO;
        DBG("dev=%p, crtc=%d", dev, crtc_id);
        return kms->funcs->enable_vblank(kms, priv->crtcs[crtc_id]);
}

static void msm_disable_vblank(struct drm_device *dev, int crtc_id)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_kms *kms = priv->kms;
        if (!kms)
                return;
        DBG("dev=%p, crtc=%d", dev, crtc_id);
        kms->funcs->disable_vblank(kms, priv->crtcs[crtc_id]);
}

/*
 * DRM debugfs:
 */

#ifdef CONFIG_DEBUG_FS
static int msm_gpu_show(struct drm_device *dev, struct seq_file *m)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_gpu *gpu = priv->gpu;

        if (gpu) {
                seq_printf(m, "%s Status:\n", gpu->name);
                gpu->funcs->show(gpu, m);
        }

        return 0;
}

static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_gpu *gpu = priv->gpu;

        if (gpu) {
                seq_printf(m, "Active Objects (%s):\n", gpu->name);
                msm_gem_describe_objects(&gpu->active_list, m);
        }

        seq_printf(m, "Inactive Objects:\n");
        msm_gem_describe_objects(&priv->inactive_list, m);

        return 0;
}

static int msm_mm_show(struct drm_device *dev, struct seq_file *m)
{
        return drm_mm_dump_table(m, dev->mm_private);
}

static int msm_fb_show(struct drm_device *dev, struct seq_file *m)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct drm_framebuffer *fb, *fbdev_fb = NULL;

        if (priv->fbdev) {
                seq_printf(m, "fbcon ");
                fbdev_fb = priv->fbdev->fb;
                msm_framebuffer_describe(fbdev_fb, m);
        }

        mutex_lock(&dev->mode_config.fb_lock);
        list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
                if (fb == fbdev_fb)
                        continue;

                seq_printf(m, "user ");
                msm_framebuffer_describe(fb, m);
        }
        mutex_unlock(&dev->mode_config.fb_lock);

        return 0;
}

static int show_locked(struct seq_file *m, void *arg)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        int (*show)(struct drm_device *dev, struct seq_file *m) =
                        node->info_ent->data;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        ret = show(dev, m);

        mutex_unlock(&dev->struct_mutex);

        return ret;
}

static struct drm_info_list msm_debugfs_list[] = {
                { "gpu", show_locked, 0, msm_gpu_show },
                { "gem", show_locked, 0, msm_gem_show },
                { "mm", show_locked, 0, msm_mm_show },
                { "fb", show_locked, 0, msm_fb_show },
};

static int msm_debugfs_init(struct drm_minor *minor)
{
        struct drm_device *dev = minor->dev;
        int ret;

        ret = drm_debugfs_create_files(msm_debugfs_list,
                        ARRAY_SIZE(msm_debugfs_list),
                        minor->debugfs_root, minor);

        if (ret) {
                dev_err(dev->dev, "could not install msm_debugfs_list\n");
                return ret;
        }

        return ret;
}

static void msm_debugfs_cleanup(struct drm_minor *minor)
{
        drm_debugfs_remove_files(msm_debugfs_list,
                        ARRAY_SIZE(msm_debugfs_list), minor);
}
#endif

/*
 * Fences:
 */

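/*
 * Wait for the GPU to complete a given fence.  A NULL timeout means
 * "don't wait": return -EBUSY if the fence has not completed yet.
 * Otherwise sleep (interruptibly) until the fence completes or the
 * timeout expires (-ETIMEDOUT).  Waiting on a fence that was never
 * submitted is rejected with -EINVAL.
 */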
int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
                struct timespec *timeout)
{
        struct msm_drm_private *priv = dev->dev_private;
        int ret;

        if (!priv->gpu)
                return 0;

        if (fence > priv->gpu->submitted_fence) {
                DRM_ERROR("waiting on invalid fence: %u (of %u)\n",
                                fence, priv->gpu->submitted_fence);
                return -EINVAL;
        }

        if (!timeout) {
                /* no-wait: */
                ret = fence_completed(dev, fence) ? 0 : -EBUSY;
        } else {
                unsigned long timeout_jiffies = timespec_to_jiffies(timeout);
                unsigned long start_jiffies = jiffies;
                unsigned long remaining_jiffies;

                if (time_after(start_jiffies, timeout_jiffies))
                        remaining_jiffies = 0;
                else
                        remaining_jiffies = timeout_jiffies - start_jiffies;

                ret = wait_event_interruptible_timeout(priv->fence_event,
                                fence_completed(dev, fence),
                                remaining_jiffies);

                if (ret == 0) {
                        DBG("timeout waiting for fence: %u (completed: %u)",
                                        fence, priv->completed_fence);
                        ret = -ETIMEDOUT;
                } else if (ret != -ERESTARTSYS) {
                        ret = 0;
                }
        }

        return ret;
}

/* called from workqueue */
void msm_update_fence(struct drm_device *dev, uint32_t fence)
{
        struct msm_drm_private *priv = dev->dev_private;

        mutex_lock(&dev->struct_mutex);
        priv->completed_fence = max(fence, priv->completed_fence);

        while (!list_empty(&priv->fence_cbs)) {
                struct msm_fence_cb *cb;

                cb = list_first_entry(&priv->fence_cbs,
                                struct msm_fence_cb, work.entry);

                if (cb->fence > priv->completed_fence)
                        break;

                list_del_init(&cb->work.entry);
                queue_work(priv->wq, &cb->work);
        }

        mutex_unlock(&dev->struct_mutex);

        wake_up_all(&priv->fence_event);
}

void __msm_fence_worker(struct work_struct *work)
{
        struct msm_fence_cb *cb = container_of(work, struct msm_fence_cb, work);
        cb->func(cb);
}

/*
 * DRM ioctls:
 */

static int msm_ioctl_get_param(struct drm_device *dev, void *data,
                struct drm_file *file)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct drm_msm_param *args = data;
        struct msm_gpu *gpu;

        /* for now, we just have 3d pipe.. eventually this would need to
         * be more clever to dispatch to appropriate gpu module:
         */
        if (args->pipe != MSM_PIPE_3D0)
                return -EINVAL;

        gpu = priv->gpu;

        if (!gpu)
                return -ENXIO;

        return gpu->funcs->get_param(gpu, args->param, &args->value);
}

static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
                struct drm_file *file)
{
        struct drm_msm_gem_new *args = data;
        return msm_gem_new_handle(dev, file, args->size,
                        args->flags, &args->handle);
}

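/*
 * Build a kernel struct timespec from the timespec fields embedded in the
 * ioctl args, so its address can be passed to the wait helpers above.
 */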
#define TS(t) ((struct timespec){ .tv_sec = (t).tv_sec, .tv_nsec = (t).tv_nsec })

static int msm_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
                struct drm_file *file)
{
        struct drm_msm_gem_cpu_prep *args = data;
        struct drm_gem_object *obj;
        int ret;

        obj = drm_gem_object_lookup(dev, file, args->handle);
        if (!obj)
                return -ENOENT;

        ret = msm_gem_cpu_prep(obj, args->op, &TS(args->timeout));

        drm_gem_object_unreference_unlocked(obj);

        return ret;
}

static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
                struct drm_file *file)
{
        struct drm_msm_gem_cpu_fini *args = data;
        struct drm_gem_object *obj;
        int ret;

        obj = drm_gem_object_lookup(dev, file, args->handle);
        if (!obj)
                return -ENOENT;

        ret = msm_gem_cpu_fini(obj);

        drm_gem_object_unreference_unlocked(obj);

        return ret;
}

static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
                struct drm_file *file)
{
        struct drm_msm_gem_info *args = data;
        struct drm_gem_object *obj;
        int ret = 0;

        if (args->pad)
                return -EINVAL;

        obj = drm_gem_object_lookup(dev, file, args->handle);
        if (!obj)
                return -ENOENT;

        args->offset = msm_gem_mmap_offset(obj);

        drm_gem_object_unreference_unlocked(obj);

        return ret;
}

static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
                struct drm_file *file)
{
        struct drm_msm_wait_fence *args = data;
        return msm_wait_fence_interruptable(dev, args->fence, &TS(args->timeout));
}

static const struct drm_ioctl_desc msm_ioctls[] = {
        DRM_IOCTL_DEF_DRV(MSM_GET_PARAM,    msm_ioctl_get_param,    DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(MSM_GEM_NEW,      msm_ioctl_gem_new,      DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(MSM_GEM_INFO,     msm_ioctl_gem_info,     DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT,   msm_ioctl_gem_submit,   DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE,   msm_ioctl_wait_fence,   DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
};

static const struct vm_operations_struct vm_ops = {
        .fault = msm_gem_fault,
        .open = drm_gem_vm_open,
        .close = drm_gem_vm_close,
};

static const struct file_operations fops = {
        .owner              = THIS_MODULE,
        .open               = drm_open,
        .release            = drm_release,
        .unlocked_ioctl     = drm_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl       = drm_compat_ioctl,
#endif
        .poll               = drm_poll,
        .read               = drm_read,
        .llseek             = no_llseek,
        .mmap               = msm_gem_mmap,
};

static struct drm_driver msm_driver = {
        .driver_features    = DRIVER_HAVE_IRQ |
                                DRIVER_GEM |
                                DRIVER_PRIME |
                                DRIVER_RENDER |
                                DRIVER_MODESET,
        .load               = msm_load,
        .unload             = msm_unload,
        .open               = msm_open,
        .preclose           = msm_preclose,
        .lastclose          = msm_lastclose,
        .irq_handler        = msm_irq,
        .irq_preinstall     = msm_irq_preinstall,
        .irq_postinstall    = msm_irq_postinstall,
        .irq_uninstall      = msm_irq_uninstall,
        .get_vblank_counter = drm_vblank_count,
        .enable_vblank      = msm_enable_vblank,
        .disable_vblank     = msm_disable_vblank,
        .gem_free_object    = msm_gem_free_object,
        .gem_vm_ops         = &vm_ops,
        .dumb_create        = msm_gem_dumb_create,
        .dumb_map_offset    = msm_gem_dumb_map_offset,
        .dumb_destroy       = drm_gem_dumb_destroy,
        .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
        .gem_prime_export   = drm_gem_prime_export,
        .gem_prime_import   = drm_gem_prime_import,
        .gem_prime_pin      = msm_gem_prime_pin,
        .gem_prime_unpin    = msm_gem_prime_unpin,
        .gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
        .gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
        .gem_prime_vmap     = msm_gem_prime_vmap,
        .gem_prime_vunmap   = msm_gem_prime_vunmap,
#ifdef CONFIG_DEBUG_FS
        .debugfs_init       = msm_debugfs_init,
        .debugfs_cleanup    = msm_debugfs_cleanup,
#endif
        .ioctls             = msm_ioctls,
        .num_ioctls         = DRM_MSM_NUM_IOCTLS,
        .fops               = &fops,
        .name               = "msm",
        .desc               = "MSM Snapdragon DRM",
        .date               = "20130625",
        .major              = 1,
        .minor              = 0,
};

#ifdef CONFIG_PM_SLEEP
static int msm_pm_suspend(struct device *dev)
{
        struct drm_device *ddev = dev_get_drvdata(dev);

        drm_kms_helper_poll_disable(ddev);

        return 0;
}

static int msm_pm_resume(struct device *dev)
{
        struct drm_device *ddev = dev_get_drvdata(dev);

        drm_kms_helper_poll_enable(ddev);

        return 0;
}
#endif

static const struct dev_pm_ops msm_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(msm_pm_suspend, msm_pm_resume)
};

/*
 * Platform driver:
 */

static int msm_pdev_probe(struct platform_device *pdev)
{
        return drm_platform_init(&msm_driver, pdev);
}

static int msm_pdev_remove(struct platform_device *pdev)
{
        drm_platform_exit(&msm_driver, pdev);

        return 0;
}

static const struct platform_device_id msm_id[] = {
        { "mdp", 0 },
        { }
};

static struct platform_driver msm_platform_driver = {
        .probe      = msm_pdev_probe,
        .remove     = msm_pdev_remove,
        .driver     = {
                .owner  = THIS_MODULE,
                .name   = "msm",
                .pm     = &msm_pm_ops,
        },
        .id_table   = msm_id,
};

static int __init msm_drm_register(void)
{
        DBG("init");
        hdmi_register();
        a3xx_register();
        return platform_driver_register(&msm_platform_driver);
}

static void __exit msm_drm_unregister(void)
{
        DBG("fini");
        platform_driver_unregister(&msm_platform_driver);
        hdmi_unregister();
        a3xx_unregister();
}

module_init(msm_drm_register);
module_exit(msm_drm_unregister);

MODULE_AUTHOR("Rob Clark <robdclark@gmail.com>");
MODULE_DESCRIPTION("MSM DRM Driver");
MODULE_LICENSE("GPL");