// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/kthread.h>
#include <linux/of_address.h>
#include <uapi/linux/sched/types.h>
#include <drm/drm_of.h>

#include "msm_drv.h"
#include "msm_debugfs.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_kms.h"
#include "adreno/adreno_gpu.h"


/*
 * MSM driver version:
 * - 1.0.0 - initial interface
 * - 1.1.0 - adds madvise, and support for submits with > 4 cmd buffers
 * - 1.2.0 - adds explicit fence support for submit ioctl
 * - 1.3.0 - adds GMEM_BASE + NR_RINGS params, SUBMITQUEUE_NEW +
 *           SUBMITQUEUE_CLOSE ioctls, and MSM_INFO_IOVA flag for
 *           MSM_GEM_INFO ioctl.
 * - 1.4.0 - softpin, MSM_RELOC_BO_DUMP, and GEM_INFO support to set/get
 *           GEM object's debug name
 * - 1.5.0 - Add SUBMITQUEUE_QUERY ioctl
 */
#define MSM_VERSION_MAJOR       1
#define MSM_VERSION_MINOR       5
#define MSM_VERSION_PATCHLEVEL  0

static const struct drm_mode_config_funcs mode_config_funcs = {
        .fb_create = msm_framebuffer_create,
        .output_poll_changed = drm_fb_helper_output_poll_changed,
        .atomic_check = drm_atomic_helper_check,
        .atomic_commit = drm_atomic_helper_commit,
};

static const struct drm_mode_config_helper_funcs mode_config_helper_funcs = {
        .atomic_commit_tail = msm_atomic_commit_tail,
};

#ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
static bool reglog;
MODULE_PARM_DESC(reglog, "Enable register read/write logging");
module_param(reglog, bool, 0600);
#else
#define reglog 0
#endif

#ifdef CONFIG_DRM_FBDEV_EMULATION
static bool fbdev = true;
MODULE_PARM_DESC(fbdev, "Enable fbdev compat layer");
module_param(fbdev, bool, 0600);
#endif

static char *vram = "16m";
MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)");
module_param(vram, charp, 0);

bool dumpstate;
MODULE_PARM_DESC(dumpstate, "Dump KMS state on errors");
module_param(dumpstate, bool, 0600);

static bool modeset = true;
MODULE_PARM_DESC(modeset, "Use kernel modesetting [KMS] (1=on (default), 0=disable)");
module_param(modeset, bool, 0600);

/*
 * Util/helpers:
 */

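/*
 * Build a clk_bulk_data array from the "clock-names" DT property of @dev.
 * Returns the number of clocks on success (with *bulk pointing at a
 * devm-allocated array), 0 if the property is absent, or a negative errno.
 */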
int msm_clk_bulk_get(struct device *dev, struct clk_bulk_data **bulk)
{
        struct property *prop;
        const char *name;
        struct clk_bulk_data *local;
        int i = 0, ret, count;

        count = of_property_count_strings(dev->of_node, "clock-names");
        if (count < 1)
                return 0;

        local = devm_kcalloc(dev, count, sizeof(struct clk_bulk_data),
                GFP_KERNEL);
        if (!local)
                return -ENOMEM;

        of_property_for_each_string(dev->of_node, "clock-names", prop, name) {
                local[i].id = devm_kstrdup(dev, name, GFP_KERNEL);
                if (!local[i].id) {
                        devm_kfree(dev, local);
                        return -ENOMEM;
                }

                i++;
        }

        ret = devm_clk_bulk_get(dev, count, local);

        if (ret) {
                for (i = 0; i < count; i++)
                        devm_kfree(dev, (void *) local[i].id);
                devm_kfree(dev, local);

                return ret;
        }

        *bulk = local;
        return count;
}

struct clk *msm_clk_bulk_get_clock(struct clk_bulk_data *bulk, int count,
                const char *name)
{
        int i;
        char n[32];

        snprintf(n, sizeof(n), "%s_clk", name);

        for (i = 0; bulk && i < count; i++) {
                if (!strcmp(bulk[i].id, name) || !strcmp(bulk[i].id, n))
                        return bulk[i].clk;
        }

        return NULL;
}

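/*
 * Look up a clock by name, also accepting the legacy "<name>_clk" form
 * still used by some older device trees.
 */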
struct clk *msm_clk_get(struct platform_device *pdev, const char *name)
{
        struct clk *clk;
        char name2[32];

        clk = devm_clk_get(&pdev->dev, name);
        if (!IS_ERR(clk) || PTR_ERR(clk) == -EPROBE_DEFER)
                return clk;

        snprintf(name2, sizeof(name2), "%s_clk", name);

        clk = devm_clk_get(&pdev->dev, name2);
        if (!IS_ERR(clk))
                dev_warn(&pdev->dev, "Using legacy clk name binding.  Use "
                                "\"%s\" instead of \"%s\"\n", name, name2);

        return clk;
}

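/*
 * Map a named (or, with name == NULL, the first) MEM resource of @pdev.
 * Returns an ERR_PTR() on failure, so callers must check with IS_ERR().
 */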
void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
                const char *dbgname)
{
        struct resource *res;
        unsigned long size;
        void __iomem *ptr;

        if (name)
                res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
        else
                res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

        if (!res) {
                DRM_DEV_ERROR(&pdev->dev, "failed to get memory resource: %s\n", name);
                return ERR_PTR(-EINVAL);
        }

        size = resource_size(res);

        ptr = devm_ioremap_nocache(&pdev->dev, res->start, size);
        if (!ptr) {
                DRM_DEV_ERROR(&pdev->dev, "failed to ioremap: %s\n", name);
                return ERR_PTR(-ENOMEM);
        }

        if (reglog)
                printk(KERN_DEBUG "IO:region %s %p %08lx\n", dbgname, ptr, size);

        return ptr;
}

void msm_writel(u32 data, void __iomem *addr)
{
        if (reglog)
                printk(KERN_DEBUG "IO:W %p %08x\n", addr, data);
        writel(data, addr);
}

u32 msm_readl(const void __iomem *addr)
{
        u32 val = readl(addr);

        if (reglog)
                printk(KERN_DEBUG "IO:R %p %08x\n", addr, val);
        return val;
}

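/*
 * The DRM vblank callbacks run in contexts that cannot sleep, while the
 * KMS enable_vblank/disable_vblank hooks may need to (e.g. to touch clocks
 * or registers), so the actual work is deferred to the driver workqueue.
 */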
struct msm_vblank_work {
        struct work_struct work;
        int crtc_id;
        bool enable;
        struct msm_drm_private *priv;
};

static void vblank_ctrl_worker(struct work_struct *work)
{
        struct msm_vblank_work *vbl_work = container_of(work,
                                                struct msm_vblank_work, work);
        struct msm_drm_private *priv = vbl_work->priv;
        struct msm_kms *kms = priv->kms;

        if (vbl_work->enable)
                kms->funcs->enable_vblank(kms, priv->crtcs[vbl_work->crtc_id]);
        else
                kms->funcs->disable_vblank(kms, priv->crtcs[vbl_work->crtc_id]);

        kfree(vbl_work);
}

static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
                                        int crtc_id, bool enable)
{
        struct msm_vblank_work *vbl_work;

        vbl_work = kzalloc(sizeof(*vbl_work), GFP_ATOMIC);
        if (!vbl_work)
                return -ENOMEM;

        INIT_WORK(&vbl_work->work, vblank_ctrl_worker);

        vbl_work->crtc_id = crtc_id;
        vbl_work->enable = enable;
        vbl_work->priv = priv;

        queue_work(priv->wq, &vbl_work->work);

        return 0;
}

static int msm_drm_uninit(struct device *dev)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct drm_device *ddev = platform_get_drvdata(pdev);
        struct msm_drm_private *priv = ddev->dev_private;
        struct msm_kms *kms = priv->kms;
        struct msm_mdss *mdss = priv->mdss;
        int i;

        /*
         * Shut down the hw if we're far enough along that things might be on.
         * If we run this too early, we'll end up panicking in any variety of
         * places. Since we don't register the drm device until late in
         * msm_drm_init, drm_dev->registered is used as an indicator that the
         * shutdown will be successful.
         */
        if (ddev->registered) {
                drm_dev_unregister(ddev);
                drm_atomic_helper_shutdown(ddev);
        }

        /* We must cancel and clean up any pending vblank enable/disable
         * work before drm_irq_uninstall() to avoid work re-enabling an
         * irq after uninstall has disabled it.
         */

        flush_workqueue(priv->wq);

        /* clean up event worker threads */
        for (i = 0; i < priv->num_crtcs; i++) {
                if (priv->event_thread[i].thread) {
                        kthread_destroy_worker(&priv->event_thread[i].worker);
                        priv->event_thread[i].thread = NULL;
                }
        }

        msm_gem_shrinker_cleanup(ddev);

        drm_kms_helper_poll_fini(ddev);

        msm_perf_debugfs_cleanup(priv);
        msm_rd_debugfs_cleanup(priv);

#ifdef CONFIG_DRM_FBDEV_EMULATION
        if (fbdev && priv->fbdev)
                msm_fbdev_free(ddev);
#endif

        drm_mode_config_cleanup(ddev);

        pm_runtime_get_sync(dev);
        drm_irq_uninstall(ddev);
        pm_runtime_put_sync(dev);

        if (kms && kms->funcs)
                kms->funcs->destroy(kms);

        if (priv->vram.paddr) {
                unsigned long attrs = DMA_ATTR_NO_KERNEL_MAPPING;

                drm_mm_takedown(&priv->vram.mm);
                dma_free_attrs(dev, priv->vram.size, NULL,
                               priv->vram.paddr, attrs);
        }

        component_unbind_all(dev, ddev);

        if (mdss && mdss->funcs)
                mdss->funcs->destroy(ddev);

        ddev->dev_private = NULL;
        drm_dev_put(ddev);

        destroy_workqueue(priv->wq);
        kfree(priv);

        return 0;
}

#define KMS_MDP4 4
#define KMS_MDP5 5
#define KMS_DPU  3

static int get_mdp_ver(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;

        return (int) (unsigned long) of_device_get_match_data(dev);
}

bool msm_use_mmu(struct drm_device *dev)
{
        struct msm_drm_private *priv = dev->dev_private;

        /* a2xx comes with its own MMU */
        return priv->is_a2xx || iommu_present(&platform_bus_type);
}

static int msm_init_vram(struct drm_device *dev)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct device_node *node;
        unsigned long size = 0;
        int ret = 0;

        /* In the device-tree world, we could have a 'memory-region'
         * phandle, which gives us a link to our "vram".  Allocating
         * is all nicely abstracted behind the dma api, but we need
         * to know the entire size to allocate it all in one go. There
         * are two cases:
         *  1) device with no IOMMU, in which case we need exclusive
         *     access to a VRAM carveout big enough for all gpu
         *     buffers
         *  2) device with IOMMU, but where the bootloader puts up
         *     a splash screen.  In this case, the VRAM carveout
         *     need only be large enough for fbdev fb.  But we need
         *     exclusive access to the buffer to avoid the kernel
         *     using those pages for other purposes (which appears
         *     as corruption on screen before we have a chance to
         *     load and do initial modeset)
         */

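        /*
         * An illustrative (not authoritative) DT fragment for case 2;
         * the node name and reg values below are made-up examples:
         *
         *      reserved-memory {
         *              splash_region: splash@9d400000 {
         *                      reg = <0x0 0x9d400000 0x0 0x2400000>;
         *                      no-map;
         *              };
         *      };
         *
         *      &mdss {
         *              memory-region = <&splash_region>;
         *      };
         */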
        node = of_parse_phandle(dev->dev->of_node, "memory-region", 0);
        if (node) {
                struct resource r;

                ret = of_address_to_resource(node, 0, &r);
                of_node_put(node);
                if (ret)
                        return ret;
                size = resource_size(&r);
                DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start);

                /* if we have no IOMMU, then we need to use carveout allocator.
                 * Grab the entire CMA chunk carved out in early startup in
                 * mach-msm:
                 */
        } else if (!msm_use_mmu(dev)) {
                DRM_INFO("using %s VRAM carveout\n", vram);
                size = memparse(vram, NULL);
        }

        if (size) {
                unsigned long attrs = 0;
                void *p;

                priv->vram.size = size;

                drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);
                spin_lock_init(&priv->vram.lock);

                attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
                attrs |= DMA_ATTR_WRITE_COMBINE;

                /* note that for no-kernel-mapping, the vaddr returned
                 * is bogus, but non-null if allocation succeeded:
                 */
                p = dma_alloc_attrs(dev->dev, size,
                                &priv->vram.paddr, GFP_KERNEL, attrs);
                if (!p) {
                        DRM_DEV_ERROR(dev->dev, "failed to allocate VRAM\n");
                        priv->vram.paddr = 0;
                        return -ENOMEM;
                }

                DRM_DEV_INFO(dev->dev, "VRAM: %08x->%08x\n",
                                (uint32_t)priv->vram.paddr,
                                (uint32_t)(priv->vram.paddr + size));
        }

        return ret;
}

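/*
 * Bring up the device: allocate the drm_device and driver private state,
 * initialize the MDSS sub-device for the detected generation, bind the
 * componentized sub-drivers, set up KMS, and register with DRM. Once
 * component_bind_all() has succeeded, errors unwind via msm_drm_uninit().
 */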
static int msm_drm_init(struct device *dev, struct drm_driver *drv)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct drm_device *ddev;
        struct msm_drm_private *priv;
        struct msm_kms *kms;
        struct msm_mdss *mdss;
        int ret, i;
        struct sched_param param;

        ddev = drm_dev_alloc(drv, dev);
        if (IS_ERR(ddev)) {
                DRM_DEV_ERROR(dev, "failed to allocate drm_device\n");
                return PTR_ERR(ddev);
        }

        platform_set_drvdata(pdev, ddev);

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv) {
                ret = -ENOMEM;
                goto err_put_drm_dev;
        }

        ddev->dev_private = priv;
        priv->dev = ddev;

        switch (get_mdp_ver(pdev)) {
        case KMS_MDP5:
                ret = mdp5_mdss_init(ddev);
                break;
        case KMS_DPU:
                ret = dpu_mdss_init(ddev);
                break;
        default:
                ret = 0;
                break;
        }
        if (ret)
                goto err_free_priv;

        mdss = priv->mdss;

        priv->wq = alloc_ordered_workqueue("msm", 0);
        if (!priv->wq) {
                ret = -ENOMEM;
                goto err_destroy_mdss;
        }

        INIT_WORK(&priv->free_work, msm_gem_free_work);
        init_llist_head(&priv->free_list);

        INIT_LIST_HEAD(&priv->inactive_list);

        drm_mode_config_init(ddev);

        /* Bind all our sub-components: */
        ret = component_bind_all(dev, ddev);
        if (ret)
                goto err_destroy_mdss;

        ret = msm_init_vram(ddev);
        if (ret)
                goto err_msm_uninit;

        msm_gem_shrinker_init(ddev);

        switch (get_mdp_ver(pdev)) {
        case KMS_MDP4:
                kms = mdp4_kms_init(ddev);
                priv->kms = kms;
                break;
        case KMS_MDP5:
                kms = mdp5_kms_init(ddev);
                break;
        case KMS_DPU:
                kms = dpu_kms_init(ddev);
                priv->kms = kms;
                break;
        default:
                /* valid only for the dummy headless case, where of_node=NULL */
                WARN_ON(dev->of_node);
                kms = NULL;
                break;
        }

        if (IS_ERR(kms)) {
                DRM_DEV_ERROR(dev, "failed to load kms\n");
                ret = PTR_ERR(kms);
                priv->kms = NULL;
                goto err_msm_uninit;
        }

        /* Enable normalization of plane zpos */
        ddev->mode_config.normalize_zpos = true;

        if (kms) {
                ret = kms->funcs->hw_init(kms);
                if (ret) {
                        DRM_DEV_ERROR(dev, "kms hw init failed: %d\n", ret);
                        goto err_msm_uninit;
                }
        }

        ddev->mode_config.funcs = &mode_config_funcs;
        ddev->mode_config.helper_private = &mode_config_helper_funcs;

        /*
         * This priority was found during empirical testing to give the
         * event threads the realtime scheduling needed to process display
         * updates while coexisting with other realtime and normal priority
         * tasks.
         */
        param.sched_priority = 16;
        for (i = 0; i < priv->num_crtcs; i++) {
                /* initialize event thread */
                priv->event_thread[i].crtc_id = priv->crtcs[i]->base.id;
                kthread_init_worker(&priv->event_thread[i].worker);
                priv->event_thread[i].dev = ddev;
                priv->event_thread[i].thread =
                        kthread_run(kthread_worker_fn,
                                &priv->event_thread[i].worker,
                                "crtc_event:%d", priv->event_thread[i].crtc_id);
                if (IS_ERR(priv->event_thread[i].thread)) {
                        DRM_DEV_ERROR(dev, "failed to create crtc_event kthread\n");
                        ret = PTR_ERR(priv->event_thread[i].thread);
                        priv->event_thread[i].thread = NULL;
                        goto err_msm_uninit;
                }

                ret = sched_setscheduler(priv->event_thread[i].thread,
                                         SCHED_FIFO, &param);
                if (ret)
                        dev_warn(dev, "event_thread set priority failed:%d\n",
                                 ret);
        }

        ret = drm_vblank_init(ddev, priv->num_crtcs);
        if (ret < 0) {
                DRM_DEV_ERROR(dev, "failed to initialize vblank\n");
                goto err_msm_uninit;
        }

        if (kms) {
                pm_runtime_get_sync(dev);
                ret = drm_irq_install(ddev, kms->irq);
                pm_runtime_put_sync(dev);
                if (ret < 0) {
                        DRM_DEV_ERROR(dev, "failed to install IRQ handler\n");
                        goto err_msm_uninit;
                }
        }

        ret = drm_dev_register(ddev, 0);
        if (ret)
                goto err_msm_uninit;

        drm_mode_config_reset(ddev);

#ifdef CONFIG_DRM_FBDEV_EMULATION
        if (kms && fbdev)
                priv->fbdev = msm_fbdev_init(ddev);
#endif

        ret = msm_debugfs_late_init(ddev);
        if (ret)
                goto err_msm_uninit;

        drm_kms_helper_poll_init(ddev);

        return 0;

err_msm_uninit:
        msm_drm_uninit(dev);
        return ret;
err_destroy_mdss:
        if (mdss && mdss->funcs)
                mdss->funcs->destroy(ddev);
err_free_priv:
        kfree(priv);
err_put_drm_dev:
        drm_dev_put(ddev);
        return ret;
}

/*
 * DRM operations:
 */

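/*
 * The GPU is loaded lazily (currently on first open); init_lock keeps
 * concurrent openers from racing to initialize it twice.
 */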
static void load_gpu(struct drm_device *dev)
{
        static DEFINE_MUTEX(init_lock);
        struct msm_drm_private *priv = dev->dev_private;

        mutex_lock(&init_lock);

        if (!priv->gpu)
                priv->gpu = adreno_load_gpu(dev);

        mutex_unlock(&init_lock);
}

static int context_init(struct drm_device *dev, struct drm_file *file)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_file_private *ctx;

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;

        msm_submitqueue_init(dev, ctx);

        ctx->aspace = priv->gpu ? priv->gpu->aspace : NULL;
        file->driver_priv = ctx;

        return 0;
}

static int msm_open(struct drm_device *dev, struct drm_file *file)
{
        /* For now, load gpu on open.. to avoid the requirement of having
         * firmware in the initrd.
         */
        load_gpu(dev);

        return context_init(dev, file);
}

static void context_close(struct msm_file_private *ctx)
{
        msm_submitqueue_close(ctx);
        kfree(ctx);
}

static void msm_postclose(struct drm_device *dev, struct drm_file *file)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_file_private *ctx = file->driver_priv;

        mutex_lock(&dev->struct_mutex);
        if (ctx == priv->lastctx)
                priv->lastctx = NULL;
        mutex_unlock(&dev->struct_mutex);

        context_close(ctx);
}

static irqreturn_t msm_irq(int irq, void *arg)
{
        struct drm_device *dev = arg;
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_kms *kms = priv->kms;

        BUG_ON(!kms);

        return kms->funcs->irq(kms);
}

static void msm_irq_preinstall(struct drm_device *dev)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_kms *kms = priv->kms;

        BUG_ON(!kms);

        kms->funcs->irq_preinstall(kms);
}

static int msm_irq_postinstall(struct drm_device *dev)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_kms *kms = priv->kms;

        BUG_ON(!kms);

        if (kms->funcs->irq_postinstall)
                return kms->funcs->irq_postinstall(kms);

        return 0;
}

static void msm_irq_uninstall(struct drm_device *dev)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_kms *kms = priv->kms;

        BUG_ON(!kms);

        kms->funcs->irq_uninstall(kms);
}

static int msm_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_kms *kms = priv->kms;

        if (!kms)
                return -ENXIO;

        DBG("dev=%p, crtc=%u", dev, pipe);

        return vblank_ctrl_queue_work(priv, pipe, true);
}

static void msm_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_kms *kms = priv->kms;

        if (!kms)
                return;

        DBG("dev=%p, crtc=%u", dev, pipe);

        vblank_ctrl_queue_work(priv, pipe, false);
}

/*
 * DRM ioctls:
 */

static int msm_ioctl_get_param(struct drm_device *dev, void *data,
                struct drm_file *file)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct drm_msm_param *args = data;
        struct msm_gpu *gpu;

        /* for now, we just have 3d pipe.. eventually this would need to
         * be more clever to dispatch to appropriate gpu module:
         */
        if (args->pipe != MSM_PIPE_3D0)
                return -EINVAL;

        gpu = priv->gpu;

        if (!gpu)
                return -ENXIO;

        return gpu->funcs->get_param(gpu, args->param, &args->value);
}

static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
                struct drm_file *file)
{
        struct drm_msm_gem_new *args = data;

        if (args->flags & ~MSM_BO_FLAGS) {
                DRM_ERROR("invalid flags: %08x\n", args->flags);
                return -EINVAL;
        }

        return msm_gem_new_handle(dev, file, args->size,
                        args->flags, &args->handle, NULL);
}

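/*
 * Userspace passes timeouts as absolute CLOCK_MONOTONIC times; convert
 * them to ktime_t for the fence wait helpers, which compare against
 * ktime_get().
 */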
static inline ktime_t to_ktime(struct drm_msm_timespec timeout)
{
        return ktime_set(timeout.tv_sec, timeout.tv_nsec);
}

static int msm_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
                struct drm_file *file)
{
        struct drm_msm_gem_cpu_prep *args = data;
        struct drm_gem_object *obj;
        ktime_t timeout = to_ktime(args->timeout);
        int ret;

        if (args->op & ~MSM_PREP_FLAGS) {
                DRM_ERROR("invalid op: %08x\n", args->op);
                return -EINVAL;
        }

        obj = drm_gem_object_lookup(file, args->handle);
        if (!obj)
                return -ENOENT;

        ret = msm_gem_cpu_prep(obj, args->op, &timeout);

        drm_gem_object_put_unlocked(obj);

        return ret;
}

static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
                struct drm_file *file)
{
        struct drm_msm_gem_cpu_fini *args = data;
        struct drm_gem_object *obj;
        int ret;

        obj = drm_gem_object_lookup(file, args->handle);
        if (!obj)
                return -ENOENT;

        ret = msm_gem_cpu_fini(obj);

        drm_gem_object_put_unlocked(obj);

        return ret;
}

static int msm_ioctl_gem_info_iova(struct drm_device *dev,
                struct drm_gem_object *obj, uint64_t *iova)
{
        struct msm_drm_private *priv = dev->dev_private;

        if (!priv->gpu)
                return -EINVAL;

        /*
         * Don't pin the memory here - just get an address so that userspace can
         * be productive
         */
        return msm_gem_get_iova(obj, priv->gpu->aspace, iova);
}

static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
                struct drm_file *file)
{
        struct drm_msm_gem_info *args = data;
        struct drm_gem_object *obj;
        struct msm_gem_object *msm_obj;
        int i, ret = 0;

        if (args->pad)
                return -EINVAL;

        switch (args->info) {
        case MSM_INFO_GET_OFFSET:
        case MSM_INFO_GET_IOVA:
                /* value returned as immediate, not pointer, so len==0: */
                if (args->len)
                        return -EINVAL;
                break;
        case MSM_INFO_SET_NAME:
        case MSM_INFO_GET_NAME:
                break;
        default:
                return -EINVAL;
        }

        obj = drm_gem_object_lookup(file, args->handle);
        if (!obj)
                return -ENOENT;

        msm_obj = to_msm_bo(obj);

        switch (args->info) {
        case MSM_INFO_GET_OFFSET:
                args->value = msm_gem_mmap_offset(obj);
                break;
        case MSM_INFO_GET_IOVA:
                ret = msm_ioctl_gem_info_iova(dev, obj, &args->value);
                break;
        case MSM_INFO_SET_NAME:
                /* length check should leave room for terminating null: */
                if (args->len >= sizeof(msm_obj->name)) {
                        ret = -EINVAL;
                        break;
                }
                if (copy_from_user(msm_obj->name, u64_to_user_ptr(args->value),
                                   args->len)) {
                        msm_obj->name[0] = '\0';
                        ret = -EFAULT;
                        break;
                }
                msm_obj->name[args->len] = '\0';
                for (i = 0; i < args->len; i++) {
                        if (!isprint(msm_obj->name[i])) {
                                msm_obj->name[i] = '\0';
                                break;
                        }
                }
                break;
        case MSM_INFO_GET_NAME:
                if (args->value && (args->len < strlen(msm_obj->name))) {
                        ret = -EINVAL;
                        break;
                }
                args->len = strlen(msm_obj->name);
                if (args->value) {
                        if (copy_to_user(u64_to_user_ptr(args->value),
                                         msm_obj->name, args->len))
                                ret = -EFAULT;
                }
                break;
        }

        drm_gem_object_put_unlocked(obj);

        return ret;
}

static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
                struct drm_file *file)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct drm_msm_wait_fence *args = data;
        ktime_t timeout = to_ktime(args->timeout);
        struct msm_gpu_submitqueue *queue;
        struct msm_gpu *gpu = priv->gpu;
        int ret;

        if (args->pad) {
                DRM_ERROR("invalid pad: %08x\n", args->pad);
                return -EINVAL;
        }

        if (!gpu)
                return 0;

        queue = msm_submitqueue_get(file->driver_priv, args->queueid);
        if (!queue)
                return -ENOENT;

        ret = msm_wait_fence(gpu->rb[queue->prio]->fctx, args->fence, &timeout,
                true);

        msm_submitqueue_put(queue);
        return ret;
}

static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data,
                struct drm_file *file)
{
        struct drm_msm_gem_madvise *args = data;
        struct drm_gem_object *obj;
        int ret;

        switch (args->madv) {
        case MSM_MADV_DONTNEED:
        case MSM_MADV_WILLNEED:
                break;
        default:
                return -EINVAL;
        }

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        obj = drm_gem_object_lookup(file, args->handle);
        if (!obj) {
                ret = -ENOENT;
                goto unlock;
        }

        ret = msm_gem_madvise(obj, args->madv);
        if (ret >= 0) {
                args->retained = ret;
                ret = 0;
        }

        drm_gem_object_put(obj);

unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}

static int msm_ioctl_submitqueue_new(struct drm_device *dev, void *data,
                struct drm_file *file)
{
        struct drm_msm_submitqueue *args = data;

        if (args->flags & ~MSM_SUBMITQUEUE_FLAGS)
                return -EINVAL;

        return msm_submitqueue_create(dev, file->driver_priv, args->prio,
                args->flags, &args->id);
}

static int msm_ioctl_submitqueue_query(struct drm_device *dev, void *data,
                struct drm_file *file)
{
        return msm_submitqueue_query(dev, file->driver_priv, data);
}

static int msm_ioctl_submitqueue_close(struct drm_device *dev, void *data,
                struct drm_file *file)
{
        u32 id = *(u32 *) data;

        return msm_submitqueue_remove(file->driver_priv, id);
}

static const struct drm_ioctl_desc msm_ioctls[] = {
        DRM_IOCTL_DEF_DRV(MSM_GET_PARAM,    msm_ioctl_get_param,    DRM_AUTH|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(MSM_GEM_NEW,      msm_ioctl_gem_new,      DRM_AUTH|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(MSM_GEM_INFO,     msm_ioctl_gem_info,     DRM_AUTH|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_AUTH|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_AUTH|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT,   msm_ioctl_gem_submit,   DRM_AUTH|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE,   msm_ioctl_wait_fence,   DRM_AUTH|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE,  msm_ioctl_gem_madvise,  DRM_AUTH|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_NEW,   msm_ioctl_submitqueue_new,   DRM_AUTH|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_CLOSE, msm_ioctl_submitqueue_close, DRM_AUTH|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_QUERY, msm_ioctl_submitqueue_query, DRM_AUTH|DRM_RENDER_ALLOW),
};

static const struct vm_operations_struct vm_ops = {
        .fault = msm_gem_fault,
        .open = drm_gem_vm_open,
        .close = drm_gem_vm_close,
};

static const struct file_operations fops = {
        .owner              = THIS_MODULE,
        .open               = drm_open,
        .release            = drm_release,
        .unlocked_ioctl     = drm_ioctl,
        .compat_ioctl       = drm_compat_ioctl,
        .poll               = drm_poll,
        .read               = drm_read,
        .llseek             = no_llseek,
        .mmap               = msm_gem_mmap,
};

static struct drm_driver msm_driver = {
        .driver_features    = DRIVER_GEM |
                                DRIVER_PRIME |
                                DRIVER_RENDER |
                                DRIVER_ATOMIC |
                                DRIVER_MODESET,
        .open               = msm_open,
        .postclose          = msm_postclose,
        .lastclose          = drm_fb_helper_lastclose,
        .irq_handler        = msm_irq,
        .irq_preinstall     = msm_irq_preinstall,
        .irq_postinstall    = msm_irq_postinstall,
        .irq_uninstall      = msm_irq_uninstall,
        .enable_vblank      = msm_enable_vblank,
        .disable_vblank     = msm_disable_vblank,
        .gem_free_object_unlocked = msm_gem_free_object,
        .gem_vm_ops         = &vm_ops,
        .dumb_create        = msm_gem_dumb_create,
        .dumb_map_offset    = msm_gem_dumb_map_offset,
        .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
        .gem_prime_export   = drm_gem_prime_export,
        .gem_prime_import   = drm_gem_prime_import,
        .gem_prime_pin      = msm_gem_prime_pin,
        .gem_prime_unpin    = msm_gem_prime_unpin,
        .gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
        .gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
        .gem_prime_vmap     = msm_gem_prime_vmap,
        .gem_prime_vunmap   = msm_gem_prime_vunmap,
        .gem_prime_mmap     = msm_gem_prime_mmap,
#ifdef CONFIG_DEBUG_FS
        .debugfs_init       = msm_debugfs_init,
#endif
        .ioctls             = msm_ioctls,
        .num_ioctls         = ARRAY_SIZE(msm_ioctls),
        .fops               = &fops,
        .name               = "msm",
        .desc               = "MSM Snapdragon DRM",
        .date               = "20130625",
        .major              = MSM_VERSION_MAJOR,
        .minor              = MSM_VERSION_MINOR,
        .patchlevel         = MSM_VERSION_PATCHLEVEL,
};

#ifdef CONFIG_PM_SLEEP
static int msm_pm_suspend(struct device *dev)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct msm_drm_private *priv = ddev->dev_private;

        if (WARN_ON(priv->pm_state))
                drm_atomic_state_put(priv->pm_state);

        priv->pm_state = drm_atomic_helper_suspend(ddev);
        if (IS_ERR(priv->pm_state)) {
                int ret = PTR_ERR(priv->pm_state);

                DRM_ERROR("Failed to suspend dpu, %d\n", ret);
                return ret;
        }

        return 0;
}

static int msm_pm_resume(struct device *dev)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct msm_drm_private *priv = ddev->dev_private;
        int ret;

        if (WARN_ON(!priv->pm_state))
                return -ENOENT;

        ret = drm_atomic_helper_resume(ddev, priv->pm_state);
        if (!ret)
                priv->pm_state = NULL;

        return ret;
}
#endif

#ifdef CONFIG_PM
static int msm_runtime_suspend(struct device *dev)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct msm_drm_private *priv = ddev->dev_private;
        struct msm_mdss *mdss = priv->mdss;

        DBG("");

        if (mdss && mdss->funcs)
                return mdss->funcs->disable(mdss);

        return 0;
}

static int msm_runtime_resume(struct device *dev)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct msm_drm_private *priv = ddev->dev_private;
        struct msm_mdss *mdss = priv->mdss;

        DBG("");

        if (mdss && mdss->funcs)
                return mdss->funcs->enable(mdss);

        return 0;
}
#endif

static const struct dev_pm_ops msm_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(msm_pm_suspend, msm_pm_resume)
        SET_RUNTIME_PM_OPS(msm_runtime_suspend, msm_runtime_resume, NULL)
};

/*
 * Componentized driver support:
 */

/*
 * NOTE: duplication of the same code as exynos or imx (or probably any
 * other), so there is probably some room for helpers.
 */
static int compare_of(struct device *dev, void *data)
{
        return dev->of_node == data;
}

/*
 * Identify what components need to be added by parsing what remote-endpoints
 * our MDP output ports are connected to. In the case of LVDS on MDP4, there
 * is no external component that we need to add since LVDS is within MDP4
 * itself.
 */
static int add_components_mdp(struct device *mdp_dev,
                              struct component_match **matchptr)
{
        struct device_node *np = mdp_dev->of_node;
        struct device_node *ep_node;
        struct device *master_dev;

        /*
         * on MDP4 based platforms, the MDP platform device is the component
         * master that adds other display interface components to itself.
         *
         * on MDP5 based platforms, the MDSS platform device is the component
         * master that adds MDP5 and other display interface components to
         * itself.
         */
        if (of_device_is_compatible(np, "qcom,mdp4"))
                master_dev = mdp_dev;
        else
                master_dev = mdp_dev->parent;

        for_each_endpoint_of_node(np, ep_node) {
                struct device_node *intf;
                struct of_endpoint ep;
                int ret;

                ret = of_graph_parse_endpoint(ep_node, &ep);
                if (ret) {
                        DRM_DEV_ERROR(mdp_dev, "unable to parse port endpoint\n");
                        of_node_put(ep_node);
                        return ret;
                }

                /*
                 * The LCDC/LVDS port on MDP4 is a special case where the
                 * remote-endpoint isn't a component that we need to add
                 */
                if (of_device_is_compatible(np, "qcom,mdp4") &&
                    ep.port == 0)
                        continue;

                /*
                 * It's okay if some of the ports don't have a remote endpoint
                 * specified. It just means that the port isn't connected to
                 * any external interface.
                 */
                intf = of_graph_get_remote_port_parent(ep_node);
                if (!intf)
                        continue;

                if (of_device_is_available(intf))
                        drm_of_component_match_add(master_dev, matchptr,
                                                   compare_of, intf);

                of_node_put(intf);
        }

        return 0;
}

static int compare_name_mdp(struct device *dev, void *data)
{
        return (strstr(dev_name(dev), "mdp") != NULL);
}

static int add_display_components(struct device *dev,
                                  struct component_match **matchptr)
{
        struct device *mdp_dev;
        int ret;

        /*
         * MDP5/DPU based devices don't have a flat hierarchy. There is a top
         * level parent: MDSS, and children: MDP5/DPU, DSI, HDMI, eDP etc.
         * Populate the children devices, find the MDP5/DPU node, and then add
         * the interfaces to our components list.
         */
        if (of_device_is_compatible(dev->of_node, "qcom,mdss") ||
            of_device_is_compatible(dev->of_node, "qcom,sdm845-mdss")) {
                ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
                if (ret) {
                        DRM_DEV_ERROR(dev, "failed to populate children devices\n");
                        return ret;
                }

                mdp_dev = device_find_child(dev, NULL, compare_name_mdp);
                if (!mdp_dev) {
                        DRM_DEV_ERROR(dev, "failed to find MDSS MDP node\n");
                        of_platform_depopulate(dev);
                        return -ENODEV;
                }

                /* add the MDP component itself */
                drm_of_component_match_add(dev, matchptr, compare_of,
                                           mdp_dev->of_node);

                /* drop the reference taken by device_find_child() */
                put_device(mdp_dev);
        } else {
                /* MDP4 */
                mdp_dev = dev;
        }

        ret = add_components_mdp(mdp_dev, matchptr);
        if (ret)
                of_platform_depopulate(dev);

        return ret;
}

/*
 * We don't know what the best binding to link the gpu with the drm device
 * is. For now, we just hunt for all the possible gpus that we support, and
 * add them as components.
 */
static const struct of_device_id msm_gpu_match[] = {
        { .compatible = "qcom,adreno" },
        { .compatible = "qcom,adreno-3xx" },
        { .compatible = "amd,imageon" },
        { .compatible = "qcom,kgsl-3d0" },
        { },
};

static int add_gpu_components(struct device *dev,
                              struct component_match **matchptr)
{
        struct device_node *np;

        np = of_find_matching_node(NULL, msm_gpu_match);
        if (!np)
                return 0;

        if (of_device_is_available(np))
                drm_of_component_match_add(dev, matchptr, compare_of, np);

        of_node_put(np);

        return 0;
}

static int msm_drm_bind(struct device *dev)
{
        return msm_drm_init(dev, &msm_driver);
}

static void msm_drm_unbind(struct device *dev)
{
        msm_drm_uninit(dev);
}

static const struct component_master_ops msm_drm_ops = {
        .bind = msm_drm_bind,
        .unbind = msm_drm_unbind,
};

/*
 * Platform driver:
 */

static int msm_pdev_probe(struct platform_device *pdev)
{
        struct component_match *match = NULL;
        int ret;

        if (get_mdp_ver(pdev)) {
                ret = add_display_components(&pdev->dev, &match);
                if (ret)
                        return ret;
        }

        ret = add_gpu_components(&pdev->dev, &match);
        if (ret)
                goto fail;

        /* on all devices that I am aware of, iommus which can map
         * any address the cpu can see are used, so pass a full 64-bit
         * dma mask:
         */
        ret = dma_set_mask_and_coherent(&pdev->dev, ~0);
        if (ret)
                goto fail;

        ret = component_master_add_with_match(&pdev->dev, &msm_drm_ops, match);
        if (ret)
                goto fail;

        return 0;

fail:
        of_platform_depopulate(&pdev->dev);
        return ret;
}

static int msm_pdev_remove(struct platform_device *pdev)
{
        component_master_del(&pdev->dev, &msm_drm_ops);
        of_platform_depopulate(&pdev->dev);

        return 0;
}

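/*
 * The match data encodes the KMS generation, decoded again by
 * get_mdp_ver() during probe and init.
 */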
static const struct of_device_id dt_match[] = {
        { .compatible = "qcom,mdp4", .data = (void *)KMS_MDP4 },
        { .compatible = "qcom,mdss", .data = (void *)KMS_MDP5 },
        { .compatible = "qcom,sdm845-mdss", .data = (void *)KMS_DPU },
        {}
};
MODULE_DEVICE_TABLE(of, dt_match);

static struct platform_driver msm_platform_driver = {
        .probe      = msm_pdev_probe,
        .remove     = msm_pdev_remove,
        .driver     = {
                .name   = "msm",
                .of_match_table = dt_match,
                .pm     = &msm_pm_ops,
        },
};

static int __init msm_drm_register(void)
{
        if (!modeset)
                return -EINVAL;

        DBG("init");
        msm_mdp_register();
        msm_dpu_register();
        msm_dsi_register();
        msm_edp_register();
        msm_hdmi_register();
        adreno_register();
        return platform_driver_register(&msm_platform_driver);
}

static void __exit msm_drm_unregister(void)
{
        DBG("fini");
        platform_driver_unregister(&msm_platform_driver);
        msm_hdmi_unregister();
        adreno_unregister();
        msm_edp_unregister();
        msm_dsi_unregister();
        msm_mdp_unregister();
        msm_dpu_unregister();
}

module_init(msm_drm_register);
module_exit(msm_drm_unregister);

MODULE_AUTHOR("Rob Clark <robdclark@gmail.com>");
MODULE_DESCRIPTION("MSM DRM Driver");
MODULE_LICENSE("GPL");