linux/drivers/gpu/drm/msm/msm_drv.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2018, 2020-2021 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/dma-mapping.h>
#include <linux/kthread.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <uapi/linux/sched/types.h>

#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_prime.h>
#include <drm/drm_of.h>
#include <drm/drm_vblank.h>

#include "disp/msm_disp_snapshot.h"
#include "msm_drv.h"
#include "msm_debugfs.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_kms.h"
#include "adreno/adreno_gpu.h"

/*
 * MSM driver version:
 * - 1.0.0 - initial interface
 * - 1.1.0 - adds madvise, and support for submits with > 4 cmd buffers
 * - 1.2.0 - adds explicit fence support for submit ioctl
 * - 1.3.0 - adds GMEM_BASE + NR_RINGS params, SUBMITQUEUE_NEW +
 *           SUBMITQUEUE_CLOSE ioctls, and MSM_INFO_IOVA flag for
 *           MSM_GEM_INFO ioctl.
 * - 1.4.0 - softpin, MSM_RELOC_BO_DUMP, and GEM_INFO support to set/get
 *           GEM object's debug name
 * - 1.5.0 - Add SUBMITQUEUE_QUERY ioctl
 * - 1.6.0 - Syncobj support
 * - 1.7.0 - Add MSM_PARAM_SUSPENDS to access suspend count
 * - 1.8.0 - Add MSM_BO_CACHED_COHERENT for supported GPUs (a6xx)
 */
#define MSM_VERSION_MAJOR       1
#define MSM_VERSION_MINOR       8
#define MSM_VERSION_PATCHLEVEL  0

static const struct drm_mode_config_funcs mode_config_funcs = {
        .fb_create = msm_framebuffer_create,
        .output_poll_changed = drm_fb_helper_output_poll_changed,
        .atomic_check = drm_atomic_helper_check,
        .atomic_commit = drm_atomic_helper_commit,
};

static const struct drm_mode_config_helper_funcs mode_config_helper_funcs = {
        .atomic_commit_tail = msm_atomic_commit_tail,
};

#ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
static bool reglog;
MODULE_PARM_DESC(reglog, "Enable register read/write logging");
module_param(reglog, bool, 0600);
#else
#define reglog 0
#endif

#ifdef CONFIG_DRM_FBDEV_EMULATION
static bool fbdev = true;
MODULE_PARM_DESC(fbdev, "Enable fbdev compat layer");
module_param(fbdev, bool, 0600);
#endif

static char *vram = "16m";
MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)");
module_param(vram, charp, 0);

bool dumpstate;
MODULE_PARM_DESC(dumpstate, "Dump KMS state on errors");
module_param(dumpstate, bool, 0600);

static bool modeset = true;
MODULE_PARM_DESC(modeset, "Use kernel modesetting [KMS] (1=on (default), 0=disable)");
module_param(modeset, bool, 0600);
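
/*
 * Example: these module parameters can be set on the kernel command
 * line (e.g. "msm.vram=64m") or at load time ("modprobe msm vram=64m").
 */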

/*
 * Util/helpers:
 */

struct clk *msm_clk_bulk_get_clock(struct clk_bulk_data *bulk, int count,
                const char *name)
{
        int i;
        char n[32];

        snprintf(n, sizeof(n), "%s_clk", name);

        for (i = 0; bulk && i < count; i++) {
                if (!strcmp(bulk[i].id, name) || !strcmp(bulk[i].id, n))
                        return bulk[i].clk;
        }

        return NULL;
}
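
/*
 * Usage sketch (hypothetical caller): with a clk_bulk_data[] filled in
 * by devm_clk_bulk_get_all(),
 *
 *      core_clk = msm_clk_bulk_get_clock(bulk, num_clks, "core");
 *
 * returns the clock whose id is either "core" or the legacy "core_clk".
 */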

struct clk *msm_clk_get(struct platform_device *pdev, const char *name)
{
        struct clk *clk;
        char name2[32];

        clk = devm_clk_get(&pdev->dev, name);
        if (!IS_ERR(clk) || PTR_ERR(clk) == -EPROBE_DEFER)
                return clk;

        snprintf(name2, sizeof(name2), "%s_clk", name);

        clk = devm_clk_get(&pdev->dev, name2);
        if (!IS_ERR(clk))
                dev_warn(&pdev->dev, "Using legacy clk name binding.  Use "
                                "\"%s\" instead of \"%s\"\n", name, name2);

        return clk;
}
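
/*
 * For example (hypothetical DT): a node with clock-names = "iface" is
 * found directly, while an older DT using "iface_clk" still resolves
 * through the fallback above, at the cost of a warning.
 */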

static void __iomem *_msm_ioremap(struct platform_device *pdev, const char *name,
                                  const char *dbgname, bool quiet, phys_addr_t *psize)
{
        struct resource *res;
        unsigned long size;
        void __iomem *ptr;

        if (name)
                res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
        else
                res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

        if (!res) {
                if (!quiet)
                        DRM_DEV_ERROR(&pdev->dev, "failed to get memory resource: %s\n", name);
                return ERR_PTR(-EINVAL);
        }

        size = resource_size(res);

        ptr = devm_ioremap(&pdev->dev, res->start, size);
        if (!ptr) {
                if (!quiet)
                        DRM_DEV_ERROR(&pdev->dev, "failed to ioremap: %s\n", name);
                return ERR_PTR(-ENOMEM);
        }

        if (reglog)
                printk(KERN_DEBUG "IO:region %s %p %08lx\n", dbgname, ptr, size);

        if (psize)
                *psize = size;

        return ptr;
}

void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
                          const char *dbgname)
{
        return _msm_ioremap(pdev, name, dbgname, false, NULL);
}

void __iomem *msm_ioremap_quiet(struct platform_device *pdev, const char *name,
                                const char *dbgname)
{
        return _msm_ioremap(pdev, name, dbgname, true, NULL);
}

void __iomem *msm_ioremap_size(struct platform_device *pdev, const char *name,
                          const char *dbgname, phys_addr_t *psize)
{
        return _msm_ioremap(pdev, name, dbgname, false, psize);
}
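
/*
 * Usage sketch (hypothetical caller): map the "vbif" region named in
 * the DT reg-names property and learn its size:
 *
 *      phys_addr_t vbif_size;
 *      void __iomem *vbif = msm_ioremap_size(pdev, "vbif", "VBIF",
 *                      &vbif_size);
 *      if (IS_ERR(vbif))
 *              return PTR_ERR(vbif);
 */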

void msm_writel(u32 data, void __iomem *addr)
{
        if (reglog)
                printk(KERN_DEBUG "IO:W %p %08x\n", addr, data);
        writel(data, addr);
}

u32 msm_readl(const void __iomem *addr)
{
        u32 val = readl(addr);
        if (reglog)
                printk(KERN_DEBUG "IO:R %p %08x\n", addr, val);
        return val;
}

void msm_rmw(void __iomem *addr, u32 mask, u32 or)
{
        u32 val = msm_readl(addr);

        val &= ~mask;
        msm_writel(val | or, addr);
}
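
/*
 * Worked example: if *addr currently reads 0xffff0012, then
 * msm_rmw(addr, 0xff, 0x34) clears the low byte and ORs in 0x34,
 * writing back 0xffff0034.
 */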

static enum hrtimer_restart msm_hrtimer_worktimer(struct hrtimer *t)
{
        struct msm_hrtimer_work *work = container_of(t,
                        struct msm_hrtimer_work, timer);

        kthread_queue_work(work->worker, &work->work);

        return HRTIMER_NORESTART;
}

void msm_hrtimer_queue_work(struct msm_hrtimer_work *work,
                            ktime_t wakeup_time,
                            enum hrtimer_mode mode)
{
        hrtimer_start(&work->timer, wakeup_time, mode);
}

void msm_hrtimer_work_init(struct msm_hrtimer_work *work,
                           struct kthread_worker *worker,
                           kthread_work_func_t fn,
                           clockid_t clock_id,
                           enum hrtimer_mode mode)
{
        hrtimer_init(&work->timer, clock_id, mode);
        work->timer.function = msm_hrtimer_worktimer;
        work->worker = worker;
        kthread_init_work(&work->work, fn);
}
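
/*
 * Usage sketch (hypothetical work item): initialize once, then arm the
 * hrtimer whenever the deferred work should run; the timer callback
 * queues the work on the kthread worker, so fn runs in process context:
 *
 *      msm_hrtimer_work_init(&my_work, worker, my_fn,
 *                      CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
 *      msm_hrtimer_queue_work(&my_work, ktime_add_ms(ktime_get(), 5),
 *                      HRTIMER_MODE_ABS);
 */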

static irqreturn_t msm_irq(int irq, void *arg)
{
        struct drm_device *dev = arg;
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_kms *kms = priv->kms;

        BUG_ON(!kms);

        return kms->funcs->irq(kms);
}

static void msm_irq_preinstall(struct drm_device *dev)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_kms *kms = priv->kms;

        BUG_ON(!kms);

        kms->funcs->irq_preinstall(kms);
}

static int msm_irq_postinstall(struct drm_device *dev)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_kms *kms = priv->kms;

        BUG_ON(!kms);

        if (kms->funcs->irq_postinstall)
                return kms->funcs->irq_postinstall(kms);

        return 0;
}

static int msm_irq_install(struct drm_device *dev, unsigned int irq)
{
        int ret;

        if (irq == IRQ_NOTCONNECTED)
                return -ENOTCONN;

        msm_irq_preinstall(dev);

        ret = request_irq(irq, msm_irq, 0, dev->driver->name, dev);
        if (ret)
                return ret;

        ret = msm_irq_postinstall(dev);
        if (ret) {
                free_irq(irq, dev);
                return ret;
        }

        return 0;
}

static void msm_irq_uninstall(struct drm_device *dev)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_kms *kms = priv->kms;

        kms->funcs->irq_uninstall(kms);
        free_irq(kms->irq, dev);
}

struct msm_vblank_work {
        struct work_struct work;
        int crtc_id;
        bool enable;
        struct msm_drm_private *priv;
};

static void vblank_ctrl_worker(struct work_struct *work)
{
        struct msm_vblank_work *vbl_work = container_of(work,
                                                struct msm_vblank_work, work);
        struct msm_drm_private *priv = vbl_work->priv;
        struct msm_kms *kms = priv->kms;

        if (vbl_work->enable)
                kms->funcs->enable_vblank(kms, priv->crtcs[vbl_work->crtc_id]);
        else
                kms->funcs->disable_vblank(kms, priv->crtcs[vbl_work->crtc_id]);

        kfree(vbl_work);
}

static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
                                        int crtc_id, bool enable)
{
        struct msm_vblank_work *vbl_work;

        vbl_work = kzalloc(sizeof(*vbl_work), GFP_ATOMIC);
        if (!vbl_work)
                return -ENOMEM;

        INIT_WORK(&vbl_work->work, vblank_ctrl_worker);

        vbl_work->crtc_id = crtc_id;
        vbl_work->enable = enable;
        vbl_work->priv = priv;

        queue_work(priv->wq, &vbl_work->work);

        return 0;
}
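
/*
 * Note: the vblank work above is allocated GFP_ATOMIC and the actual
 * KMS enable/disable is deferred to priv->wq because the callers,
 * msm_crtc_enable_vblank()/msm_crtc_disable_vblank() below, may be
 * invoked from atomic context where sleeping is not allowed.
 */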

static int msm_drm_uninit(struct device *dev)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct drm_device *ddev = platform_get_drvdata(pdev);
        struct msm_drm_private *priv = ddev->dev_private;
        struct msm_kms *kms = priv->kms;
        struct msm_mdss *mdss = priv->mdss;
        int i;

        /*
         * Shutdown the hw if we're far enough along where things might be on.
         * If we run this too early, we'll end up panicking in any variety of
         * places. Since we don't register the drm device until late in
         * msm_drm_init, drm_dev->registered is used as an indicator that the
         * shutdown will be successful.
         */
        if (ddev->registered) {
                drm_dev_unregister(ddev);
                drm_atomic_helper_shutdown(ddev);
        }

        /* We must cancel and cleanup any pending vblank enable/disable
         * work before msm_irq_uninstall() to avoid work re-enabling an
         * irq after uninstall has disabled it.
         */

        flush_workqueue(priv->wq);

        /* clean up event worker threads */
        for (i = 0; i < priv->num_crtcs; i++) {
                if (priv->event_thread[i].worker)
                        kthread_destroy_worker(priv->event_thread[i].worker);
        }

        msm_gem_shrinker_cleanup(ddev);

        drm_kms_helper_poll_fini(ddev);

        msm_perf_debugfs_cleanup(priv);
        msm_rd_debugfs_cleanup(priv);

#ifdef CONFIG_DRM_FBDEV_EMULATION
        if (fbdev && priv->fbdev)
                msm_fbdev_free(ddev);
#endif

        msm_disp_snapshot_destroy(ddev);

        drm_mode_config_cleanup(ddev);

        pm_runtime_get_sync(dev);
        msm_irq_uninstall(ddev);
        pm_runtime_put_sync(dev);

        if (kms && kms->funcs)
                kms->funcs->destroy(kms);

        if (priv->vram.paddr) {
                unsigned long attrs = DMA_ATTR_NO_KERNEL_MAPPING;
                drm_mm_takedown(&priv->vram.mm);
                dma_free_attrs(dev, priv->vram.size, NULL,
                               priv->vram.paddr, attrs);
        }

        component_unbind_all(dev, ddev);

        if (mdss && mdss->funcs)
                mdss->funcs->destroy(ddev);

        ddev->dev_private = NULL;
        drm_dev_put(ddev);

        destroy_workqueue(priv->wq);
        kfree(priv);

        return 0;
}

#define KMS_MDP4 4
#define KMS_MDP5 5
#define KMS_DPU  3

static int get_mdp_ver(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;

        return (int) (unsigned long) of_device_get_match_data(dev);
}

#include <linux/of_address.h>

bool msm_use_mmu(struct drm_device *dev)
{
        struct msm_drm_private *priv = dev->dev_private;

        /* a2xx comes with its own MMU */
        return priv->is_a2xx || iommu_present(&platform_bus_type);
}

static int msm_init_vram(struct drm_device *dev)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct device_node *node;
        unsigned long size = 0;
        int ret = 0;

        /* In the device-tree world, we could have a 'memory-region'
         * phandle, which gives us a link to our "vram".  Allocating
         * is all nicely abstracted behind the dma api, but we need
         * to know the entire size to allocate it all in one go. There
         * are two cases:
         *  1) device with no IOMMU, in which case we need exclusive
         *     access to a VRAM carveout big enough for all gpu
         *     buffers
         *  2) device with IOMMU, but where the bootloader puts up
         *     a splash screen.  In this case, the VRAM carveout
         *     need only be large enough for fbdev fb.  But we need
         *     exclusive access to the buffer to avoid the kernel
         *     using those pages for other purposes (which appears
         *     as corruption on screen before we have a chance to
         *     load and do initial modeset)
         */

        node = of_parse_phandle(dev->dev->of_node, "memory-region", 0);
        if (node) {
                struct resource r;
                ret = of_address_to_resource(node, 0, &r);
                of_node_put(node);
                if (ret)
                        return ret;
                size = r.end - r.start;
                DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start);

                /* if we have no IOMMU, then we need to use carveout allocator.
                 * Grab the entire CMA chunk carved out in early startup in
                 * mach-msm:
                 */
        } else if (!msm_use_mmu(dev)) {
                DRM_INFO("using %s VRAM carveout\n", vram);
                size = memparse(vram, NULL);
        }

        if (size) {
                unsigned long attrs = 0;
                void *p;

                priv->vram.size = size;

                drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);
                spin_lock_init(&priv->vram.lock);

                attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
                attrs |= DMA_ATTR_WRITE_COMBINE;

                /* note that for no-kernel-mapping, the vaddr returned
                 * is bogus, but non-null if allocation succeeded:
                 */
                p = dma_alloc_attrs(dev->dev, size,
                                &priv->vram.paddr, GFP_KERNEL, attrs);
                if (!p) {
                        DRM_DEV_ERROR(dev->dev, "failed to allocate VRAM\n");
                        priv->vram.paddr = 0;
                        return -ENOMEM;
                }

                DRM_DEV_INFO(dev->dev, "VRAM: %08x->%08x\n",
                                (uint32_t)priv->vram.paddr,
                                (uint32_t)(priv->vram.paddr + size));
        }

        return ret;
}
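
/*
 * A (hypothetical) devicetree fragment that would exercise the
 * 'memory-region' path above might look like:
 *
 *      reserved-memory {
 *              gpu_mem: framebuffer@96000000 {
 *                      reg = <0x96000000 0x1000000>;
 *                      no-map;
 *              };
 *      };
 *
 *      &mdp {
 *              memory-region = <&gpu_mem>;
 *      };
 *
 * Addresses and labels here are illustrative only.
 */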

static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct drm_device *ddev;
        struct msm_drm_private *priv;
        struct msm_kms *kms;
        struct msm_mdss *mdss;
        int ret, i;

        ddev = drm_dev_alloc(drv, dev);
        if (IS_ERR(ddev)) {
                DRM_DEV_ERROR(dev, "failed to allocate drm_device\n");
                return PTR_ERR(ddev);
        }

        platform_set_drvdata(pdev, ddev);

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv) {
                ret = -ENOMEM;
                goto err_put_drm_dev;
        }

        ddev->dev_private = priv;
        priv->dev = ddev;

        switch (get_mdp_ver(pdev)) {
        case KMS_MDP5:
                ret = mdp5_mdss_init(ddev);
                break;
        case KMS_DPU:
                ret = dpu_mdss_init(ddev);
                break;
        default:
                ret = 0;
                break;
        }
        if (ret)
                goto err_free_priv;

        mdss = priv->mdss;

        priv->wq = alloc_ordered_workqueue("msm", 0);
        priv->hangcheck_period = DRM_MSM_HANGCHECK_DEFAULT_PERIOD;

        INIT_LIST_HEAD(&priv->objects);
        mutex_init(&priv->obj_lock);

        INIT_LIST_HEAD(&priv->inactive_willneed);
        INIT_LIST_HEAD(&priv->inactive_dontneed);
        INIT_LIST_HEAD(&priv->inactive_unpinned);
        mutex_init(&priv->mm_lock);

        /* Teach lockdep about lock ordering wrt. shrinker: */
        fs_reclaim_acquire(GFP_KERNEL);
        might_lock(&priv->mm_lock);
        fs_reclaim_release(GFP_KERNEL);

        drm_mode_config_init(ddev);

        ret = msm_init_vram(ddev);
        if (ret)
                goto err_destroy_mdss;

        /* Bind all our sub-components: */
        ret = component_bind_all(dev, ddev);
        if (ret)
                goto err_destroy_mdss;

        dma_set_max_seg_size(dev, UINT_MAX);

        msm_gem_shrinker_init(ddev);

        switch (get_mdp_ver(pdev)) {
        case KMS_MDP4:
                kms = mdp4_kms_init(ddev);
                priv->kms = kms;
                break;
        case KMS_MDP5:
                kms = mdp5_kms_init(ddev);
                break;
        case KMS_DPU:
                kms = dpu_kms_init(ddev);
                priv->kms = kms;
                break;
        default:
                /* valid only for the dummy headless case, where of_node=NULL */
                WARN_ON(dev->of_node);
                kms = NULL;
                break;
        }

        if (IS_ERR(kms)) {
                DRM_DEV_ERROR(dev, "failed to load kms\n");
                ret = PTR_ERR(kms);
                priv->kms = NULL;
                goto err_msm_uninit;
        }

        /* Enable normalization of plane zpos */
        ddev->mode_config.normalize_zpos = true;

        if (kms) {
                kms->dev = ddev;
                ret = kms->funcs->hw_init(kms);
                if (ret) {
                        DRM_DEV_ERROR(dev, "kms hw init failed: %d\n", ret);
                        goto err_msm_uninit;
                }
        }

        ddev->mode_config.funcs = &mode_config_funcs;
        ddev->mode_config.helper_private = &mode_config_helper_funcs;

        for (i = 0; i < priv->num_crtcs; i++) {
                /* initialize event thread */
                priv->event_thread[i].crtc_id = priv->crtcs[i]->base.id;
                priv->event_thread[i].dev = ddev;
                priv->event_thread[i].worker = kthread_create_worker(0,
                        "crtc_event:%d", priv->event_thread[i].crtc_id);
                if (IS_ERR(priv->event_thread[i].worker)) {
                        ret = PTR_ERR(priv->event_thread[i].worker);
                        DRM_DEV_ERROR(dev, "failed to create crtc_event kthread\n");
                        goto err_msm_uninit;
                }

                sched_set_fifo(priv->event_thread[i].worker->task);
        }

        ret = drm_vblank_init(ddev, priv->num_crtcs);
        if (ret < 0) {
                DRM_DEV_ERROR(dev, "failed to initialize vblank\n");
                goto err_msm_uninit;
        }

        if (kms) {
                pm_runtime_get_sync(dev);
                ret = msm_irq_install(ddev, kms->irq);
                pm_runtime_put_sync(dev);
                if (ret < 0) {
                        DRM_DEV_ERROR(dev, "failed to install IRQ handler\n");
                        goto err_msm_uninit;
                }
        }

        ret = drm_dev_register(ddev, 0);
        if (ret)
                goto err_msm_uninit;

        if (kms) {
                ret = msm_disp_snapshot_init(ddev);
                if (ret)
                        DRM_DEV_ERROR(dev, "msm_disp_snapshot_init failed ret = %d\n", ret);
        }
        drm_mode_config_reset(ddev);

#ifdef CONFIG_DRM_FBDEV_EMULATION
        if (kms && fbdev)
                priv->fbdev = msm_fbdev_init(ddev);
#endif

        ret = msm_debugfs_late_init(ddev);
        if (ret)
                goto err_msm_uninit;

        drm_kms_helper_poll_init(ddev);

        return 0;

err_msm_uninit:
        msm_drm_uninit(dev);
        return ret;
err_destroy_mdss:
        if (mdss && mdss->funcs)
                mdss->funcs->destroy(ddev);
err_free_priv:
        kfree(priv);
err_put_drm_dev:
        drm_dev_put(ddev);
        platform_set_drvdata(pdev, NULL);
        return ret;
}

/*
 * DRM operations:
 */

static void load_gpu(struct drm_device *dev)
{
        static DEFINE_MUTEX(init_lock);
        struct msm_drm_private *priv = dev->dev_private;

        mutex_lock(&init_lock);

        if (!priv->gpu)
                priv->gpu = adreno_load_gpu(dev);

        mutex_unlock(&init_lock);
}

static int context_init(struct drm_device *dev, struct drm_file *file)
{
        static atomic_t ident = ATOMIC_INIT(0);
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_file_private *ctx;

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;

        INIT_LIST_HEAD(&ctx->submitqueues);
        rwlock_init(&ctx->queuelock);

        kref_init(&ctx->ref);
        msm_submitqueue_init(dev, ctx);

        ctx->aspace = msm_gpu_create_private_address_space(priv->gpu, current);
        file->driver_priv = ctx;

        ctx->seqno = atomic_inc_return(&ident);

        return 0;
}

static int msm_open(struct drm_device *dev, struct drm_file *file)
{
        /* For now, load gpu on open.. to avoid the requirement of having
         * firmware in the initrd.
         */
        load_gpu(dev);

        return context_init(dev, file);
}

static void context_close(struct msm_file_private *ctx)
{
        msm_submitqueue_close(ctx);
        msm_file_private_put(ctx);
}

static void msm_postclose(struct drm_device *dev, struct drm_file *file)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_file_private *ctx = file->driver_priv;

        mutex_lock(&dev->struct_mutex);
        if (ctx == priv->lastctx)
                priv->lastctx = NULL;
        mutex_unlock(&dev->struct_mutex);

        context_close(ctx);
}

int msm_crtc_enable_vblank(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        unsigned int pipe = crtc->index;
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_kms *kms = priv->kms;
        if (!kms)
                return -ENXIO;
        drm_dbg_vbl(dev, "crtc=%u", pipe);
        return vblank_ctrl_queue_work(priv, pipe, true);
}

void msm_crtc_disable_vblank(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        unsigned int pipe = crtc->index;
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_kms *kms = priv->kms;
        if (!kms)
                return;
        drm_dbg_vbl(dev, "crtc=%u", pipe);
        vblank_ctrl_queue_work(priv, pipe, false);
}

/*
 * DRM ioctls:
 */

static int msm_ioctl_get_param(struct drm_device *dev, void *data,
                struct drm_file *file)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct drm_msm_param *args = data;
        struct msm_gpu *gpu;

        /* for now, we just have 3d pipe.. eventually this would need to
         * be more clever to dispatch to appropriate gpu module:
         */
        if (args->pipe != MSM_PIPE_3D0)
                return -EINVAL;

        gpu = priv->gpu;

        if (!gpu)
                return -ENXIO;

        return gpu->funcs->get_param(gpu, args->param, &args->value);
}

static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
                struct drm_file *file)
{
        struct drm_msm_gem_new *args = data;

        if (args->flags & ~MSM_BO_FLAGS) {
                DRM_ERROR("invalid flags: %08x\n", args->flags);
                return -EINVAL;
        }

        return msm_gem_new_handle(dev, file, args->size,
                        args->flags, &args->handle, NULL);
}

static inline ktime_t to_ktime(struct drm_msm_timespec timeout)
{
        return ktime_set(timeout.tv_sec, timeout.tv_nsec);
}

static int msm_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
                struct drm_file *file)
{
        struct drm_msm_gem_cpu_prep *args = data;
        struct drm_gem_object *obj;
        ktime_t timeout = to_ktime(args->timeout);
        int ret;

        if (args->op & ~MSM_PREP_FLAGS) {
                DRM_ERROR("invalid op: %08x\n", args->op);
                return -EINVAL;
        }

        obj = drm_gem_object_lookup(file, args->handle);
        if (!obj)
                return -ENOENT;

        ret = msm_gem_cpu_prep(obj, args->op, &timeout);

        drm_gem_object_put(obj);

        return ret;
}

static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
                struct drm_file *file)
{
        struct drm_msm_gem_cpu_fini *args = data;
        struct drm_gem_object *obj;
        int ret;

        obj = drm_gem_object_lookup(file, args->handle);
        if (!obj)
                return -ENOENT;

        ret = msm_gem_cpu_fini(obj);

        drm_gem_object_put(obj);

        return ret;
}

static int msm_ioctl_gem_info_iova(struct drm_device *dev,
                struct drm_file *file, struct drm_gem_object *obj,
                uint64_t *iova)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_file_private *ctx = file->driver_priv;

        if (!priv->gpu)
                return -EINVAL;

        /*
         * Don't pin the memory here - just get an address so that userspace can
         * be productive
         */
        return msm_gem_get_iova(obj, ctx->aspace, iova);
}

static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
                struct drm_file *file)
{
        struct drm_msm_gem_info *args = data;
        struct drm_gem_object *obj;
        struct msm_gem_object *msm_obj;
        int i, ret = 0;

        if (args->pad)
                return -EINVAL;

        switch (args->info) {
        case MSM_INFO_GET_OFFSET:
        case MSM_INFO_GET_IOVA:
                /* value returned as immediate, not pointer, so len==0: */
                if (args->len)
                        return -EINVAL;
                break;
        case MSM_INFO_SET_NAME:
        case MSM_INFO_GET_NAME:
                break;
        default:
                return -EINVAL;
        }

        obj = drm_gem_object_lookup(file, args->handle);
        if (!obj)
                return -ENOENT;

        msm_obj = to_msm_bo(obj);

        switch (args->info) {
        case MSM_INFO_GET_OFFSET:
                args->value = msm_gem_mmap_offset(obj);
                break;
        case MSM_INFO_GET_IOVA:
                ret = msm_ioctl_gem_info_iova(dev, file, obj, &args->value);
                break;
        case MSM_INFO_SET_NAME:
                /* length check should leave room for terminating null: */
                if (args->len >= sizeof(msm_obj->name)) {
                        ret = -EINVAL;
                        break;
                }
                if (copy_from_user(msm_obj->name, u64_to_user_ptr(args->value),
                                   args->len)) {
                        msm_obj->name[0] = '\0';
                        ret = -EFAULT;
                        break;
                }
                msm_obj->name[args->len] = '\0';
                for (i = 0; i < args->len; i++) {
                        if (!isprint(msm_obj->name[i])) {
                                msm_obj->name[i] = '\0';
                                break;
                        }
                }
                break;
        case MSM_INFO_GET_NAME:
                if (args->value && (args->len < strlen(msm_obj->name))) {
                        ret = -EINVAL;
                        break;
                }
                args->len = strlen(msm_obj->name);
                if (args->value) {
                        if (copy_to_user(u64_to_user_ptr(args->value),
                                         msm_obj->name, args->len))
                                ret = -EFAULT;
                }
                break;
        }

        drm_gem_object_put(obj);

        return ret;
}

static int wait_fence(struct msm_gpu_submitqueue *queue, uint32_t fence_id,
                      ktime_t timeout)
{
        struct dma_fence *fence;
        int ret;

        if (fence_id > queue->last_fence) {
                DRM_ERROR_RATELIMITED("waiting on invalid fence: %u (of %u)\n",
                                      fence_id, queue->last_fence);
                return -EINVAL;
        }

        /*
         * Map submitqueue scoped "seqno" (which is actually an idr key)
         * back to underlying dma-fence
         *
         * The fence is removed from the fence_idr when the submit is
         * retired, so if the fence is not found it means there is nothing
         * to wait for
         */
        ret = mutex_lock_interruptible(&queue->lock);
        if (ret)
                return ret;
        fence = idr_find(&queue->fence_idr, fence_id);
        if (fence)
                fence = dma_fence_get_rcu(fence);
        mutex_unlock(&queue->lock);

        if (!fence)
                return 0;

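        /*
         * dma_fence_wait_timeout() returns remaining jiffies (> 0) on
         * success, 0 on timeout, or -ERESTARTSYS if interrupted; map
         * that onto the 0 / -ETIMEDOUT / -ERESTARTSYS convention this
         * ioctl reports:
         */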
        ret = dma_fence_wait_timeout(fence, true, timeout_to_jiffies(&timeout));
        if (ret == 0) {
                ret = -ETIMEDOUT;
        } else if (ret != -ERESTARTSYS) {
                ret = 0;
        }

        dma_fence_put(fence);

        return ret;
}

static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
                struct drm_file *file)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct drm_msm_wait_fence *args = data;
        struct msm_gpu_submitqueue *queue;
        int ret;

        if (args->pad) {
                DRM_ERROR("invalid pad: %08x\n", args->pad);
                return -EINVAL;
        }

        if (!priv->gpu)
                return 0;

        queue = msm_submitqueue_get(file->driver_priv, args->queueid);
        if (!queue)
                return -ENOENT;

        ret = wait_fence(queue, args->fence, to_ktime(args->timeout));

        msm_submitqueue_put(queue);

        return ret;
}

static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data,
                struct drm_file *file)
{
        struct drm_msm_gem_madvise *args = data;
        struct drm_gem_object *obj;
        int ret;

        switch (args->madv) {
        case MSM_MADV_DONTNEED:
        case MSM_MADV_WILLNEED:
                break;
        default:
                return -EINVAL;
        }

        obj = drm_gem_object_lookup(file, args->handle);
        if (!obj)
                return -ENOENT;

        ret = msm_gem_madvise(obj, args->madv);
        if (ret >= 0) {
                args->retained = ret;
                ret = 0;
        }

        drm_gem_object_put(obj);

        return ret;
}

static int msm_ioctl_submitqueue_new(struct drm_device *dev, void *data,
                struct drm_file *file)
{
        struct drm_msm_submitqueue *args = data;

        if (args->flags & ~MSM_SUBMITQUEUE_FLAGS)
                return -EINVAL;

        return msm_submitqueue_create(dev, file->driver_priv, args->prio,
                args->flags, &args->id);
}

static int msm_ioctl_submitqueue_query(struct drm_device *dev, void *data,
                struct drm_file *file)
{
        return msm_submitqueue_query(dev, file->driver_priv, data);
}

static int msm_ioctl_submitqueue_close(struct drm_device *dev, void *data,
                struct drm_file *file)
{
        u32 id = *(u32 *) data;

        return msm_submitqueue_remove(file->driver_priv, id);
}

static const struct drm_ioctl_desc msm_ioctls[] = {
        DRM_IOCTL_DEF_DRV(MSM_GET_PARAM,    msm_ioctl_get_param,    DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(MSM_GEM_NEW,      msm_ioctl_gem_new,      DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(MSM_GEM_INFO,     msm_ioctl_gem_info,     DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT,   msm_ioctl_gem_submit,   DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE,   msm_ioctl_wait_fence,   DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE,  msm_ioctl_gem_madvise,  DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_NEW,   msm_ioctl_submitqueue_new,   DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_CLOSE, msm_ioctl_submitqueue_close, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_QUERY, msm_ioctl_submitqueue_query, DRM_RENDER_ALLOW),
};

DEFINE_DRM_GEM_FOPS(fops);

static const struct drm_driver msm_driver = {
        .driver_features    = DRIVER_GEM |
                                DRIVER_RENDER |
                                DRIVER_ATOMIC |
                                DRIVER_MODESET |
                                DRIVER_SYNCOBJ,
        .open               = msm_open,
        .postclose          = msm_postclose,
        .lastclose          = drm_fb_helper_lastclose,
        .dumb_create        = msm_gem_dumb_create,
        .dumb_map_offset    = msm_gem_dumb_map_offset,
        .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
        .gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
        .gem_prime_mmap     = drm_gem_prime_mmap,
#ifdef CONFIG_DEBUG_FS
        .debugfs_init       = msm_debugfs_init,
#endif
        .ioctls             = msm_ioctls,
        .num_ioctls         = ARRAY_SIZE(msm_ioctls),
        .fops               = &fops,
        .name               = "msm",
        .desc               = "MSM Snapdragon DRM",
        .date               = "20130625",
        .major              = MSM_VERSION_MAJOR,
        .minor              = MSM_VERSION_MINOR,
        .patchlevel         = MSM_VERSION_PATCHLEVEL,
};

static int __maybe_unused msm_runtime_suspend(struct device *dev)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct msm_drm_private *priv = ddev->dev_private;
        struct msm_mdss *mdss = priv->mdss;

        DBG("");

        if (mdss && mdss->funcs)
                return mdss->funcs->disable(mdss);

        return 0;
}

static int __maybe_unused msm_runtime_resume(struct device *dev)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct msm_drm_private *priv = ddev->dev_private;
        struct msm_mdss *mdss = priv->mdss;

        DBG("");

        if (mdss && mdss->funcs)
                return mdss->funcs->enable(mdss);

        return 0;
}

static int __maybe_unused msm_pm_suspend(struct device *dev)
{
        if (pm_runtime_suspended(dev))
                return 0;

        return msm_runtime_suspend(dev);
}

static int __maybe_unused msm_pm_resume(struct device *dev)
{
        if (pm_runtime_suspended(dev))
                return 0;

        return msm_runtime_resume(dev);
}

static int __maybe_unused msm_pm_prepare(struct device *dev)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct msm_drm_private *priv = ddev ? ddev->dev_private : NULL;

        if (!priv || !priv->kms)
                return 0;

        return drm_mode_config_helper_suspend(ddev);
}

static void __maybe_unused msm_pm_complete(struct device *dev)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct msm_drm_private *priv = ddev ? ddev->dev_private : NULL;

        if (!priv || !priv->kms)
                return;

        drm_mode_config_helper_resume(ddev);
}

static const struct dev_pm_ops msm_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(msm_pm_suspend, msm_pm_resume)
        SET_RUNTIME_PM_OPS(msm_runtime_suspend, msm_runtime_resume, NULL)
        .prepare = msm_pm_prepare,
        .complete = msm_pm_complete,
};

/*
 * Componentized driver support:
 */

/*
 * NOTE: duplication of the same code as exynos or imx (or probably any other).
 * so probably some room for some helpers
 */
static int compare_of(struct device *dev, void *data)
{
        return dev->of_node == data;
}

/*
 * Identify what components need to be added by parsing what remote-endpoints
 * our MDP output ports are connected to. In the case of LVDS on MDP4, there
 * is no external component that we need to add since LVDS is within MDP4
 * itself.
 */
static int add_components_mdp(struct device *mdp_dev,
                              struct component_match **matchptr)
{
        struct device_node *np = mdp_dev->of_node;
        struct device_node *ep_node;
        struct device *master_dev;

        /*
         * on MDP4 based platforms, the MDP platform device is the component
         * master that adds other display interface components to itself.
         *
         * on MDP5 based platforms, the MDSS platform device is the component
         * master that adds MDP5 and other display interface components to
         * itself.
         */
        if (of_device_is_compatible(np, "qcom,mdp4"))
                master_dev = mdp_dev;
        else
                master_dev = mdp_dev->parent;

        for_each_endpoint_of_node(np, ep_node) {
                struct device_node *intf;
                struct of_endpoint ep;
                int ret;

                ret = of_graph_parse_endpoint(ep_node, &ep);
                if (ret) {
                        DRM_DEV_ERROR(mdp_dev, "unable to parse port endpoint\n");
                        of_node_put(ep_node);
                        return ret;
                }

                /*
                 * The LCDC/LVDS port on MDP4 is a special case where the
                 * remote-endpoint isn't a component that we need to add.
                 */
                if (of_device_is_compatible(np, "qcom,mdp4") &&
                    ep.port == 0)
                        continue;

                /*
                 * It's okay if some of the ports don't have a remote endpoint
                 * specified. It just means that the port isn't connected to
                 * any external interface.
                 */
                intf = of_graph_get_remote_port_parent(ep_node);
                if (!intf)
                        continue;

                if (of_device_is_available(intf))
                        drm_of_component_match_add(master_dev, matchptr,
                                                   compare_of, intf);

                of_node_put(intf);
        }

        return 0;
}

static int compare_name_mdp(struct device *dev, void *data)
{
        return (strstr(dev_name(dev), "mdp") != NULL);
}

static int add_display_components(struct platform_device *pdev,
                                  struct component_match **matchptr)
{
        struct device *mdp_dev;
        struct device *dev = &pdev->dev;
        int ret;

        /*
         * MDP5/DPU based devices don't have a flat hierarchy. There is a top
         * level parent: MDSS, and children: MDP5/DPU, DSI, HDMI, eDP etc.
         * Populate the children devices, find the MDP5/DPU node, and then add
         * the interfaces to our components list.
         */
        switch (get_mdp_ver(pdev)) {
        case KMS_MDP5:
        case KMS_DPU:
                ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
                if (ret) {
                        DRM_DEV_ERROR(dev, "failed to populate children devices\n");
                        return ret;
                }

                mdp_dev = device_find_child(dev, NULL, compare_name_mdp);
                if (!mdp_dev) {
                        DRM_DEV_ERROR(dev, "failed to find MDSS MDP node\n");
                        of_platform_depopulate(dev);
                        return -ENODEV;
                }

                put_device(mdp_dev);

                /* add the MDP component itself */
                drm_of_component_match_add(dev, matchptr, compare_of,
                                           mdp_dev->of_node);
                break;
        case KMS_MDP4:
                /* MDP4 */
                mdp_dev = dev;
                break;
        }

        ret = add_components_mdp(mdp_dev, matchptr);
        if (ret)
                of_platform_depopulate(dev);

        return ret;
}

/*
 * We don't know what the best binding to link the gpu with the drm device is.
 * For now, we just hunt for all the possible gpus that we support, and add them
 * as components.
 */
static const struct of_device_id msm_gpu_match[] = {
        { .compatible = "qcom,adreno" },
        { .compatible = "qcom,adreno-3xx" },
        { .compatible = "amd,imageon" },
        { .compatible = "qcom,kgsl-3d0" },
        { },
};
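
/*
 * For example, a (hypothetical) GPU node such as
 *
 *      gpu@5000000 {
 *              compatible = "qcom,adreno-630.2", "qcom,adreno";
 *      };
 *
 * is picked up via the generic "qcom,adreno" entry above.
 */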

static int add_gpu_components(struct device *dev,
                              struct component_match **matchptr)
{
        struct device_node *np;

        np = of_find_matching_node(NULL, msm_gpu_match);
        if (!np)
                return 0;

        if (of_device_is_available(np))
                drm_of_component_match_add(dev, matchptr, compare_of, np);

        of_node_put(np);

        return 0;
}

static int msm_drm_bind(struct device *dev)
{
        return msm_drm_init(dev, &msm_driver);
}

static void msm_drm_unbind(struct device *dev)
{
        msm_drm_uninit(dev);
}

static const struct component_master_ops msm_drm_ops = {
        .bind = msm_drm_bind,
        .unbind = msm_drm_unbind,
};

/*
 * Platform driver:
 */

static int msm_pdev_probe(struct platform_device *pdev)
{
        struct component_match *match = NULL;
        int ret;

        if (get_mdp_ver(pdev)) {
                ret = add_display_components(pdev, &match);
                if (ret)
                        return ret;
        }

        ret = add_gpu_components(&pdev->dev, &match);
        if (ret)
                goto fail;

        /* on all devices that I am aware of, the iommu can map any
         * address the cpu can see, so a full 64-bit dma mask is safe:
         */
        ret = dma_set_mask_and_coherent(&pdev->dev, ~0);
        if (ret)
                goto fail;

        ret = component_master_add_with_match(&pdev->dev, &msm_drm_ops, match);
        if (ret)
                goto fail;

        return 0;

fail:
        of_platform_depopulate(&pdev->dev);
        return ret;
}

static int msm_pdev_remove(struct platform_device *pdev)
{
        component_master_del(&pdev->dev, &msm_drm_ops);
        of_platform_depopulate(&pdev->dev);

        return 0;
}

static void msm_pdev_shutdown(struct platform_device *pdev)
{
        struct drm_device *drm = platform_get_drvdata(pdev);
        struct msm_drm_private *priv = drm ? drm->dev_private : NULL;

        if (!priv || !priv->kms)
                return;

        drm_atomic_helper_shutdown(drm);
}

static const struct of_device_id dt_match[] = {
        { .compatible = "qcom,mdp4", .data = (void *)KMS_MDP4 },
        { .compatible = "qcom,mdss", .data = (void *)KMS_MDP5 },
        { .compatible = "qcom,sdm845-mdss", .data = (void *)KMS_DPU },
        { .compatible = "qcom,sc7180-mdss", .data = (void *)KMS_DPU },
        { .compatible = "qcom,sc7280-mdss", .data = (void *)KMS_DPU },
        { .compatible = "qcom,sm8150-mdss", .data = (void *)KMS_DPU },
        { .compatible = "qcom,sm8250-mdss", .data = (void *)KMS_DPU },
        {}
};
MODULE_DEVICE_TABLE(of, dt_match);

static struct platform_driver msm_platform_driver = {
        .probe      = msm_pdev_probe,
        .remove     = msm_pdev_remove,
        .shutdown   = msm_pdev_shutdown,
        .driver     = {
                .name   = "msm",
                .of_match_table = dt_match,
                .pm     = &msm_pm_ops,
        },
};

static int __init msm_drm_register(void)
{
        if (!modeset)
                return -EINVAL;

        DBG("init");
        msm_mdp_register();
        msm_dpu_register();
        msm_dsi_register();
        msm_edp_register();
        msm_hdmi_register();
        msm_dp_register();
        adreno_register();
        return platform_driver_register(&msm_platform_driver);
}

static void __exit msm_drm_unregister(void)
{
        DBG("fini");
        platform_driver_unregister(&msm_platform_driver);
        msm_dp_unregister();
        msm_hdmi_unregister();
        adreno_unregister();
        msm_edp_unregister();
        msm_dsi_unregister();
        msm_mdp_unregister();
        msm_dpu_unregister();
}

module_init(msm_drm_register);
module_exit(msm_drm_unregister);

MODULE_AUTHOR("Rob Clark <robdclark@gmail.com>");
MODULE_DESCRIPTION("MSM DRM Driver");
MODULE_LICENSE("GPL");