linux/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
   4 * Copyright (C) 2013 Red Hat
   5 * Author: Rob Clark <robdclark@gmail.com>
   6 */
   7
   8#define pr_fmt(fmt)     "[drm:%s:%d] " fmt, __func__, __LINE__
   9
  10#include <drm/drm_crtc.h>
  11#include <linux/debugfs.h>
  12#include <linux/of_irq.h>
  13#include <linux/dma-buf.h>
  14
  15#include "msm_drv.h"
  16#include "msm_mmu.h"
  17#include "msm_gem.h"
  18
  19#include "dpu_kms.h"
  20#include "dpu_core_irq.h"
  21#include "dpu_formats.h"
  22#include "dpu_hw_vbif.h"
  23#include "dpu_vbif.h"
  24#include "dpu_encoder.h"
  25#include "dpu_plane.h"
  26#include "dpu_crtc.h"
  27
  28#define CREATE_TRACE_POINTS
  29#include "dpu_trace.h"
  30
  31static const char * const iommu_ports[] = {
  32                "mdp_0",
  33};
  34
  35/*
  36 * To enable overall DRM driver logging
  37 * # echo 0x2 > /sys/module/drm/parameters/debug
  38 *
  39 * To enable DRM driver h/w logging
  40 * # echo <mask> > /sys/kernel/debug/dri/0/debug/hw_log_mask
  41 *
  42 * See dpu_hw_mdss.h for h/w logging mask definitions (search for DPU_DBG_MASK_)
  43 */
  44#define DPU_DEBUGFS_DIR "msm_dpu"
  45#define DPU_DEBUGFS_HWMASKNAME "hw_log_mask"
  46
  47static int dpu_kms_hw_init(struct msm_kms *kms);
  48static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms);
  49
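/*
 * Return the size of the named MEM resource of the DPU platform device,
 * or zero (with an error message) if the resource is not found.
 */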
  50static unsigned long dpu_iomap_size(struct platform_device *pdev,
  51                                    const char *name)
  52{
  53        struct resource *res;
  54
  55        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
  56        if (!res) {
  57                DRM_ERROR("failed to get memory resource: %s\n", name);
  58                return 0;
  59        }
  60
  61        return resource_size(res);
  62}
  63
  64#ifdef CONFIG_DEBUG_FS
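/*
 * Print the danger or safe signal status of the MDP TOP block and of each
 * SSPP to the given debugfs seq_file. The device is runtime-resumed around
 * the register reads.
 */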
  65static int _dpu_danger_signal_status(struct seq_file *s,
  66                bool danger_status)
  67{
  68        struct dpu_kms *kms = (struct dpu_kms *)s->private;
  69        struct msm_drm_private *priv;
  70        struct dpu_danger_safe_status status;
  71        int i;
  72
  73        if (!kms->dev || !kms->dev->dev_private || !kms->hw_mdp) {
  74                DPU_ERROR("invalid arg(s)\n");
  75                return 0;
  76        }
  77
  78        priv = kms->dev->dev_private;
  79        memset(&status, 0, sizeof(struct dpu_danger_safe_status));
  80
  81        pm_runtime_get_sync(&kms->pdev->dev);
  82        if (danger_status) {
  83                seq_puts(s, "\nDanger signal status:\n");
  84                if (kms->hw_mdp->ops.get_danger_status)
  85                        kms->hw_mdp->ops.get_danger_status(kms->hw_mdp,
  86                                        &status);
  87        } else {
  88                seq_puts(s, "\nSafe signal status:\n");
   89                if (kms->hw_mdp->ops.get_safe_status)
   90                        kms->hw_mdp->ops.get_safe_status(kms->hw_mdp,
  91                                        &status);
  92        }
  93        pm_runtime_put_sync(&kms->pdev->dev);
  94
  95        seq_printf(s, "MDP     :  0x%x\n", status.mdp);
  96
  97        for (i = SSPP_VIG0; i < SSPP_MAX; i++)
  98                seq_printf(s, "SSPP%d   :  0x%x  \t", i - SSPP_VIG0,
  99                                status.sspp[i]);
 100        seq_puts(s, "\n");
 101
 102        return 0;
 103}
 104
 105#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix)                           \
 106static int __prefix ## _open(struct inode *inode, struct file *file)    \
 107{                                                                       \
 108        return single_open(file, __prefix ## _show, inode->i_private);  \
 109}                                                                       \
 110static const struct file_operations __prefix ## _fops = {               \
 111        .owner = THIS_MODULE,                                           \
 112        .open = __prefix ## _open,                                      \
 113        .release = single_release,                                      \
 114        .read = seq_read,                                               \
 115        .llseek = seq_lseek,                                            \
 116}
 117
 118static int dpu_debugfs_danger_stats_show(struct seq_file *s, void *v)
 119{
 120        return _dpu_danger_signal_status(s, true);
 121}
 122DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_danger_stats);
 123
 124static int dpu_debugfs_safe_stats_show(struct seq_file *s, void *v)
 125{
 126        return _dpu_danger_signal_status(s, false);
 127}
 128DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_safe_stats);
 129
 130static void dpu_debugfs_danger_init(struct dpu_kms *dpu_kms,
 131                struct dentry *parent)
 132{
 133        struct dentry *entry = debugfs_create_dir("danger", parent);
 134
 135        debugfs_create_file("danger_status", 0600, entry,
 136                        dpu_kms, &dpu_debugfs_danger_stats_fops);
 137        debugfs_create_file("safe_status", 0600, entry,
 138                        dpu_kms, &dpu_debugfs_safe_stats_fops);
 139}
 140
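/*
 * Dump the register block described by the dpu_debugfs_regset32 attached to
 * this debugfs file: blk_len bytes starting at offset, printed as 32-bit
 * words, 16 bytes per line, with a runtime PM reference held.
 */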
 141static int _dpu_debugfs_show_regset32(struct seq_file *s, void *data)
 142{
 143        struct dpu_debugfs_regset32 *regset = s->private;
 144        struct dpu_kms *dpu_kms = regset->dpu_kms;
 145        struct drm_device *dev;
 146        struct msm_drm_private *priv;
 147        void __iomem *base;
 148        uint32_t i, addr;
 149
 150        if (!dpu_kms->mmio)
 151                return 0;
 152
 153        dev = dpu_kms->dev;
 154        if (!dev)
 155                return 0;
 156
 157        priv = dev->dev_private;
 158        if (!priv)
 159                return 0;
 160
 161        base = dpu_kms->mmio + regset->offset;
 162
 163        /* insert padding spaces, if needed */
 164        if (regset->offset & 0xF) {
 165                seq_printf(s, "[%x]", regset->offset & ~0xF);
 166                for (i = 0; i < (regset->offset & 0xF); i += 4)
 167                        seq_puts(s, "         ");
 168        }
 169
 170        pm_runtime_get_sync(&dpu_kms->pdev->dev);
 171
 172        /* main register output */
 173        for (i = 0; i < regset->blk_len; i += 4) {
 174                addr = regset->offset + i;
 175                if ((addr & 0xF) == 0x0)
 176                        seq_printf(s, i ? "\n[%x]" : "[%x]", addr);
 177                seq_printf(s, " %08x", readl_relaxed(base + i));
 178        }
 179        seq_puts(s, "\n");
 180        pm_runtime_put_sync(&dpu_kms->pdev->dev);
 181
 182        return 0;
 183}
 184
 185static int dpu_debugfs_open_regset32(struct inode *inode,
 186                struct file *file)
 187{
 188        return single_open(file, _dpu_debugfs_show_regset32, inode->i_private);
 189}
 190
 191static const struct file_operations dpu_fops_regset32 = {
 192        .open =         dpu_debugfs_open_regset32,
 193        .read =         seq_read,
 194        .llseek =       seq_lseek,
 195        .release =      single_release,
 196};
 197
 198void dpu_debugfs_setup_regset32(struct dpu_debugfs_regset32 *regset,
 199                uint32_t offset, uint32_t length, struct dpu_kms *dpu_kms)
 200{
 201        if (regset) {
 202                regset->offset = offset;
 203                regset->blk_len = length;
 204                regset->dpu_kms = dpu_kms;
 205        }
 206}
 207
 208void dpu_debugfs_create_regset32(const char *name, umode_t mode,
 209                void *parent, struct dpu_debugfs_regset32 *regset)
 210{
 211        if (!name || !regset || !regset->dpu_kms || !regset->blk_len)
 212                return;
 213
 214        /* make sure offset is a multiple of 4 */
 215        regset->offset = round_down(regset->offset, 4);
 216
 217        debugfs_create_file(name, mode, parent, regset, &dpu_fops_regset32);
 218}
 219
 220static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
 221{
 222        struct dpu_kms *dpu_kms = to_dpu_kms(kms);
 223        void *p = dpu_hw_util_get_log_mask_ptr();
 224        struct dentry *entry;
 225
 226        if (!p)
 227                return -EINVAL;
 228
 229        entry = debugfs_create_dir("debug", minor->debugfs_root);
 230
 231        debugfs_create_x32(DPU_DEBUGFS_HWMASKNAME, 0600, entry, p);
 232
 233        dpu_debugfs_danger_init(dpu_kms, entry);
 234        dpu_debugfs_vbif_init(dpu_kms, entry);
 235        dpu_debugfs_core_irq_init(dpu_kms, entry);
 236
 237        return dpu_core_perf_debugfs_init(dpu_kms, entry);
 238}
 239#endif
 240
 241static int dpu_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
 242{
 243        return dpu_crtc_vblank(crtc, true);
 244}
 245
 246static void dpu_kms_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
 247{
 248        dpu_crtc_vblank(crtc, false);
 249}
 250
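/*
 * Grab a runtime PM reference for the duration of the commit and give every
 * encoder affected by this atomic state a chance to prepare its hardware.
 * The reference is dropped again in dpu_kms_complete_commit().
 */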
 251static void dpu_kms_prepare_commit(struct msm_kms *kms,
 252                struct drm_atomic_state *state)
 253{
 254        struct dpu_kms *dpu_kms;
 255        struct msm_drm_private *priv;
 256        struct drm_device *dev;
 257        struct drm_crtc *crtc;
 258        struct drm_crtc_state *crtc_state;
 259        struct drm_encoder *encoder;
 260        int i;
 261
 262        if (!kms)
 263                return;
 264        dpu_kms = to_dpu_kms(kms);
 265        dev = dpu_kms->dev;
 266
 267        if (!dev || !dev->dev_private)
 268                return;
 269        priv = dev->dev_private;
 270        pm_runtime_get_sync(&dpu_kms->pdev->dev);
 271
 272        /* Call prepare_commit for all affected encoders */
 273        for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
 274                drm_for_each_encoder_mask(encoder, crtc->dev,
 275                                          crtc_state->encoder_mask) {
 276                        dpu_encoder_prepare_commit(encoder);
 277                }
 278        }
 279}
 280
 281/*
 282 * Override the encoder enable since we need to setup the inline rotator and do
 283 * some crtc magic before enabling any bridge that might be present.
 284 */
 285void dpu_kms_encoder_enable(struct drm_encoder *encoder)
 286{
 287        const struct drm_encoder_helper_funcs *funcs = encoder->helper_private;
 288        struct drm_device *dev = encoder->dev;
 289        struct drm_crtc *crtc;
 290
 291        /* Forward this enable call to the commit hook */
 292        if (funcs && funcs->commit)
 293                funcs->commit(encoder);
 294
 295        WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
 296        drm_for_each_crtc(crtc, dev) {
 297                if (!(crtc->state->encoder_mask & drm_encoder_mask(encoder)))
 298                        continue;
 299
 300                trace_dpu_kms_enc_enable(DRMID(crtc));
 301                dpu_crtc_commit_kickoff(crtc, false);
 302        }
 303}
 304
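/*
 * Kick off the flush for every active CRTC in this atomic state. CRTCs that
 * need a full modeset are skipped here because their kickoff already happens
 * in dpu_kms_encoder_enable().
 */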
 305static void dpu_kms_commit(struct msm_kms *kms, struct drm_atomic_state *state)
 306{
 307        struct drm_crtc *crtc;
 308        struct drm_crtc_state *crtc_state;
 309        int i;
 310
 311        for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
 312                /* If modeset is required, kickoff is run in encoder_enable */
 313                if (drm_atomic_crtc_needs_modeset(crtc_state))
 314                        continue;
 315
 316                if (crtc->state->active) {
 317                        trace_dpu_kms_commit(DRMID(crtc));
 318                        dpu_crtc_commit_kickoff(crtc,
 319                                                state->legacy_cursor_update);
 320                }
 321        }
 322}
 323
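/*
 * Finish the atomic commit: let each CRTC complete its per-commit
 * bookkeeping and release the runtime PM reference taken in
 * dpu_kms_prepare_commit().
 */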
 324static void dpu_kms_complete_commit(struct msm_kms *kms,
 325                struct drm_atomic_state *old_state)
 326{
 327        struct dpu_kms *dpu_kms;
 328        struct msm_drm_private *priv;
 329        struct drm_crtc *crtc;
 330        struct drm_crtc_state *old_crtc_state;
 331        int i;
 332
 333        if (!kms || !old_state)
 334                return;
 335        dpu_kms = to_dpu_kms(kms);
 336
 337        if (!dpu_kms->dev || !dpu_kms->dev->dev_private)
 338                return;
 339        priv = dpu_kms->dev->dev_private;
 340
 341        DPU_ATRACE_BEGIN("kms_complete_commit");
 342
 343        for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i)
 344                dpu_crtc_complete_commit(crtc, old_crtc_state);
 345
 346        pm_runtime_put_sync(&dpu_kms->pdev->dev);
 347
 348        DPU_ATRACE_END("kms_complete_commit");
 349}
 350
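/*
 * Block until every encoder driving this CRTC has flushed its update to
 * hardware, e.g. by waiting for vsync on video mode panels. Command mode
 * panels may return immediately.
 */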
 351static void dpu_kms_wait_for_commit_done(struct msm_kms *kms,
 352                struct drm_crtc *crtc)
 353{
 354        struct drm_encoder *encoder;
 355        struct drm_device *dev;
 356        int ret;
 357
 358        if (!kms || !crtc || !crtc->state) {
 359                DPU_ERROR("invalid params\n");
 360                return;
 361        }
 362
 363        dev = crtc->dev;
 364
 365        if (!crtc->state->enable) {
 366                DPU_DEBUG("[crtc:%d] not enable\n", crtc->base.id);
 367                return;
 368        }
 369
 370        if (!crtc->state->active) {
 371                DPU_DEBUG("[crtc:%d] not active\n", crtc->base.id);
 372                return;
 373        }
 374
 375        list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
 376                if (encoder->crtc != crtc)
 377                        continue;
 378                /*
 379                 * Wait for post-flush if necessary to delay before
 380                 * plane_cleanup. For example, wait for vsync in case of video
 381                 * mode panels. This may be a no-op for command mode panels.
 382                 */
 383                trace_dpu_kms_wait_for_commit_done(DRMID(crtc));
 384                ret = dpu_encoder_wait_for_event(encoder, MSM_ENC_COMMIT_DONE);
 385                if (ret && ret != -EWOULDBLOCK) {
 386                        DPU_ERROR("wait for commit done returned %d\n", ret);
 387                        break;
 388                }
 389        }
 390}
 391
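/*
 * Create a single DSI encoder and register it with each populated DSI host.
 * Two independent DSI connectors are not supported yet (see TODO below).
 */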
 392static int _dpu_kms_initialize_dsi(struct drm_device *dev,
 393                                    struct msm_drm_private *priv,
 394                                    struct dpu_kms *dpu_kms)
 395{
 396        struct drm_encoder *encoder = NULL;
 397        int i, rc = 0;
 398
 399        if (!(priv->dsi[0] || priv->dsi[1]))
 400                return rc;
 401
 402        /*TODO: Support two independent DSI connectors */
 403        encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_DSI);
 404        if (IS_ERR(encoder)) {
 405                DPU_ERROR("encoder init failed for dsi display\n");
 406                return PTR_ERR(encoder);
 407        }
 408
 409        priv->encoders[priv->num_encoders++] = encoder;
 410
 411        for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
 412                if (!priv->dsi[i])
 413                        continue;
 414
 415                rc = msm_dsi_modeset_init(priv->dsi[i], dev, encoder);
 416                if (rc) {
 417                        DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n",
 418                                i, rc);
 419                        break;
 420                }
 421        }
 422
 423        return rc;
 424}
 425
 426/**
 427 * _dpu_kms_setup_displays - create encoders, bridges and connectors
 428 *                           for underlying displays
 429 * @dev:        Pointer to drm device structure
 430 * @priv:       Pointer to private drm device data
 431 * @dpu_kms:    Pointer to dpu kms structure
 432 * Returns:     Zero on success
 433 */
 434static int _dpu_kms_setup_displays(struct drm_device *dev,
 435                                    struct msm_drm_private *priv,
 436                                    struct dpu_kms *dpu_kms)
 437{
 438        /**
 439         * Extend this function to initialize other
 440         * types of displays
 441         */
 442
 443        return _dpu_kms_initialize_dsi(dev, priv, dpu_kms);
 444}
 445
 446static void _dpu_kms_drm_obj_destroy(struct dpu_kms *dpu_kms)
 447{
 448        struct msm_drm_private *priv;
 449        int i;
 450
 451        if (!dpu_kms) {
 452                DPU_ERROR("invalid dpu_kms\n");
 453                return;
 454        } else if (!dpu_kms->dev) {
 455                DPU_ERROR("invalid dev\n");
 456                return;
 457        } else if (!dpu_kms->dev->dev_private) {
 458                DPU_ERROR("invalid dev_private\n");
 459                return;
 460        }
 461        priv = dpu_kms->dev->dev_private;
 462
 463        for (i = 0; i < priv->num_crtcs; i++)
 464                priv->crtcs[i]->funcs->destroy(priv->crtcs[i]);
 465        priv->num_crtcs = 0;
 466
 467        for (i = 0; i < priv->num_planes; i++)
 468                priv->planes[i]->funcs->destroy(priv->planes[i]);
 469        priv->num_planes = 0;
 470
 471        for (i = 0; i < priv->num_connectors; i++)
 472                priv->connectors[i]->funcs->destroy(priv->connectors[i]);
 473        priv->num_connectors = 0;
 474
 475        for (i = 0; i < priv->num_encoders; i++)
 476                priv->encoders[i]->funcs->destroy(priv->encoders[i]);
 477        priv->num_encoders = 0;
 478}
 479
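/*
 * Create the DRM objects for the detected hardware: encoders and connectors
 * via the display drivers, one plane per SSPP (primary, cursor or overlay
 * depending on its capabilities), and one CRTC per primary plane/encoder
 * pair.
 */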
 480static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
 481{
 482        struct drm_device *dev;
 483        struct drm_plane *primary_planes[MAX_PLANES], *plane;
 484        struct drm_plane *cursor_planes[MAX_PLANES] = { NULL };
 485        struct drm_crtc *crtc;
 486
 487        struct msm_drm_private *priv;
 488        struct dpu_mdss_cfg *catalog;
 489
 490        int primary_planes_idx = 0, cursor_planes_idx = 0, i, ret;
 491        int max_crtc_count;
 492
 493        if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev) {
 494                DPU_ERROR("invalid dpu_kms\n");
 495                return -EINVAL;
 496        }
 497
 498        dev = dpu_kms->dev;
 499        priv = dev->dev_private;
 500        catalog = dpu_kms->catalog;
 501
 502        /*
 503         * Create encoder and query display drivers to create
 504         * bridges and connectors
 505         */
 506        ret = _dpu_kms_setup_displays(dev, priv, dpu_kms);
 507        if (ret)
 508                goto fail;
 509
 510        max_crtc_count = min(catalog->mixer_count, priv->num_encoders);
 511
 512        /* Create the planes, keeping track of one primary/cursor per crtc */
 513        for (i = 0; i < catalog->sspp_count; i++) {
 514                enum drm_plane_type type;
 515
 516                if ((catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR))
 517                        && cursor_planes_idx < max_crtc_count)
 518                        type = DRM_PLANE_TYPE_CURSOR;
 519                else if (primary_planes_idx < max_crtc_count)
 520                        type = DRM_PLANE_TYPE_PRIMARY;
 521                else
 522                        type = DRM_PLANE_TYPE_OVERLAY;
 523
 524                DPU_DEBUG("Create plane type %d with features %lx (cur %lx)\n",
 525                          type, catalog->sspp[i].features,
 526                          catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR));
 527
 528                plane = dpu_plane_init(dev, catalog->sspp[i].id, type,
 529                                       (1UL << max_crtc_count) - 1, 0);
 530                if (IS_ERR(plane)) {
 531                        DPU_ERROR("dpu_plane_init failed\n");
 532                        ret = PTR_ERR(plane);
 533                        goto fail;
 534                }
 535                priv->planes[priv->num_planes++] = plane;
 536
 537                if (type == DRM_PLANE_TYPE_CURSOR)
 538                        cursor_planes[cursor_planes_idx++] = plane;
 539                else if (type == DRM_PLANE_TYPE_PRIMARY)
 540                        primary_planes[primary_planes_idx++] = plane;
 541        }
 542
 543        max_crtc_count = min(max_crtc_count, primary_planes_idx);
 544
 545        /* Create one CRTC per encoder */
 546        for (i = 0; i < max_crtc_count; i++) {
 547                crtc = dpu_crtc_init(dev, primary_planes[i], cursor_planes[i]);
 548                if (IS_ERR(crtc)) {
 549                        ret = PTR_ERR(crtc);
 550                        goto fail;
 551                }
 552                priv->crtcs[priv->num_crtcs++] = crtc;
 553        }
 554
 555        /* All CRTCs are compatible with all encoders */
 556        for (i = 0; i < priv->num_encoders; i++)
 557                priv->encoders[i]->possible_crtcs = (1 << priv->num_crtcs) - 1;
 558
 559        return 0;
 560fail:
 561        _dpu_kms_drm_obj_destroy(dpu_kms);
 562        return ret;
 563}
 564
 565static long dpu_kms_round_pixclk(struct msm_kms *kms, unsigned long rate,
 566                struct drm_encoder *encoder)
 567{
 568        return rate;
 569}
 570
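/*
 * Tear down everything set up by dpu_kms_hw_init(): interrupts, the MMU,
 * VBIF and MDP TOP blocks, the resource manager, the catalog and the
 * register mappings. Safe to call on a partially initialized dpu_kms.
 */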
 571static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
 572{
 573        struct drm_device *dev;
 574        int i;
 575
 576        dev = dpu_kms->dev;
 577        if (!dev)
 578                return;
 579
 580        if (dpu_kms->hw_intr)
 581                dpu_hw_intr_destroy(dpu_kms->hw_intr);
 582        dpu_kms->hw_intr = NULL;
 583
 584        /* safe to call these more than once during shutdown */
 585        _dpu_kms_mmu_destroy(dpu_kms);
 586
 587        if (dpu_kms->catalog) {
 588                for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
 589                        u32 vbif_idx = dpu_kms->catalog->vbif[i].id;
 590
 591                        if ((vbif_idx < VBIF_MAX) && dpu_kms->hw_vbif[vbif_idx])
 592                                dpu_hw_vbif_destroy(dpu_kms->hw_vbif[vbif_idx]);
 593                }
 594        }
 595
 596        if (dpu_kms->rm_init)
 597                dpu_rm_destroy(&dpu_kms->rm);
 598        dpu_kms->rm_init = false;
 599
 600        if (dpu_kms->catalog)
 601                dpu_hw_catalog_deinit(dpu_kms->catalog);
 602        dpu_kms->catalog = NULL;
 603
 604        if (dpu_kms->vbif[VBIF_NRT])
 605                devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_NRT]);
 606        dpu_kms->vbif[VBIF_NRT] = NULL;
 607
 608        if (dpu_kms->vbif[VBIF_RT])
 609                devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_RT]);
 610        dpu_kms->vbif[VBIF_RT] = NULL;
 611
 612        if (dpu_kms->hw_mdp)
 613                dpu_hw_mdp_destroy(dpu_kms->hw_mdp);
 614        dpu_kms->hw_mdp = NULL;
 615
 616        if (dpu_kms->mmio)
 617                devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->mmio);
 618        dpu_kms->mmio = NULL;
 619}
 620
 621static void dpu_kms_destroy(struct msm_kms *kms)
 622{
 623        struct dpu_kms *dpu_kms;
 624
 625        if (!kms) {
 626                DPU_ERROR("invalid kms\n");
 627                return;
 628        }
 629
 630        dpu_kms = to_dpu_kms(kms);
 631
 632        _dpu_kms_hw_destroy(dpu_kms);
 633}
 634
 635static void _dpu_kms_set_encoder_mode(struct msm_kms *kms,
 636                                 struct drm_encoder *encoder,
 637                                 bool cmd_mode)
 638{
 639        struct msm_display_info info;
 640        struct msm_drm_private *priv = encoder->dev->dev_private;
 641        int i, rc = 0;
 642
 643        memset(&info, 0, sizeof(info));
 644
 645        info.intf_type = encoder->encoder_type;
 646        info.capabilities = cmd_mode ? MSM_DISPLAY_CAP_CMD_MODE :
 647                        MSM_DISPLAY_CAP_VID_MODE;
 648
 649        /* TODO: No support for DSI swap */
 650        for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
 651                if (priv->dsi[i]) {
 652                        info.h_tile_instance[info.num_of_h_tiles] = i;
 653                        info.num_of_h_tiles++;
 654                }
 655        }
 656
 657        rc = dpu_encoder_setup(encoder->dev, encoder, &info);
 658        if (rc)
 659                DPU_ERROR("failed to setup DPU encoder %d: rc:%d\n",
 660                        encoder->base.id, rc);
 661}
 662
 663static irqreturn_t dpu_irq(struct msm_kms *kms)
 664{
 665        struct dpu_kms *dpu_kms = to_dpu_kms(kms);
 666
 667        return dpu_core_irq(dpu_kms);
 668}
 669
 670static void dpu_irq_preinstall(struct msm_kms *kms)
 671{
 672        struct dpu_kms *dpu_kms = to_dpu_kms(kms);
 673
 674        dpu_core_irq_preinstall(dpu_kms);
 675}
 676
 677static void dpu_irq_uninstall(struct msm_kms *kms)
 678{
 679        struct dpu_kms *dpu_kms = to_dpu_kms(kms);
 680
 681        dpu_core_irq_uninstall(dpu_kms);
 682}
 683
 684static const struct msm_kms_funcs kms_funcs = {
 685        .hw_init         = dpu_kms_hw_init,
 686        .irq_preinstall  = dpu_irq_preinstall,
 687        .irq_uninstall   = dpu_irq_uninstall,
 688        .irq             = dpu_irq,
 689        .prepare_commit  = dpu_kms_prepare_commit,
 690        .commit          = dpu_kms_commit,
 691        .complete_commit = dpu_kms_complete_commit,
 692        .wait_for_crtc_commit_done = dpu_kms_wait_for_commit_done,
 693        .enable_vblank   = dpu_kms_enable_vblank,
 694        .disable_vblank  = dpu_kms_disable_vblank,
 695        .check_modified_format = dpu_format_check_modified_format,
 696        .get_format      = dpu_get_msm_format,
 697        .round_pixclk    = dpu_kms_round_pixclk,
 698        .destroy         = dpu_kms_destroy,
 699        .set_encoder_mode = _dpu_kms_set_encoder_mode,
 700#ifdef CONFIG_DEBUG_FS
 701        .debugfs_init    = dpu_kms_debugfs_init,
 702#endif
 703};
 704
 705static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms)
 706{
 707        struct msm_mmu *mmu;
 708
 709        if (!dpu_kms->base.aspace)
 710                return;
 711
 712        mmu = dpu_kms->base.aspace->mmu;
 713
 714        mmu->funcs->detach(mmu, (const char **)iommu_ports,
 715                        ARRAY_SIZE(iommu_ports));
 716        msm_gem_address_space_put(dpu_kms->base.aspace);
 717
 718        dpu_kms->base.aspace = NULL;
 719}
 720
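/*
 * Allocate an IOMMU domain for the display hardware, wrap it in a GEM
 * address space and attach it to the "mdp_0" port. Returns 0 without an
 * address space when no IOMMU domain can be allocated.
 */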
 721static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
 722{
 723        struct iommu_domain *domain;
 724        struct msm_gem_address_space *aspace;
 725        int ret;
 726
 727        domain = iommu_domain_alloc(&platform_bus_type);
 728        if (!domain)
 729                return 0;
 730
 731        domain->geometry.aperture_start = 0x1000;
 732        domain->geometry.aperture_end = 0xffffffff;
 733
 734        aspace = msm_gem_address_space_create(dpu_kms->dev->dev,
 735                        domain, "dpu1");
 736        if (IS_ERR(aspace)) {
 737                iommu_domain_free(domain);
 738                return PTR_ERR(aspace);
 739        }
 740
 741        ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
 742                        ARRAY_SIZE(iommu_ports));
 743        if (ret) {
 744                DPU_ERROR("failed to attach iommu %d\n", ret);
 745                msm_gem_address_space_put(aspace);
 746                return ret;
 747        }
 748
 749        dpu_kms->base.aspace = aspace;
 750        return 0;
 751}
 752
 753static struct dss_clk *_dpu_kms_get_clk(struct dpu_kms *dpu_kms,
 754                char *clock_name)
 755{
 756        struct dss_module_power *mp = &dpu_kms->mp;
 757        int i;
 758
 759        for (i = 0; i < mp->num_clk; i++) {
 760                if (!strcmp(mp->clk_config[i].clk_name, clock_name))
 761                        return &mp->clk_config[i];
 762        }
 763
 764        return NULL;
 765}
 766
 767u64 dpu_kms_get_clk_rate(struct dpu_kms *dpu_kms, char *clock_name)
 768{
 769        struct dss_clk *clk;
 770
 771        clk = _dpu_kms_get_clk(dpu_kms, clock_name);
 772        if (!clk)
 773                return -EINVAL;
 774
 775        return clk_get_rate(clk->clk);
 776}
 777
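/*
 * Main hardware initialization: map the MDP, VBIF and (optional) register
 * DMA regions, read the hardware revision, build the catalog, set up the
 * MMU, resource manager, MDP TOP, VBIF and interrupt blocks, and finally
 * create the DRM planes, CRTCs, encoders and connectors.
 */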
 778static int dpu_kms_hw_init(struct msm_kms *kms)
 779{
 780        struct dpu_kms *dpu_kms;
 781        struct drm_device *dev;
 782        struct msm_drm_private *priv;
 783        int i, rc = -EINVAL;
 784
 785        if (!kms) {
 786                DPU_ERROR("invalid kms\n");
 787                return rc;
 788        }
 789
 790        dpu_kms = to_dpu_kms(kms);
 791        dev = dpu_kms->dev;
 792        if (!dev) {
 793                DPU_ERROR("invalid device\n");
 794                return rc;
 795        }
 796
 797        priv = dev->dev_private;
 798        if (!priv) {
 799                DPU_ERROR("invalid private data\n");
 800                return rc;
 801        }
 802
 803        dpu_kms->mmio = msm_ioremap(dpu_kms->pdev, "mdp", "mdp");
 804        if (IS_ERR(dpu_kms->mmio)) {
 805                rc = PTR_ERR(dpu_kms->mmio);
 806                DPU_ERROR("mdp register memory map failed: %d\n", rc);
 807                dpu_kms->mmio = NULL;
 808                goto error;
 809        }
 810        DRM_DEBUG("mapped dpu address space @%pK\n", dpu_kms->mmio);
 811        dpu_kms->mmio_len = dpu_iomap_size(dpu_kms->pdev, "mdp");
 812
 813        dpu_kms->vbif[VBIF_RT] = msm_ioremap(dpu_kms->pdev, "vbif", "vbif");
 814        if (IS_ERR(dpu_kms->vbif[VBIF_RT])) {
 815                rc = PTR_ERR(dpu_kms->vbif[VBIF_RT]);
 816                DPU_ERROR("vbif register memory map failed: %d\n", rc);
 817                dpu_kms->vbif[VBIF_RT] = NULL;
 818                goto error;
 819        }
 820        dpu_kms->vbif_len[VBIF_RT] = dpu_iomap_size(dpu_kms->pdev, "vbif");
 821        dpu_kms->vbif[VBIF_NRT] = msm_ioremap(dpu_kms->pdev, "vbif_nrt", "vbif_nrt");
 822        if (IS_ERR(dpu_kms->vbif[VBIF_NRT])) {
 823                dpu_kms->vbif[VBIF_NRT] = NULL;
 824                DPU_DEBUG("VBIF NRT is not defined");
 825        } else {
 826                dpu_kms->vbif_len[VBIF_NRT] = dpu_iomap_size(dpu_kms->pdev,
 827                                                             "vbif_nrt");
 828        }
 829
 830        dpu_kms->reg_dma = msm_ioremap(dpu_kms->pdev, "regdma", "regdma");
 831        if (IS_ERR(dpu_kms->reg_dma)) {
 832                dpu_kms->reg_dma = NULL;
 833                DPU_DEBUG("REG_DMA is not defined");
 834        } else {
 835                dpu_kms->reg_dma_len = dpu_iomap_size(dpu_kms->pdev, "regdma");
 836        }
 837
 838        pm_runtime_get_sync(&dpu_kms->pdev->dev);
 839
 840        dpu_kms->core_rev = readl_relaxed(dpu_kms->mmio + 0x0);
 841
 842        pr_info("dpu hardware revision:0x%x\n", dpu_kms->core_rev);
 843
 844        dpu_kms->catalog = dpu_hw_catalog_init(dpu_kms->core_rev);
 845        if (IS_ERR_OR_NULL(dpu_kms->catalog)) {
 846                rc = PTR_ERR(dpu_kms->catalog);
 847                if (!dpu_kms->catalog)
 848                        rc = -EINVAL;
 849                DPU_ERROR("catalog init failed: %d\n", rc);
 850                dpu_kms->catalog = NULL;
 851                goto power_error;
 852        }
 853
 854        /*
 855         * Now we need to read the HW catalog and initialize resources such as
 856         * clocks, regulators, GDSC/MMAGIC, ioremap the register ranges etc
 857         */
 858        rc = _dpu_kms_mmu_init(dpu_kms);
 859        if (rc) {
 860                DPU_ERROR("dpu_kms_mmu_init failed: %d\n", rc);
 861                goto power_error;
 862        }
 863
 864        rc = dpu_rm_init(&dpu_kms->rm, dpu_kms->catalog, dpu_kms->mmio);
 865        if (rc) {
 866                DPU_ERROR("rm init failed: %d\n", rc);
 867                goto power_error;
 868        }
 869
 870        dpu_kms->rm_init = true;
 871
 872        dpu_kms->hw_mdp = dpu_hw_mdptop_init(MDP_TOP, dpu_kms->mmio,
 873                                             dpu_kms->catalog);
 874        if (IS_ERR(dpu_kms->hw_mdp)) {
 875                rc = PTR_ERR(dpu_kms->hw_mdp);
 876                DPU_ERROR("failed to get hw_mdp: %d\n", rc);
 877                dpu_kms->hw_mdp = NULL;
 878                goto power_error;
 879        }
 880
 881        for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
 882                u32 vbif_idx = dpu_kms->catalog->vbif[i].id;
 883
 884                dpu_kms->hw_vbif[i] = dpu_hw_vbif_init(vbif_idx,
 885                                dpu_kms->vbif[vbif_idx], dpu_kms->catalog);
 886                if (IS_ERR_OR_NULL(dpu_kms->hw_vbif[vbif_idx])) {
 887                        rc = PTR_ERR(dpu_kms->hw_vbif[vbif_idx]);
 888                        if (!dpu_kms->hw_vbif[vbif_idx])
 889                                rc = -EINVAL;
 890                        DPU_ERROR("failed to init vbif %d: %d\n", vbif_idx, rc);
 891                        dpu_kms->hw_vbif[vbif_idx] = NULL;
 892                        goto power_error;
 893                }
 894        }
 895
 896        rc = dpu_core_perf_init(&dpu_kms->perf, dev, dpu_kms->catalog,
 897                        _dpu_kms_get_clk(dpu_kms, "core"));
 898        if (rc) {
 899                DPU_ERROR("failed to init perf %d\n", rc);
 900                goto perf_err;
 901        }
 902
 903        dpu_kms->hw_intr = dpu_hw_intr_init(dpu_kms->mmio, dpu_kms->catalog);
 904        if (IS_ERR_OR_NULL(dpu_kms->hw_intr)) {
 905                rc = PTR_ERR(dpu_kms->hw_intr);
 906                DPU_ERROR("hw_intr init failed: %d\n", rc);
 907                dpu_kms->hw_intr = NULL;
 908                goto hw_intr_init_err;
 909        }
 910
 911        dev->mode_config.min_width = 0;
 912        dev->mode_config.min_height = 0;
 913
 914        /*
 915         * max crtc width is equal to the max mixer width * 2 and max height is
  916         * 4K
 917         */
 918        dev->mode_config.max_width =
 919                        dpu_kms->catalog->caps->max_mixer_width * 2;
 920        dev->mode_config.max_height = 4096;
 921
 922        /*
 923         * Support format modifiers for compression etc.
 924         */
 925        dev->mode_config.allow_fb_modifiers = true;
 926
 927        /*
 928         * _dpu_kms_drm_obj_init should create the DRM related objects
 929         * i.e. CRTCs, planes, encoders, connectors and so forth
 930         */
 931        rc = _dpu_kms_drm_obj_init(dpu_kms);
 932        if (rc) {
 933                DPU_ERROR("modeset init failed: %d\n", rc);
 934                goto drm_obj_init_err;
 935        }
 936
 937        dpu_vbif_init_memtypes(dpu_kms);
 938
 939        pm_runtime_put_sync(&dpu_kms->pdev->dev);
 940
 941        return 0;
 942
 943drm_obj_init_err:
 944        dpu_core_perf_destroy(&dpu_kms->perf);
 945hw_intr_init_err:
 946perf_err:
 947power_error:
 948        pm_runtime_put_sync(&dpu_kms->pdev->dev);
 949error:
 950        _dpu_kms_hw_destroy(dpu_kms);
 951
 952        return rc;
 953}
 954
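/*
 * Called from the top level msm driver to fetch the kms instance created in
 * dpu_bind() and wire up its interrupt line from the device tree.
 */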
 955struct msm_kms *dpu_kms_init(struct drm_device *dev)
 956{
 957        struct msm_drm_private *priv;
 958        struct dpu_kms *dpu_kms;
 959        int irq;
 960
 961        if (!dev || !dev->dev_private) {
 962                DPU_ERROR("drm device node invalid\n");
 963                return ERR_PTR(-EINVAL);
 964        }
 965
 966        priv = dev->dev_private;
 967        dpu_kms = to_dpu_kms(priv->kms);
 968
 969        irq = irq_of_parse_and_map(dpu_kms->pdev->dev.of_node, 0);
 970        if (irq < 0) {
 971                DPU_ERROR("failed to get irq: %d\n", irq);
 972                return ERR_PTR(irq);
 973        }
 974        dpu_kms->base.irq = irq;
 975
 976        return &dpu_kms->base;
 977}
 978
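/*
 * Component bind: allocate the dpu_kms, parse its clocks, hook it up to the
 * top level drm_device and enable runtime PM. Hardware initialization is
 * deferred to dpu_kms_hw_init(), called later through kms_funcs.hw_init.
 */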
 979static int dpu_bind(struct device *dev, struct device *master, void *data)
 980{
 981        struct drm_device *ddev = dev_get_drvdata(master);
 982        struct platform_device *pdev = to_platform_device(dev);
 983        struct msm_drm_private *priv = ddev->dev_private;
 984        struct dpu_kms *dpu_kms;
 985        struct dss_module_power *mp;
 986        int ret = 0;
 987
 988        dpu_kms = devm_kzalloc(&pdev->dev, sizeof(*dpu_kms), GFP_KERNEL);
 989        if (!dpu_kms)
 990                return -ENOMEM;
 991
 992        mp = &dpu_kms->mp;
 993        ret = msm_dss_parse_clock(pdev, mp);
 994        if (ret) {
 995                DPU_ERROR("failed to parse clocks, ret=%d\n", ret);
 996                return ret;
 997        }
 998
 999        platform_set_drvdata(pdev, dpu_kms);
1000
1001        msm_kms_init(&dpu_kms->base, &kms_funcs);
1002        dpu_kms->dev = ddev;
1003        dpu_kms->pdev = pdev;
1004
1005        pm_runtime_enable(&pdev->dev);
1006        dpu_kms->rpm_enabled = true;
1007
1008        priv->kms = &dpu_kms->base;
1009        return ret;
1010}
1011
1012static void dpu_unbind(struct device *dev, struct device *master, void *data)
1013{
1014        struct platform_device *pdev = to_platform_device(dev);
1015        struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
1016        struct dss_module_power *mp = &dpu_kms->mp;
1017
1018        msm_dss_put_clk(mp->clk_config, mp->num_clk);
1019        devm_kfree(&pdev->dev, mp->clk_config);
1020        mp->num_clk = 0;
1021
1022        if (dpu_kms->rpm_enabled)
1023                pm_runtime_disable(&pdev->dev);
1024}
1025
1026static const struct component_ops dpu_ops = {
1027        .bind   = dpu_bind,
1028        .unbind = dpu_unbind,
1029};
1030
1031static int dpu_dev_probe(struct platform_device *pdev)
1032{
1033        return component_add(&pdev->dev, &dpu_ops);
1034}
1035
1036static int dpu_dev_remove(struct platform_device *pdev)
1037{
1038        component_del(&pdev->dev, &dpu_ops);
1039        return 0;
1040}
1041
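/*
 * Runtime PM hooks: gate the DPU clocks on suspend and ungate them on
 * resume, then reprogram the VBIF memory types and let every encoder
 * restore any state lost while the clocks were off.
 */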
1042static int __maybe_unused dpu_runtime_suspend(struct device *dev)
1043{
1044        int rc = -1;
1045        struct platform_device *pdev = to_platform_device(dev);
1046        struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
1047        struct drm_device *ddev;
1048        struct dss_module_power *mp = &dpu_kms->mp;
1049
1050        ddev = dpu_kms->dev;
1051        if (!ddev) {
1052                DPU_ERROR("invalid drm_device\n");
1053                return rc;
1054        }
1055
1056        rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false);
1057        if (rc)
1058                DPU_ERROR("clock disable failed rc:%d\n", rc);
1059
1060        return rc;
1061}
1062
1063static int __maybe_unused dpu_runtime_resume(struct device *dev)
1064{
1065        int rc = -1;
1066        struct platform_device *pdev = to_platform_device(dev);
1067        struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
1068        struct drm_encoder *encoder;
1069        struct drm_device *ddev;
1070        struct dss_module_power *mp = &dpu_kms->mp;
1071
1072        ddev = dpu_kms->dev;
1073        if (!ddev) {
1074                DPU_ERROR("invalid drm_device\n");
1075                return rc;
1076        }
1077
1078        rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
1079        if (rc) {
1080                DPU_ERROR("clock enable failed rc:%d\n", rc);
1081                return rc;
1082        }
1083
1084        dpu_vbif_init_memtypes(dpu_kms);
1085
1086        drm_for_each_encoder(encoder, ddev)
1087                dpu_encoder_virt_runtime_resume(encoder);
1088
1089        return rc;
1090}
1091
1092static const struct dev_pm_ops dpu_pm_ops = {
1093        SET_RUNTIME_PM_OPS(dpu_runtime_suspend, dpu_runtime_resume, NULL)
1094};
1095
1096static const struct of_device_id dpu_dt_match[] = {
1097        { .compatible = "qcom,sdm845-dpu", },
1098        {}
1099};
1100MODULE_DEVICE_TABLE(of, dpu_dt_match);
1101
1102static struct platform_driver dpu_driver = {
1103        .probe = dpu_dev_probe,
1104        .remove = dpu_dev_remove,
1105        .driver = {
1106                .name = "msm_dpu",
1107                .of_match_table = dpu_dt_match,
1108                .pm = &dpu_pm_ops,
1109        },
1110};
1111
1112void __init msm_dpu_register(void)
1113{
1114        platform_driver_register(&dpu_driver);
1115}
1116
1117void __exit msm_dpu_unregister(void)
1118{
1119        platform_driver_unregister(&dpu_driver);
1120}
1121