linux/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
/*
 * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt)     "[drm:%s:%d] " fmt, __func__, __LINE__

#include <drm/drm_crtc.h>
#include <linux/debugfs.h>
#include <linux/of_irq.h>
#include <linux/dma-buf.h>

#include "msm_drv.h"
#include "msm_mmu.h"
#include "msm_gem.h"

#include "dpu_kms.h"
#include "dpu_core_irq.h"
#include "dpu_formats.h"
#include "dpu_hw_vbif.h"
#include "dpu_vbif.h"
#include "dpu_encoder.h"
#include "dpu_plane.h"
#include "dpu_crtc.h"

#define CREATE_TRACE_POINTS
#include "dpu_trace.h"

static const char * const iommu_ports[] = {
                "mdp_0",
};

/*
 * To enable overall DRM driver logging
 * # echo 0x2 > /sys/module/drm/parameters/debug
 *
 * To enable DRM driver h/w logging
 * # echo <mask> > /sys/kernel/debug/dri/0/debug/hw_log_mask
 *
 * See dpu_hw_mdss.h for h/w logging mask definitions (search for DPU_DBG_MASK_)
 */
#define DPU_DEBUGFS_DIR "msm_dpu"
#define DPU_DEBUGFS_HWMASKNAME "hw_log_mask"
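
/*
 * Example debugfs usage for the nodes created in this file (assuming the
 * primary DRM minor is dri/0, as in the note above):
 * # cat /sys/kernel/debug/dri/0/debug/hw_log_mask
 * # cat /sys/kernel/debug/dri/0/debug/danger/danger_status
 * # cat /sys/kernel/debug/dri/0/debug/danger/safe_status
 * The "danger" nodes are created by dpu_debugfs_danger_init() below.
 */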

static int dpu_kms_hw_init(struct msm_kms *kms);
static int _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms);

static unsigned long dpu_iomap_size(struct platform_device *pdev,
                                    const char *name)
{
        struct resource *res;

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
        if (!res) {
                DRM_ERROR("failed to get memory resource: %s\n", name);
                return 0;
        }

        return resource_size(res);
}

#ifdef CONFIG_DEBUG_FS
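/*
 * _dpu_danger_signal_status - dump danger/safe signal status to a seq_file
 * @s:             Pointer to the output seq_file
 * @danger_status: true to report danger signals, false to report safe signals
 *
 * Reads the status registers through the MDP TOP block ops and prints one
 * line per SSPP. Backs the debugfs nodes created in dpu_debugfs_danger_init().
 */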
static int _dpu_danger_signal_status(struct seq_file *s,
                bool danger_status)
{
        struct dpu_kms *kms = (struct dpu_kms *)s->private;
        struct msm_drm_private *priv;
        struct dpu_danger_safe_status status;
        int i;

        if (!kms->dev || !kms->dev->dev_private || !kms->hw_mdp) {
                DPU_ERROR("invalid arg(s)\n");
                return 0;
        }

        priv = kms->dev->dev_private;
        memset(&status, 0, sizeof(struct dpu_danger_safe_status));

        pm_runtime_get_sync(&kms->pdev->dev);
        if (danger_status) {
                seq_puts(s, "\nDanger signal status:\n");
                if (kms->hw_mdp->ops.get_danger_status)
                        kms->hw_mdp->ops.get_danger_status(kms->hw_mdp,
                                        &status);
        } else {
                seq_puts(s, "\nSafe signal status:\n");
                if (kms->hw_mdp->ops.get_safe_status)
                        kms->hw_mdp->ops.get_safe_status(kms->hw_mdp,
                                        &status);
        }
        pm_runtime_put_sync(&kms->pdev->dev);

        seq_printf(s, "MDP     :  0x%x\n", status.mdp);

        for (i = SSPP_VIG0; i < SSPP_MAX; i++)
                seq_printf(s, "SSPP%d   :  0x%x  \t", i - SSPP_VIG0,
                                status.sspp[i]);
        seq_puts(s, "\n");

        return 0;
}

#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix)                           \
static int __prefix ## _open(struct inode *inode, struct file *file)    \
{                                                                       \
        return single_open(file, __prefix ## _show, inode->i_private);  \
}                                                                       \
static const struct file_operations __prefix ## _fops = {               \
        .owner = THIS_MODULE,                                           \
        .open = __prefix ## _open,                                      \
        .release = single_release,                                      \
        .read = seq_read,                                               \
        .llseek = seq_lseek,                                            \
}

static int dpu_debugfs_danger_stats_show(struct seq_file *s, void *v)
{
        return _dpu_danger_signal_status(s, true);
}
DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_danger_stats);

static int dpu_debugfs_safe_stats_show(struct seq_file *s, void *v)
{
        return _dpu_danger_signal_status(s, false);
}
DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_safe_stats);

static void dpu_debugfs_danger_init(struct dpu_kms *dpu_kms,
                struct dentry *parent)
{
        struct dentry *entry = debugfs_create_dir("danger", parent);
        if (IS_ERR_OR_NULL(entry))
                return;

        debugfs_create_file("danger_status", 0600, entry,
                        dpu_kms, &dpu_debugfs_danger_stats_fops);
        debugfs_create_file("safe_status", 0600, entry,
                        dpu_kms, &dpu_debugfs_safe_stats_fops);
}

static int _dpu_debugfs_show_regset32(struct seq_file *s, void *data)
{
        struct dpu_debugfs_regset32 *regset = s->private;
        struct dpu_kms *dpu_kms = regset->dpu_kms;
        struct drm_device *dev;
        struct msm_drm_private *priv;
        void __iomem *base;
        uint32_t i, addr;

        if (!dpu_kms->mmio)
                return 0;

        dev = dpu_kms->dev;
        if (!dev)
                return 0;

        priv = dev->dev_private;
        if (!priv)
                return 0;

        base = dpu_kms->mmio + regset->offset;

        /* insert padding spaces, if needed */
        if (regset->offset & 0xF) {
                seq_printf(s, "[%x]", regset->offset & ~0xF);
                for (i = 0; i < (regset->offset & 0xF); i += 4)
                        seq_puts(s, "         ");
        }

        pm_runtime_get_sync(&dpu_kms->pdev->dev);

        /* main register output */
        for (i = 0; i < regset->blk_len; i += 4) {
                addr = regset->offset + i;
                if ((addr & 0xF) == 0x0)
                        seq_printf(s, i ? "\n[%x]" : "[%x]", addr);
                seq_printf(s, " %08x", readl_relaxed(base + i));
        }
        seq_puts(s, "\n");
        pm_runtime_put_sync(&dpu_kms->pdev->dev);

        return 0;
}

static int dpu_debugfs_open_regset32(struct inode *inode,
                struct file *file)
{
        return single_open(file, _dpu_debugfs_show_regset32, inode->i_private);
}

static const struct file_operations dpu_fops_regset32 = {
        .open =         dpu_debugfs_open_regset32,
        .read =         seq_read,
        .llseek =       seq_lseek,
        .release =      single_release,
};

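/*
 * dpu_debugfs_setup_regset32()/dpu_debugfs_create_regset32() are used
 * together to expose a block of DPU registers through debugfs. A minimal
 * usage sketch (the offset/length values and variable names are illustrative
 * only):
 *
 *      dpu_debugfs_setup_regset32(&regset, 0x1000, 0x100, dpu_kms);
 *      dpu_debugfs_create_regset32("regs", 0400, parent, &regset);
 *
 * The resulting node dumps the registers via _dpu_debugfs_show_regset32()
 * above.
 */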
void dpu_debugfs_setup_regset32(struct dpu_debugfs_regset32 *regset,
                uint32_t offset, uint32_t length, struct dpu_kms *dpu_kms)
{
        if (regset) {
                regset->offset = offset;
                regset->blk_len = length;
                regset->dpu_kms = dpu_kms;
        }
}

void *dpu_debugfs_create_regset32(const char *name, umode_t mode,
                void *parent, struct dpu_debugfs_regset32 *regset)
{
        if (!name || !regset || !regset->dpu_kms || !regset->blk_len)
                return NULL;

        /* make sure offset is a multiple of 4 */
        regset->offset = round_down(regset->offset, 4);

        return debugfs_create_file(name, mode, parent,
                        regset, &dpu_fops_regset32);
}

static int _dpu_debugfs_init(struct dpu_kms *dpu_kms)
{
        void *p = dpu_hw_util_get_log_mask_ptr();
        struct dentry *entry;

        if (!p)
                return -EINVAL;

        entry = debugfs_create_dir("debug", dpu_kms->dev->primary->debugfs_root);
        if (IS_ERR_OR_NULL(entry))
                return -ENODEV;

        /* allow root to be NULL */
        debugfs_create_x32(DPU_DEBUGFS_HWMASKNAME, 0600, entry, p);

        dpu_debugfs_danger_init(dpu_kms, entry);
        dpu_debugfs_vbif_init(dpu_kms, entry);
        dpu_debugfs_core_irq_init(dpu_kms, entry);

        return dpu_core_perf_debugfs_init(dpu_kms, entry);
}
#endif

static int dpu_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
        return dpu_crtc_vblank(crtc, true);
}

static void dpu_kms_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
        dpu_crtc_vblank(crtc, false);
}

static void dpu_kms_prepare_commit(struct msm_kms *kms,
                struct drm_atomic_state *state)
{
        struct dpu_kms *dpu_kms;
        struct msm_drm_private *priv;
        struct drm_device *dev;
        struct drm_crtc *crtc;
        struct drm_crtc_state *crtc_state;
        struct drm_encoder *encoder;
        int i;

        if (!kms)
                return;
        dpu_kms = to_dpu_kms(kms);
        dev = dpu_kms->dev;

        if (!dev || !dev->dev_private)
                return;
        priv = dev->dev_private;
        pm_runtime_get_sync(&dpu_kms->pdev->dev);

        /* Call prepare_commit for all affected encoders */
        for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
                drm_for_each_encoder_mask(encoder, crtc->dev,
                                          crtc_state->encoder_mask) {
                        dpu_encoder_prepare_commit(encoder);
                }
        }
}

/*
 * Override the encoder enable since we need to setup the inline rotator and do
 * some crtc magic before enabling any bridge that might be present.
 */
void dpu_kms_encoder_enable(struct drm_encoder *encoder)
{
        const struct drm_encoder_helper_funcs *funcs = encoder->helper_private;
        struct drm_device *dev = encoder->dev;
        struct drm_crtc *crtc;

        /* Forward this enable call to the commit hook */
        if (funcs && funcs->commit)
                funcs->commit(encoder);

        WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
        drm_for_each_crtc(crtc, dev) {
                if (!(crtc->state->encoder_mask & drm_encoder_mask(encoder)))
                        continue;

                trace_dpu_kms_enc_enable(DRMID(crtc));
                dpu_crtc_commit_kickoff(crtc, false);
        }
}

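/*
 * dpu_kms_commit - kick off the h/w flush for CRTCs that are active but not
 * undergoing a full modeset; modeset paths are kicked off from
 * dpu_kms_encoder_enable() instead.
 */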
static void dpu_kms_commit(struct msm_kms *kms, struct drm_atomic_state *state)
{
        struct drm_crtc *crtc;
        struct drm_crtc_state *crtc_state;
        int i;

        for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
                /* If modeset is required, kickoff is run in encoder_enable */
                if (drm_atomic_crtc_needs_modeset(crtc_state))
                        continue;

                if (crtc->state->active) {
                        trace_dpu_kms_commit(DRMID(crtc));
                        dpu_crtc_commit_kickoff(crtc,
                                                state->legacy_cursor_update);
                }
        }
}

static void dpu_kms_complete_commit(struct msm_kms *kms,
                struct drm_atomic_state *old_state)
{
        struct dpu_kms *dpu_kms;
        struct msm_drm_private *priv;
        struct drm_crtc *crtc;
        struct drm_crtc_state *old_crtc_state;
        int i;

        if (!kms || !old_state)
                return;
        dpu_kms = to_dpu_kms(kms);

        if (!dpu_kms->dev || !dpu_kms->dev->dev_private)
                return;
        priv = dpu_kms->dev->dev_private;

        DPU_ATRACE_BEGIN("kms_complete_commit");

        for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i)
                dpu_crtc_complete_commit(crtc, old_crtc_state);

        pm_runtime_put_sync(&dpu_kms->pdev->dev);

        DPU_ATRACE_END("kms_complete_commit");
}

static void dpu_kms_wait_for_commit_done(struct msm_kms *kms,
                struct drm_crtc *crtc)
{
        struct drm_encoder *encoder;
        struct drm_device *dev;
        int ret;

        if (!kms || !crtc || !crtc->state) {
                DPU_ERROR("invalid params\n");
                return;
        }

        dev = crtc->dev;

        if (!crtc->state->enable) {
                DPU_DEBUG("[crtc:%d] not enable\n", crtc->base.id);
                return;
        }

        if (!crtc->state->active) {
                DPU_DEBUG("[crtc:%d] not active\n", crtc->base.id);
                return;
        }

        list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
                if (encoder->crtc != crtc)
                        continue;
                /*
                 * Wait for post-flush if necessary to delay before
                 * plane_cleanup. For example, wait for vsync in case of video
                 * mode panels. This may be a no-op for command mode panels.
                 */
                trace_dpu_kms_wait_for_commit_done(DRMID(crtc));
                ret = dpu_encoder_wait_for_event(encoder, MSM_ENC_COMMIT_DONE);
                if (ret && ret != -EWOULDBLOCK) {
                        DPU_ERROR("wait for commit done returned %d\n", ret);
                        break;
                }
        }
}

static void _dpu_kms_initialize_dsi(struct drm_device *dev,
                                    struct msm_drm_private *priv,
                                    struct dpu_kms *dpu_kms)
{
        struct drm_encoder *encoder = NULL;
        int i, rc;

        /*TODO: Support two independent DSI connectors */
        encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_DSI);
        if (IS_ERR_OR_NULL(encoder)) {
                DPU_ERROR("encoder init failed for dsi display\n");
                return;
        }

        priv->encoders[priv->num_encoders++] = encoder;

        for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
                if (!priv->dsi[i]) {
                        DPU_DEBUG("invalid msm_dsi for ctrl %d\n", i);
                        return;
                }

                rc = msm_dsi_modeset_init(priv->dsi[i], dev, encoder);
                if (rc) {
                        DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n",
                                i, rc);
                        continue;
                }
        }
}

/**
 * _dpu_kms_setup_displays - create encoders, bridges and connectors
 *                           for underlying displays
 * @dev:        Pointer to drm device structure
 * @priv:       Pointer to private drm device data
 * @dpu_kms:    Pointer to dpu kms structure
 */
static void _dpu_kms_setup_displays(struct drm_device *dev,
                                    struct msm_drm_private *priv,
                                    struct dpu_kms *dpu_kms)
{
        _dpu_kms_initialize_dsi(dev, priv, dpu_kms);

        /**
         * Extend this function to initialize other
         * types of displays
         */
}

static void _dpu_kms_drm_obj_destroy(struct dpu_kms *dpu_kms)
{
        struct msm_drm_private *priv;
        int i;

        if (!dpu_kms) {
                DPU_ERROR("invalid dpu_kms\n");
                return;
        } else if (!dpu_kms->dev) {
                DPU_ERROR("invalid dev\n");
                return;
        } else if (!dpu_kms->dev->dev_private) {
                DPU_ERROR("invalid dev_private\n");
                return;
        }
        priv = dpu_kms->dev->dev_private;

        for (i = 0; i < priv->num_crtcs; i++)
                priv->crtcs[i]->funcs->destroy(priv->crtcs[i]);
        priv->num_crtcs = 0;

        for (i = 0; i < priv->num_planes; i++)
                priv->planes[i]->funcs->destroy(priv->planes[i]);
        priv->num_planes = 0;

        for (i = 0; i < priv->num_connectors; i++)
                priv->connectors[i]->funcs->destroy(priv->connectors[i]);
        priv->num_connectors = 0;

        for (i = 0; i < priv->num_encoders; i++)
                priv->encoders[i]->funcs->destroy(priv->encoders[i]);
        priv->num_encoders = 0;
}

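/*
 * _dpu_kms_drm_obj_init - create the DRM objects (planes, CRTCs, encoders and
 * connectors) for the displays managed by this DPU instance. One CRTC is
 * created per encoder, limited by the number of hardware mixers, and each
 * SSPP in the catalog becomes a primary, cursor or overlay plane.
 */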
static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
{
        struct drm_device *dev;
        struct drm_plane *primary_planes[MAX_PLANES], *plane;
        struct drm_plane *cursor_planes[MAX_PLANES] = { NULL };
        struct drm_crtc *crtc;

        struct msm_drm_private *priv;
        struct dpu_mdss_cfg *catalog;

        int primary_planes_idx = 0, cursor_planes_idx = 0, i, ret;
        int max_crtc_count;

        if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev) {
                DPU_ERROR("invalid dpu_kms\n");
                return -EINVAL;
        }

        dev = dpu_kms->dev;
        priv = dev->dev_private;
        catalog = dpu_kms->catalog;

        /*
         * Create encoder and query display drivers to create
         * bridges and connectors
         */
        _dpu_kms_setup_displays(dev, priv, dpu_kms);

        max_crtc_count = min(catalog->mixer_count, priv->num_encoders);

        /* Create the planes, keeping track of one primary/cursor per crtc */
        for (i = 0; i < catalog->sspp_count; i++) {
                enum drm_plane_type type;

                if ((catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR))
                        && cursor_planes_idx < max_crtc_count)
                        type = DRM_PLANE_TYPE_CURSOR;
                else if (primary_planes_idx < max_crtc_count)
                        type = DRM_PLANE_TYPE_PRIMARY;
                else
                        type = DRM_PLANE_TYPE_OVERLAY;

                DPU_DEBUG("Create plane type %d with features %lx (cur %lx)\n",
                          type, catalog->sspp[i].features,
                          catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR));

                plane = dpu_plane_init(dev, catalog->sspp[i].id, type,
                                       (1UL << max_crtc_count) - 1, 0);
                if (IS_ERR(plane)) {
                        DPU_ERROR("dpu_plane_init failed\n");
                        ret = PTR_ERR(plane);
                        goto fail;
                }
                priv->planes[priv->num_planes++] = plane;

                if (type == DRM_PLANE_TYPE_CURSOR)
                        cursor_planes[cursor_planes_idx++] = plane;
                else if (type == DRM_PLANE_TYPE_PRIMARY)
                        primary_planes[primary_planes_idx++] = plane;
        }

        max_crtc_count = min(max_crtc_count, primary_planes_idx);

        /* Create one CRTC per encoder */
        for (i = 0; i < max_crtc_count; i++) {
                crtc = dpu_crtc_init(dev, primary_planes[i], cursor_planes[i]);
                if (IS_ERR(crtc)) {
                        ret = PTR_ERR(crtc);
                        goto fail;
                }
                priv->crtcs[priv->num_crtcs++] = crtc;
        }

        /* All CRTCs are compatible with all encoders */
        for (i = 0; i < priv->num_encoders; i++)
                priv->encoders[i]->possible_crtcs = (1 << priv->num_crtcs) - 1;

        return 0;
fail:
        _dpu_kms_drm_obj_destroy(dpu_kms);
        return ret;
}

#ifdef CONFIG_DEBUG_FS
static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
{
        return _dpu_debugfs_init(to_dpu_kms(kms));
}
#endif

static long dpu_kms_round_pixclk(struct msm_kms *kms, unsigned long rate,
                struct drm_encoder *encoder)
{
        return rate;
}

static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
{
        struct drm_device *dev;
        int i;

        dev = dpu_kms->dev;
        if (!dev)
                return;

        if (dpu_kms->hw_intr)
                dpu_hw_intr_destroy(dpu_kms->hw_intr);
        dpu_kms->hw_intr = NULL;

        /* safe to call these more than once during shutdown */
        _dpu_kms_mmu_destroy(dpu_kms);

        if (dpu_kms->catalog) {
                for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
                        u32 vbif_idx = dpu_kms->catalog->vbif[i].id;

                        if ((vbif_idx < VBIF_MAX) && dpu_kms->hw_vbif[vbif_idx])
                                dpu_hw_vbif_destroy(dpu_kms->hw_vbif[vbif_idx]);
                }
        }

        if (dpu_kms->rm_init)
                dpu_rm_destroy(&dpu_kms->rm);
        dpu_kms->rm_init = false;

        if (dpu_kms->catalog)
                dpu_hw_catalog_deinit(dpu_kms->catalog);
        dpu_kms->catalog = NULL;

        if (dpu_kms->vbif[VBIF_NRT])
                devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_NRT]);
        dpu_kms->vbif[VBIF_NRT] = NULL;

        if (dpu_kms->vbif[VBIF_RT])
                devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_RT]);
        dpu_kms->vbif[VBIF_RT] = NULL;

        if (dpu_kms->mmio)
                devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->mmio);
        dpu_kms->mmio = NULL;
}

static void dpu_kms_destroy(struct msm_kms *kms)
{
        struct dpu_kms *dpu_kms;

        if (!kms) {
                DPU_ERROR("invalid kms\n");
                return;
        }

        dpu_kms = to_dpu_kms(kms);

        _dpu_kms_hw_destroy(dpu_kms);
}

static void _dpu_kms_set_encoder_mode(struct msm_kms *kms,
                                 struct drm_encoder *encoder,
                                 bool cmd_mode)
{
        struct msm_display_info info;
        struct msm_drm_private *priv = encoder->dev->dev_private;
        int i, rc = 0;

        memset(&info, 0, sizeof(info));

        info.intf_type = encoder->encoder_type;
        info.capabilities = cmd_mode ? MSM_DISPLAY_CAP_CMD_MODE :
                        MSM_DISPLAY_CAP_VID_MODE;

        /* TODO: No support for DSI swap */
        for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
                if (priv->dsi[i]) {
                        info.h_tile_instance[info.num_of_h_tiles] = i;
                        info.num_of_h_tiles++;
                }
        }

        rc = dpu_encoder_setup(encoder->dev, encoder, &info);
        if (rc)
                DPU_ERROR("failed to setup DPU encoder %d: rc:%d\n",
                        encoder->base.id, rc);
}

static irqreturn_t dpu_irq(struct msm_kms *kms)
{
        struct dpu_kms *dpu_kms = to_dpu_kms(kms);

        return dpu_core_irq(dpu_kms);
}

static void dpu_irq_preinstall(struct msm_kms *kms)
{
        struct dpu_kms *dpu_kms = to_dpu_kms(kms);

        dpu_core_irq_preinstall(dpu_kms);
}

static void dpu_irq_uninstall(struct msm_kms *kms)
{
        struct dpu_kms *dpu_kms = to_dpu_kms(kms);

        dpu_core_irq_uninstall(dpu_kms);
}

static const struct msm_kms_funcs kms_funcs = {
        .hw_init         = dpu_kms_hw_init,
        .irq_preinstall  = dpu_irq_preinstall,
        .irq_uninstall   = dpu_irq_uninstall,
        .irq             = dpu_irq,
        .prepare_commit  = dpu_kms_prepare_commit,
        .commit          = dpu_kms_commit,
        .complete_commit = dpu_kms_complete_commit,
        .wait_for_crtc_commit_done = dpu_kms_wait_for_commit_done,
        .enable_vblank   = dpu_kms_enable_vblank,
        .disable_vblank  = dpu_kms_disable_vblank,
        .check_modified_format = dpu_format_check_modified_format,
        .get_format      = dpu_get_msm_format,
        .round_pixclk    = dpu_kms_round_pixclk,
        .destroy         = dpu_kms_destroy,
        .set_encoder_mode = _dpu_kms_set_encoder_mode,
#ifdef CONFIG_DEBUG_FS
        .debugfs_init    = dpu_kms_debugfs_init,
#endif
};

static int _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms)
{
        struct msm_mmu *mmu;

        /* nothing to tear down if the address space was never created */
        if (!dpu_kms->base.aspace || !dpu_kms->base.aspace->mmu)
                return 0;

        mmu = dpu_kms->base.aspace->mmu;

        mmu->funcs->detach(mmu, (const char **)iommu_ports,
                        ARRAY_SIZE(iommu_ports));
        msm_gem_address_space_put(dpu_kms->base.aspace);

        dpu_kms->base.aspace = NULL;

        return 0;
}

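/*
 * _dpu_kms_mmu_init - allocate an IOMMU domain for the DPU address space and
 * attach the MDP port(s) listed in iommu_ports[]. Returning 0 when no IOMMU
 * domain can be allocated is intentional, so the driver can still probe.
 */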
static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
{
        struct iommu_domain *domain;
        struct msm_gem_address_space *aspace;
        int ret;

        domain = iommu_domain_alloc(&platform_bus_type);
        if (!domain)
                return 0;

        domain->geometry.aperture_start = 0x1000;
        domain->geometry.aperture_end = 0xffffffff;

        aspace = msm_gem_address_space_create(dpu_kms->dev->dev,
                        domain, "dpu1");
        if (IS_ERR(aspace)) {
                ret = PTR_ERR(aspace);
                goto fail;
        }

        dpu_kms->base.aspace = aspace;

        ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
                        ARRAY_SIZE(iommu_ports));
        if (ret) {
                DPU_ERROR("failed to attach iommu %d\n", ret);
                msm_gem_address_space_put(aspace);
                /* clear the stale pointer so the error path below is a no-op */
                dpu_kms->base.aspace = NULL;
                goto fail;
        }

        return 0;
fail:
        _dpu_kms_mmu_destroy(dpu_kms);

        return ret;
}

static struct dss_clk *_dpu_kms_get_clk(struct dpu_kms *dpu_kms,
                char *clock_name)
{
        struct dss_module_power *mp = &dpu_kms->mp;
        int i;

        for (i = 0; i < mp->num_clk; i++) {
                if (!strcmp(mp->clk_config[i].clk_name, clock_name))
                        return &mp->clk_config[i];
        }

        return NULL;
}

u64 dpu_kms_get_clk_rate(struct dpu_kms *dpu_kms, char *clock_name)
{
        struct dss_clk *clk;

        clk = _dpu_kms_get_clk(dpu_kms, clock_name);
        if (!clk)
                return -EINVAL;

        return clk_get_rate(clk->clk);
}

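/*
 * dpu_kms_hw_init - map the MDP/VBIF register regions, read the hardware
 * revision, then initialize the catalog, IOMMU, resource manager, VBIF
 * blocks, core perf and interrupt handling before creating the DRM objects.
 * The device is held active via pm_runtime while the hardware is probed.
 */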
static int dpu_kms_hw_init(struct msm_kms *kms)
{
        struct dpu_kms *dpu_kms;
        struct drm_device *dev;
        struct msm_drm_private *priv;
        int i, rc = -EINVAL;

        if (!kms) {
                DPU_ERROR("invalid kms\n");
                return rc;
        }

        dpu_kms = to_dpu_kms(kms);
        dev = dpu_kms->dev;
        if (!dev) {
                DPU_ERROR("invalid device\n");
                return rc;
        }

        priv = dev->dev_private;
        if (!priv) {
                DPU_ERROR("invalid private data\n");
                return rc;
        }

        dpu_kms->mmio = msm_ioremap(dpu_kms->pdev, "mdp", "mdp");
        if (IS_ERR(dpu_kms->mmio)) {
                rc = PTR_ERR(dpu_kms->mmio);
                DPU_ERROR("mdp register memory map failed: %d\n", rc);
                dpu_kms->mmio = NULL;
                goto error;
        }
        DRM_DEBUG("mapped dpu address space @%pK\n", dpu_kms->mmio);
        dpu_kms->mmio_len = dpu_iomap_size(dpu_kms->pdev, "mdp");

        dpu_kms->vbif[VBIF_RT] = msm_ioremap(dpu_kms->pdev, "vbif", "vbif");
        if (IS_ERR(dpu_kms->vbif[VBIF_RT])) {
                rc = PTR_ERR(dpu_kms->vbif[VBIF_RT]);
                DPU_ERROR("vbif register memory map failed: %d\n", rc);
                dpu_kms->vbif[VBIF_RT] = NULL;
                goto error;
        }
        dpu_kms->vbif_len[VBIF_RT] = dpu_iomap_size(dpu_kms->pdev, "vbif");
        dpu_kms->vbif[VBIF_NRT] = msm_ioremap(dpu_kms->pdev, "vbif_nrt", "vbif_nrt");
        if (IS_ERR(dpu_kms->vbif[VBIF_NRT])) {
                dpu_kms->vbif[VBIF_NRT] = NULL;
                DPU_DEBUG("VBIF NRT is not defined");
        } else {
                dpu_kms->vbif_len[VBIF_NRT] = dpu_iomap_size(dpu_kms->pdev,
                                                             "vbif_nrt");
        }

        dpu_kms->reg_dma = msm_ioremap(dpu_kms->pdev, "regdma", "regdma");
        if (IS_ERR(dpu_kms->reg_dma)) {
                dpu_kms->reg_dma = NULL;
                DPU_DEBUG("REG_DMA is not defined");
        } else {
                dpu_kms->reg_dma_len = dpu_iomap_size(dpu_kms->pdev, "regdma");
        }

        pm_runtime_get_sync(&dpu_kms->pdev->dev);

        dpu_kms->core_rev = readl_relaxed(dpu_kms->mmio + 0x0);

        pr_info("dpu hardware revision:0x%x\n", dpu_kms->core_rev);

        dpu_kms->catalog = dpu_hw_catalog_init(dpu_kms->core_rev);
        if (IS_ERR_OR_NULL(dpu_kms->catalog)) {
                rc = PTR_ERR(dpu_kms->catalog);
                if (!dpu_kms->catalog)
                        rc = -EINVAL;
                DPU_ERROR("catalog init failed: %d\n", rc);
                dpu_kms->catalog = NULL;
                goto power_error;
        }

        /*
         * Now we need to read the HW catalog and initialize resources such as
         * clocks, regulators, GDSC/MMAGIC, ioremap the register ranges etc
         */
        rc = _dpu_kms_mmu_init(dpu_kms);
        if (rc) {
                DPU_ERROR("dpu_kms_mmu_init failed: %d\n", rc);
                goto power_error;
        }

        rc = dpu_rm_init(&dpu_kms->rm, dpu_kms->catalog, dpu_kms->mmio,
                        dpu_kms->dev);
        if (rc) {
                DPU_ERROR("rm init failed: %d\n", rc);
                goto power_error;
        }

        dpu_kms->rm_init = true;

        dpu_kms->hw_mdp = dpu_rm_get_mdp(&dpu_kms->rm);
        if (IS_ERR_OR_NULL(dpu_kms->hw_mdp)) {
                rc = PTR_ERR(dpu_kms->hw_mdp);
                if (!dpu_kms->hw_mdp)
                        rc = -EINVAL;
                DPU_ERROR("failed to get hw_mdp: %d\n", rc);
                dpu_kms->hw_mdp = NULL;
                goto power_error;
        }

        for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
                u32 vbif_idx = dpu_kms->catalog->vbif[i].id;

                dpu_kms->hw_vbif[vbif_idx] = dpu_hw_vbif_init(vbif_idx,
                                dpu_kms->vbif[vbif_idx], dpu_kms->catalog);
                if (IS_ERR_OR_NULL(dpu_kms->hw_vbif[vbif_idx])) {
                        rc = PTR_ERR(dpu_kms->hw_vbif[vbif_idx]);
                        if (!dpu_kms->hw_vbif[vbif_idx])
                                rc = -EINVAL;
                        DPU_ERROR("failed to init vbif %d: %d\n", vbif_idx, rc);
                        dpu_kms->hw_vbif[vbif_idx] = NULL;
                        goto power_error;
                }
        }

        rc = dpu_core_perf_init(&dpu_kms->perf, dev, dpu_kms->catalog,
                        _dpu_kms_get_clk(dpu_kms, "core"));
        if (rc) {
                DPU_ERROR("failed to init perf %d\n", rc);
                goto perf_err;
        }

        dpu_kms->hw_intr = dpu_hw_intr_init(dpu_kms->mmio, dpu_kms->catalog);
        if (IS_ERR_OR_NULL(dpu_kms->hw_intr)) {
                rc = PTR_ERR(dpu_kms->hw_intr);
                DPU_ERROR("hw_intr init failed: %d\n", rc);
                dpu_kms->hw_intr = NULL;
                goto hw_intr_init_err;
        }

        /*
         * _dpu_kms_drm_obj_init should create the DRM related objects
         * i.e. CRTCs, planes, encoders, connectors and so forth
         */
        rc = _dpu_kms_drm_obj_init(dpu_kms);
        if (rc) {
                DPU_ERROR("modeset init failed: %d\n", rc);
                goto drm_obj_init_err;
        }

        dev->mode_config.min_width = 0;
        dev->mode_config.min_height = 0;

        /*
         * max crtc width is equal to the max mixer width * 2 and max height
         * is 4K
         */
        dev->mode_config.max_width =
                        dpu_kms->catalog->caps->max_mixer_width * 2;
        dev->mode_config.max_height = 4096;

        /*
         * Support format modifiers for compression etc.
         */
        dev->mode_config.allow_fb_modifiers = true;

        dpu_vbif_init_memtypes(dpu_kms);

        pm_runtime_put_sync(&dpu_kms->pdev->dev);

        return 0;

drm_obj_init_err:
        dpu_core_perf_destroy(&dpu_kms->perf);
hw_intr_init_err:
perf_err:
power_error:
        pm_runtime_put_sync(&dpu_kms->pdev->dev);
error:
        _dpu_kms_hw_destroy(dpu_kms);

        return rc;
}

struct msm_kms *dpu_kms_init(struct drm_device *dev)
{
        struct msm_drm_private *priv;
        struct dpu_kms *dpu_kms;
        int irq;

        if (!dev || !dev->dev_private) {
                DPU_ERROR("drm device node invalid\n");
                return ERR_PTR(-EINVAL);
        }

        priv = dev->dev_private;
        dpu_kms = to_dpu_kms(priv->kms);

        irq = irq_of_parse_and_map(dpu_kms->pdev->dev.of_node, 0);
        if (!irq) {
                /* irq_of_parse_and_map() returns 0 on failure, never < 0 */
                DPU_ERROR("failed to get irq\n");
                return ERR_PTR(-EINVAL);
        }
        dpu_kms->base.irq = irq;

        return &dpu_kms->base;
}

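/*
 * dpu_bind - component bind callback: allocate the dpu_kms instance, parse
 * the DPU clocks from DT, register the kms functions with the msm core and
 * enable runtime PM. The heavy lifting is deferred to dpu_kms_hw_init().
 */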
static int dpu_bind(struct device *dev, struct device *master, void *data)
{
        struct drm_device *ddev = dev_get_drvdata(master);
        struct platform_device *pdev = to_platform_device(dev);
        struct msm_drm_private *priv = ddev->dev_private;
        struct dpu_kms *dpu_kms;
        struct dss_module_power *mp;
        int ret = 0;

        dpu_kms = devm_kzalloc(&pdev->dev, sizeof(*dpu_kms), GFP_KERNEL);
        if (!dpu_kms)
                return -ENOMEM;

        mp = &dpu_kms->mp;
        ret = msm_dss_parse_clock(pdev, mp);
        if (ret) {
                DPU_ERROR("failed to parse clocks, ret=%d\n", ret);
                return ret;
        }

        platform_set_drvdata(pdev, dpu_kms);

        msm_kms_init(&dpu_kms->base, &kms_funcs);
        dpu_kms->dev = ddev;
        dpu_kms->pdev = pdev;

        pm_runtime_enable(&pdev->dev);
        dpu_kms->rpm_enabled = true;

        priv->kms = &dpu_kms->base;
        return ret;
}

static void dpu_unbind(struct device *dev, struct device *master, void *data)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
        struct dss_module_power *mp = &dpu_kms->mp;

        msm_dss_put_clk(mp->clk_config, mp->num_clk);
        devm_kfree(&pdev->dev, mp->clk_config);
        mp->num_clk = 0;

        if (dpu_kms->rpm_enabled)
                pm_runtime_disable(&pdev->dev);
}

static const struct component_ops dpu_ops = {
        .bind   = dpu_bind,
        .unbind = dpu_unbind,
};

static int dpu_dev_probe(struct platform_device *pdev)
{
        return component_add(&pdev->dev, &dpu_ops);
}

static int dpu_dev_remove(struct platform_device *pdev)
{
        component_del(&pdev->dev, &dpu_ops);
        return 0;
}

static int __maybe_unused dpu_runtime_suspend(struct device *dev)
{
        int rc = -1;
        struct platform_device *pdev = to_platform_device(dev);
        struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
        struct drm_device *ddev;
        struct dss_module_power *mp = &dpu_kms->mp;

        ddev = dpu_kms->dev;
        if (!ddev) {
                DPU_ERROR("invalid drm_device\n");
                return rc;
        }

        rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false);
        if (rc)
                DPU_ERROR("clock disable failed rc:%d\n", rc);

        return rc;
}

static int __maybe_unused dpu_runtime_resume(struct device *dev)
{
        int rc = -1;
        struct platform_device *pdev = to_platform_device(dev);
        struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
        struct drm_encoder *encoder;
        struct drm_device *ddev;
        struct dss_module_power *mp = &dpu_kms->mp;

        ddev = dpu_kms->dev;
        if (!ddev) {
                DPU_ERROR("invalid drm_device\n");
                return rc;
        }

        rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
        if (rc) {
                DPU_ERROR("clock enable failed rc:%d\n", rc);
                return rc;
        }

        dpu_vbif_init_memtypes(dpu_kms);

        drm_for_each_encoder(encoder, ddev)
                dpu_encoder_virt_runtime_resume(encoder);

        return rc;
}

static const struct dev_pm_ops dpu_pm_ops = {
        SET_RUNTIME_PM_OPS(dpu_runtime_suspend, dpu_runtime_resume, NULL)
};

static const struct of_device_id dpu_dt_match[] = {
        { .compatible = "qcom,sdm845-dpu", },
        {}
};
MODULE_DEVICE_TABLE(of, dpu_dt_match);

static struct platform_driver dpu_driver = {
        .probe = dpu_dev_probe,
        .remove = dpu_dev_remove,
        .driver = {
                .name = "msm_dpu",
                .of_match_table = dpu_dt_match,
                .pm = &dpu_pm_ops,
        },
};

void __init msm_dpu_register(void)
{
        platform_driver_register(&dpu_driver);
}

void __exit msm_dpu_unregister(void)
{
        platform_driver_unregister(&dpu_driver);
}