linux/drivers/gpu/drm/msm/dsi/dsi_host.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (c) 2015, The Linux Foundation. All rights reserved.
   4 */
   5
   6#include <linux/clk.h>
   7#include <linux/delay.h>
   8#include <linux/dma-mapping.h>
   9#include <linux/err.h>
  10#include <linux/gpio/consumer.h>
  11#include <linux/interrupt.h>
  12#include <linux/mfd/syscon.h>
  13#include <linux/of_device.h>
  14#include <linux/of_graph.h>
  15#include <linux/of_irq.h>
  16#include <linux/pinctrl/consumer.h>
  17#include <linux/pm_opp.h>
  18#include <linux/regmap.h>
  19#include <linux/regulator/consumer.h>
  20#include <linux/spinlock.h>
  21
  22#include <video/mipi_display.h>
  23
  24#include "dsi.h"
  25#include "dsi.xml.h"
  26#include "sfpb.xml.h"
  27#include "dsi_cfg.h"
  28#include "msm_kms.h"
  29#include "msm_gem.h"
  30
  31#define DSI_RESET_TOGGLE_DELAY_MS 20
  32
  33static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor)
  34{
  35        u32 ver;
  36
  37        if (!major || !minor)
  38                return -EINVAL;
  39
  40        /*
   41         * From DSI6G(v3) onwards, the addition of a 6G_HW_VERSION register
   42         * at offset 0 shifts all the other registers down by 4 bytes.
   43         *
   44         * To distinguish DSI6G(v3) and beyond from DSIv2 and older, we read
   45         * the DSI_VERSION register without any shift (offset 0x1f0). On
   46         * DSIv2 this has to be a non-zero value, while on DSI6G it has to
   47         * be zero (the offset points to a scratch register which we never
   48         * touch).
  49         */
  50
  51        ver = msm_readl(base + REG_DSI_VERSION);
  52        if (ver) {
  53                /* older dsi host, there is no register shift */
  54                ver = FIELD(ver, DSI_VERSION_MAJOR);
  55                if (ver <= MSM_DSI_VER_MAJOR_V2) {
  56                        /* old versions */
  57                        *major = ver;
  58                        *minor = 0;
  59                        return 0;
  60                } else {
  61                        return -EINVAL;
  62                }
  63        } else {
  64                /*
  65                 * newer host, offset 0 has 6G_HW_VERSION, the rest of the
  66                 * registers are shifted down, read DSI_VERSION again with
  67                 * the shifted offset
  68                 */
  69                ver = msm_readl(base + DSI_6G_REG_SHIFT + REG_DSI_VERSION);
  70                ver = FIELD(ver, DSI_VERSION_MAJOR);
  71                if (ver == MSM_DSI_VER_MAJOR_6G) {
  72                        /* 6G version */
  73                        *major = ver;
  74                        *minor = msm_readl(base + REG_DSI_6G_HW_VERSION);
  75                        return 0;
  76                } else {
  77                        return -EINVAL;
  78                }
  79        }
  80}
  81
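     /* Error conditions latched in err_work_state and serviced by the error worker */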
  82#define DSI_ERR_STATE_ACK                       0x0000
  83#define DSI_ERR_STATE_TIMEOUT                   0x0001
  84#define DSI_ERR_STATE_DLN0_PHY                  0x0002
  85#define DSI_ERR_STATE_FIFO                      0x0004
  86#define DSI_ERR_STATE_MDP_FIFO_UNDERFLOW        0x0008
  87#define DSI_ERR_STATE_INTERLEAVE_OP_CONTENTION  0x0010
  88#define DSI_ERR_STATE_PLL_UNLOCKED              0x0020
  89
  90#define DSI_CLK_CTRL_ENABLE_CLKS        \
  91                (DSI_CLK_CTRL_AHBS_HCLK_ON | DSI_CLK_CTRL_AHBM_SCLK_ON | \
  92                DSI_CLK_CTRL_PCLK_ON | DSI_CLK_CTRL_DSICLK_ON | \
  93                DSI_CLK_CTRL_BYTECLK_ON | DSI_CLK_CTRL_ESCCLK_ON | \
  94                DSI_CLK_CTRL_FORCE_ON_DYN_AHBM_HCLK)
  95
  96struct msm_dsi_host {
  97        struct mipi_dsi_host base;
  98
  99        struct platform_device *pdev;
 100        struct drm_device *dev;
 101
 102        int id;
 103
 104        void __iomem *ctrl_base;
 105        phys_addr_t ctrl_size;
 106        struct regulator_bulk_data supplies[DSI_DEV_REGULATOR_MAX];
 107
 108        struct clk *bus_clks[DSI_BUS_CLK_MAX];
 109
 110        struct clk *byte_clk;
 111        struct clk *esc_clk;
 112        struct clk *pixel_clk;
 113        struct clk *byte_clk_src;
 114        struct clk *pixel_clk_src;
 115        struct clk *byte_intf_clk;
 116
 117        u32 byte_clk_rate;
 118        u32 pixel_clk_rate;
 119        u32 esc_clk_rate;
 120
 121        /* DSI v2 specific clocks */
 122        struct clk *src_clk;
 123        struct clk *esc_clk_src;
 124        struct clk *dsi_clk_src;
 125
 126        u32 src_clk_rate;
 127
 128        struct gpio_desc *disp_en_gpio;
 129        struct gpio_desc *te_gpio;
 130
 131        const struct msm_dsi_cfg_handler *cfg_hnd;
 132
 133        struct completion dma_comp;
 134        struct completion video_comp;
 135        struct mutex dev_mutex;
 136        struct mutex cmd_mutex;
 137        spinlock_t intr_lock; /* Protect interrupt ctrl register */
 138
 139        u32 err_work_state;
 140        struct work_struct err_work;
 141        struct work_struct hpd_work;
 142        struct workqueue_struct *workqueue;
 143
  144        /* DSI 6G TX buffer */
 145        struct drm_gem_object *tx_gem_obj;
 146
 147        /* DSI v2 TX buffer */
 148        void *tx_buf;
 149        dma_addr_t tx_buf_paddr;
 150
 151        int tx_size;
 152
 153        u8 *rx_buf;
 154
 155        struct regmap *sfpb;
 156
 157        struct drm_display_mode *mode;
 158
 159        /* connected device info */
 160        struct device_node *device_node;
 161        unsigned int channel;
 162        unsigned int lanes;
 163        enum mipi_dsi_pixel_format format;
 164        unsigned long mode_flags;
 165
 166        /* lane data parsed via DT */
 167        int dlane_swap;
 168        int num_data_lanes;
 169
 170        u32 dma_cmd_ctrl_restore;
 171
 172        bool registered;
 173        bool power_on;
 174        bool enabled;
 175        int irq;
 176};
 177
 178static u32 dsi_get_bpp(const enum mipi_dsi_pixel_format fmt)
 179{
 180        switch (fmt) {
 181        case MIPI_DSI_FMT_RGB565:               return 16;
 182        case MIPI_DSI_FMT_RGB666_PACKED:        return 18;
 183        case MIPI_DSI_FMT_RGB666:
 184        case MIPI_DSI_FMT_RGB888:
 185        default:                                return 24;
 186        }
 187}
 188
 189static inline u32 dsi_read(struct msm_dsi_host *msm_host, u32 reg)
 190{
 191        return msm_readl(msm_host->ctrl_base + reg);
 192}
 193static inline void dsi_write(struct msm_dsi_host *msm_host, u32 reg, u32 data)
 194{
 195        msm_writel(data, msm_host->ctrl_base + reg);
 196}
 197
 198static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host);
 199static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host);
 200
 201static const struct msm_dsi_cfg_handler *dsi_get_config(
 202                                                struct msm_dsi_host *msm_host)
 203{
 204        const struct msm_dsi_cfg_handler *cfg_hnd = NULL;
 205        struct device *dev = &msm_host->pdev->dev;
 206        struct regulator *gdsc_reg;
 207        struct clk *ahb_clk;
 208        int ret;
 209        u32 major = 0, minor = 0;
 210
 211        gdsc_reg = regulator_get(dev, "gdsc");
 212        if (IS_ERR(gdsc_reg)) {
 213                pr_err("%s: cannot get gdsc\n", __func__);
 214                goto exit;
 215        }
 216
 217        ahb_clk = msm_clk_get(msm_host->pdev, "iface");
 218        if (IS_ERR(ahb_clk)) {
 219                pr_err("%s: cannot get interface clock\n", __func__);
 220                goto put_gdsc;
 221        }
 222
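             /* Briefly power up the controller so the version registers can be read */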
 223        pm_runtime_get_sync(dev);
 224
 225        ret = regulator_enable(gdsc_reg);
 226        if (ret) {
 227                pr_err("%s: unable to enable gdsc\n", __func__);
 228                goto put_gdsc;
 229        }
 230
 231        ret = clk_prepare_enable(ahb_clk);
 232        if (ret) {
 233                pr_err("%s: unable to enable ahb_clk\n", __func__);
 234                goto disable_gdsc;
 235        }
 236
 237        ret = dsi_get_version(msm_host->ctrl_base, &major, &minor);
 238        if (ret) {
 239                pr_err("%s: Invalid version\n", __func__);
 240                goto disable_clks;
 241        }
 242
 243        cfg_hnd = msm_dsi_cfg_get(major, minor);
 244
 245        DBG("%s: Version %x:%x\n", __func__, major, minor);
 246
 247disable_clks:
 248        clk_disable_unprepare(ahb_clk);
 249disable_gdsc:
 250        regulator_disable(gdsc_reg);
 251        pm_runtime_put_sync(dev);
 252put_gdsc:
 253        regulator_put(gdsc_reg);
 254exit:
 255        return cfg_hnd;
 256}
 257
 258static inline struct msm_dsi_host *to_msm_dsi_host(struct mipi_dsi_host *host)
 259{
 260        return container_of(host, struct msm_dsi_host, base);
 261}
 262
 263static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host)
 264{
 265        struct regulator_bulk_data *s = msm_host->supplies;
 266        const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs;
 267        int num = msm_host->cfg_hnd->cfg->reg_cfg.num;
 268        int i;
 269
 270        DBG("");
 271        for (i = num - 1; i >= 0; i--)
 272                if (regs[i].disable_load >= 0)
 273                        regulator_set_load(s[i].consumer,
 274                                           regs[i].disable_load);
 275
 276        regulator_bulk_disable(num, s);
 277}
 278
 279static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host)
 280{
 281        struct regulator_bulk_data *s = msm_host->supplies;
 282        const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs;
 283        int num = msm_host->cfg_hnd->cfg->reg_cfg.num;
 284        int ret, i;
 285
 286        DBG("");
 287        for (i = 0; i < num; i++) {
 288                if (regs[i].enable_load >= 0) {
 289                        ret = regulator_set_load(s[i].consumer,
 290                                                 regs[i].enable_load);
 291                        if (ret < 0) {
 292                                pr_err("regulator %d set op mode failed, %d\n",
 293                                        i, ret);
 294                                goto fail;
 295                        }
 296                }
 297        }
 298
 299        ret = regulator_bulk_enable(num, s);
 300        if (ret < 0) {
 301                pr_err("regulator enable failed, %d\n", ret);
 302                goto fail;
 303        }
 304
 305        return 0;
 306
 307fail:
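             /* restore the disable-time load on the regulators configured so far */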
 308        for (i--; i >= 0; i--)
 309                regulator_set_load(s[i].consumer, regs[i].disable_load);
 310        return ret;
 311}
 312
 313static int dsi_regulator_init(struct msm_dsi_host *msm_host)
 314{
 315        struct regulator_bulk_data *s = msm_host->supplies;
 316        const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs;
 317        int num = msm_host->cfg_hnd->cfg->reg_cfg.num;
 318        int i, ret;
 319
 320        for (i = 0; i < num; i++)
 321                s[i].supply = regs[i].name;
 322
 323        ret = devm_regulator_bulk_get(&msm_host->pdev->dev, num, s);
 324        if (ret < 0) {
 325                pr_err("%s: failed to init regulator, ret=%d\n",
 326                                                __func__, ret);
 327                return ret;
 328        }
 329
 330        return 0;
 331}
 332
 333int dsi_clk_init_v2(struct msm_dsi_host *msm_host)
 334{
 335        struct platform_device *pdev = msm_host->pdev;
 336        int ret = 0;
 337
 338        msm_host->src_clk = msm_clk_get(pdev, "src");
 339
 340        if (IS_ERR(msm_host->src_clk)) {
 341                ret = PTR_ERR(msm_host->src_clk);
 342                pr_err("%s: can't find src clock. ret=%d\n",
 343                        __func__, ret);
 344                msm_host->src_clk = NULL;
 345                return ret;
 346        }
 347
 348        msm_host->esc_clk_src = clk_get_parent(msm_host->esc_clk);
 349        if (!msm_host->esc_clk_src) {
 350                ret = -ENODEV;
 351                pr_err("%s: can't get esc clock parent. ret=%d\n",
 352                        __func__, ret);
 353                return ret;
 354        }
 355
 356        msm_host->dsi_clk_src = clk_get_parent(msm_host->src_clk);
 357        if (!msm_host->dsi_clk_src) {
 358                ret = -ENODEV;
 359                pr_err("%s: can't get src clock parent. ret=%d\n",
 360                        __func__, ret);
 361        }
 362
 363        return ret;
 364}
 365
 366int dsi_clk_init_6g_v2(struct msm_dsi_host *msm_host)
 367{
 368        struct platform_device *pdev = msm_host->pdev;
 369        int ret = 0;
 370
 371        msm_host->byte_intf_clk = msm_clk_get(pdev, "byte_intf");
 372        if (IS_ERR(msm_host->byte_intf_clk)) {
 373                ret = PTR_ERR(msm_host->byte_intf_clk);
 374                pr_err("%s: can't find byte_intf clock. ret=%d\n",
 375                        __func__, ret);
 376        }
 377
 378        return ret;
 379}
 380
 381static int dsi_clk_init(struct msm_dsi_host *msm_host)
 382{
 383        struct platform_device *pdev = msm_host->pdev;
 384        const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
 385        const struct msm_dsi_config *cfg = cfg_hnd->cfg;
 386        int i, ret = 0;
 387
 388        /* get bus clocks */
 389        for (i = 0; i < cfg->num_bus_clks; i++) {
 390                msm_host->bus_clks[i] = msm_clk_get(pdev,
 391                                                cfg->bus_clk_names[i]);
 392                if (IS_ERR(msm_host->bus_clks[i])) {
 393                        ret = PTR_ERR(msm_host->bus_clks[i]);
 394                        pr_err("%s: Unable to get %s clock, ret = %d\n",
 395                                __func__, cfg->bus_clk_names[i], ret);
 396                        goto exit;
 397                }
 398        }
 399
 400        /* get link and source clocks */
 401        msm_host->byte_clk = msm_clk_get(pdev, "byte");
 402        if (IS_ERR(msm_host->byte_clk)) {
 403                ret = PTR_ERR(msm_host->byte_clk);
 404                pr_err("%s: can't find dsi_byte clock. ret=%d\n",
 405                        __func__, ret);
 406                msm_host->byte_clk = NULL;
 407                goto exit;
 408        }
 409
 410        msm_host->pixel_clk = msm_clk_get(pdev, "pixel");
 411        if (IS_ERR(msm_host->pixel_clk)) {
 412                ret = PTR_ERR(msm_host->pixel_clk);
 413                pr_err("%s: can't find dsi_pixel clock. ret=%d\n",
 414                        __func__, ret);
 415                msm_host->pixel_clk = NULL;
 416                goto exit;
 417        }
 418
 419        msm_host->esc_clk = msm_clk_get(pdev, "core");
 420        if (IS_ERR(msm_host->esc_clk)) {
 421                ret = PTR_ERR(msm_host->esc_clk);
 422                pr_err("%s: can't find dsi_esc clock. ret=%d\n",
 423                        __func__, ret);
 424                msm_host->esc_clk = NULL;
 425                goto exit;
 426        }
 427
 428        msm_host->byte_clk_src = clk_get_parent(msm_host->byte_clk);
 429        if (IS_ERR(msm_host->byte_clk_src)) {
 430                ret = PTR_ERR(msm_host->byte_clk_src);
 431                pr_err("%s: can't find byte_clk clock. ret=%d\n", __func__, ret);
 432                goto exit;
 433        }
 434
 435        msm_host->pixel_clk_src = clk_get_parent(msm_host->pixel_clk);
 436        if (IS_ERR(msm_host->pixel_clk_src)) {
 437                ret = PTR_ERR(msm_host->pixel_clk_src);
 438                pr_err("%s: can't find pixel_clk clock. ret=%d\n", __func__, ret);
 439                goto exit;
 440        }
 441
 442        if (cfg_hnd->ops->clk_init_ver)
 443                ret = cfg_hnd->ops->clk_init_ver(msm_host);
 444exit:
 445        return ret;
 446}
 447
 448static int dsi_bus_clk_enable(struct msm_dsi_host *msm_host)
 449{
 450        const struct msm_dsi_config *cfg = msm_host->cfg_hnd->cfg;
 451        int i, ret;
 452
 453        DBG("id=%d", msm_host->id);
 454
 455        for (i = 0; i < cfg->num_bus_clks; i++) {
 456                ret = clk_prepare_enable(msm_host->bus_clks[i]);
 457                if (ret) {
 458                        pr_err("%s: failed to enable bus clock %d ret %d\n",
 459                                __func__, i, ret);
 460                        goto err;
 461                }
 462        }
 463
 464        return 0;
  465err:
  466        while (--i >= 0)
  467                clk_disable_unprepare(msm_host->bus_clks[i]);
 468
 469        return ret;
 470}
 471
 472static void dsi_bus_clk_disable(struct msm_dsi_host *msm_host)
 473{
 474        const struct msm_dsi_config *cfg = msm_host->cfg_hnd->cfg;
 475        int i;
 476
 477        DBG("");
 478
 479        for (i = cfg->num_bus_clks - 1; i >= 0; i--)
 480                clk_disable_unprepare(msm_host->bus_clks[i]);
 481}
 482
 483int msm_dsi_runtime_suspend(struct device *dev)
 484{
 485        struct platform_device *pdev = to_platform_device(dev);
 486        struct msm_dsi *msm_dsi = platform_get_drvdata(pdev);
 487        struct mipi_dsi_host *host = msm_dsi->host;
 488        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
 489
 490        if (!msm_host->cfg_hnd)
 491                return 0;
 492
 493        dsi_bus_clk_disable(msm_host);
 494
 495        return 0;
 496}
 497
 498int msm_dsi_runtime_resume(struct device *dev)
 499{
 500        struct platform_device *pdev = to_platform_device(dev);
 501        struct msm_dsi *msm_dsi = platform_get_drvdata(pdev);
 502        struct mipi_dsi_host *host = msm_dsi->host;
 503        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
 504
 505        if (!msm_host->cfg_hnd)
 506                return 0;
 507
 508        return dsi_bus_clk_enable(msm_host);
 509}
 510
 511int dsi_link_clk_set_rate_6g(struct msm_dsi_host *msm_host)
 512{
 513        int ret;
 514
 515        DBG("Set clk rates: pclk=%d, byteclk=%d",
 516                msm_host->mode->clock, msm_host->byte_clk_rate);
 517
 518        ret = dev_pm_opp_set_rate(&msm_host->pdev->dev,
 519                                  msm_host->byte_clk_rate);
 520        if (ret) {
 521                pr_err("%s: dev_pm_opp_set_rate failed %d\n", __func__, ret);
 522                return ret;
 523        }
 524
 525        ret = clk_set_rate(msm_host->pixel_clk, msm_host->pixel_clk_rate);
 526        if (ret) {
 527                pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret);
 528                return ret;
 529        }
 530
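             /* when present, the byte interface clock runs at half the byte clock rate */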
 531        if (msm_host->byte_intf_clk) {
 532                ret = clk_set_rate(msm_host->byte_intf_clk,
 533                                   msm_host->byte_clk_rate / 2);
 534                if (ret) {
 535                        pr_err("%s: Failed to set rate byte intf clk, %d\n",
 536                               __func__, ret);
 537                        return ret;
 538                }
 539        }
 540
 541        return 0;
 542}
 543
 545int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
 546{
 547        int ret;
 548
 549        ret = clk_prepare_enable(msm_host->esc_clk);
 550        if (ret) {
 551                pr_err("%s: Failed to enable dsi esc clk\n", __func__);
 552                goto error;
 553        }
 554
 555        ret = clk_prepare_enable(msm_host->byte_clk);
 556        if (ret) {
 557                pr_err("%s: Failed to enable dsi byte clk\n", __func__);
 558                goto byte_clk_err;
 559        }
 560
 561        ret = clk_prepare_enable(msm_host->pixel_clk);
 562        if (ret) {
 563                pr_err("%s: Failed to enable dsi pixel clk\n", __func__);
 564                goto pixel_clk_err;
 565        }
 566
 567        if (msm_host->byte_intf_clk) {
 568                ret = clk_prepare_enable(msm_host->byte_intf_clk);
 569                if (ret) {
 570                        pr_err("%s: Failed to enable byte intf clk\n",
 571                               __func__);
 572                        goto byte_intf_clk_err;
 573                }
 574        }
 575
 576        return 0;
 577
 578byte_intf_clk_err:
 579        clk_disable_unprepare(msm_host->pixel_clk);
 580pixel_clk_err:
 581        clk_disable_unprepare(msm_host->byte_clk);
 582byte_clk_err:
 583        clk_disable_unprepare(msm_host->esc_clk);
 584error:
 585        return ret;
 586}
 587
 588int dsi_link_clk_set_rate_v2(struct msm_dsi_host *msm_host)
 589{
 590        int ret;
 591
 592        DBG("Set clk rates: pclk=%d, byteclk=%d, esc_clk=%d, dsi_src_clk=%d",
 593                msm_host->mode->clock, msm_host->byte_clk_rate,
 594                msm_host->esc_clk_rate, msm_host->src_clk_rate);
 595
 596        ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate);
 597        if (ret) {
 598                pr_err("%s: Failed to set rate byte clk, %d\n", __func__, ret);
 599                return ret;
 600        }
 601
 602        ret = clk_set_rate(msm_host->esc_clk, msm_host->esc_clk_rate);
 603        if (ret) {
 604                pr_err("%s: Failed to set rate esc clk, %d\n", __func__, ret);
 605                return ret;
 606        }
 607
 608        ret = clk_set_rate(msm_host->src_clk, msm_host->src_clk_rate);
 609        if (ret) {
 610                pr_err("%s: Failed to set rate src clk, %d\n", __func__, ret);
 611                return ret;
 612        }
 613
 614        ret = clk_set_rate(msm_host->pixel_clk, msm_host->pixel_clk_rate);
 615        if (ret) {
 616                pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret);
 617                return ret;
 618        }
 619
 620        return 0;
 621}
 622
 623int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host)
 624{
 625        int ret;
 626
 627        ret = clk_prepare_enable(msm_host->byte_clk);
 628        if (ret) {
 629                pr_err("%s: Failed to enable dsi byte clk\n", __func__);
 630                goto error;
 631        }
 632
 633        ret = clk_prepare_enable(msm_host->esc_clk);
 634        if (ret) {
 635                pr_err("%s: Failed to enable dsi esc clk\n", __func__);
 636                goto esc_clk_err;
 637        }
 638
 639        ret = clk_prepare_enable(msm_host->src_clk);
 640        if (ret) {
 641                pr_err("%s: Failed to enable dsi src clk\n", __func__);
 642                goto src_clk_err;
 643        }
 644
 645        ret = clk_prepare_enable(msm_host->pixel_clk);
 646        if (ret) {
 647                pr_err("%s: Failed to enable dsi pixel clk\n", __func__);
 648                goto pixel_clk_err;
 649        }
 650
 651        return 0;
 652
 653pixel_clk_err:
 654        clk_disable_unprepare(msm_host->src_clk);
 655src_clk_err:
 656        clk_disable_unprepare(msm_host->esc_clk);
 657esc_clk_err:
 658        clk_disable_unprepare(msm_host->byte_clk);
 659error:
 660        return ret;
 661}
 662
 663void dsi_link_clk_disable_6g(struct msm_dsi_host *msm_host)
 664{
 665        /* Drop the performance state vote */
 666        dev_pm_opp_set_rate(&msm_host->pdev->dev, 0);
 667        clk_disable_unprepare(msm_host->esc_clk);
 668        clk_disable_unprepare(msm_host->pixel_clk);
 669        if (msm_host->byte_intf_clk)
 670                clk_disable_unprepare(msm_host->byte_intf_clk);
 671        clk_disable_unprepare(msm_host->byte_clk);
 672}
 673
 674void dsi_link_clk_disable_v2(struct msm_dsi_host *msm_host)
 675{
 676        clk_disable_unprepare(msm_host->pixel_clk);
 677        clk_disable_unprepare(msm_host->src_clk);
 678        clk_disable_unprepare(msm_host->esc_clk);
 679        clk_disable_unprepare(msm_host->byte_clk);
 680}
 681
 682static u32 dsi_get_pclk_rate(struct msm_dsi_host *msm_host, bool is_dual_dsi)
 683{
 684        struct drm_display_mode *mode = msm_host->mode;
 685        u32 pclk_rate;
 686
 687        pclk_rate = mode->clock * 1000;
 688
 689        /*
 690         * For dual DSI mode, the current DRM mode has the complete width of the
  691         * panel. Since the complete panel is driven by two DSI controllers,
  692         * the clock rates have to be split between them. Adjust the byte and
  693         * pixel clock rates for each DSI host accordingly.
 694         */
 695        if (is_dual_dsi)
 696                pclk_rate /= 2;
 697
 698        return pclk_rate;
 699}
 700
 701static void dsi_calc_pclk(struct msm_dsi_host *msm_host, bool is_dual_dsi)
 702{
 703        u8 lanes = msm_host->lanes;
 704        u32 bpp = dsi_get_bpp(msm_host->format);
 705        u32 pclk_rate = dsi_get_pclk_rate(msm_host, is_dual_dsi);
 706        u64 pclk_bpp = (u64)pclk_rate * bpp;
 707
 708        if (lanes == 0) {
 709                pr_err("%s: forcing mdss_dsi lanes to 1\n", __func__);
 710                lanes = 1;
 711        }
 712
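             /* byte clock rate = pixel clock rate * bits per pixel / (8 * number of lanes) */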
 713        do_div(pclk_bpp, (8 * lanes));
 714
 715        msm_host->pixel_clk_rate = pclk_rate;
 716        msm_host->byte_clk_rate = pclk_bpp;
 717
 718        DBG("pclk=%d, bclk=%d", msm_host->pixel_clk_rate,
 719                                msm_host->byte_clk_rate);
 720
 721}
 722
 723int dsi_calc_clk_rate_6g(struct msm_dsi_host *msm_host, bool is_dual_dsi)
 724{
 725        if (!msm_host->mode) {
 726                pr_err("%s: mode not set\n", __func__);
 727                return -EINVAL;
 728        }
 729
 730        dsi_calc_pclk(msm_host, is_dual_dsi);
 731        msm_host->esc_clk_rate = clk_get_rate(msm_host->esc_clk);
 732        return 0;
 733}
 734
 735int dsi_calc_clk_rate_v2(struct msm_dsi_host *msm_host, bool is_dual_dsi)
 736{
 737        u32 bpp = dsi_get_bpp(msm_host->format);
 738        u64 pclk_bpp;
 739        unsigned int esc_mhz, esc_div;
 740        unsigned long byte_mhz;
 741
 742        dsi_calc_pclk(msm_host, is_dual_dsi);
 743
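             /* src clock rate = pixel clock rate * bpp / 8 */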
 744        pclk_bpp = (u64)dsi_get_pclk_rate(msm_host, is_dual_dsi) * bpp;
 745        do_div(pclk_bpp, 8);
 746        msm_host->src_clk_rate = pclk_bpp;
 747
 748        /*
  749         * The esc clock is derived from the byte clock through a 4-bit
  750         * divider, so we need to find an escape clock frequency that is
  751         * within the MIPI DSI spec range and still reachable with that
  752         * divider. We iterate over escape clock frequencies from 20 MHz
  753         * down to 5 MHz and pick the first one that our divider can
  754         * support.
 755         */
 756
 757        byte_mhz = msm_host->byte_clk_rate / 1000000;
 758
 759        for (esc_mhz = 20; esc_mhz >= 5; esc_mhz--) {
 760                esc_div = DIV_ROUND_UP(byte_mhz, esc_mhz);
 761
 762                /*
 763                 * TODO: Ideally, we shouldn't know what sort of divider
 764                 * is available in mmss_cc, we're just assuming that
 765                 * it'll always be a 4 bit divider. Need to come up with
 766                 * a better way here.
 767                 */
 768                if (esc_div >= 1 && esc_div <= 16)
 769                        break;
 770        }
 771
 772        if (esc_mhz < 5)
 773                return -EINVAL;
 774
 775        msm_host->esc_clk_rate = msm_host->byte_clk_rate / esc_div;
 776
 777        DBG("esc=%d, src=%d", msm_host->esc_clk_rate,
 778                msm_host->src_clk_rate);
 779
 780        return 0;
 781}
 782
 783static void dsi_intr_ctrl(struct msm_dsi_host *msm_host, u32 mask, int enable)
 784{
 785        u32 intr;
 786        unsigned long flags;
 787
 788        spin_lock_irqsave(&msm_host->intr_lock, flags);
 789        intr = dsi_read(msm_host, REG_DSI_INTR_CTRL);
 790
 791        if (enable)
 792                intr |= mask;
 793        else
 794                intr &= ~mask;
 795
 796        DBG("intr=%x enable=%d", intr, enable);
 797
 798        dsi_write(msm_host, REG_DSI_INTR_CTRL, intr);
 799        spin_unlock_irqrestore(&msm_host->intr_lock, flags);
 800}
 801
 802static inline enum dsi_traffic_mode dsi_get_traffic_mode(const u32 mode_flags)
 803{
 804        if (mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
 805                return BURST_MODE;
 806        else if (mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
 807                return NON_BURST_SYNCH_PULSE;
 808
 809        return NON_BURST_SYNCH_EVENT;
 810}
 811
 812static inline enum dsi_vid_dst_format dsi_get_vid_fmt(
 813                                const enum mipi_dsi_pixel_format mipi_fmt)
 814{
 815        switch (mipi_fmt) {
 816        case MIPI_DSI_FMT_RGB888:       return VID_DST_FORMAT_RGB888;
 817        case MIPI_DSI_FMT_RGB666:       return VID_DST_FORMAT_RGB666_LOOSE;
 818        case MIPI_DSI_FMT_RGB666_PACKED:        return VID_DST_FORMAT_RGB666;
 819        case MIPI_DSI_FMT_RGB565:       return VID_DST_FORMAT_RGB565;
 820        default:                        return VID_DST_FORMAT_RGB888;
 821        }
 822}
 823
 824static inline enum dsi_cmd_dst_format dsi_get_cmd_fmt(
 825                                const enum mipi_dsi_pixel_format mipi_fmt)
 826{
 827        switch (mipi_fmt) {
 828        case MIPI_DSI_FMT_RGB888:       return CMD_DST_FORMAT_RGB888;
 829        case MIPI_DSI_FMT_RGB666_PACKED:
 830        case MIPI_DSI_FMT_RGB666:       return CMD_DST_FORMAT_RGB666;
 831        case MIPI_DSI_FMT_RGB565:       return CMD_DST_FORMAT_RGB565;
 832        default:                        return CMD_DST_FORMAT_RGB888;
 833        }
 834}
 835
 836static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
 837                        struct msm_dsi_phy_shared_timings *phy_shared_timings)
 838{
 839        u32 flags = msm_host->mode_flags;
 840        enum mipi_dsi_pixel_format mipi_fmt = msm_host->format;
 841        const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
 842        u32 data = 0, lane_ctrl = 0;
 843
 844        if (!enable) {
 845                dsi_write(msm_host, REG_DSI_CTRL, 0);
 846                return;
 847        }
 848
 849        if (flags & MIPI_DSI_MODE_VIDEO) {
 850                if (flags & MIPI_DSI_MODE_VIDEO_HSE)
 851                        data |= DSI_VID_CFG0_PULSE_MODE_HSA_HE;
 852                if (flags & MIPI_DSI_MODE_VIDEO_HFP)
 853                        data |= DSI_VID_CFG0_HFP_POWER_STOP;
 854                if (flags & MIPI_DSI_MODE_VIDEO_HBP)
 855                        data |= DSI_VID_CFG0_HBP_POWER_STOP;
 856                if (flags & MIPI_DSI_MODE_VIDEO_HSA)
 857                        data |= DSI_VID_CFG0_HSA_POWER_STOP;
 858                /* Always set low power stop mode for BLLP
 859                 * to let command engine send packets
 860                 */
 861                data |= DSI_VID_CFG0_EOF_BLLP_POWER_STOP |
 862                        DSI_VID_CFG0_BLLP_POWER_STOP;
 863                data |= DSI_VID_CFG0_TRAFFIC_MODE(dsi_get_traffic_mode(flags));
 864                data |= DSI_VID_CFG0_DST_FORMAT(dsi_get_vid_fmt(mipi_fmt));
 865                data |= DSI_VID_CFG0_VIRT_CHANNEL(msm_host->channel);
 866                dsi_write(msm_host, REG_DSI_VID_CFG0, data);
 867
 868                /* Do not swap RGB colors */
 869                data = DSI_VID_CFG1_RGB_SWAP(SWAP_RGB);
  870                dsi_write(msm_host, REG_DSI_VID_CFG1, data);
 871        } else {
 872                /* Do not swap RGB colors */
 873                data = DSI_CMD_CFG0_RGB_SWAP(SWAP_RGB);
 874                data |= DSI_CMD_CFG0_DST_FORMAT(dsi_get_cmd_fmt(mipi_fmt));
 875                dsi_write(msm_host, REG_DSI_CMD_CFG0, data);
 876
 877                data = DSI_CMD_CFG1_WR_MEM_START(MIPI_DCS_WRITE_MEMORY_START) |
 878                        DSI_CMD_CFG1_WR_MEM_CONTINUE(
 879                                        MIPI_DCS_WRITE_MEMORY_CONTINUE);
 880                /* Always insert DCS command */
 881                data |= DSI_CMD_CFG1_INSERT_DCS_COMMAND;
 882                dsi_write(msm_host, REG_DSI_CMD_CFG1, data);
 883        }
 884
 885        dsi_write(msm_host, REG_DSI_CMD_DMA_CTRL,
 886                        DSI_CMD_DMA_CTRL_FROM_FRAME_BUFFER |
 887                        DSI_CMD_DMA_CTRL_LOW_POWER);
 888
 889        data = 0;
 890        /* Always assume dedicated TE pin */
 891        data |= DSI_TRIG_CTRL_TE;
 892        data |= DSI_TRIG_CTRL_MDP_TRIGGER(TRIGGER_NONE);
 893        data |= DSI_TRIG_CTRL_DMA_TRIGGER(TRIGGER_SW);
 894        data |= DSI_TRIG_CTRL_STREAM(msm_host->channel);
 895        if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) &&
 896                (cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V1_2))
 897                data |= DSI_TRIG_CTRL_BLOCK_DMA_WITHIN_FRAME;
 898        dsi_write(msm_host, REG_DSI_TRIG_CTRL, data);
 899
 900        data = DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(phy_shared_timings->clk_post) |
 901                DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE(phy_shared_timings->clk_pre);
 902        dsi_write(msm_host, REG_DSI_CLKOUT_TIMING_CTRL, data);
 903
 904        if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) &&
 905            (cfg_hnd->minor > MSM_DSI_6G_VER_MINOR_V1_0) &&
 906            phy_shared_timings->clk_pre_inc_by_2)
 907                dsi_write(msm_host, REG_DSI_T_CLK_PRE_EXTEND,
 908                          DSI_T_CLK_PRE_EXTEND_INC_BY_2_BYTECLK);
 909
 910        data = 0;
 911        if (!(flags & MIPI_DSI_MODE_EOT_PACKET))
 912                data |= DSI_EOT_PACKET_CTRL_TX_EOT_APPEND;
 913        dsi_write(msm_host, REG_DSI_EOT_PACKET_CTRL, data);
 914
 915        /* allow only ack-err-status to generate interrupt */
 916        dsi_write(msm_host, REG_DSI_ERR_INT_MASK0, 0x13ff3fe0);
 917
 918        dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 1);
 919
 920        dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
 921
 922        data = DSI_CTRL_CLK_EN;
 923
 924        DBG("lane number=%d", msm_host->lanes);
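             /* enable a contiguous block of data lanes: LANE0 .. LANE(lanes - 1) */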
 925        data |= ((DSI_CTRL_LANE0 << msm_host->lanes) - DSI_CTRL_LANE0);
 926
 927        dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL,
 928                  DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(msm_host->dlane_swap));
 929
 930        if (!(flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)) {
 931                lane_ctrl = dsi_read(msm_host, REG_DSI_LANE_CTRL);
 932                dsi_write(msm_host, REG_DSI_LANE_CTRL,
 933                        lane_ctrl | DSI_LANE_CTRL_CLKLN_HS_FORCE_REQUEST);
 934        }
 935
 936        data |= DSI_CTRL_ENABLE;
 937
 938        dsi_write(msm_host, REG_DSI_CTRL, data);
 939}
 940
 941static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_dual_dsi)
 942{
 943        struct drm_display_mode *mode = msm_host->mode;
 944        u32 hs_start = 0, vs_start = 0; /* take sync start as 0 */
 945        u32 h_total = mode->htotal;
 946        u32 v_total = mode->vtotal;
 947        u32 hs_end = mode->hsync_end - mode->hsync_start;
 948        u32 vs_end = mode->vsync_end - mode->vsync_start;
 949        u32 ha_start = h_total - mode->hsync_start;
 950        u32 ha_end = ha_start + mode->hdisplay;
 951        u32 va_start = v_total - mode->vsync_start;
 952        u32 va_end = va_start + mode->vdisplay;
 953        u32 hdisplay = mode->hdisplay;
 954        u32 wc;
 955
 956        DBG("");
 957
 958        /*
 959         * For dual DSI mode, the current DRM mode has
  960         * the complete width of the panel. Since the complete
  961         * panel is driven by two DSI controllers, the horizontal
  962         * timings have to be split between them.
 963         * Adjust the DSI host timing values accordingly.
 964         */
 965        if (is_dual_dsi) {
 966                h_total /= 2;
 967                hs_end /= 2;
 968                ha_start /= 2;
 969                ha_end /= 2;
 970                hdisplay /= 2;
 971        }
 972
 973        if (msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) {
 974                dsi_write(msm_host, REG_DSI_ACTIVE_H,
 975                        DSI_ACTIVE_H_START(ha_start) |
 976                        DSI_ACTIVE_H_END(ha_end));
 977                dsi_write(msm_host, REG_DSI_ACTIVE_V,
 978                        DSI_ACTIVE_V_START(va_start) |
 979                        DSI_ACTIVE_V_END(va_end));
 980                dsi_write(msm_host, REG_DSI_TOTAL,
 981                        DSI_TOTAL_H_TOTAL(h_total - 1) |
 982                        DSI_TOTAL_V_TOTAL(v_total - 1));
 983
 984                dsi_write(msm_host, REG_DSI_ACTIVE_HSYNC,
 985                        DSI_ACTIVE_HSYNC_START(hs_start) |
 986                        DSI_ACTIVE_HSYNC_END(hs_end));
 987                dsi_write(msm_host, REG_DSI_ACTIVE_VSYNC_HPOS, 0);
 988                dsi_write(msm_host, REG_DSI_ACTIVE_VSYNC_VPOS,
 989                        DSI_ACTIVE_VSYNC_VPOS_START(vs_start) |
 990                        DSI_ACTIVE_VSYNC_VPOS_END(vs_end));
 991        } else {                /* command mode */
 992                /* image data and 1 byte write_memory_start cmd */
 993                wc = hdisplay * dsi_get_bpp(msm_host->format) / 8 + 1;
 994
 995                dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM0_CTRL,
 996                        DSI_CMD_MDP_STREAM0_CTRL_WORD_COUNT(wc) |
 997                        DSI_CMD_MDP_STREAM0_CTRL_VIRTUAL_CHANNEL(
 998                                        msm_host->channel) |
 999                        DSI_CMD_MDP_STREAM0_CTRL_DATA_TYPE(
1000                                        MIPI_DSI_DCS_LONG_WRITE));
1001
1002                dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM0_TOTAL,
1003                        DSI_CMD_MDP_STREAM0_TOTAL_H_TOTAL(hdisplay) |
1004                        DSI_CMD_MDP_STREAM0_TOTAL_V_TOTAL(mode->vdisplay));
1005        }
1006}
1007
1008static void dsi_sw_reset(struct msm_dsi_host *msm_host)
1009{
1010        dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
1011        wmb(); /* clocks need to be enabled before reset */
1012
1013        dsi_write(msm_host, REG_DSI_RESET, 1);
1014        msleep(DSI_RESET_TOGGLE_DELAY_MS); /* make sure reset happen */
1015        dsi_write(msm_host, REG_DSI_RESET, 0);
1016}
1017
1018static void dsi_op_mode_config(struct msm_dsi_host *msm_host,
1019                                        bool video_mode, bool enable)
1020{
1021        u32 dsi_ctrl;
1022
1023        dsi_ctrl = dsi_read(msm_host, REG_DSI_CTRL);
1024
1025        if (!enable) {
1026                dsi_ctrl &= ~(DSI_CTRL_ENABLE | DSI_CTRL_VID_MODE_EN |
1027                                DSI_CTRL_CMD_MODE_EN);
1028                dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_MDP_DONE |
1029                                        DSI_IRQ_MASK_VIDEO_DONE, 0);
1030        } else {
1031                if (video_mode) {
1032                        dsi_ctrl |= DSI_CTRL_VID_MODE_EN;
1033                } else {                /* command mode */
1034                        dsi_ctrl |= DSI_CTRL_CMD_MODE_EN;
1035                        dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_MDP_DONE, 1);
1036                }
1037                dsi_ctrl |= DSI_CTRL_ENABLE;
1038        }
1039
1040        dsi_write(msm_host, REG_DSI_CTRL, dsi_ctrl);
1041}
1042
1043static void dsi_set_tx_power_mode(int mode, struct msm_dsi_host *msm_host)
1044{
1045        u32 data;
1046
1047        data = dsi_read(msm_host, REG_DSI_CMD_DMA_CTRL);
1048
1049        if (mode == 0)
1050                data &= ~DSI_CMD_DMA_CTRL_LOW_POWER;
1051        else
1052                data |= DSI_CMD_DMA_CTRL_LOW_POWER;
1053
1054        dsi_write(msm_host, REG_DSI_CMD_DMA_CTRL, data);
1055}
1056
1057static void dsi_wait4video_done(struct msm_dsi_host *msm_host)
1058{
1059        u32 ret = 0;
1060        struct device *dev = &msm_host->pdev->dev;
1061
1062        dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 1);
1063
1064        reinit_completion(&msm_host->video_comp);
1065
1066        ret = wait_for_completion_timeout(&msm_host->video_comp,
1067                        msecs_to_jiffies(70));
1068
1069        if (ret == 0)
1070                DRM_DEV_ERROR(dev, "wait for video done timed out\n");
1071
1072        dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 0);
1073}
1074
1075static void dsi_wait4video_eng_busy(struct msm_dsi_host *msm_host)
1076{
1077        if (!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO))
1078                return;
1079
1080        if (msm_host->power_on && msm_host->enabled) {
1081                dsi_wait4video_done(msm_host);
 1082                /* delay 2 to 4 ms to skip the BLLP */
1083                usleep_range(2000, 4000);
1084        }
1085}
1086
1087int dsi_tx_buf_alloc_6g(struct msm_dsi_host *msm_host, int size)
1088{
1089        struct drm_device *dev = msm_host->dev;
1090        struct msm_drm_private *priv = dev->dev_private;
1091        uint64_t iova;
1092        u8 *data;
1093
1094        data = msm_gem_kernel_new(dev, size, MSM_BO_WC,
1095                                        priv->kms->aspace,
1096                                        &msm_host->tx_gem_obj, &iova);
1097
1098        if (IS_ERR(data)) {
1099                msm_host->tx_gem_obj = NULL;
1100                return PTR_ERR(data);
1101        }
1102
1103        msm_gem_object_set_name(msm_host->tx_gem_obj, "tx_gem");
1104
1105        msm_host->tx_size = msm_host->tx_gem_obj->size;
1106
1107        return 0;
1108}
1109
1110int dsi_tx_buf_alloc_v2(struct msm_dsi_host *msm_host, int size)
1111{
1112        struct drm_device *dev = msm_host->dev;
1113
1114        msm_host->tx_buf = dma_alloc_coherent(dev->dev, size,
1115                                        &msm_host->tx_buf_paddr, GFP_KERNEL);
1116        if (!msm_host->tx_buf)
1117                return -ENOMEM;
1118
1119        msm_host->tx_size = size;
1120
1121        return 0;
1122}
1123
1124static void dsi_tx_buf_free(struct msm_dsi_host *msm_host)
1125{
1126        struct drm_device *dev = msm_host->dev;
1127        struct msm_drm_private *priv;
1128
1129        /*
1130         * This is possible if we're tearing down before we've had a chance to
1131         * fully initialize. A very real possibility if our probe is deferred,
1132         * in which case we'll hit msm_dsi_host_destroy() without having run
1133         * through the dsi_tx_buf_alloc().
1134         */
1135        if (!dev)
1136                return;
1137
1138        priv = dev->dev_private;
1139        if (msm_host->tx_gem_obj) {
1140                msm_gem_unpin_iova(msm_host->tx_gem_obj, priv->kms->aspace);
1141                drm_gem_object_put(msm_host->tx_gem_obj);
1142                msm_host->tx_gem_obj = NULL;
1143        }
1144
1145        if (msm_host->tx_buf)
1146                dma_free_coherent(dev->dev, msm_host->tx_size, msm_host->tx_buf,
1147                        msm_host->tx_buf_paddr);
1148}
1149
1150void *dsi_tx_buf_get_6g(struct msm_dsi_host *msm_host)
1151{
1152        return msm_gem_get_vaddr(msm_host->tx_gem_obj);
1153}
1154
1155void *dsi_tx_buf_get_v2(struct msm_dsi_host *msm_host)
1156{
1157        return msm_host->tx_buf;
1158}
1159
1160void dsi_tx_buf_put_6g(struct msm_dsi_host *msm_host)
1161{
1162        msm_gem_put_vaddr(msm_host->tx_gem_obj);
1163}
1164
1165/*
1166 * prepare cmd buffer to be txed
1167 */
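     /*
      * MSM-specific layout of the TX buffer built below: bytes 0-2 carry the
      * reordered DSI packet header, byte 3 carries the control flags
      * (bit 7: last packet, bit 6: long packet, bit 5: response requested),
      * the payload starts at byte 4, and the buffer is padded with 0xff up
      * to a 4-byte boundary.
      */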
1168static int dsi_cmd_dma_add(struct msm_dsi_host *msm_host,
1169                           const struct mipi_dsi_msg *msg)
1170{
1171        const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
1172        struct mipi_dsi_packet packet;
1173        int len;
1174        int ret;
1175        u8 *data;
1176
1177        ret = mipi_dsi_create_packet(&packet, msg);
1178        if (ret) {
1179                pr_err("%s: create packet failed, %d\n", __func__, ret);
1180                return ret;
1181        }
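             /* round the packet length up to a 4-byte boundary */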
1182        len = (packet.size + 3) & (~0x3);
1183
1184        if (len > msm_host->tx_size) {
1185                pr_err("%s: packet size is too big\n", __func__);
1186                return -EINVAL;
1187        }
1188
1189        data = cfg_hnd->ops->tx_buf_get(msm_host);
1190        if (IS_ERR(data)) {
1191                ret = PTR_ERR(data);
1192                pr_err("%s: get vaddr failed, %d\n", __func__, ret);
1193                return ret;
1194        }
1195
1196        /* MSM specific command format in memory */
1197        data[0] = packet.header[1];
1198        data[1] = packet.header[2];
1199        data[2] = packet.header[0];
1200        data[3] = BIT(7); /* Last packet */
1201        if (mipi_dsi_packet_format_is_long(msg->type))
1202                data[3] |= BIT(6);
1203        if (msg->rx_buf && msg->rx_len)
1204                data[3] |= BIT(5);
1205
1206        /* Long packet */
1207        if (packet.payload && packet.payload_length)
1208                memcpy(data + 4, packet.payload, packet.payload_length);
1209
1210        /* Append 0xff to the end */
1211        if (packet.size < len)
1212                memset(data + packet.size, 0xff, len - packet.size);
1213
1214        if (cfg_hnd->ops->tx_buf_put)
1215                cfg_hnd->ops->tx_buf_put(msm_host);
1216
1217        return len;
1218}
1219
1220/*
1221 * dsi_short_read1_resp: 1 parameter
1222 */
1223static int dsi_short_read1_resp(u8 *buf, const struct mipi_dsi_msg *msg)
1224{
1225        u8 *data = msg->rx_buf;
1226        if (data && (msg->rx_len >= 1)) {
1227                *data = buf[1]; /* strip out dcs type */
1228                return 1;
1229        } else {
1230                pr_err("%s: read data does not match with rx_buf len %zu\n",
1231                        __func__, msg->rx_len);
1232                return -EINVAL;
1233        }
1234}
1235
1236/*
1237 * dsi_short_read2_resp: 2 parameter
1238 */
1239static int dsi_short_read2_resp(u8 *buf, const struct mipi_dsi_msg *msg)
1240{
1241        u8 *data = msg->rx_buf;
1242        if (data && (msg->rx_len >= 2)) {
1243                data[0] = buf[1]; /* strip out dcs type */
1244                data[1] = buf[2];
1245                return 2;
1246        } else {
1247                pr_err("%s: read data does not match with rx_buf len %zu\n",
1248                        __func__, msg->rx_len);
1249                return -EINVAL;
1250        }
1251}
1252
1253static int dsi_long_read_resp(u8 *buf, const struct mipi_dsi_msg *msg)
1254{
1255        /* strip out 4 byte dcs header */
1256        if (msg->rx_buf && msg->rx_len)
1257                memcpy(msg->rx_buf, buf + 4, msg->rx_len);
1258
1259        return msg->rx_len;
1260}
1261
1262int dsi_dma_base_get_6g(struct msm_dsi_host *msm_host, uint64_t *dma_base)
1263{
1264        struct drm_device *dev = msm_host->dev;
1265        struct msm_drm_private *priv = dev->dev_private;
1266
1267        if (!dma_base)
1268                return -EINVAL;
1269
1270        return msm_gem_get_and_pin_iova(msm_host->tx_gem_obj,
1271                                priv->kms->aspace, dma_base);
1272}
1273
1274int dsi_dma_base_get_v2(struct msm_dsi_host *msm_host, uint64_t *dma_base)
1275{
1276        if (!dma_base)
1277                return -EINVAL;
1278
1279        *dma_base = msm_host->tx_buf_paddr;
1280        return 0;
1281}
1282
1283static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
1284{
1285        const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
1286        int ret;
1287        uint64_t dma_base;
1288        bool triggered;
1289
1290        ret = cfg_hnd->ops->dma_base_get(msm_host, &dma_base);
1291        if (ret) {
1292                pr_err("%s: failed to get iova: %d\n", __func__, ret);
1293                return ret;
1294        }
1295
1296        reinit_completion(&msm_host->dma_comp);
1297
1298        dsi_wait4video_eng_busy(msm_host);
1299
1300        triggered = msm_dsi_manager_cmd_xfer_trigger(
1301                                                msm_host->id, dma_base, len);
1302        if (triggered) {
1303                ret = wait_for_completion_timeout(&msm_host->dma_comp,
1304                                        msecs_to_jiffies(200));
1305                DBG("ret=%d", ret);
1306                if (ret == 0)
1307                        ret = -ETIMEDOUT;
1308                else
1309                        ret = len;
1310        } else
1311                ret = len;
1312
1313        return ret;
1314}
1315
1316static int dsi_cmd_dma_rx(struct msm_dsi_host *msm_host,
1317                        u8 *buf, int rx_byte, int pkt_size)
1318{
1319        u32 *temp, data;
1320        int i, j = 0, cnt;
1321        u32 read_cnt;
1322        u8 reg[16];
1323        int repeated_bytes = 0;
1324        int buf_offset = buf - msm_host->rx_buf;
1325
1326        temp = (u32 *)reg;
1327        cnt = (rx_byte + 3) >> 2;
1328        if (cnt > 4)
1329                cnt = 4; /* 4 x 32 bits registers only */
1330
1331        if (rx_byte == 4)
1332                read_cnt = 4;
1333        else
1334                read_cnt = pkt_size + 6;
1335
1336        /*
 1337         * In case of multiple reads from the panel, after the first read there
 1338         * is a possibility that some bytes of the payload are repeated in the
 1339         * RDBK_DATA registers, since we read all the parameters from the panel
 1340         * starting from the first byte on every pass. We need to skip the
 1341         * repeated bytes and then append the new parameters to the rx buffer.
1342         */
1343        if (read_cnt > 16) {
1344                int bytes_shifted;
1345                /* Any data more than 16 bytes will be shifted out.
1346                 * The temp read buffer should already contain these bytes.
1347                 * The remaining bytes in read buffer are the repeated bytes.
1348                 */
1349                bytes_shifted = read_cnt - 16;
1350                repeated_bytes = buf_offset - bytes_shifted;
1351        }
1352
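             /* read the RDBK_DATA registers, highest index first, converting each word to host byte order */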
1353        for (i = cnt - 1; i >= 0; i--) {
1354                data = dsi_read(msm_host, REG_DSI_RDBK_DATA(i));
1355                *temp++ = ntohl(data); /* to host byte order */
1356                DBG("data = 0x%x and ntohl(data) = 0x%x", data, ntohl(data));
1357        }
1358
1359        for (i = repeated_bytes; i < 16; i++)
1360                buf[j++] = reg[i];
1361
1362        return j;
1363}
1364
1365static int dsi_cmds2buf_tx(struct msm_dsi_host *msm_host,
1366                                const struct mipi_dsi_msg *msg)
1367{
1368        int len, ret;
1369        int bllp_len = msm_host->mode->hdisplay *
1370                        dsi_get_bpp(msm_host->format) / 8;
1371
1372        len = dsi_cmd_dma_add(msm_host, msg);
 1373        if (len < 0) {
1374                pr_err("%s: failed to add cmd type = 0x%x\n",
1375                        __func__,  msg->type);
1376                return -EINVAL;
1377        }
1378
 1379        /* For video mode, do not send cmds longer than
 1380         * one pixel line, since they are only transmitted
 1381         * during the BLLP.
 1382         */
1383        /* TODO: if the command is sent in LP mode, the bit rate is only
1384         * half of esc clk rate. In this case, if the video is already
1385         * actively streaming, we need to check more carefully if the
1386         * command can be fit into one BLLP.
1387         */
1388        if ((msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) && (len > bllp_len)) {
1389                pr_err("%s: cmd cannot fit into BLLP period, len=%d\n",
1390                        __func__, len);
1391                return -EINVAL;
1392        }
1393
1394        ret = dsi_cmd_dma_tx(msm_host, len);
1395        if (ret < len) {
1396                pr_err("%s: cmd dma tx failed, type=0x%x, data0=0x%x, len=%d\n",
1397                        __func__, msg->type, (*(u8 *)(msg->tx_buf)), len);
1398                return -ECOMM;
1399        }
1400
1401        return len;
1402}
1403
1404static void dsi_sw_reset_restore(struct msm_dsi_host *msm_host)
1405{
1406        u32 data0, data1;
1407
1408        data0 = dsi_read(msm_host, REG_DSI_CTRL);
1409        data1 = data0;
1410        data1 &= ~DSI_CTRL_ENABLE;
1411        dsi_write(msm_host, REG_DSI_CTRL, data1);
1412        /*
 1413         * The DSI controller needs to be disabled before
 1414         * the clocks are turned on
1415         */
1416        wmb();
1417
1418        dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
1419        wmb();  /* make sure clocks enabled */
1420
1421        /* dsi controller can only be reset while clocks are running */
1422        dsi_write(msm_host, REG_DSI_RESET, 1);
1423        msleep(DSI_RESET_TOGGLE_DELAY_MS); /* make sure reset happen */
1424        dsi_write(msm_host, REG_DSI_RESET, 0);
1425        wmb();  /* controller out of reset */
1426        dsi_write(msm_host, REG_DSI_CTRL, data0);
1427        wmb();  /* make sure dsi controller enabled again */
1428}
1429
1430static void dsi_hpd_worker(struct work_struct *work)
1431{
1432        struct msm_dsi_host *msm_host =
1433                container_of(work, struct msm_dsi_host, hpd_work);
1434
1435        drm_helper_hpd_irq_event(msm_host->dev);
1436}
1437
1438static void dsi_err_worker(struct work_struct *work)
1439{
1440        struct msm_dsi_host *msm_host =
1441                container_of(work, struct msm_dsi_host, err_work);
1442        u32 status = msm_host->err_work_state;
1443
1444        pr_err_ratelimited("%s: status=%x\n", __func__, status);
1445        if (status & DSI_ERR_STATE_MDP_FIFO_UNDERFLOW)
1446                dsi_sw_reset_restore(msm_host);
1447
1448        /* It is safe to clear here because error irq is disabled. */
1449        msm_host->err_work_state = 0;
1450
1451        /* enable dsi error interrupt */
1452        dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 1);
1453}
1454
1455static void dsi_ack_err_status(struct msm_dsi_host *msm_host)
1456{
1457        u32 status;
1458
1459        status = dsi_read(msm_host, REG_DSI_ACK_ERR_STATUS);
1460
1461        if (status) {
1462                dsi_write(msm_host, REG_DSI_ACK_ERR_STATUS, status);
 1463                /* An extra write of 0 is needed to clear the error bits */
1464                dsi_write(msm_host, REG_DSI_ACK_ERR_STATUS, 0);
1465                msm_host->err_work_state |= DSI_ERR_STATE_ACK;
1466        }
1467}
1468
1469static void dsi_timeout_status(struct msm_dsi_host *msm_host)
1470{
1471        u32 status;
1472
1473        status = dsi_read(msm_host, REG_DSI_TIMEOUT_STATUS);
1474
1475        if (status) {
1476                dsi_write(msm_host, REG_DSI_TIMEOUT_STATUS, status);
1477                msm_host->err_work_state |= DSI_ERR_STATE_TIMEOUT;
1478        }
1479}
1480
1481static void dsi_dln0_phy_err(struct msm_dsi_host *msm_host)
1482{
1483        u32 status;
1484
1485        status = dsi_read(msm_host, REG_DSI_DLN0_PHY_ERR);
1486
1487        if (status & (DSI_DLN0_PHY_ERR_DLN0_ERR_ESC |
1488                        DSI_DLN0_PHY_ERR_DLN0_ERR_SYNC_ESC |
1489                        DSI_DLN0_PHY_ERR_DLN0_ERR_CONTROL |
1490                        DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP0 |
1491                        DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP1)) {
1492                dsi_write(msm_host, REG_DSI_DLN0_PHY_ERR, status);
1493                msm_host->err_work_state |= DSI_ERR_STATE_DLN0_PHY;
1494        }
1495}
1496
1497static void dsi_fifo_status(struct msm_dsi_host *msm_host)
1498{
1499        u32 status;
1500
1501        status = dsi_read(msm_host, REG_DSI_FIFO_STATUS);
1502
1503        /* fifo underflow, overflow */
1504        if (status) {
1505                dsi_write(msm_host, REG_DSI_FIFO_STATUS, status);
1506                msm_host->err_work_state |= DSI_ERR_STATE_FIFO;
1507                if (status & DSI_FIFO_STATUS_CMD_MDP_FIFO_UNDERFLOW)
1508                        msm_host->err_work_state |=
1509                                        DSI_ERR_STATE_MDP_FIFO_UNDERFLOW;
1510        }
1511}
1512
1513static void dsi_status(struct msm_dsi_host *msm_host)
1514{
1515        u32 status;
1516
1517        status = dsi_read(msm_host, REG_DSI_STATUS0);
1518
1519        if (status & DSI_STATUS0_INTERLEAVE_OP_CONTENTION) {
1520                dsi_write(msm_host, REG_DSI_STATUS0, status);
1521                msm_host->err_work_state |=
1522                        DSI_ERR_STATE_INTERLEAVE_OP_CONTENTION;
1523        }
1524}
1525
1526static void dsi_clk_status(struct msm_dsi_host *msm_host)
1527{
1528        u32 status;
1529
1530        status = dsi_read(msm_host, REG_DSI_CLK_STATUS);
1531
1532        if (status & DSI_CLK_STATUS_PLL_UNLOCKED) {
1533                dsi_write(msm_host, REG_DSI_CLK_STATUS, status);
1534                msm_host->err_work_state |= DSI_ERR_STATE_PLL_UNLOCKED;
1535        }
1536}
1537
1538static void dsi_error(struct msm_dsi_host *msm_host)
1539{
1540        /* disable dsi error interrupt */
1541        dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 0);
1542
1543        dsi_clk_status(msm_host);
1544        dsi_fifo_status(msm_host);
1545        dsi_ack_err_status(msm_host);
1546        dsi_timeout_status(msm_host);
1547        dsi_status(msm_host);
1548        dsi_dln0_phy_err(msm_host);
1549
1550        queue_work(msm_host->workqueue, &msm_host->err_work);
1551}
1552
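/*
 * Top-level controller interrupt handler: read and ack the latched status
 * in REG_DSI_INTR_CTRL under intr_lock, run the error path if needed, and
 * complete video_comp/dma_comp for the paths that wait on video done and
 * command DMA done.
 */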
1553static irqreturn_t dsi_host_irq(int irq, void *ptr)
1554{
1555        struct msm_dsi_host *msm_host = ptr;
1556        u32 isr;
1557        unsigned long flags;
1558
1559        if (!msm_host->ctrl_base)
1560                return IRQ_HANDLED;
1561
1562        spin_lock_irqsave(&msm_host->intr_lock, flags);
1563        isr = dsi_read(msm_host, REG_DSI_INTR_CTRL);
1564        dsi_write(msm_host, REG_DSI_INTR_CTRL, isr);
1565        spin_unlock_irqrestore(&msm_host->intr_lock, flags);
1566
1567        DBG("isr=0x%x, id=%d", isr, msm_host->id);
1568
1569        if (isr & DSI_IRQ_ERROR)
1570                dsi_error(msm_host);
1571
1572        if (isr & DSI_IRQ_VIDEO_DONE)
1573                complete(&msm_host->video_comp);
1574
1575        if (isr & DSI_IRQ_CMD_DMA_DONE)
1576                complete(&msm_host->dma_comp);
1577
1578        return IRQ_HANDLED;
1579}
1580
1581static int dsi_host_init_panel_gpios(struct msm_dsi_host *msm_host,
1582                        struct device *panel_device)
1583{
1584        msm_host->disp_en_gpio = devm_gpiod_get_optional(panel_device,
1585                                                         "disp-enable",
1586                                                         GPIOD_OUT_LOW);
1587        if (IS_ERR(msm_host->disp_en_gpio)) {
1588                DBG("cannot get disp-enable-gpios %ld",
1589                                PTR_ERR(msm_host->disp_en_gpio));
1590                return PTR_ERR(msm_host->disp_en_gpio);
1591        }
1592
1593        msm_host->te_gpio = devm_gpiod_get_optional(panel_device, "disp-te",
1594                                                                GPIOD_IN);
1595        if (IS_ERR(msm_host->te_gpio)) {
1596                DBG("cannot get disp-te-gpios %ld", PTR_ERR(msm_host->te_gpio));
1597                return PTR_ERR(msm_host->te_gpio);
1598        }
1599
1600        return 0;
1601}
1602
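/*
 * mipi_dsi_host_ops .attach callback: cache the peripheral's channel,
 * lane count, format and mode flags, claim the optional disp-enable and
 * disp-te gpios from the panel's DT node, and schedule the hpd worker if
 * the drm device is already set up.
 */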
1603static int dsi_host_attach(struct mipi_dsi_host *host,
1604                                        struct mipi_dsi_device *dsi)
1605{
1606        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1607        int ret;
1608
1609        if (dsi->lanes > msm_host->num_data_lanes)
1610                return -EINVAL;
1611
1612        msm_host->channel = dsi->channel;
1613        msm_host->lanes = dsi->lanes;
1614        msm_host->format = dsi->format;
1615        msm_host->mode_flags = dsi->mode_flags;
1616
1617        /* Some gpios defined in panel DT need to be controlled by host */
1618        ret = dsi_host_init_panel_gpios(msm_host, &dsi->dev);
1619        if (ret)
1620                return ret;
1621
1622        DBG("id=%d", msm_host->id);
1623        if (msm_host->dev)
1624                queue_work(msm_host->workqueue, &msm_host->hpd_work);
1625
1626        return 0;
1627}
1628
1629static int dsi_host_detach(struct mipi_dsi_host *host,
1630                                        struct mipi_dsi_device *dsi)
1631{
1632        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1633
1634        msm_host->device_node = NULL;
1635
1636        DBG("id=%d", msm_host->id);
1637        if (msm_host->dev)
1638                queue_work(msm_host->workqueue, &msm_host->hpd_work);
1639
1640        return 0;
1641}
1642
1643static ssize_t dsi_host_transfer(struct mipi_dsi_host *host,
1644                                        const struct mipi_dsi_msg *msg)
1645{
1646        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1647        int ret;
1648
1649        if (!msg || !msm_host->power_on)
1650                return -EINVAL;
1651
1652        mutex_lock(&msm_host->cmd_mutex);
1653        ret = msm_dsi_manager_cmd_xfer(msm_host->id, msg);
1654        mutex_unlock(&msm_host->cmd_mutex);
1655
1656        return ret;
1657}
1658
1659static const struct mipi_dsi_host_ops dsi_host_ops = {
1660        .attach = dsi_host_attach,
1661        .detach = dsi_host_detach,
1662        .transfer = dsi_host_transfer,
1663};
1664
1665/*
1666 * List of supported physical to logical lane mappings.
1667 * For example, the 2nd entry represents the following mapping:
1668 *
1669 * "3012": Logic 3->Phys 0; Logic 0->Phys 1; Logic 1->Phys 2; Logic 2->Phys 3;
1670 */
1671static const int supported_data_lane_swaps[][4] = {
1672        { 0, 1, 2, 3 },
1673        { 3, 0, 1, 2 },
1674        { 2, 3, 0, 1 },
1675        { 1, 2, 3, 0 },
1676        { 0, 3, 2, 1 },
1677        { 1, 0, 3, 2 },
1678        { 2, 1, 0, 3 },
1679        { 3, 2, 1, 0 },
1680};
1681
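/*
 * The optional "data-lanes" endpoint property is a logical->physical
 * mapping, while each entry of supported_data_lane_swaps[] above is a
 * physical->logical mapping, so a DT mapping matches table entry i when
 * swap[lane_map[j]] == j for every logical lane j.
 *
 * Illustrative example (derived from the table above, not from any real
 * DT): data-lanes = <3 0 1 2> puts logical lane 0 on physical lane 3,
 * 1 on 0, 2 on 1 and 3 on 2, which is satisfied by entry { 1, 2, 3, 0 },
 * so dlane_swap would be set to index 3.
 */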
1682static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
1683                                    struct device_node *ep)
1684{
1685        struct device *dev = &msm_host->pdev->dev;
1686        struct property *prop;
1687        u32 lane_map[4];
1688        int ret, i, len, num_lanes;
1689
1690        prop = of_find_property(ep, "data-lanes", &len);
1691        if (!prop) {
1692                DRM_DEV_DEBUG(dev,
1693                        "failed to find data lane mapping, using default\n");
1694                return 0;
1695        }
1696
1697        num_lanes = len / sizeof(u32);
1698
1699        if (num_lanes < 1 || num_lanes > 4) {
1700                DRM_DEV_ERROR(dev, "bad number of data lanes\n");
1701                return -EINVAL;
1702        }
1703
1704        msm_host->num_data_lanes = num_lanes;
1705
1706        ret = of_property_read_u32_array(ep, "data-lanes", lane_map,
1707                                         num_lanes);
1708        if (ret) {
1709                DRM_DEV_ERROR(dev, "failed to read lane data\n");
1710                return ret;
1711        }
1712
1713        /*
1714         * compare the DT-specified logical-to-physical lane mapping with
1715         * the mappings supported by the hardware
1716         */
1717        for (i = 0; i < ARRAY_SIZE(supported_data_lane_swaps); i++) {
1718                const int *swap = supported_data_lane_swaps[i];
1719                int j;
1720
1721                /*
1722                 * the data-lanes array we get from DT has a logical->physical
1723                 * mapping. The "data lane swap" register field represents
1724                 * supported configurations in a physical->logical mapping.
1725                 * Translate the DT mapping to what we understand and find a
1726                 * configuration that works.
1727                 */
1728                for (j = 0; j < num_lanes; j++) {
1729                        if (lane_map[j] > 3)
1730                                DRM_DEV_ERROR(dev, "bad physical lane entry %u\n",
1731                                        lane_map[j]);
1732
1733                        if (swap[lane_map[j]] != j)
1734                                break;
1735                }
1736
1737                if (j == num_lanes) {
1738                        msm_host->dlane_swap = i;
1739                        return 0;
1740                }
1741        }
1742
1743        return -EINVAL;
1744}
1745
1746static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
1747{
1748        struct device *dev = &msm_host->pdev->dev;
1749        struct device_node *np = dev->of_node;
1750        struct device_node *endpoint, *device_node;
1751        int ret = 0;
1752
1753        /*
1754         * Get the endpoint of the output port of the DSI host. In our case,
1755         * this is mapped to port number with reg = 1. Don't return an error if
1756         * the remote endpoint isn't defined. It's possible that there is
1757         * nothing connected to the dsi output.
1758         */
1759        endpoint = of_graph_get_endpoint_by_regs(np, 1, -1);
1760        if (!endpoint) {
1761                DRM_DEV_DEBUG(dev, "%s: no endpoint\n", __func__);
1762                return 0;
1763        }
1764
1765        ret = dsi_host_parse_lane_data(msm_host, endpoint);
1766        if (ret) {
1767                DRM_DEV_ERROR(dev, "%s: invalid lane configuration %d\n",
1768                        __func__, ret);
1769                ret = -EINVAL;
1770                goto err;
1771        }
1772
1773        /* Get panel node from the output port's endpoint data */
1774        device_node = of_graph_get_remote_node(np, 1, 0);
1775        if (!device_node) {
1776                DRM_DEV_DEBUG(dev, "%s: no valid device\n", __func__);
1777                ret = -ENODEV;
1778                goto err;
1779        }
1780
1781        msm_host->device_node = device_node;
1782
1783        if (of_property_read_bool(np, "syscon-sfpb")) {
1784                msm_host->sfpb = syscon_regmap_lookup_by_phandle(np,
1785                                        "syscon-sfpb");
1786                if (IS_ERR(msm_host->sfpb)) {
1787                        DRM_DEV_ERROR(dev, "%s: failed to get sfpb regmap\n",
1788                                __func__);
1789                        ret = PTR_ERR(msm_host->sfpb);
1790                }
1791        }
1792
1793        of_node_put(device_node);
1794
1795err:
1796        of_node_put(endpoint);
1797
1798        return ret;
1799}
1800
1801static int dsi_host_get_id(struct msm_dsi_host *msm_host)
1802{
1803        struct platform_device *pdev = msm_host->pdev;
1804        const struct msm_dsi_config *cfg = msm_host->cfg_hnd->cfg;
1805        struct resource *res;
1806        int i;
1807
1808        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dsi_ctrl");
1809        if (!res)
1810                return -EINVAL;
1811
1812        for (i = 0; i < cfg->num_dsi; i++) {
1813                if (cfg->io_start[i] == res->start)
1814                        return i;
1815        }
1816
1817        return -EINVAL;
1818}
1819
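/*
 * One-time host construction: parse the DT graph, map and identify the
 * controller, initialize regulators, clocks, the optional OPP table and
 * the rx buffer, and create the ordered workqueue that backs the error
 * and hpd workers.  The workqueue and the runtime PM enable are undone in
 * msm_dsi_host_destroy().
 */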
1820int msm_dsi_host_init(struct msm_dsi *msm_dsi)
1821{
1822        struct msm_dsi_host *msm_host = NULL;
1823        struct platform_device *pdev = msm_dsi->pdev;
1824        int ret;
1825
1826        msm_host = devm_kzalloc(&pdev->dev, sizeof(*msm_host), GFP_KERNEL);
1827        if (!msm_host) {
1828                ret = -ENOMEM;
1829                goto fail;
1830        }
1831
1832        msm_host->pdev = pdev;
1833        msm_dsi->host = &msm_host->base;
1834
1835        ret = dsi_host_parse_dt(msm_host);
1836        if (ret) {
1837                pr_err("%s: failed to parse dt\n", __func__);
1838                goto fail;
1839        }
1840
1841        msm_host->ctrl_base = msm_ioremap_size(pdev, "dsi_ctrl", "DSI CTRL", &msm_host->ctrl_size);
1842        if (IS_ERR(msm_host->ctrl_base)) {
1843                pr_err("%s: unable to map Dsi ctrl base\n", __func__);
1844                ret = PTR_ERR(msm_host->ctrl_base);
1845                goto fail;
1846        }
1847
1848        pm_runtime_enable(&pdev->dev);
1849
1850        msm_host->cfg_hnd = dsi_get_config(msm_host);
1851        if (!msm_host->cfg_hnd) {
1852                ret = -EINVAL;
1853                pr_err("%s: get config failed\n", __func__);
1854                goto fail;
1855        }
1856
1857        msm_host->id = dsi_host_get_id(msm_host);
1858        if (msm_host->id < 0) {
1859                ret = msm_host->id;
1860                pr_err("%s: unable to identify DSI host index\n", __func__);
1861                goto fail;
1862        }
1863
1864        /* fixup base address by io offset */
1865        msm_host->ctrl_base += msm_host->cfg_hnd->cfg->io_offset;
1866
1867        ret = dsi_regulator_init(msm_host);
1868        if (ret) {
1869                pr_err("%s: regulator init failed\n", __func__);
1870                goto fail;
1871        }
1872
1873        ret = dsi_clk_init(msm_host);
1874        if (ret) {
1875                pr_err("%s: unable to initialize dsi clks\n", __func__);
1876                goto fail;
1877        }
1878
1879        msm_host->rx_buf = devm_kzalloc(&pdev->dev, SZ_4K, GFP_KERNEL);
1880        if (!msm_host->rx_buf) {
1881                ret = -ENOMEM;
1882                pr_err("%s: alloc rx temp buf failed\n", __func__);
1883                goto fail;
1884        }
1885
1886        ret = devm_pm_opp_set_clkname(&pdev->dev, "byte");
1887        if (ret)
1888                return ret;
1889        /* OPP table is optional */
1890        ret = devm_pm_opp_of_add_table(&pdev->dev);
1891        if (ret && ret != -ENODEV) {
1892                dev_err(&pdev->dev, "invalid OPP table in device tree\n");
1893                return ret;
1894        }
1895
1896        init_completion(&msm_host->dma_comp);
1897        init_completion(&msm_host->video_comp);
1898        mutex_init(&msm_host->dev_mutex);
1899        mutex_init(&msm_host->cmd_mutex);
1900        spin_lock_init(&msm_host->intr_lock);
1901
1902        /* setup workqueue */
1903        msm_host->workqueue = alloc_ordered_workqueue("dsi_drm_work", 0);
1904        INIT_WORK(&msm_host->err_work, dsi_err_worker);
1905        INIT_WORK(&msm_host->hpd_work, dsi_hpd_worker);
1906
1907        msm_dsi->id = msm_host->id;
1908
1909        DBG("Dsi Host %d initialized", msm_host->id);
1910        return 0;
1911
1912fail:
1913        return ret;
1914}
1915
1916void msm_dsi_host_destroy(struct mipi_dsi_host *host)
1917{
1918        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1919
1920        DBG("");
1921        dsi_tx_buf_free(msm_host);
1922        if (msm_host->workqueue) {
1923                flush_workqueue(msm_host->workqueue);
1924                destroy_workqueue(msm_host->workqueue);
1925                msm_host->workqueue = NULL;
1926        }
1927
1928        mutex_destroy(&msm_host->cmd_mutex);
1929        mutex_destroy(&msm_host->dev_mutex);
1930
1931        pm_runtime_disable(&msm_host->pdev->dev);
1932}
1933
1934int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
1935                                        struct drm_device *dev)
1936{
1937        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1938        const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
1939        struct platform_device *pdev = msm_host->pdev;
1940        int ret;
1941
1942        msm_host->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
1943        if (!msm_host->irq) {
1944                ret = -EINVAL;
1945                DRM_DEV_ERROR(dev->dev, "failed to get irq\n");
1946                return ret;
1947        }
1948
1949        ret = devm_request_irq(&pdev->dev, msm_host->irq,
1950                        dsi_host_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
1951                        "dsi_isr", msm_host);
1952        if (ret < 0) {
1953                DRM_DEV_ERROR(&pdev->dev, "failed to request IRQ%u: %d\n",
1954                                msm_host->irq, ret);
1955                return ret;
1956        }
1957
1958        msm_host->dev = dev;
1959        ret = cfg_hnd->ops->tx_buf_alloc(msm_host, SZ_4K);
1960        if (ret) {
1961                pr_err("%s: alloc tx gem obj failed, %d\n", __func__, ret);
1962                return ret;
1963        }
1964
1965        return 0;
1966}
1967
1968int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer)
1969{
1970        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1971        int ret;
1972
1973        /* Register mipi dsi host */
1974        if (!msm_host->registered) {
1975                host->dev = &msm_host->pdev->dev;
1976                host->ops = &dsi_host_ops;
1977                ret = mipi_dsi_host_register(host);
1978                if (ret)
1979                        return ret;
1980
1981                msm_host->registered = true;
1982
1983                /* If the panel driver has not been probed after the host is
1984                 * registered, defer the host's probe.
1985                 * This makes sure the panel is connected when fbcon detects the
1986                 * connector status and gets the proper display mode to create
1987                 * the framebuffer.
1988                 * Don't try to defer if there is nothing connected to the dsi
1989                 * output.
1990                 */
1991                if (check_defer && msm_host->device_node) {
1992                        if (IS_ERR(of_drm_find_panel(msm_host->device_node)))
1993                                if (!of_drm_find_bridge(msm_host->device_node))
1994                                        return -EPROBE_DEFER;
1995                }
1996        }
1997
1998        return 0;
1999}
2000
2001void msm_dsi_host_unregister(struct mipi_dsi_host *host)
2002{
2003        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2004
2005        if (msm_host->registered) {
2006                mipi_dsi_host_unregister(host);
2007                host->dev = NULL;
2008                host->ops = NULL;
2009                msm_host->registered = false;
2010        }
2011}
2012
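/*
 * msm_dsi_host_xfer_prepare()/msm_dsi_host_xfer_restore() bracket a
 * command transfer: prepare powers up the link clocks through runtime PM,
 * adjusts the tx power mode when the message does not request LPM, saves
 * DSI_CTRL and switches the controller into command mode with the
 * cmd-dma-done interrupt unmasked; restore undoes each of these steps.
 */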
2013int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host,
2014                                const struct mipi_dsi_msg *msg)
2015{
2016        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2017        const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
2018
2019        /* TODO: make sure dsi_cmd_mdp is idle.
2020         * Since DSI6G v1.2.0, we can set DSI_TRIG_CTRL.BLOCK_DMA_WITHIN_FRAME
2021         * to ask H/W to wait until cmd mdp is idle. S/W wait is not needed.
2022         * How to handle the old versions? Wait for mdp cmd done?
2023         */
2024
2025        /*
2026         * The mdss interrupt is generated in the mdp core clock domain,
2027         * so the mdp clock must be enabled to receive the dsi interrupt.
2028         */
2029        pm_runtime_get_sync(&msm_host->pdev->dev);
2030        cfg_hnd->ops->link_clk_set_rate(msm_host);
2031        cfg_hnd->ops->link_clk_enable(msm_host);
2032
2033        /* TODO: vote for bus bandwidth */
2034
2035        if (!(msg->flags & MIPI_DSI_MSG_USE_LPM))
2036                dsi_set_tx_power_mode(0, msm_host);
2037
2038        msm_host->dma_cmd_ctrl_restore = dsi_read(msm_host, REG_DSI_CTRL);
2039        dsi_write(msm_host, REG_DSI_CTRL,
2040                msm_host->dma_cmd_ctrl_restore |
2041                DSI_CTRL_CMD_MODE_EN |
2042                DSI_CTRL_ENABLE);
2043        dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_DMA_DONE, 1);
2044
2045        return 0;
2046}
2047
2048void msm_dsi_host_xfer_restore(struct mipi_dsi_host *host,
2049                                const struct mipi_dsi_msg *msg)
2050{
2051        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2052        const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
2053
2054        dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_DMA_DONE, 0);
2055        dsi_write(msm_host, REG_DSI_CTRL, msm_host->dma_cmd_ctrl_restore);
2056
2057        if (!(msg->flags & MIPI_DSI_MSG_USE_LPM))
2058                dsi_set_tx_power_mode(1, msm_host);
2059
2060        /* TODO: unvote for bus bandwidth */
2061
2062        cfg_hnd->ops->link_clk_disable(msm_host);
2063        pm_runtime_put_autosuspend(&msm_host->pdev->dev);
2064}
2065
2066int msm_dsi_host_cmd_tx(struct mipi_dsi_host *host,
2067                                const struct mipi_dsi_msg *msg)
2068{
2069        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2070
2071        return dsi_cmds2buf_tx(msm_host, msg);
2072}
2073
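/*
 * Read back a response from the peripheral.  Responses of up to 2 bytes
 * use a short read; longer reads are split into chunks sized to the
 * 16-byte rx fifo.  A MAXIMUM_RETURN_PACKET_SIZE command precedes every
 * chunk; the first chunk carries 10 payload bytes (plus the 4-byte header
 * and 2-byte crc), later chunks carry 14 since the dcs header is only
 * present the first time around.  The chunk sizes follow from the
 * data_byte/rx_byte arithmetic below; the fifo draining itself is done by
 * dsi_cmd_dma_rx().
 */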
2074int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
2075                                const struct mipi_dsi_msg *msg)
2076{
2077        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2078        const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
2079        int data_byte, rx_byte, dlen, end;
2080        int short_response, diff, pkt_size, ret = 0;
2081        char cmd;
2082        int rlen = msg->rx_len;
2083        u8 *buf;
2084
2085        if (rlen <= 2) {
2086                short_response = 1;
2087                pkt_size = rlen;
2088                rx_byte = 4;
2089        } else {
2090                short_response = 0;
2091                data_byte = 10; /* first read */
2092                if (rlen < data_byte)
2093                        pkt_size = rlen;
2094                else
2095                        pkt_size = data_byte;
2096                rx_byte = data_byte + 6; /* 4 header + 2 crc */
2097        }
2098
2099        buf = msm_host->rx_buf;
2100        end = 0;
2101        while (!end) {
2102                u8 tx[2] = {pkt_size & 0xff, pkt_size >> 8};
2103                struct mipi_dsi_msg max_pkt_size_msg = {
2104                        .channel = msg->channel,
2105                        .type = MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE,
2106                        .tx_len = 2,
2107                        .tx_buf = tx,
2108                };
2109
2110                DBG("rlen=%d pkt_size=%d rx_byte=%d",
2111                        rlen, pkt_size, rx_byte);
2112
2113                ret = dsi_cmds2buf_tx(msm_host, &max_pkt_size_msg);
2114                if (ret < 2) {
2115                        pr_err("%s: Set max pkt size failed, %d\n",
2116                                __func__, ret);
2117                        return -EINVAL;
2118                }
2119
2120                if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) &&
2121                        (cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V1_1)) {
2122                        /* Clear the RDBK_DATA registers */
2123                        dsi_write(msm_host, REG_DSI_RDBK_DATA_CTRL,
2124                                        DSI_RDBK_DATA_CTRL_CLR);
2125                        wmb(); /* make sure the RDBK registers are cleared */
2126                        dsi_write(msm_host, REG_DSI_RDBK_DATA_CTRL, 0);
2127                        wmb(); /* release cleared status before transfer */
2128                }
2129
2130                ret = dsi_cmds2buf_tx(msm_host, msg);
2131                if (ret < msg->tx_len) {
2132                        pr_err("%s: Read cmd Tx failed, %d\n", __func__, ret);
2133                        return ret;
2134                }
2135
2136                /*
2137                 * Once the cmd_dma_done interrupt is received, the return data
2138                 * from the client is ready and already stored in the RDBK_DATA
2139                 * registers.
2140                 * Since the rx fifo is 16 bytes, the dcs header is kept only on
2141                 * the first iteration; after that it is lost during the shift.
2142                 */
2143                dlen = dsi_cmd_dma_rx(msm_host, buf, rx_byte, pkt_size);
2144
2145                if (dlen <= 0)
2146                        return 0;
2147
2148                if (short_response)
2149                        break;
2150
2151                if (rlen <= data_byte) {
2152                        diff = data_byte - rlen;
2153                        end = 1;
2154                } else {
2155                        diff = 0;
2156                        rlen -= data_byte;
2157                }
2158
2159                if (!end) {
2160                        dlen -= 2; /* 2 crc */
2161                        dlen -= diff;
2162                        buf += dlen;    /* next start position */
2163                        data_byte = 14; /* NOT first read */
2164                        if (rlen < data_byte)
2165                                pkt_size += rlen;
2166                        else
2167                                pkt_size += data_byte;
2168                        DBG("buf=%p dlen=%d diff=%d", buf, dlen, diff);
2169                }
2170        }
2171
2172        /*
2173         * For a single long read, if the requested rlen < 10,
2174         * we need to shift the start position of the rx
2175         * data buffer to skip the bytes which are not
2176         * updated.
2177         */
2178        if (pkt_size < 10 && !short_response)
2179                buf = msm_host->rx_buf + (10 - rlen);
2180        else
2181                buf = msm_host->rx_buf;
2182
2183        cmd = buf[0];
2184        switch (cmd) {
2185        case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
2186                pr_err("%s: rx ACK_ERR_PACKAGE\n", __func__);
2187                ret = 0;
2188                break;
2189        case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
2190        case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
2191                ret = dsi_short_read1_resp(buf, msg);
2192                break;
2193        case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
2194        case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
2195                ret = dsi_short_read2_resp(buf, msg);
2196                break;
2197        case MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE:
2198        case MIPI_DSI_RX_DCS_LONG_READ_RESPONSE:
2199                ret = dsi_long_read_resp(buf, msg);
2200                break;
2201        default:
2202                pr_warn("%s:Invalid response cmd\n", __func__);
2203                ret = 0;
2204        }
2205
2206        return ret;
2207}
2208
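/*
 * Kick off a command DMA transfer: program the DMA base address and
 * length and hit the trigger register.  This is normally called after
 * msm_dsi_host_xfer_prepare() has enabled the link clocks and unmasked
 * DSI_IRQ_CMD_DMA_DONE, so completion is signalled through dma_comp by
 * dsi_host_irq().
 */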
2209void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, u32 dma_base,
2210                                  u32 len)
2211{
2212        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2213
2214        dsi_write(msm_host, REG_DSI_DMA_BASE, dma_base);
2215        dsi_write(msm_host, REG_DSI_DMA_LEN, len);
2216        dsi_write(msm_host, REG_DSI_TRIG_DMA, 1);
2217
2218        /* Make sure trigger happens */
2219        wmb();
2220}
2221
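/*
 * Reparent the host's byte and pixel clock sources (and the dsi/esc
 * sources, when the platform provides them) to the clocks supplied by the
 * DSI PHY.  Not being able to get a provider from the PHY is not treated
 * as an error; the current parents are simply left in place.
 */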
2222int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
2223        struct msm_dsi_phy *src_phy)
2224{
2225        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2226        struct clk *byte_clk_provider, *pixel_clk_provider;
2227        int ret;
2228
2229        ret = msm_dsi_phy_get_clk_provider(src_phy,
2230                                &byte_clk_provider, &pixel_clk_provider);
2231        if (ret) {
2232                pr_info("%s: can't get provider from pll, don't set parent\n",
2233                        __func__);
2234                return 0;
2235        }
2236
2237        ret = clk_set_parent(msm_host->byte_clk_src, byte_clk_provider);
2238        if (ret) {
2239                pr_err("%s: can't set parent to byte_clk_src. ret=%d\n",
2240                        __func__, ret);
2241                goto exit;
2242        }
2243
2244        ret = clk_set_parent(msm_host->pixel_clk_src, pixel_clk_provider);
2245        if (ret) {
2246                pr_err("%s: can't set parent to pixel_clk_src. ret=%d\n",
2247                        __func__, ret);
2248                goto exit;
2249        }
2250
2251        if (msm_host->dsi_clk_src) {
2252                ret = clk_set_parent(msm_host->dsi_clk_src, pixel_clk_provider);
2253                if (ret) {
2254                        pr_err("%s: can't set parent to dsi_clk_src. ret=%d\n",
2255                                __func__, ret);
2256                        goto exit;
2257                }
2258        }
2259
2260        if (msm_host->esc_clk_src) {
2261                ret = clk_set_parent(msm_host->esc_clk_src, byte_clk_provider);
2262                if (ret) {
2263                        pr_err("%s: can't set parent to esc_clk_src. ret=%d\n",
2264                                __func__, ret);
2265                        goto exit;
2266                }
2267        }
2268
2269exit:
2270        return ret;
2271}
2272
2273void msm_dsi_host_reset_phy(struct mipi_dsi_host *host)
2274{
2275        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2276
2277        DBG("");
2278        dsi_write(msm_host, REG_DSI_PHY_RESET, DSI_PHY_RESET_RESET);
2279        /* Make sure fully reset */
2280        wmb();
2281        udelay(1000);
2282        dsi_write(msm_host, REG_DSI_PHY_RESET, 0);
2283        udelay(100);
2284}
2285
2286void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host,
2287                        struct msm_dsi_phy_clk_request *clk_req,
2288                        bool is_dual_dsi)
2289{
2290        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2291        const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
2292        int ret;
2293
2294        ret = cfg_hnd->ops->calc_clk_rate(msm_host, is_dual_dsi);
2295        if (ret) {
2296                pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
2297                return;
2298        }
2299
2300        clk_req->bitclk_rate = msm_host->byte_clk_rate * 8;
2301        clk_req->escclk_rate = msm_host->esc_clk_rate;
2302}
2303
2304int msm_dsi_host_enable(struct mipi_dsi_host *host)
2305{
2306        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2307
2308        dsi_op_mode_config(msm_host,
2309                !!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), true);
2310
2311        /* TODO: clock should be turned off for command mode,
2312         * and only turned on before MDP START.
2313         * This part of code should be enabled once mdp driver support it.
2314         * This part of the code should be enabled once the mdp driver supports it.
2315        /* if (msm_panel->mode == MSM_DSI_CMD_MODE) {
2316         *      dsi_link_clk_disable(msm_host);
2317         *      pm_runtime_put_autosuspend(&msm_host->pdev->dev);
2318         * }
2319         */
2320        msm_host->enabled = true;
2321        return 0;
2322}
2323
2324int msm_dsi_host_disable(struct mipi_dsi_host *host)
2325{
2326        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2327
2328        msm_host->enabled = false;
2329        dsi_op_mode_config(msm_host,
2330                !!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), false);
2331
2332        /* Since we have disabled INTF, the video engine won't stop on its
2333         * own, which leaves the cmd engine blocked.
2334         * Reset to disable the video engine so that we can send off commands.
2335         */
2336        dsi_sw_reset(msm_host);
2337
2338        return 0;
2339}
2340
2341static void msm_dsi_sfpb_config(struct msm_dsi_host *msm_host, bool enable)
2342{
2343        enum sfpb_ahb_arb_master_port_en en;
2344
2345        if (!msm_host->sfpb)
2346                return;
2347
2348        en = enable ? SFPB_MASTER_PORT_ENABLE : SFPB_MASTER_PORT_DISABLE;
2349
2350        regmap_update_bits(msm_host->sfpb, REG_SFPB_GPREG,
2351                        SFPB_GPREG_MASTER_PORT_EN__MASK,
2352                        SFPB_GPREG_MASTER_PORT_EN(en));
2353}
2354
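/*
 * Power-up sequence, serialized by dev_mutex: enable the sfpb master
 * port, the regulators and the link clocks (via runtime PM), select the
 * default pinctrl state, program the timing registers, perform a software
 * reset, enable the controller and finally raise the optional disp_en
 * gpio.  msm_dsi_host_power_off() below unwinds these steps in roughly
 * reverse order.
 */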
2355int msm_dsi_host_power_on(struct mipi_dsi_host *host,
2356                        struct msm_dsi_phy_shared_timings *phy_shared_timings,
2357                        bool is_dual_dsi)
2358{
2359        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2360        const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
2361        int ret = 0;
2362
2363        mutex_lock(&msm_host->dev_mutex);
2364        if (msm_host->power_on) {
2365                DBG("dsi host already on");
2366                goto unlock_ret;
2367        }
2368
2369        msm_dsi_sfpb_config(msm_host, true);
2370
2371        ret = dsi_host_regulator_enable(msm_host);
2372        if (ret) {
2373                pr_err("%s: failed to enable vregs, ret=%d\n",
2374                        __func__, ret);
2375                goto unlock_ret;
2376        }
2377
2378        pm_runtime_get_sync(&msm_host->pdev->dev);
2379        ret = cfg_hnd->ops->link_clk_set_rate(msm_host);
2380        if (!ret)
2381                ret = cfg_hnd->ops->link_clk_enable(msm_host);
2382        if (ret) {
2383                pr_err("%s: failed to enable link clocks. ret=%d\n",
2384                       __func__, ret);
2385                goto fail_disable_reg;
2386        }
2387
2388        ret = pinctrl_pm_select_default_state(&msm_host->pdev->dev);
2389        if (ret) {
2390                pr_err("%s: failed to set pinctrl default state, %d\n",
2391                        __func__, ret);
2392                goto fail_disable_clk;
2393        }
2394
2395        dsi_timing_setup(msm_host, is_dual_dsi);
2396        dsi_sw_reset(msm_host);
2397        dsi_ctrl_config(msm_host, true, phy_shared_timings);
2398
2399        if (msm_host->disp_en_gpio)
2400                gpiod_set_value(msm_host->disp_en_gpio, 1);
2401
2402        msm_host->power_on = true;
2403        mutex_unlock(&msm_host->dev_mutex);
2404
2405        return 0;
2406
2407fail_disable_clk:
2408        cfg_hnd->ops->link_clk_disable(msm_host);
2409        pm_runtime_put_autosuspend(&msm_host->pdev->dev);
2410fail_disable_reg:
2411        dsi_host_regulator_disable(msm_host);
2412unlock_ret:
2413        mutex_unlock(&msm_host->dev_mutex);
2414        return ret;
2415}
2416
2417int msm_dsi_host_power_off(struct mipi_dsi_host *host)
2418{
2419        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2420        const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
2421
2422        mutex_lock(&msm_host->dev_mutex);
2423        if (!msm_host->power_on) {
2424                DBG("dsi host already off");
2425                goto unlock_ret;
2426        }
2427
2428        dsi_ctrl_config(msm_host, false, NULL);
2429
2430        if (msm_host->disp_en_gpio)
2431                gpiod_set_value(msm_host->disp_en_gpio, 0);
2432
2433        pinctrl_pm_select_sleep_state(&msm_host->pdev->dev);
2434
2435        cfg_hnd->ops->link_clk_disable(msm_host);
2436        pm_runtime_put_autosuspend(&msm_host->pdev->dev);
2437
2438        dsi_host_regulator_disable(msm_host);
2439
2440        msm_dsi_sfpb_config(msm_host, false);
2441
2442        DBG("-");
2443
2444        msm_host->power_on = false;
2445
2446unlock_ret:
2447        mutex_unlock(&msm_host->dev_mutex);
2448        return 0;
2449}
2450
2451int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
2452                                  const struct drm_display_mode *mode)
2453{
2454        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2455
2456        if (msm_host->mode) {
2457                drm_mode_destroy(msm_host->dev, msm_host->mode);
2458                msm_host->mode = NULL;
2459        }
2460
2461        msm_host->mode = drm_mode_duplicate(msm_host->dev, mode);
2462        if (!msm_host->mode) {
2463                pr_err("%s: cannot duplicate mode\n", __func__);
2464                return -ENOMEM;
2465        }
2466
2467        return 0;
2468}
2469
2470struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host)
2471{
2472        return of_drm_find_panel(to_msm_dsi_host(host)->device_node);
2473}
2474
2475unsigned long msm_dsi_host_get_mode_flags(struct mipi_dsi_host *host)
2476{
2477        return to_msm_dsi_host(host)->mode_flags;
2478}
2479
2480struct drm_bridge *msm_dsi_host_get_bridge(struct mipi_dsi_host *host)
2481{
2482        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2483
2484        return of_drm_find_bridge(msm_host->device_node);
2485}
2486
2487void msm_dsi_host_snapshot(struct msm_disp_state *disp_state, struct mipi_dsi_host *host)
2488{
2489        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2490
2491        pm_runtime_get_sync(&msm_host->pdev->dev);
2492
2493        msm_disp_snapshot_add_block(disp_state, msm_host->ctrl_size,
2494                        msm_host->ctrl_base, "dsi%d_ctrl", msm_host->id);
2495
2496        pm_runtime_put_sync(&msm_host->pdev->dev);
2497}
2498