linux/drivers/gpu/drm/msm/dsi/dsi_host.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (c) 2015, The Linux Foundation. All rights reserved.
   4 */
   5
   6#include <linux/clk.h>
   7#include <linux/delay.h>
   8#include <linux/dma-mapping.h>
   9#include <linux/err.h>
  10#include <linux/gpio/consumer.h>
  11#include <linux/interrupt.h>
  12#include <linux/mfd/syscon.h>
  13#include <linux/of_device.h>
  14#include <linux/of_graph.h>
  15#include <linux/of_irq.h>
  16#include <linux/pinctrl/consumer.h>
  17#include <linux/regmap.h>
  18#include <linux/regulator/consumer.h>
  19#include <linux/spinlock.h>
  20
  21#include <video/mipi_display.h>
  22
  23#include "dsi.h"
  24#include "dsi.xml.h"
  25#include "sfpb.xml.h"
  26#include "dsi_cfg.h"
  27#include "msm_kms.h"
  28
  29#define DSI_RESET_TOGGLE_DELAY_MS 20
  30
  31static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor)
  32{
  33        u32 ver;
  34
  35        if (!major || !minor)
  36                return -EINVAL;
  37
  38        /*
  39         * From DSI6G(v3), addition of a 6G_HW_VERSION register at offset 0
  40         * makes all other registers 4-byte shifted down.
  41         *
   42         * In order to distinguish between DSI6G(v3) and beyond, and DSIv2 and
   43         * older, we read the DSI_VERSION register without any shift (offset
   44         * 0x1f0). In the case of DSIv2, this has to be a non-zero value. In
  45         * the case of DSI6G, this has to be zero (the offset points to a
  46         * scratch register which we never touch)
  47         */
  48
  49        ver = msm_readl(base + REG_DSI_VERSION);
  50        if (ver) {
  51                /* older dsi host, there is no register shift */
  52                ver = FIELD(ver, DSI_VERSION_MAJOR);
  53                if (ver <= MSM_DSI_VER_MAJOR_V2) {
  54                        /* old versions */
  55                        *major = ver;
  56                        *minor = 0;
  57                        return 0;
  58                } else {
  59                        return -EINVAL;
  60                }
  61        } else {
  62                /*
  63                 * newer host, offset 0 has 6G_HW_VERSION, the rest of the
  64                 * registers are shifted down, read DSI_VERSION again with
  65                 * the shifted offset
  66                 */
  67                ver = msm_readl(base + DSI_6G_REG_SHIFT + REG_DSI_VERSION);
  68                ver = FIELD(ver, DSI_VERSION_MAJOR);
  69                if (ver == MSM_DSI_VER_MAJOR_6G) {
  70                        /* 6G version */
  71                        *major = ver;
  72                        *minor = msm_readl(base + REG_DSI_6G_HW_VERSION);
  73                        return 0;
  74                } else {
  75                        return -EINVAL;
  76                }
  77        }
  78}
  79
  80#define DSI_ERR_STATE_ACK                       0x0000
  81#define DSI_ERR_STATE_TIMEOUT                   0x0001
  82#define DSI_ERR_STATE_DLN0_PHY                  0x0002
  83#define DSI_ERR_STATE_FIFO                      0x0004
  84#define DSI_ERR_STATE_MDP_FIFO_UNDERFLOW        0x0008
  85#define DSI_ERR_STATE_INTERLEAVE_OP_CONTENTION  0x0010
  86#define DSI_ERR_STATE_PLL_UNLOCKED              0x0020
  87
  88#define DSI_CLK_CTRL_ENABLE_CLKS        \
  89                (DSI_CLK_CTRL_AHBS_HCLK_ON | DSI_CLK_CTRL_AHBM_SCLK_ON | \
  90                DSI_CLK_CTRL_PCLK_ON | DSI_CLK_CTRL_DSICLK_ON | \
  91                DSI_CLK_CTRL_BYTECLK_ON | DSI_CLK_CTRL_ESCCLK_ON | \
  92                DSI_CLK_CTRL_FORCE_ON_DYN_AHBM_HCLK)
  93
  94struct msm_dsi_host {
  95        struct mipi_dsi_host base;
  96
  97        struct platform_device *pdev;
  98        struct drm_device *dev;
  99
 100        int id;
 101
 102        void __iomem *ctrl_base;
 103        struct regulator_bulk_data supplies[DSI_DEV_REGULATOR_MAX];
 104
 105        struct clk *bus_clks[DSI_BUS_CLK_MAX];
 106
 107        struct clk *byte_clk;
 108        struct clk *esc_clk;
 109        struct clk *pixel_clk;
 110        struct clk *byte_clk_src;
 111        struct clk *pixel_clk_src;
 112        struct clk *byte_intf_clk;
 113
 114        u32 byte_clk_rate;
 115        u32 pixel_clk_rate;
 116        u32 esc_clk_rate;
 117
 118        /* DSI v2 specific clocks */
 119        struct clk *src_clk;
 120        struct clk *esc_clk_src;
 121        struct clk *dsi_clk_src;
 122
 123        u32 src_clk_rate;
 124
 125        struct gpio_desc *disp_en_gpio;
 126        struct gpio_desc *te_gpio;
 127
 128        const struct msm_dsi_cfg_handler *cfg_hnd;
 129
 130        struct completion dma_comp;
 131        struct completion video_comp;
 132        struct mutex dev_mutex;
 133        struct mutex cmd_mutex;
 134        spinlock_t intr_lock; /* Protect interrupt ctrl register */
 135
 136        u32 err_work_state;
 137        struct work_struct err_work;
 138        struct work_struct hpd_work;
 139        struct workqueue_struct *workqueue;
 140
  141        /* DSI 6G TX buffer */
 142        struct drm_gem_object *tx_gem_obj;
 143
 144        /* DSI v2 TX buffer */
 145        void *tx_buf;
 146        dma_addr_t tx_buf_paddr;
 147
 148        int tx_size;
 149
 150        u8 *rx_buf;
 151
 152        struct regmap *sfpb;
 153
 154        struct drm_display_mode *mode;
 155
 156        /* connected device info */
 157        struct device_node *device_node;
 158        unsigned int channel;
 159        unsigned int lanes;
 160        enum mipi_dsi_pixel_format format;
 161        unsigned long mode_flags;
 162
 163        /* lane data parsed via DT */
 164        int dlane_swap;
 165        int num_data_lanes;
 166
 167        u32 dma_cmd_ctrl_restore;
 168
 169        bool registered;
 170        bool power_on;
 171        bool enabled;
 172        int irq;
 173};
 174
 175static u32 dsi_get_bpp(const enum mipi_dsi_pixel_format fmt)
 176{
 177        switch (fmt) {
 178        case MIPI_DSI_FMT_RGB565:               return 16;
 179        case MIPI_DSI_FMT_RGB666_PACKED:        return 18;
 180        case MIPI_DSI_FMT_RGB666:
 181        case MIPI_DSI_FMT_RGB888:
 182        default:                                return 24;
 183        }
 184}
 185
 186static inline u32 dsi_read(struct msm_dsi_host *msm_host, u32 reg)
 187{
 188        return msm_readl(msm_host->ctrl_base + reg);
 189}
 190static inline void dsi_write(struct msm_dsi_host *msm_host, u32 reg, u32 data)
 191{
 192        msm_writel(data, msm_host->ctrl_base + reg);
 193}
 194
 195static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host);
 196static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host);
 197
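     /*
      * Probe the controller hardware version and look up the matching
      * config/ops table. The GDSC and AHB clock are enabled only for the
      * duration of the version read.
      */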
 198static const struct msm_dsi_cfg_handler *dsi_get_config(
 199                                                struct msm_dsi_host *msm_host)
 200{
 201        const struct msm_dsi_cfg_handler *cfg_hnd = NULL;
 202        struct device *dev = &msm_host->pdev->dev;
 203        struct regulator *gdsc_reg;
 204        struct clk *ahb_clk;
 205        int ret;
 206        u32 major = 0, minor = 0;
 207
 208        gdsc_reg = regulator_get(dev, "gdsc");
 209        if (IS_ERR(gdsc_reg)) {
 210                pr_err("%s: cannot get gdsc\n", __func__);
 211                goto exit;
 212        }
 213
 214        ahb_clk = msm_clk_get(msm_host->pdev, "iface");
 215        if (IS_ERR(ahb_clk)) {
 216                pr_err("%s: cannot get interface clock\n", __func__);
 217                goto put_gdsc;
 218        }
 219
 220        pm_runtime_get_sync(dev);
 221
 222        ret = regulator_enable(gdsc_reg);
 223        if (ret) {
 224                pr_err("%s: unable to enable gdsc\n", __func__);
 225                goto put_gdsc;
 226        }
 227
 228        ret = clk_prepare_enable(ahb_clk);
 229        if (ret) {
 230                pr_err("%s: unable to enable ahb_clk\n", __func__);
 231                goto disable_gdsc;
 232        }
 233
 234        ret = dsi_get_version(msm_host->ctrl_base, &major, &minor);
 235        if (ret) {
 236                pr_err("%s: Invalid version\n", __func__);
 237                goto disable_clks;
 238        }
 239
 240        cfg_hnd = msm_dsi_cfg_get(major, minor);
 241
 242        DBG("%s: Version %x:%x\n", __func__, major, minor);
 243
 244disable_clks:
 245        clk_disable_unprepare(ahb_clk);
 246disable_gdsc:
 247        regulator_disable(gdsc_reg);
 248        pm_runtime_put_sync(dev);
 249put_gdsc:
 250        regulator_put(gdsc_reg);
 251exit:
 252        return cfg_hnd;
 253}
 254
 255static inline struct msm_dsi_host *to_msm_dsi_host(struct mipi_dsi_host *host)
 256{
 257        return container_of(host, struct msm_dsi_host, base);
 258}
 259
 260static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host)
 261{
 262        struct regulator_bulk_data *s = msm_host->supplies;
 263        const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs;
 264        int num = msm_host->cfg_hnd->cfg->reg_cfg.num;
 265        int i;
 266
 267        DBG("");
 268        for (i = num - 1; i >= 0; i--)
 269                if (regs[i].disable_load >= 0)
 270                        regulator_set_load(s[i].consumer,
 271                                           regs[i].disable_load);
 272
 273        regulator_bulk_disable(num, s);
 274}
 275
 276static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host)
 277{
 278        struct regulator_bulk_data *s = msm_host->supplies;
 279        const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs;
 280        int num = msm_host->cfg_hnd->cfg->reg_cfg.num;
 281        int ret, i;
 282
 283        DBG("");
 284        for (i = 0; i < num; i++) {
 285                if (regs[i].enable_load >= 0) {
 286                        ret = regulator_set_load(s[i].consumer,
 287                                                 regs[i].enable_load);
 288                        if (ret < 0) {
 289                                pr_err("regulator %d set op mode failed, %d\n",
 290                                        i, ret);
 291                                goto fail;
 292                        }
 293                }
 294        }
 295
 296        ret = regulator_bulk_enable(num, s);
 297        if (ret < 0) {
 298                pr_err("regulator enable failed, %d\n", ret);
 299                goto fail;
 300        }
 301
 302        return 0;
 303
 304fail:
 305        for (i--; i >= 0; i--)
 306                regulator_set_load(s[i].consumer, regs[i].disable_load);
 307        return ret;
 308}
 309
 310static int dsi_regulator_init(struct msm_dsi_host *msm_host)
 311{
 312        struct regulator_bulk_data *s = msm_host->supplies;
 313        const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs;
 314        int num = msm_host->cfg_hnd->cfg->reg_cfg.num;
 315        int i, ret;
 316
 317        for (i = 0; i < num; i++)
 318                s[i].supply = regs[i].name;
 319
 320        ret = devm_regulator_bulk_get(&msm_host->pdev->dev, num, s);
 321        if (ret < 0) {
 322                pr_err("%s: failed to init regulator, ret=%d\n",
 323                                                __func__, ret);
 324                return ret;
 325        }
 326
 327        return 0;
 328}
 329
 330int dsi_clk_init_v2(struct msm_dsi_host *msm_host)
 331{
 332        struct platform_device *pdev = msm_host->pdev;
 333        int ret = 0;
 334
 335        msm_host->src_clk = msm_clk_get(pdev, "src");
 336
 337        if (IS_ERR(msm_host->src_clk)) {
 338                ret = PTR_ERR(msm_host->src_clk);
 339                pr_err("%s: can't find src clock. ret=%d\n",
 340                        __func__, ret);
 341                msm_host->src_clk = NULL;
 342                return ret;
 343        }
 344
 345        msm_host->esc_clk_src = clk_get_parent(msm_host->esc_clk);
 346        if (!msm_host->esc_clk_src) {
 347                ret = -ENODEV;
 348                pr_err("%s: can't get esc clock parent. ret=%d\n",
 349                        __func__, ret);
 350                return ret;
 351        }
 352
 353        msm_host->dsi_clk_src = clk_get_parent(msm_host->src_clk);
 354        if (!msm_host->dsi_clk_src) {
 355                ret = -ENODEV;
 356                pr_err("%s: can't get src clock parent. ret=%d\n",
 357                        __func__, ret);
 358        }
 359
 360        return ret;
 361}
 362
 363int dsi_clk_init_6g_v2(struct msm_dsi_host *msm_host)
 364{
 365        struct platform_device *pdev = msm_host->pdev;
 366        int ret = 0;
 367
 368        msm_host->byte_intf_clk = msm_clk_get(pdev, "byte_intf");
 369        if (IS_ERR(msm_host->byte_intf_clk)) {
 370                ret = PTR_ERR(msm_host->byte_intf_clk);
 371                pr_err("%s: can't find byte_intf clock. ret=%d\n",
 372                        __func__, ret);
 373        }
 374
 375        return ret;
 376}
 377
 378static int dsi_clk_init(struct msm_dsi_host *msm_host)
 379{
 380        struct platform_device *pdev = msm_host->pdev;
 381        const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
 382        const struct msm_dsi_config *cfg = cfg_hnd->cfg;
 383        int i, ret = 0;
 384
 385        /* get bus clocks */
 386        for (i = 0; i < cfg->num_bus_clks; i++) {
 387                msm_host->bus_clks[i] = msm_clk_get(pdev,
 388                                                cfg->bus_clk_names[i]);
 389                if (IS_ERR(msm_host->bus_clks[i])) {
 390                        ret = PTR_ERR(msm_host->bus_clks[i]);
 391                        pr_err("%s: Unable to get %s clock, ret = %d\n",
 392                                __func__, cfg->bus_clk_names[i], ret);
 393                        goto exit;
 394                }
 395        }
 396
 397        /* get link and source clocks */
 398        msm_host->byte_clk = msm_clk_get(pdev, "byte");
 399        if (IS_ERR(msm_host->byte_clk)) {
 400                ret = PTR_ERR(msm_host->byte_clk);
 401                pr_err("%s: can't find dsi_byte clock. ret=%d\n",
 402                        __func__, ret);
 403                msm_host->byte_clk = NULL;
 404                goto exit;
 405        }
 406
 407        msm_host->pixel_clk = msm_clk_get(pdev, "pixel");
 408        if (IS_ERR(msm_host->pixel_clk)) {
 409                ret = PTR_ERR(msm_host->pixel_clk);
 410                pr_err("%s: can't find dsi_pixel clock. ret=%d\n",
 411                        __func__, ret);
 412                msm_host->pixel_clk = NULL;
 413                goto exit;
 414        }
 415
 416        msm_host->esc_clk = msm_clk_get(pdev, "core");
 417        if (IS_ERR(msm_host->esc_clk)) {
 418                ret = PTR_ERR(msm_host->esc_clk);
 419                pr_err("%s: can't find dsi_esc clock. ret=%d\n",
 420                        __func__, ret);
 421                msm_host->esc_clk = NULL;
 422                goto exit;
 423        }
 424
 425        msm_host->byte_clk_src = clk_get_parent(msm_host->byte_clk);
 426        if (IS_ERR(msm_host->byte_clk_src)) {
 427                ret = PTR_ERR(msm_host->byte_clk_src);
 428                pr_err("%s: can't find byte_clk clock. ret=%d\n", __func__, ret);
 429                goto exit;
 430        }
 431
 432        msm_host->pixel_clk_src = clk_get_parent(msm_host->pixel_clk);
 433        if (IS_ERR(msm_host->pixel_clk_src)) {
 434                ret = PTR_ERR(msm_host->pixel_clk_src);
 435                pr_err("%s: can't find pixel_clk clock. ret=%d\n", __func__, ret);
 436                goto exit;
 437        }
 438
 439        if (cfg_hnd->ops->clk_init_ver)
 440                ret = cfg_hnd->ops->clk_init_ver(msm_host);
 441exit:
 442        return ret;
 443}
 444
 445static int dsi_bus_clk_enable(struct msm_dsi_host *msm_host)
 446{
 447        const struct msm_dsi_config *cfg = msm_host->cfg_hnd->cfg;
 448        int i, ret;
 449
 450        DBG("id=%d", msm_host->id);
 451
 452        for (i = 0; i < cfg->num_bus_clks; i++) {
 453                ret = clk_prepare_enable(msm_host->bus_clks[i]);
 454                if (ret) {
 455                        pr_err("%s: failed to enable bus clock %d ret %d\n",
 456                                __func__, i, ret);
 457                        goto err;
 458                }
 459        }
 460
 461        return 0;
 462err:
  463        for (i--; i >= 0; i--)
 464                clk_disable_unprepare(msm_host->bus_clks[i]);
 465
 466        return ret;
 467}
 468
 469static void dsi_bus_clk_disable(struct msm_dsi_host *msm_host)
 470{
 471        const struct msm_dsi_config *cfg = msm_host->cfg_hnd->cfg;
 472        int i;
 473
 474        DBG("");
 475
 476        for (i = cfg->num_bus_clks - 1; i >= 0; i--)
 477                clk_disable_unprepare(msm_host->bus_clks[i]);
 478}
 479
 480int msm_dsi_runtime_suspend(struct device *dev)
 481{
 482        struct platform_device *pdev = to_platform_device(dev);
 483        struct msm_dsi *msm_dsi = platform_get_drvdata(pdev);
 484        struct mipi_dsi_host *host = msm_dsi->host;
 485        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
 486
 487        if (!msm_host->cfg_hnd)
 488                return 0;
 489
 490        dsi_bus_clk_disable(msm_host);
 491
 492        return 0;
 493}
 494
 495int msm_dsi_runtime_resume(struct device *dev)
 496{
 497        struct platform_device *pdev = to_platform_device(dev);
 498        struct msm_dsi *msm_dsi = platform_get_drvdata(pdev);
 499        struct mipi_dsi_host *host = msm_dsi->host;
 500        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
 501
 502        if (!msm_host->cfg_hnd)
 503                return 0;
 504
 505        return dsi_bus_clk_enable(msm_host);
 506}
 507
 508int dsi_link_clk_set_rate_6g(struct msm_dsi_host *msm_host)
 509{
 510        int ret;
 511
 512        DBG("Set clk rates: pclk=%d, byteclk=%d",
 513                msm_host->mode->clock, msm_host->byte_clk_rate);
 514
 515        ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate);
 516        if (ret) {
 517                pr_err("%s: Failed to set rate byte clk, %d\n", __func__, ret);
 518                return ret;
 519        }
 520
 521        ret = clk_set_rate(msm_host->pixel_clk, msm_host->pixel_clk_rate);
 522        if (ret) {
 523                pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret);
 524                return ret;
 525        }
 526
 527        if (msm_host->byte_intf_clk) {
 528                ret = clk_set_rate(msm_host->byte_intf_clk,
 529                                   msm_host->byte_clk_rate / 2);
 530                if (ret) {
 531                        pr_err("%s: Failed to set rate byte intf clk, %d\n",
 532                               __func__, ret);
 533                        return ret;
 534                }
 535        }
 536
 537        return 0;
 538}
 539
 540
 541int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
 542{
 543        int ret;
 544
 545        ret = clk_prepare_enable(msm_host->esc_clk);
 546        if (ret) {
 547                pr_err("%s: Failed to enable dsi esc clk\n", __func__);
 548                goto error;
 549        }
 550
 551        ret = clk_prepare_enable(msm_host->byte_clk);
 552        if (ret) {
 553                pr_err("%s: Failed to enable dsi byte clk\n", __func__);
 554                goto byte_clk_err;
 555        }
 556
 557        ret = clk_prepare_enable(msm_host->pixel_clk);
 558        if (ret) {
 559                pr_err("%s: Failed to enable dsi pixel clk\n", __func__);
 560                goto pixel_clk_err;
 561        }
 562
 563        if (msm_host->byte_intf_clk) {
 564                ret = clk_prepare_enable(msm_host->byte_intf_clk);
 565                if (ret) {
 566                        pr_err("%s: Failed to enable byte intf clk\n",
 567                               __func__);
 568                        goto byte_intf_clk_err;
 569                }
 570        }
 571
 572        return 0;
 573
 574byte_intf_clk_err:
 575        clk_disable_unprepare(msm_host->pixel_clk);
 576pixel_clk_err:
 577        clk_disable_unprepare(msm_host->byte_clk);
 578byte_clk_err:
 579        clk_disable_unprepare(msm_host->esc_clk);
 580error:
 581        return ret;
 582}
 583
 584int dsi_link_clk_set_rate_v2(struct msm_dsi_host *msm_host)
 585{
 586        int ret;
 587
 588        DBG("Set clk rates: pclk=%d, byteclk=%d, esc_clk=%d, dsi_src_clk=%d",
 589                msm_host->mode->clock, msm_host->byte_clk_rate,
 590                msm_host->esc_clk_rate, msm_host->src_clk_rate);
 591
 592        ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate);
 593        if (ret) {
 594                pr_err("%s: Failed to set rate byte clk, %d\n", __func__, ret);
 595                return ret;
 596        }
 597
 598        ret = clk_set_rate(msm_host->esc_clk, msm_host->esc_clk_rate);
 599        if (ret) {
 600                pr_err("%s: Failed to set rate esc clk, %d\n", __func__, ret);
 601                return ret;
 602        }
 603
 604        ret = clk_set_rate(msm_host->src_clk, msm_host->src_clk_rate);
 605        if (ret) {
 606                pr_err("%s: Failed to set rate src clk, %d\n", __func__, ret);
 607                return ret;
 608        }
 609
 610        ret = clk_set_rate(msm_host->pixel_clk, msm_host->pixel_clk_rate);
 611        if (ret) {
 612                pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret);
 613                return ret;
 614        }
 615
 616        return 0;
 617}
 618
 619int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host)
 620{
 621        int ret;
 622
 623        ret = clk_prepare_enable(msm_host->byte_clk);
 624        if (ret) {
 625                pr_err("%s: Failed to enable dsi byte clk\n", __func__);
 626                goto error;
 627        }
 628
 629        ret = clk_prepare_enable(msm_host->esc_clk);
 630        if (ret) {
 631                pr_err("%s: Failed to enable dsi esc clk\n", __func__);
 632                goto esc_clk_err;
 633        }
 634
 635        ret = clk_prepare_enable(msm_host->src_clk);
 636        if (ret) {
 637                pr_err("%s: Failed to enable dsi src clk\n", __func__);
 638                goto src_clk_err;
 639        }
 640
 641        ret = clk_prepare_enable(msm_host->pixel_clk);
 642        if (ret) {
 643                pr_err("%s: Failed to enable dsi pixel clk\n", __func__);
 644                goto pixel_clk_err;
 645        }
 646
 647        return 0;
 648
 649pixel_clk_err:
 650        clk_disable_unprepare(msm_host->src_clk);
 651src_clk_err:
 652        clk_disable_unprepare(msm_host->esc_clk);
 653esc_clk_err:
 654        clk_disable_unprepare(msm_host->byte_clk);
 655error:
 656        return ret;
 657}
 658
 659void dsi_link_clk_disable_6g(struct msm_dsi_host *msm_host)
 660{
 661        clk_disable_unprepare(msm_host->esc_clk);
 662        clk_disable_unprepare(msm_host->pixel_clk);
 663        if (msm_host->byte_intf_clk)
 664                clk_disable_unprepare(msm_host->byte_intf_clk);
 665        clk_disable_unprepare(msm_host->byte_clk);
 666}
 667
 668void dsi_link_clk_disable_v2(struct msm_dsi_host *msm_host)
 669{
 670        clk_disable_unprepare(msm_host->pixel_clk);
 671        clk_disable_unprepare(msm_host->src_clk);
 672        clk_disable_unprepare(msm_host->esc_clk);
 673        clk_disable_unprepare(msm_host->byte_clk);
 674}
 675
 676static u32 dsi_get_pclk_rate(struct msm_dsi_host *msm_host, bool is_dual_dsi)
 677{
 678        struct drm_display_mode *mode = msm_host->mode;
 679        u32 pclk_rate;
 680
 681        pclk_rate = mode->clock * 1000;
 682
 683        /*
 684         * For dual DSI mode, the current DRM mode has the complete width of the
  685         * panel. Since the complete panel is driven by two DSI controllers,
  686         * the clock rates have to be split between the two DSI controllers.
 687         * Adjust the byte and pixel clock rates for each dsi host accordingly.
 688         */
 689        if (is_dual_dsi)
 690                pclk_rate /= 2;
 691
 692        return pclk_rate;
 693}
 694
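     /*
      * Derive the link clock rates from the pixel clock:
      * byte_clk = pclk * bpp / (8 * lanes).
      */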
 695static void dsi_calc_pclk(struct msm_dsi_host *msm_host, bool is_dual_dsi)
 696{
 697        u8 lanes = msm_host->lanes;
 698        u32 bpp = dsi_get_bpp(msm_host->format);
 699        u32 pclk_rate = dsi_get_pclk_rate(msm_host, is_dual_dsi);
 700        u64 pclk_bpp = (u64)pclk_rate * bpp;
 701
 702        if (lanes == 0) {
 703                pr_err("%s: forcing mdss_dsi lanes to 1\n", __func__);
 704                lanes = 1;
 705        }
 706
 707        do_div(pclk_bpp, (8 * lanes));
 708
 709        msm_host->pixel_clk_rate = pclk_rate;
 710        msm_host->byte_clk_rate = pclk_bpp;
 711
 712        DBG("pclk=%d, bclk=%d", msm_host->pixel_clk_rate,
 713                                msm_host->byte_clk_rate);
 714
 715}
 716
 717int dsi_calc_clk_rate_6g(struct msm_dsi_host *msm_host, bool is_dual_dsi)
 718{
 719        if (!msm_host->mode) {
 720                pr_err("%s: mode not set\n", __func__);
 721                return -EINVAL;
 722        }
 723
 724        dsi_calc_pclk(msm_host, is_dual_dsi);
 725        msm_host->esc_clk_rate = clk_get_rate(msm_host->esc_clk);
 726        return 0;
 727}
 728
 729int dsi_calc_clk_rate_v2(struct msm_dsi_host *msm_host, bool is_dual_dsi)
 730{
 731        u32 bpp = dsi_get_bpp(msm_host->format);
 732        u64 pclk_bpp;
 733        unsigned int esc_mhz, esc_div;
 734        unsigned long byte_mhz;
 735
 736        dsi_calc_pclk(msm_host, is_dual_dsi);
 737
 738        pclk_bpp = (u64)dsi_get_pclk_rate(msm_host, is_dual_dsi) * bpp;
 739        do_div(pclk_bpp, 8);
 740        msm_host->src_clk_rate = pclk_bpp;
 741
 742        /*
 743         * esc clock is byte clock followed by a 4 bit divider,
 744         * we need to find an escape clock frequency within the
  745         * MIPI DSI spec range within the maximum divider limit.
  746         * We iterate here over escape clock frequencies
  747         * from 20 MHz down to 5 MHz and pick the first one
 748         * that can be supported by our divider
 749         */
 750
 751        byte_mhz = msm_host->byte_clk_rate / 1000000;
 752
 753        for (esc_mhz = 20; esc_mhz >= 5; esc_mhz--) {
 754                esc_div = DIV_ROUND_UP(byte_mhz, esc_mhz);
 755
 756                /*
 757                 * TODO: Ideally, we shouldn't know what sort of divider
 758                 * is available in mmss_cc, we're just assuming that
 759                 * it'll always be a 4 bit divider. Need to come up with
 760                 * a better way here.
 761                 */
 762                if (esc_div >= 1 && esc_div <= 16)
 763                        break;
 764        }
 765
 766        if (esc_mhz < 5)
 767                return -EINVAL;
 768
 769        msm_host->esc_clk_rate = msm_host->byte_clk_rate / esc_div;
 770
 771        DBG("esc=%d, src=%d", msm_host->esc_clk_rate,
 772                msm_host->src_clk_rate);
 773
 774        return 0;
 775}
 776
 777static void dsi_intr_ctrl(struct msm_dsi_host *msm_host, u32 mask, int enable)
 778{
 779        u32 intr;
 780        unsigned long flags;
 781
 782        spin_lock_irqsave(&msm_host->intr_lock, flags);
 783        intr = dsi_read(msm_host, REG_DSI_INTR_CTRL);
 784
 785        if (enable)
 786                intr |= mask;
 787        else
 788                intr &= ~mask;
 789
 790        DBG("intr=%x enable=%d", intr, enable);
 791
 792        dsi_write(msm_host, REG_DSI_INTR_CTRL, intr);
 793        spin_unlock_irqrestore(&msm_host->intr_lock, flags);
 794}
 795
 796static inline enum dsi_traffic_mode dsi_get_traffic_mode(const u32 mode_flags)
 797{
 798        if (mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
 799                return BURST_MODE;
 800        else if (mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
 801                return NON_BURST_SYNCH_PULSE;
 802
 803        return NON_BURST_SYNCH_EVENT;
 804}
 805
 806static inline enum dsi_vid_dst_format dsi_get_vid_fmt(
 807                                const enum mipi_dsi_pixel_format mipi_fmt)
 808{
 809        switch (mipi_fmt) {
 810        case MIPI_DSI_FMT_RGB888:       return VID_DST_FORMAT_RGB888;
 811        case MIPI_DSI_FMT_RGB666:       return VID_DST_FORMAT_RGB666_LOOSE;
 812        case MIPI_DSI_FMT_RGB666_PACKED:        return VID_DST_FORMAT_RGB666;
 813        case MIPI_DSI_FMT_RGB565:       return VID_DST_FORMAT_RGB565;
 814        default:                        return VID_DST_FORMAT_RGB888;
 815        }
 816}
 817
 818static inline enum dsi_cmd_dst_format dsi_get_cmd_fmt(
 819                                const enum mipi_dsi_pixel_format mipi_fmt)
 820{
 821        switch (mipi_fmt) {
 822        case MIPI_DSI_FMT_RGB888:       return CMD_DST_FORMAT_RGB888;
 823        case MIPI_DSI_FMT_RGB666_PACKED:
 824        case MIPI_DSI_FMT_RGB666:       return CMD_DST_FORMAT_RGB666;
 825        case MIPI_DSI_FMT_RGB565:       return CMD_DST_FORMAT_RGB565;
 826        default:                        return CMD_DST_FORMAT_RGB888;
 827        }
 828}
 829
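     /*
      * Program the controller configuration (video/command mode packet
      * format, trigger and clock controls, lane setup) from the attached
      * device's mode flags and the PHY shared timings, then enable DSI_CTRL.
      */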
 830static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
 831                        struct msm_dsi_phy_shared_timings *phy_shared_timings)
 832{
 833        u32 flags = msm_host->mode_flags;
 834        enum mipi_dsi_pixel_format mipi_fmt = msm_host->format;
 835        const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
 836        u32 data = 0, lane_ctrl = 0;
 837
 838        if (!enable) {
 839                dsi_write(msm_host, REG_DSI_CTRL, 0);
 840                return;
 841        }
 842
 843        if (flags & MIPI_DSI_MODE_VIDEO) {
 844                if (flags & MIPI_DSI_MODE_VIDEO_HSE)
 845                        data |= DSI_VID_CFG0_PULSE_MODE_HSA_HE;
 846                if (flags & MIPI_DSI_MODE_VIDEO_HFP)
 847                        data |= DSI_VID_CFG0_HFP_POWER_STOP;
 848                if (flags & MIPI_DSI_MODE_VIDEO_HBP)
 849                        data |= DSI_VID_CFG0_HBP_POWER_STOP;
 850                if (flags & MIPI_DSI_MODE_VIDEO_HSA)
 851                        data |= DSI_VID_CFG0_HSA_POWER_STOP;
 852                /* Always set low power stop mode for BLLP
 853                 * to let command engine send packets
 854                 */
 855                data |= DSI_VID_CFG0_EOF_BLLP_POWER_STOP |
 856                        DSI_VID_CFG0_BLLP_POWER_STOP;
 857                data |= DSI_VID_CFG0_TRAFFIC_MODE(dsi_get_traffic_mode(flags));
 858                data |= DSI_VID_CFG0_DST_FORMAT(dsi_get_vid_fmt(mipi_fmt));
 859                data |= DSI_VID_CFG0_VIRT_CHANNEL(msm_host->channel);
 860                dsi_write(msm_host, REG_DSI_VID_CFG0, data);
 861
 862                /* Do not swap RGB colors */
 863                data = DSI_VID_CFG1_RGB_SWAP(SWAP_RGB);
 864                dsi_write(msm_host, REG_DSI_VID_CFG1, 0);
 865        } else {
 866                /* Do not swap RGB colors */
 867                data = DSI_CMD_CFG0_RGB_SWAP(SWAP_RGB);
 868                data |= DSI_CMD_CFG0_DST_FORMAT(dsi_get_cmd_fmt(mipi_fmt));
 869                dsi_write(msm_host, REG_DSI_CMD_CFG0, data);
 870
 871                data = DSI_CMD_CFG1_WR_MEM_START(MIPI_DCS_WRITE_MEMORY_START) |
 872                        DSI_CMD_CFG1_WR_MEM_CONTINUE(
 873                                        MIPI_DCS_WRITE_MEMORY_CONTINUE);
 874                /* Always insert DCS command */
 875                data |= DSI_CMD_CFG1_INSERT_DCS_COMMAND;
 876                dsi_write(msm_host, REG_DSI_CMD_CFG1, data);
 877        }
 878
 879        dsi_write(msm_host, REG_DSI_CMD_DMA_CTRL,
 880                        DSI_CMD_DMA_CTRL_FROM_FRAME_BUFFER |
 881                        DSI_CMD_DMA_CTRL_LOW_POWER);
 882
 883        data = 0;
 884        /* Always assume dedicated TE pin */
 885        data |= DSI_TRIG_CTRL_TE;
 886        data |= DSI_TRIG_CTRL_MDP_TRIGGER(TRIGGER_NONE);
 887        data |= DSI_TRIG_CTRL_DMA_TRIGGER(TRIGGER_SW);
 888        data |= DSI_TRIG_CTRL_STREAM(msm_host->channel);
 889        if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) &&
 890                (cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V1_2))
 891                data |= DSI_TRIG_CTRL_BLOCK_DMA_WITHIN_FRAME;
 892        dsi_write(msm_host, REG_DSI_TRIG_CTRL, data);
 893
 894        data = DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(phy_shared_timings->clk_post) |
 895                DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE(phy_shared_timings->clk_pre);
 896        dsi_write(msm_host, REG_DSI_CLKOUT_TIMING_CTRL, data);
 897
 898        if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) &&
 899            (cfg_hnd->minor > MSM_DSI_6G_VER_MINOR_V1_0) &&
 900            phy_shared_timings->clk_pre_inc_by_2)
 901                dsi_write(msm_host, REG_DSI_T_CLK_PRE_EXTEND,
 902                          DSI_T_CLK_PRE_EXTEND_INC_BY_2_BYTECLK);
 903
 904        data = 0;
 905        if (!(flags & MIPI_DSI_MODE_EOT_PACKET))
 906                data |= DSI_EOT_PACKET_CTRL_TX_EOT_APPEND;
 907        dsi_write(msm_host, REG_DSI_EOT_PACKET_CTRL, data);
 908
 909        /* allow only ack-err-status to generate interrupt */
 910        dsi_write(msm_host, REG_DSI_ERR_INT_MASK0, 0x13ff3fe0);
 911
 912        dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 1);
 913
 914        dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
 915
 916        data = DSI_CTRL_CLK_EN;
 917
 918        DBG("lane number=%d", msm_host->lanes);
 919        data |= ((DSI_CTRL_LANE0 << msm_host->lanes) - DSI_CTRL_LANE0);
 920
 921        dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL,
 922                  DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(msm_host->dlane_swap));
 923
 924        if (!(flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)) {
 925                lane_ctrl = dsi_read(msm_host, REG_DSI_LANE_CTRL);
 926                dsi_write(msm_host, REG_DSI_LANE_CTRL,
 927                        lane_ctrl | DSI_LANE_CTRL_CLKLN_HS_FORCE_REQUEST);
 928        }
 929
 930        data |= DSI_CTRL_ENABLE;
 931
 932        dsi_write(msm_host, REG_DSI_CTRL, data);
 933}
 934
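     /*
      * Program the display timing registers: active/total/sync windows for
      * video mode, or the MDP stream word count and dimensions for command
      * mode. For dual DSI, each host gets half of the horizontal timing.
      */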
 935static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_dual_dsi)
 936{
 937        struct drm_display_mode *mode = msm_host->mode;
 938        u32 hs_start = 0, vs_start = 0; /* take sync start as 0 */
 939        u32 h_total = mode->htotal;
 940        u32 v_total = mode->vtotal;
 941        u32 hs_end = mode->hsync_end - mode->hsync_start;
 942        u32 vs_end = mode->vsync_end - mode->vsync_start;
 943        u32 ha_start = h_total - mode->hsync_start;
 944        u32 ha_end = ha_start + mode->hdisplay;
 945        u32 va_start = v_total - mode->vsync_start;
 946        u32 va_end = va_start + mode->vdisplay;
 947        u32 hdisplay = mode->hdisplay;
 948        u32 wc;
 949
 950        DBG("");
 951
 952        /*
 953         * For dual DSI mode, the current DRM mode has
  954         * the complete width of the panel. Since the complete
  955         * panel is driven by two DSI controllers, the horizontal
  956         * timings have to be split between the two DSI controllers.
 957         * Adjust the DSI host timing values accordingly.
 958         */
 959        if (is_dual_dsi) {
 960                h_total /= 2;
 961                hs_end /= 2;
 962                ha_start /= 2;
 963                ha_end /= 2;
 964                hdisplay /= 2;
 965        }
 966
 967        if (msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) {
 968                dsi_write(msm_host, REG_DSI_ACTIVE_H,
 969                        DSI_ACTIVE_H_START(ha_start) |
 970                        DSI_ACTIVE_H_END(ha_end));
 971                dsi_write(msm_host, REG_DSI_ACTIVE_V,
 972                        DSI_ACTIVE_V_START(va_start) |
 973                        DSI_ACTIVE_V_END(va_end));
 974                dsi_write(msm_host, REG_DSI_TOTAL,
 975                        DSI_TOTAL_H_TOTAL(h_total - 1) |
 976                        DSI_TOTAL_V_TOTAL(v_total - 1));
 977
 978                dsi_write(msm_host, REG_DSI_ACTIVE_HSYNC,
 979                        DSI_ACTIVE_HSYNC_START(hs_start) |
 980                        DSI_ACTIVE_HSYNC_END(hs_end));
 981                dsi_write(msm_host, REG_DSI_ACTIVE_VSYNC_HPOS, 0);
 982                dsi_write(msm_host, REG_DSI_ACTIVE_VSYNC_VPOS,
 983                        DSI_ACTIVE_VSYNC_VPOS_START(vs_start) |
 984                        DSI_ACTIVE_VSYNC_VPOS_END(vs_end));
 985        } else {                /* command mode */
 986                /* image data and 1 byte write_memory_start cmd */
 987                wc = hdisplay * dsi_get_bpp(msm_host->format) / 8 + 1;
 988
 989                dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM_CTRL,
 990                        DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT(wc) |
 991                        DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL(
 992                                        msm_host->channel) |
 993                        DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE(
 994                                        MIPI_DSI_DCS_LONG_WRITE));
 995
 996                dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM_TOTAL,
 997                        DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL(hdisplay) |
 998                        DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL(mode->vdisplay));
 999        }
1000}
1001
1002static void dsi_sw_reset(struct msm_dsi_host *msm_host)
1003{
1004        dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
1005        wmb(); /* clocks need to be enabled before reset */
1006
1007        dsi_write(msm_host, REG_DSI_RESET, 1);
 1008        msleep(DSI_RESET_TOGGLE_DELAY_MS); /* make sure reset happens */
1009        dsi_write(msm_host, REG_DSI_RESET, 0);
1010}
1011
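     /*
      * Switch the controller between video and command engine operation
      * and enable/disable the corresponding "done" interrupts.
      */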
1012static void dsi_op_mode_config(struct msm_dsi_host *msm_host,
1013                                        bool video_mode, bool enable)
1014{
1015        u32 dsi_ctrl;
1016
1017        dsi_ctrl = dsi_read(msm_host, REG_DSI_CTRL);
1018
1019        if (!enable) {
1020                dsi_ctrl &= ~(DSI_CTRL_ENABLE | DSI_CTRL_VID_MODE_EN |
1021                                DSI_CTRL_CMD_MODE_EN);
1022                dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_MDP_DONE |
1023                                        DSI_IRQ_MASK_VIDEO_DONE, 0);
1024        } else {
1025                if (video_mode) {
1026                        dsi_ctrl |= DSI_CTRL_VID_MODE_EN;
1027                } else {                /* command mode */
1028                        dsi_ctrl |= DSI_CTRL_CMD_MODE_EN;
1029                        dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_MDP_DONE, 1);
1030                }
1031                dsi_ctrl |= DSI_CTRL_ENABLE;
1032        }
1033
1034        dsi_write(msm_host, REG_DSI_CTRL, dsi_ctrl);
1035}
1036
1037static void dsi_set_tx_power_mode(int mode, struct msm_dsi_host *msm_host)
1038{
1039        u32 data;
1040
1041        data = dsi_read(msm_host, REG_DSI_CMD_DMA_CTRL);
1042
1043        if (mode == 0)
1044                data &= ~DSI_CMD_DMA_CTRL_LOW_POWER;
1045        else
1046                data |= DSI_CMD_DMA_CTRL_LOW_POWER;
1047
1048        dsi_write(msm_host, REG_DSI_CMD_DMA_CTRL, data);
1049}
1050
1051static void dsi_wait4video_done(struct msm_dsi_host *msm_host)
1052{
1053        u32 ret = 0;
1054        struct device *dev = &msm_host->pdev->dev;
1055
1056        dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 1);
1057
1058        reinit_completion(&msm_host->video_comp);
1059
1060        ret = wait_for_completion_timeout(&msm_host->video_comp,
1061                        msecs_to_jiffies(70));
1062
1063        if (ret == 0)
1064                DRM_DEV_ERROR(dev, "wait for video done timed out\n");
1065
1066        dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 0);
1067}
1068
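     /*
      * In video mode, wait for the video engine to finish the current
      * frame before a command DMA transfer is triggered.
      */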
1069static void dsi_wait4video_eng_busy(struct msm_dsi_host *msm_host)
1070{
1071        if (!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO))
1072                return;
1073
1074        if (msm_host->power_on && msm_host->enabled) {
1075                dsi_wait4video_done(msm_host);
1076                /* delay 4 ms to skip BLLP */
1077                usleep_range(2000, 4000);
1078        }
1079}
1080
1081int dsi_tx_buf_alloc_6g(struct msm_dsi_host *msm_host, int size)
1082{
1083        struct drm_device *dev = msm_host->dev;
1084        struct msm_drm_private *priv = dev->dev_private;
1085        uint64_t iova;
1086        u8 *data;
1087
1088        data = msm_gem_kernel_new(dev, size, MSM_BO_UNCACHED,
1089                                        priv->kms->aspace,
1090                                        &msm_host->tx_gem_obj, &iova);
1091
1092        if (IS_ERR(data)) {
1093                msm_host->tx_gem_obj = NULL;
1094                return PTR_ERR(data);
1095        }
1096
1097        msm_gem_object_set_name(msm_host->tx_gem_obj, "tx_gem");
1098
1099        msm_host->tx_size = msm_host->tx_gem_obj->size;
1100
1101        return 0;
1102}
1103
1104int dsi_tx_buf_alloc_v2(struct msm_dsi_host *msm_host, int size)
1105{
1106        struct drm_device *dev = msm_host->dev;
1107
1108        msm_host->tx_buf = dma_alloc_coherent(dev->dev, size,
1109                                        &msm_host->tx_buf_paddr, GFP_KERNEL);
1110        if (!msm_host->tx_buf)
1111                return -ENOMEM;
1112
1113        msm_host->tx_size = size;
1114
1115        return 0;
1116}
1117
1118static void dsi_tx_buf_free(struct msm_dsi_host *msm_host)
1119{
1120        struct drm_device *dev = msm_host->dev;
1121        struct msm_drm_private *priv;
1122
1123        /*
1124         * This is possible if we're tearing down before we've had a chance to
1125         * fully initialize. A very real possibility if our probe is deferred,
1126         * in which case we'll hit msm_dsi_host_destroy() without having run
1127         * through the dsi_tx_buf_alloc().
1128         */
1129        if (!dev)
1130                return;
1131
1132        priv = dev->dev_private;
1133        if (msm_host->tx_gem_obj) {
1134                msm_gem_unpin_iova(msm_host->tx_gem_obj, priv->kms->aspace);
1135                drm_gem_object_put_unlocked(msm_host->tx_gem_obj);
1136                msm_host->tx_gem_obj = NULL;
1137        }
1138
1139        if (msm_host->tx_buf)
1140                dma_free_coherent(dev->dev, msm_host->tx_size, msm_host->tx_buf,
1141                        msm_host->tx_buf_paddr);
1142}
1143
1144void *dsi_tx_buf_get_6g(struct msm_dsi_host *msm_host)
1145{
1146        return msm_gem_get_vaddr(msm_host->tx_gem_obj);
1147}
1148
1149void *dsi_tx_buf_get_v2(struct msm_dsi_host *msm_host)
1150{
1151        return msm_host->tx_buf;
1152}
1153
1154void dsi_tx_buf_put_6g(struct msm_dsi_host *msm_host)
1155{
1156        msm_gem_put_vaddr(msm_host->tx_gem_obj);
1157}
1158
1159/*
1160 * prepare cmd buffer to be txed
1161 */
1162static int dsi_cmd_dma_add(struct msm_dsi_host *msm_host,
1163                           const struct mipi_dsi_msg *msg)
1164{
1165        const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
1166        struct mipi_dsi_packet packet;
1167        int len;
1168        int ret;
1169        u8 *data;
1170
1171        ret = mipi_dsi_create_packet(&packet, msg);
1172        if (ret) {
1173                pr_err("%s: create packet failed, %d\n", __func__, ret);
1174                return ret;
1175        }
1176        len = (packet.size + 3) & (~0x3);
1177
1178        if (len > msm_host->tx_size) {
1179                pr_err("%s: packet size is too big\n", __func__);
1180                return -EINVAL;
1181        }
1182
1183        data = cfg_hnd->ops->tx_buf_get(msm_host);
1184        if (IS_ERR(data)) {
1185                ret = PTR_ERR(data);
1186                pr_err("%s: get vaddr failed, %d\n", __func__, ret);
1187                return ret;
1188        }
1189
1190        /* MSM specific command format in memory */
1191        data[0] = packet.header[1];
1192        data[1] = packet.header[2];
1193        data[2] = packet.header[0];
1194        data[3] = BIT(7); /* Last packet */
1195        if (mipi_dsi_packet_format_is_long(msg->type))
1196                data[3] |= BIT(6);
1197        if (msg->rx_buf && msg->rx_len)
1198                data[3] |= BIT(5);
1199
1200        /* Long packet */
1201        if (packet.payload && packet.payload_length)
1202                memcpy(data + 4, packet.payload, packet.payload_length);
1203
1204        /* Append 0xff to the end */
1205        if (packet.size < len)
1206                memset(data + packet.size, 0xff, len - packet.size);
1207
1208        if (cfg_hnd->ops->tx_buf_put)
1209                cfg_hnd->ops->tx_buf_put(msm_host);
1210
1211        return len;
1212}
1213
1214/*
1215 * dsi_short_read1_resp: 1 parameter
1216 */
1217static int dsi_short_read1_resp(u8 *buf, const struct mipi_dsi_msg *msg)
1218{
1219        u8 *data = msg->rx_buf;
1220        if (data && (msg->rx_len >= 1)) {
1221                *data = buf[1]; /* strip out dcs type */
1222                return 1;
1223        } else {
1224                pr_err("%s: read data does not match with rx_buf len %zu\n",
1225                        __func__, msg->rx_len);
1226                return -EINVAL;
1227        }
1228}
1229
1230/*
1231 * dsi_short_read2_resp: 2 parameter
1232 */
1233static int dsi_short_read2_resp(u8 *buf, const struct mipi_dsi_msg *msg)
1234{
1235        u8 *data = msg->rx_buf;
1236        if (data && (msg->rx_len >= 2)) {
1237                data[0] = buf[1]; /* strip out dcs type */
1238                data[1] = buf[2];
1239                return 2;
1240        } else {
1241                pr_err("%s: read data does not match with rx_buf len %zu\n",
1242                        __func__, msg->rx_len);
1243                return -EINVAL;
1244        }
1245}
1246
1247static int dsi_long_read_resp(u8 *buf, const struct mipi_dsi_msg *msg)
1248{
1249        /* strip out 4 byte dcs header */
1250        if (msg->rx_buf && msg->rx_len)
1251                memcpy(msg->rx_buf, buf + 4, msg->rx_len);
1252
1253        return msg->rx_len;
1254}
1255
1256int dsi_dma_base_get_6g(struct msm_dsi_host *msm_host, uint64_t *dma_base)
1257{
1258        struct drm_device *dev = msm_host->dev;
1259        struct msm_drm_private *priv = dev->dev_private;
1260
1261        if (!dma_base)
1262                return -EINVAL;
1263
1264        return msm_gem_get_and_pin_iova(msm_host->tx_gem_obj,
1265                                priv->kms->aspace, dma_base);
1266}
1267
1268int dsi_dma_base_get_v2(struct msm_dsi_host *msm_host, uint64_t *dma_base)
1269{
1270        if (!dma_base)
1271                return -EINVAL;
1272
1273        *dma_base = msm_host->tx_buf_paddr;
1274        return 0;
1275}
1276
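     /*
      * Trigger command DMA for a buffer of 'len' bytes and wait up to
      * 200 ms for the DMA done interrupt. Returns 'len' on success.
      */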
1277static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
1278{
1279        const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
1280        int ret;
1281        uint64_t dma_base;
1282        bool triggered;
1283
1284        ret = cfg_hnd->ops->dma_base_get(msm_host, &dma_base);
1285        if (ret) {
1286                pr_err("%s: failed to get iova: %d\n", __func__, ret);
1287                return ret;
1288        }
1289
1290        reinit_completion(&msm_host->dma_comp);
1291
1292        dsi_wait4video_eng_busy(msm_host);
1293
1294        triggered = msm_dsi_manager_cmd_xfer_trigger(
1295                                                msm_host->id, dma_base, len);
1296        if (triggered) {
1297                ret = wait_for_completion_timeout(&msm_host->dma_comp,
1298                                        msecs_to_jiffies(200));
1299                DBG("ret=%d", ret);
1300                if (ret == 0)
1301                        ret = -ETIMEDOUT;
1302                else
1303                        ret = len;
1304        } else
1305                ret = len;
1306
1307        return ret;
1308}
1309
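     /*
      * Copy one chunk of read-back data from the four 32-bit RDBK_DATA
      * registers into 'buf', converting from network byte order and
      * skipping bytes repeated from the previous chunk. Returns the number
      * of bytes copied.
      */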
1310static int dsi_cmd_dma_rx(struct msm_dsi_host *msm_host,
1311                        u8 *buf, int rx_byte, int pkt_size)
1312{
1313        u32 *temp, data;
1314        int i, j = 0, cnt;
1315        u32 read_cnt;
1316        u8 reg[16];
1317        int repeated_bytes = 0;
1318        int buf_offset = buf - msm_host->rx_buf;
1319
1320        temp = (u32 *)reg;
1321        cnt = (rx_byte + 3) >> 2;
1322        if (cnt > 4)
1323                cnt = 4; /* 4 x 32 bits registers only */
1324
1325        if (rx_byte == 4)
1326                read_cnt = 4;
1327        else
1328                read_cnt = pkt_size + 6;
1329
1330        /*
1331         * In case of multiple reads from the panel, after the first read, there
1332         * is possibility that there are some bytes in the payload repeating in
1333         * the RDBK_DATA registers. Since we read all the parameters from the
 1334         * panel right from the first byte for every pass, we need to skip the
1335         * repeating bytes and then append the new parameters to the rx buffer.
1336         */
1337        if (read_cnt > 16) {
1338                int bytes_shifted;
1339                /* Any data more than 16 bytes will be shifted out.
1340                 * The temp read buffer should already contain these bytes.
1341                 * The remaining bytes in read buffer are the repeated bytes.
1342                 */
1343                bytes_shifted = read_cnt - 16;
1344                repeated_bytes = buf_offset - bytes_shifted;
1345        }
1346
1347        for (i = cnt - 1; i >= 0; i--) {
1348                data = dsi_read(msm_host, REG_DSI_RDBK_DATA(i));
1349                *temp++ = ntohl(data); /* to host byte order */
1350                DBG("data = 0x%x and ntohl(data) = 0x%x", data, ntohl(data));
1351        }
1352
1353        for (i = repeated_bytes; i < 16; i++)
1354                buf[j++] = reg[i];
1355
1356        return j;
1357}
1358
1359static int dsi_cmds2buf_tx(struct msm_dsi_host *msm_host,
1360                                const struct mipi_dsi_msg *msg)
1361{
1362        int len, ret;
1363        int bllp_len = msm_host->mode->hdisplay *
1364                        dsi_get_bpp(msm_host->format) / 8;
1365
1366        len = dsi_cmd_dma_add(msm_host, msg);
 1367        if (len <= 0) {
1368                pr_err("%s: failed to add cmd type = 0x%x\n",
1369                        __func__,  msg->type);
1370                return -EINVAL;
1371        }
1372
 1373        /* For video mode, do not send cmds longer than
 1374         * one pixel line, since they are only transmitted
 1375         * during the BLLP.
 1376         */
1377        /* TODO: if the command is sent in LP mode, the bit rate is only
1378         * half of esc clk rate. In this case, if the video is already
1379         * actively streaming, we need to check more carefully if the
1380         * command can be fit into one BLLP.
1381         */
1382        if ((msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) && (len > bllp_len)) {
1383                pr_err("%s: cmd cannot fit into BLLP period, len=%d\n",
1384                        __func__, len);
1385                return -EINVAL;
1386        }
1387
1388        ret = dsi_cmd_dma_tx(msm_host, len);
1389        if (ret < len) {
1390                pr_err("%s: cmd dma tx failed, type=0x%x, data0=0x%x, len=%d\n",
1391                        __func__, msg->type, (*(u8 *)(msg->tx_buf)), len);
1392                return -ECOMM;
1393        }
1394
1395        return len;
1396}
1397
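     /*
      * Error recovery: disable the controller, toggle the soft reset with
      * the core clocks forced on, then restore the saved DSI_CTRL value.
      */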
1398static void dsi_sw_reset_restore(struct msm_dsi_host *msm_host)
1399{
1400        u32 data0, data1;
1401
1402        data0 = dsi_read(msm_host, REG_DSI_CTRL);
1403        data1 = data0;
1404        data1 &= ~DSI_CTRL_ENABLE;
1405        dsi_write(msm_host, REG_DSI_CTRL, data1);
1406        /*
 1407         * dsi controller needs to be disabled before
 1408         * the clocks are turned on
1409         */
1410        wmb();
1411
1412        dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
1413        wmb();  /* make sure clocks enabled */
1414
1415        /* dsi controller can only be reset while clocks are running */
1416        dsi_write(msm_host, REG_DSI_RESET, 1);
 1417        msleep(DSI_RESET_TOGGLE_DELAY_MS); /* make sure reset happens */
1418        dsi_write(msm_host, REG_DSI_RESET, 0);
1419        wmb();  /* controller out of reset */
1420        dsi_write(msm_host, REG_DSI_CTRL, data0);
1421        wmb();  /* make sure dsi controller enabled again */
1422}
1423
1424static void dsi_hpd_worker(struct work_struct *work)
1425{
1426        struct msm_dsi_host *msm_host =
1427                container_of(work, struct msm_dsi_host, hpd_work);
1428
1429        drm_helper_hpd_irq_event(msm_host->dev);
1430}
1431
1432static void dsi_err_worker(struct work_struct *work)
1433{
1434        struct msm_dsi_host *msm_host =
1435                container_of(work, struct msm_dsi_host, err_work);
1436        u32 status = msm_host->err_work_state;
1437
1438        pr_err_ratelimited("%s: status=%x\n", __func__, status);
1439        if (status & DSI_ERR_STATE_MDP_FIFO_UNDERFLOW)
1440                dsi_sw_reset_restore(msm_host);
1441
1442        /* It is safe to clear here because error irq is disabled. */
1443        msm_host->err_work_state = 0;
1444
1445        /* enable dsi error interrupt */
1446        dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 1);
1447}
1448
1449static void dsi_ack_err_status(struct msm_dsi_host *msm_host)
1450{
1451        u32 status;
1452
1453        status = dsi_read(msm_host, REG_DSI_ACK_ERR_STATUS);
1454
1455        if (status) {
1456                dsi_write(msm_host, REG_DSI_ACK_ERR_STATUS, status);
 1457                /* An extra write of 0 is needed to clear the error bits */
1458                dsi_write(msm_host, REG_DSI_ACK_ERR_STATUS, 0);
1459                msm_host->err_work_state |= DSI_ERR_STATE_ACK;
1460        }
1461}
1462
1463static void dsi_timeout_status(struct msm_dsi_host *msm_host)
1464{
1465        u32 status;
1466
1467        status = dsi_read(msm_host, REG_DSI_TIMEOUT_STATUS);
1468
1469        if (status) {
1470                dsi_write(msm_host, REG_DSI_TIMEOUT_STATUS, status);
1471                msm_host->err_work_state |= DSI_ERR_STATE_TIMEOUT;
1472        }
1473}
1474
1475static void dsi_dln0_phy_err(struct msm_dsi_host *msm_host)
1476{
1477        u32 status;
1478
1479        status = dsi_read(msm_host, REG_DSI_DLN0_PHY_ERR);
1480
1481        if (status & (DSI_DLN0_PHY_ERR_DLN0_ERR_ESC |
1482                        DSI_DLN0_PHY_ERR_DLN0_ERR_SYNC_ESC |
1483                        DSI_DLN0_PHY_ERR_DLN0_ERR_CONTROL |
1484                        DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP0 |
1485                        DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP1)) {
1486                dsi_write(msm_host, REG_DSI_DLN0_PHY_ERR, status);
1487                msm_host->err_work_state |= DSI_ERR_STATE_DLN0_PHY;
1488        }
1489}
1490
1491static void dsi_fifo_status(struct msm_dsi_host *msm_host)
1492{
1493        u32 status;
1494
1495        status = dsi_read(msm_host, REG_DSI_FIFO_STATUS);
1496
1497        /* fifo underflow, overflow */
1498        if (status) {
1499                dsi_write(msm_host, REG_DSI_FIFO_STATUS, status);
1500                msm_host->err_work_state |= DSI_ERR_STATE_FIFO;
1501                if (status & DSI_FIFO_STATUS_CMD_MDP_FIFO_UNDERFLOW)
1502                        msm_host->err_work_state |=
1503                                        DSI_ERR_STATE_MDP_FIFO_UNDERFLOW;
1504        }
1505}
1506
1507static void dsi_status(struct msm_dsi_host *msm_host)
1508{
1509        u32 status;
1510
1511        status = dsi_read(msm_host, REG_DSI_STATUS0);
1512
1513        if (status & DSI_STATUS0_INTERLEAVE_OP_CONTENTION) {
1514                dsi_write(msm_host, REG_DSI_STATUS0, status);
1515                msm_host->err_work_state |=
1516                        DSI_ERR_STATE_INTERLEAVE_OP_CONTENTION;
1517        }
1518}
1519
1520static void dsi_clk_status(struct msm_dsi_host *msm_host)
1521{
1522        u32 status;
1523
1524        status = dsi_read(msm_host, REG_DSI_CLK_STATUS);
1525
1526        if (status & DSI_CLK_STATUS_PLL_UNLOCKED) {
1527                dsi_write(msm_host, REG_DSI_CLK_STATUS, status);
1528                msm_host->err_work_state |= DSI_ERR_STATE_PLL_UNLOCKED;
1529        }
1530}
1531
1532static void dsi_error(struct msm_dsi_host *msm_host)
1533{
1534        /* disable dsi error interrupt */
1535        dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 0);
1536
1537        dsi_clk_status(msm_host);
1538        dsi_fifo_status(msm_host);
1539        dsi_ack_err_status(msm_host);
1540        dsi_timeout_status(msm_host);
1541        dsi_status(msm_host);
1542        dsi_dln0_phy_err(msm_host);
1543
1544        queue_work(msm_host->workqueue, &msm_host->err_work);
1545}
1546
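     /* Read and ack the interrupt status, then dispatch error/completion handling. */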
1547static irqreturn_t dsi_host_irq(int irq, void *ptr)
1548{
1549        struct msm_dsi_host *msm_host = ptr;
1550        u32 isr;
1551        unsigned long flags;
1552
1553        if (!msm_host->ctrl_base)
1554                return IRQ_HANDLED;
1555
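            /*
             * Read back and write the ISR value to ack the latched status
             * bits; intr_lock serialises this against dsi_intr_ctrl()
             * updating the enable bits in the same register.
             */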
1556        spin_lock_irqsave(&msm_host->intr_lock, flags);
1557        isr = dsi_read(msm_host, REG_DSI_INTR_CTRL);
1558        dsi_write(msm_host, REG_DSI_INTR_CTRL, isr);
1559        spin_unlock_irqrestore(&msm_host->intr_lock, flags);
1560
1561        DBG("isr=0x%x, id=%d", isr, msm_host->id);
1562
1563        if (isr & DSI_IRQ_ERROR)
1564                dsi_error(msm_host);
1565
1566        if (isr & DSI_IRQ_VIDEO_DONE)
1567                complete(&msm_host->video_comp);
1568
1569        if (isr & DSI_IRQ_CMD_DMA_DONE)
1570                complete(&msm_host->dma_comp);
1571
1572        return IRQ_HANDLED;
1573}
1574
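    /*
     * The optional "disp-enable" and "disp-te" gpios live in the panel's
     * DT node but are driven by the host.
     */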
1575static int dsi_host_init_panel_gpios(struct msm_dsi_host *msm_host,
1576                        struct device *panel_device)
1577{
1578        msm_host->disp_en_gpio = devm_gpiod_get_optional(panel_device,
1579                                                         "disp-enable",
1580                                                         GPIOD_OUT_LOW);
1581        if (IS_ERR(msm_host->disp_en_gpio)) {
1582                DBG("cannot get disp-enable-gpios %ld",
1583                                PTR_ERR(msm_host->disp_en_gpio));
1584                return PTR_ERR(msm_host->disp_en_gpio);
1585        }
1586
1587        msm_host->te_gpio = devm_gpiod_get_optional(panel_device, "disp-te",
1588                                                                GPIOD_IN);
1589        if (IS_ERR(msm_host->te_gpio)) {
1590                DBG("cannot get disp-te-gpios %ld", PTR_ERR(msm_host->te_gpio));
1591                return PTR_ERR(msm_host->te_gpio);
1592        }
1593
1594        return 0;
1595}
1596
1597static int dsi_host_attach(struct mipi_dsi_host *host,
1598                                        struct mipi_dsi_device *dsi)
1599{
1600        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1601        int ret;
1602
1603        if (dsi->lanes > msm_host->num_data_lanes)
1604                return -EINVAL;
1605
1606        msm_host->channel = dsi->channel;
1607        msm_host->lanes = dsi->lanes;
1608        msm_host->format = dsi->format;
1609        msm_host->mode_flags = dsi->mode_flags;
1610
1611        /* Some gpios defined in panel DT need to be controlled by host */
1612        ret = dsi_host_init_panel_gpios(msm_host, &dsi->dev);
1613        if (ret)
1614                return ret;
1615
1616        DBG("id=%d", msm_host->id);
1617        if (msm_host->dev)
1618                queue_work(msm_host->workqueue, &msm_host->hpd_work);
1619
1620        return 0;
1621}
1622
1623static int dsi_host_detach(struct mipi_dsi_host *host,
1624                                        struct mipi_dsi_device *dsi)
1625{
1626        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1627
1628        msm_host->device_node = NULL;
1629
1630        DBG("id=%d", msm_host->id);
1631        if (msm_host->dev)
1632                queue_work(msm_host->workqueue, &msm_host->hpd_work);
1633
1634        return 0;
1635}
1636
1637static ssize_t dsi_host_transfer(struct mipi_dsi_host *host,
1638                                        const struct mipi_dsi_msg *msg)
1639{
1640        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1641        int ret;
1642
1643        if (!msg || !msm_host->power_on)
1644                return -EINVAL;
1645
1646        mutex_lock(&msm_host->cmd_mutex);
1647        ret = msm_dsi_manager_cmd_xfer(msm_host->id, msg);
1648        mutex_unlock(&msm_host->cmd_mutex);
1649
1650        return ret;
1651}
1652
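    /*
     * Host ops exposed to mipi_dsi_device drivers (panels/bridges) attached
     * to this host. Such drivers do not call these directly; for example
     * (hypothetical panel code, not part of this driver) a DCS write like
     *
     *        u8 seq[] = { MIPI_DCS_SET_DISPLAY_BRIGHTNESS, 0xff };
     *        mipi_dsi_dcs_write_buffer(dsi, seq, sizeof(seq));
     *
     * is routed by the DSI core into dsi_host_transfer() above.
     */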
1653static const struct mipi_dsi_host_ops dsi_host_ops = {
1654        .attach = dsi_host_attach,
1655        .detach = dsi_host_detach,
1656        .transfer = dsi_host_transfer,
1657};
1658
1659/*
1660 * List of supported physical to logical lane mappings.
1661 * For example, the 2nd entry represents the following mapping:
1662 *
1663 * "3012": Logic 3->Phys 0; Logic 0->Phys 1; Logic 1->Phys 2; Logic 2->Phys 3;
1664 */
1665static const int supported_data_lane_swaps[][4] = {
1666        { 0, 1, 2, 3 },
1667        { 3, 0, 1, 2 },
1668        { 2, 3, 0, 1 },
1669        { 1, 2, 3, 0 },
1670        { 0, 3, 2, 1 },
1671        { 1, 0, 3, 2 },
1672        { 2, 1, 0, 3 },
1673        { 3, 2, 1, 0 },
1674};
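
    /*
     * Illustrative example: a DT endpoint with "data-lanes = <1 2 3 0>"
     * (logical lane 0 on physical lane 1, 1 on 2, 2 on 3, 3 on 0) inverts
     * to the physical->logical mapping { 3, 0, 1, 2 }, i.e. the 2nd entry
     * above, so dsi_host_parse_lane_data() would set dlane_swap to 1.
     */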
1675
1676static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
1677                                    struct device_node *ep)
1678{
1679        struct device *dev = &msm_host->pdev->dev;
1680        struct property *prop;
1681        u32 lane_map[4];
1682        int ret, i, len, num_lanes;
1683
1684        prop = of_find_property(ep, "data-lanes", &len);
1685        if (!prop) {
1686                DRM_DEV_DEBUG(dev,
1687                        "failed to find data lane mapping, using default\n");
1688                return 0;
1689        }
1690
1691        num_lanes = len / sizeof(u32);
1692
1693        if (num_lanes < 1 || num_lanes > 4) {
1694                DRM_DEV_ERROR(dev, "bad number of data lanes\n");
1695                return -EINVAL;
1696        }
1697
1698        msm_host->num_data_lanes = num_lanes;
1699
1700        ret = of_property_read_u32_array(ep, "data-lanes", lane_map,
1701                                         num_lanes);
1702        if (ret) {
1703                DRM_DEV_ERROR(dev, "failed to read lane data\n");
1704                return ret;
1705        }
1706
1707        /*
1708         * compare DT specified physical-logical lane mappings with the ones
1709         * supported by hardware
1710         */
1711        for (i = 0; i < ARRAY_SIZE(supported_data_lane_swaps); i++) {
1712                const int *swap = supported_data_lane_swaps[i];
1713                int j;
1714
1715                /*
1716                 * the data-lanes array we get from DT has a logical->physical
1717                 * mapping. The "data lane swap" register field represents
1718                 * supported configurations in a physical->logical mapping.
1719                 * Translate the DT mapping to what we understand and find a
1720                 * configuration that works.
1721                 */
1722                for (j = 0; j < num_lanes; j++) {
1723                        if (lane_map[j] > 3) {
1724                                DRM_DEV_ERROR(dev, "bad physical lane entry %u\n",
1725                                        lane_map[j]);
                                    return -EINVAL;
                            }
1726
1727                        if (swap[lane_map[j]] != j)
1728                                break;
1729                }
1730
1731                if (j == num_lanes) {
1732                        msm_host->dlane_swap = i;
1733                        return 0;
1734                }
1735        }
1736
1737        return -EINVAL;
1738}
1739
1740static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
1741{
1742        struct device *dev = &msm_host->pdev->dev;
1743        struct device_node *np = dev->of_node;
1744        struct device_node *endpoint, *device_node;
1745        int ret = 0;
1746
1747        /*
1748         * Get the endpoint of the output port of the DSI host. In our case,
1749         * this is the port with reg = 1. Don't return an error if
1750         * the remote endpoint isn't defined. It's possible that there is
1751         * nothing connected to the dsi output.
1752         */
1753        endpoint = of_graph_get_endpoint_by_regs(np, 1, -1);
1754        if (!endpoint) {
1755                DRM_DEV_DEBUG(dev, "%s: no endpoint\n", __func__);
1756                return 0;
1757        }
1758
1759        ret = dsi_host_parse_lane_data(msm_host, endpoint);
1760        if (ret) {
1761                DRM_DEV_ERROR(dev, "%s: invalid lane configuration %d\n",
1762                        __func__, ret);
1763                ret = -EINVAL;
1764                goto err;
1765        }
1766
1767        /* Get panel node from the output port's endpoint data */
1768        device_node = of_graph_get_remote_node(np, 1, 0);
1769        if (!device_node) {
1770                DRM_DEV_DEBUG(dev, "%s: no valid device\n", __func__);
1771                ret = -ENODEV;
1772                goto err;
1773        }
1774
1775        msm_host->device_node = device_node;
1776
1777        if (of_property_read_bool(np, "syscon-sfpb")) {
1778                msm_host->sfpb = syscon_regmap_lookup_by_phandle(np,
1779                                        "syscon-sfpb");
1780                if (IS_ERR(msm_host->sfpb)) {
1781                        DRM_DEV_ERROR(dev, "%s: failed to get sfpb regmap\n",
1782                                __func__);
1783                        ret = PTR_ERR(msm_host->sfpb);
1784                }
1785        }
1786
1787        of_node_put(device_node);
1788
1789err:
1790        of_node_put(endpoint);
1791
1792        return ret;
1793}
1794
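    /*
     * Identify which DSI controller instance this is by matching the
     * "dsi_ctrl" register base against the per-SoC io_start table in the
     * config.
     */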
1795static int dsi_host_get_id(struct msm_dsi_host *msm_host)
1796{
1797        struct platform_device *pdev = msm_host->pdev;
1798        const struct msm_dsi_config *cfg = msm_host->cfg_hnd->cfg;
1799        struct resource *res;
1800        int i;
1801
1802        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dsi_ctrl");
1803        if (!res)
1804                return -EINVAL;
1805
1806        for (i = 0; i < cfg->num_dsi; i++) {
1807                if (cfg->io_start[i] == res->start)
1808                        return i;
1809        }
1810
1811        return -EINVAL;
1812}
1813
1814int msm_dsi_host_init(struct msm_dsi *msm_dsi)
1815{
1816        struct msm_dsi_host *msm_host = NULL;
1817        struct platform_device *pdev = msm_dsi->pdev;
1818        int ret;
1819
1820        msm_host = devm_kzalloc(&pdev->dev, sizeof(*msm_host), GFP_KERNEL);
1821        if (!msm_host) {
1822                pr_err("%s: FAILED: cannot alloc dsi host\n",
1823                       __func__);
1824                ret = -ENOMEM;
1825                goto fail;
1826        }
1827
1828        msm_host->pdev = pdev;
1829        msm_dsi->host = &msm_host->base;
1830
1831        ret = dsi_host_parse_dt(msm_host);
1832        if (ret) {
1833                pr_err("%s: failed to parse dt\n", __func__);
1834                goto fail;
1835        }
1836
1837        msm_host->ctrl_base = msm_ioremap(pdev, "dsi_ctrl", "DSI CTRL");
1838        if (IS_ERR(msm_host->ctrl_base)) {
1839                pr_err("%s: unable to map Dsi ctrl base\n", __func__);
1840                ret = PTR_ERR(msm_host->ctrl_base);
1841                goto fail;
1842        }
1843
1844        pm_runtime_enable(&pdev->dev);
1845
1846        msm_host->cfg_hnd = dsi_get_config(msm_host);
1847        if (!msm_host->cfg_hnd) {
1848                ret = -EINVAL;
1849                pr_err("%s: get config failed\n", __func__);
1850                goto fail;
1851        }
1852
1853        msm_host->id = dsi_host_get_id(msm_host);
1854        if (msm_host->id < 0) {
1855                ret = msm_host->id;
1856                pr_err("%s: unable to identify DSI host index\n", __func__);
1857                goto fail;
1858        }
1859
1860        /* fixup base address by io offset */
1861        msm_host->ctrl_base += msm_host->cfg_hnd->cfg->io_offset;
1862
1863        ret = dsi_regulator_init(msm_host);
1864        if (ret) {
1865                pr_err("%s: regulator init failed\n", __func__);
1866                goto fail;
1867        }
1868
1869        ret = dsi_clk_init(msm_host);
1870        if (ret) {
1871                pr_err("%s: unable to initialize dsi clks\n", __func__);
1872                goto fail;
1873        }
1874
1875        msm_host->rx_buf = devm_kzalloc(&pdev->dev, SZ_4K, GFP_KERNEL);
1876        if (!msm_host->rx_buf) {
1877                ret = -ENOMEM;
1878                pr_err("%s: alloc rx temp buf failed\n", __func__);
1879                goto fail;
1880        }
1881
1882        init_completion(&msm_host->dma_comp);
1883        init_completion(&msm_host->video_comp);
1884        mutex_init(&msm_host->dev_mutex);
1885        mutex_init(&msm_host->cmd_mutex);
1886        spin_lock_init(&msm_host->intr_lock);
1887
1888        /* setup workqueue */
1889        msm_host->workqueue = alloc_ordered_workqueue("dsi_drm_work", 0);
            if (!msm_host->workqueue) {
                    ret = -ENOMEM;
                    goto fail;
            }
1890        INIT_WORK(&msm_host->err_work, dsi_err_worker);
1891        INIT_WORK(&msm_host->hpd_work, dsi_hpd_worker);
1892
1893        msm_dsi->id = msm_host->id;
1894
1895        DBG("Dsi Host %d initialized", msm_host->id);
1896        return 0;
1897
1898fail:
1899        return ret;
1900}
1901
1902void msm_dsi_host_destroy(struct mipi_dsi_host *host)
1903{
1904        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1905
1906        DBG("");
1907        dsi_tx_buf_free(msm_host);
1908        if (msm_host->workqueue) {
1909                flush_workqueue(msm_host->workqueue);
1910                destroy_workqueue(msm_host->workqueue);
1911                msm_host->workqueue = NULL;
1912        }
1913
1914        mutex_destroy(&msm_host->cmd_mutex);
1915        mutex_destroy(&msm_host->dev_mutex);
1916
1917        pm_runtime_disable(&msm_host->pdev->dev);
1918}
1919
1920int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
1921                                        struct drm_device *dev)
1922{
1923        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1924        const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
1925        struct platform_device *pdev = msm_host->pdev;
1926        int ret;
1927
1928        msm_host->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
1929        /* irq_of_parse_and_map() returns 0 on failure */
1930        if (!msm_host->irq) {
1931                DRM_DEV_ERROR(dev->dev, "failed to get irq\n");
1932                return -EINVAL;
1933        }
1934
1935        ret = devm_request_irq(&pdev->dev, msm_host->irq,
1936                        dsi_host_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
1937                        "dsi_isr", msm_host);
1938        if (ret < 0) {
1939                DRM_DEV_ERROR(&pdev->dev, "failed to request IRQ%u: %d\n",
1940                                msm_host->irq, ret);
1941                return ret;
1942        }
1943
1944        msm_host->dev = dev;
1945        ret = cfg_hnd->ops->tx_buf_alloc(msm_host, SZ_4K);
1946        if (ret) {
1947                pr_err("%s: alloc tx gem obj failed, %d\n", __func__, ret);
1948                return ret;
1949        }
1950
1951        return 0;
1952}
1953
1954int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer)
1955{
1956        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1957        int ret;
1958
1959        /* Register mipi dsi host */
1960        if (!msm_host->registered) {
1961                host->dev = &msm_host->pdev->dev;
1962                host->ops = &dsi_host_ops;
1963                ret = mipi_dsi_host_register(host);
1964                if (ret)
1965                        return ret;
1966
1967                msm_host->registered = true;
1968
1969                /* If the panel driver has not been probed by the time the
1970                 * host registers, defer the host's probe.
1971                 * This makes sure the panel is connected when fbcon checks
1972                 * the connector status and gets the proper display mode to
1973                 * create the framebuffer.
1974                 * Don't try to defer if there is nothing connected to the
1975                 * dsi output.
1976                 */
1977                if (check_defer && msm_host->device_node) {
1978                        if (IS_ERR(of_drm_find_panel(msm_host->device_node)))
1979                                if (!of_drm_find_bridge(msm_host->device_node))
1980                                        return -EPROBE_DEFER;
1981                }
1982        }
1983
1984        return 0;
1985}
1986
1987void msm_dsi_host_unregister(struct mipi_dsi_host *host)
1988{
1989        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1990
1991        if (msm_host->registered) {
1992                mipi_dsi_host_unregister(host);
1993                host->dev = NULL;
1994                host->ops = NULL;
1995                msm_host->registered = false;
1996        }
1997}
1998
1999int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host,
2000                                const struct mipi_dsi_msg *msg)
2001{
2002        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2003        const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
2004
2005        /* TODO: make sure dsi_cmd_mdp is idle.
2006         * Since DSI6G v1.2.0, we can set DSI_TRIG_CTRL.BLOCK_DMA_WITHIN_FRAME
2007         * to ask H/W to wait until cmd mdp is idle. S/W wait is not needed.
2008         * How to handle the old versions? Wait for mdp cmd done?
2009         */
2010
2011        /*
2012         * The mdss interrupt is generated in the mdp core clock domain,
2013         * so the mdp clock needs to be enabled to receive the dsi interrupt.
2014         */
2015        pm_runtime_get_sync(&msm_host->pdev->dev);
2016        cfg_hnd->ops->link_clk_set_rate(msm_host);
2017        cfg_hnd->ops->link_clk_enable(msm_host);
2018
2019        /* TODO: vote for bus bandwidth */
2020
2021        if (!(msg->flags & MIPI_DSI_MSG_USE_LPM))
2022                dsi_set_tx_power_mode(0, msm_host);
2023
2024        msm_host->dma_cmd_ctrl_restore = dsi_read(msm_host, REG_DSI_CTRL);
2025        dsi_write(msm_host, REG_DSI_CTRL,
2026                msm_host->dma_cmd_ctrl_restore |
2027                DSI_CTRL_CMD_MODE_EN |
2028                DSI_CTRL_ENABLE);
2029        dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_DMA_DONE, 1);
2030
2031        return 0;
2032}
2033
2034void msm_dsi_host_xfer_restore(struct mipi_dsi_host *host,
2035                                const struct mipi_dsi_msg *msg)
2036{
2037        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2038        const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
2039
2040        dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_DMA_DONE, 0);
2041        dsi_write(msm_host, REG_DSI_CTRL, msm_host->dma_cmd_ctrl_restore);
2042
2043        if (!(msg->flags & MIPI_DSI_MSG_USE_LPM))
2044                dsi_set_tx_power_mode(1, msm_host);
2045
2046        /* TODO: unvote for bus bandwidth */
2047
2048        cfg_hnd->ops->link_clk_disable(msm_host);
2049        pm_runtime_put_autosuspend(&msm_host->pdev->dev);
2050}
2051
2052int msm_dsi_host_cmd_tx(struct mipi_dsi_host *host,
2053                                const struct mipi_dsi_msg *msg)
2054{
2055        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2056
2057        return dsi_cmds2buf_tx(msm_host, msg);
2058}
2059
2060int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
2061                                const struct mipi_dsi_msg *msg)
2062{
2063        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2064        const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
2065        int data_byte, rx_byte, dlen, end;
2066        int short_response, diff, pkt_size, ret = 0;
2067        char cmd;
2068        int rlen = msg->rx_len;
2069        u8 *buf;
2070
2071        if (rlen <= 2) {
2072                short_response = 1;
2073                pkt_size = rlen;
2074                rx_byte = 4;
2075        } else {
2076                short_response = 0;
2077                data_byte = 10; /* first read */
2078                if (rlen < data_byte)
2079                        pkt_size = rlen;
2080                else
2081                        pkt_size = data_byte;
2082                rx_byte = data_byte + 6; /* 4 header + 2 crc */
2083        }
2084
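            /*
             * Read loop: each pass raises the maximum return packet size and
             * pulls the next chunk through the 16-byte rx fifo. As an
             * illustration, a long read with rlen = 20 uses pkt_size = 10 on
             * the first pass; the remaining 10 bytes are fetched on a second
             * pass with pkt_size raised to 20, which also sets the end flag
             * since the remainder fits in the 14-byte follow-up chunk.
             */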
2085        buf = msm_host->rx_buf;
2086        end = 0;
2087        while (!end) {
2088                u8 tx[2] = {pkt_size & 0xff, pkt_size >> 8};
2089                struct mipi_dsi_msg max_pkt_size_msg = {
2090                        .channel = msg->channel,
2091                        .type = MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE,
2092                        .tx_len = 2,
2093                        .tx_buf = tx,
2094                };
2095
2096                DBG("rlen=%d pkt_size=%d rx_byte=%d",
2097                        rlen, pkt_size, rx_byte);
2098
2099                ret = dsi_cmds2buf_tx(msm_host, &max_pkt_size_msg);
2100                if (ret < 2) {
2101                        pr_err("%s: Set max pkt size failed, %d\n",
2102                                __func__, ret);
2103                        return -EINVAL;
2104                }
2105
2106                if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) &&
2107                        (cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V1_1)) {
2108                        /* Clear the RDBK_DATA registers */
2109                        dsi_write(msm_host, REG_DSI_RDBK_DATA_CTRL,
2110                                        DSI_RDBK_DATA_CTRL_CLR);
2111                        wmb(); /* make sure the RDBK registers are cleared */
2112                        dsi_write(msm_host, REG_DSI_RDBK_DATA_CTRL, 0);
2113                        wmb(); /* release cleared status before transfer */
2114                }
2115
2116                ret = dsi_cmds2buf_tx(msm_host, msg);
2117                if (ret < msg->tx_len) {
2118                        pr_err("%s: Read cmd Tx failed, %d\n", __func__, ret);
2119                        return ret;
2120                }
2121
2122                /*
2123                 * Once the cmd_dma_done interrupt is received, the return
2124                 * data from the client is already stored in the RDBK_DATA
2125                 * registers. Since the rx fifo is 16 bytes, the dcs header
2126                 * survives only the first pass; on later passes it is lost
2127                 * as the data is shifted into the registers.
2128                 */
2129                dlen = dsi_cmd_dma_rx(msm_host, buf, rx_byte, pkt_size);
2130
2131                if (dlen <= 0)
2132                        return 0;
2133
2134                if (short_response)
2135                        break;
2136
2137                if (rlen <= data_byte) {
2138                        diff = data_byte - rlen;
2139                        end = 1;
2140                } else {
2141                        diff = 0;
2142                        rlen -= data_byte;
2143                }
2144
2145                if (!end) {
2146                        dlen -= 2; /* 2 crc */
2147                        dlen -= diff;
2148                        buf += dlen;    /* next start position */
2149                        data_byte = 14; /* NOT first read */
2150                        if (rlen < data_byte)
2151                                pkt_size += rlen;
2152                        else
2153                                pkt_size += data_byte;
2154                        DBG("buf=%p dlen=%d diff=%d", buf, dlen, diff);
2155                }
2156        }
2157
2158        /*
2159         * For a single long read, if the requested rlen < 10,
2160         * we need to shift the start position of the rx data
2161         * buffer to skip the leading bytes which were not
2162         * updated.
2163         */
2164        if (pkt_size < 10 && !short_response)
2165                buf = msm_host->rx_buf + (10 - rlen);
2166        else
2167                buf = msm_host->rx_buf;
2168
2169        cmd = buf[0];
2170        switch (cmd) {
2171        case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
2172                pr_err("%s: rx ACK_ERR_PACKET\n", __func__);
2173                ret = 0;
2174                break;
2175        case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
2176        case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
2177                ret = dsi_short_read1_resp(buf, msg);
2178                break;
2179        case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
2180        case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
2181                ret = dsi_short_read2_resp(buf, msg);
2182                break;
2183        case MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE:
2184        case MIPI_DSI_RX_DCS_LONG_READ_RESPONSE:
2185                ret = dsi_long_read_resp(buf, msg);
2186                break;
2187        default:
2188                pr_warn("%s: Invalid response cmd\n", __func__);
2189                ret = 0;
2190        }
2191
2192        return ret;
2193}
2194
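    /*
     * Kick off a command DMA transfer: point the controller at the tx
     * buffer, program the length and hit the DMA trigger.
     */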
2195void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, u32 dma_base,
2196                                  u32 len)
2197{
2198        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2199
2200        dsi_write(msm_host, REG_DSI_DMA_BASE, dma_base);
2201        dsi_write(msm_host, REG_DSI_DMA_LEN, len);
2202        dsi_write(msm_host, REG_DSI_TRIG_DMA, 1);
2203
2204        /* Make sure trigger happens */
2205        wmb();
2206}
2207
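    /*
     * Re-parent the host's byte and pixel source clocks (plus the dsi/esc
     * source clocks, where present) to the providers exposed by the given
     * DSI PHY PLL.
     */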
2208int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
2209        struct msm_dsi_pll *src_pll)
2210{
2211        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2212        struct clk *byte_clk_provider, *pixel_clk_provider;
2213        int ret;
2214
2215        ret = msm_dsi_pll_get_clk_provider(src_pll,
2216                                &byte_clk_provider, &pixel_clk_provider);
2217        if (ret) {
2218                pr_info("%s: can't get provider from pll, don't set parent\n",
2219                        __func__);
2220                return 0;
2221        }
2222
2223        ret = clk_set_parent(msm_host->byte_clk_src, byte_clk_provider);
2224        if (ret) {
2225                pr_err("%s: can't set parent to byte_clk_src. ret=%d\n",
2226                        __func__, ret);
2227                goto exit;
2228        }
2229
2230        ret = clk_set_parent(msm_host->pixel_clk_src, pixel_clk_provider);
2231        if (ret) {
2232                pr_err("%s: can't set parent to pixel_clk_src. ret=%d\n",
2233                        __func__, ret);
2234                goto exit;
2235        }
2236
2237        if (msm_host->dsi_clk_src) {
2238                ret = clk_set_parent(msm_host->dsi_clk_src, pixel_clk_provider);
2239                if (ret) {
2240                        pr_err("%s: can't set parent to dsi_clk_src. ret=%d\n",
2241                                __func__, ret);
2242                        goto exit;
2243                }
2244        }
2245
2246        if (msm_host->esc_clk_src) {
2247                ret = clk_set_parent(msm_host->esc_clk_src, byte_clk_provider);
2248                if (ret) {
2249                        pr_err("%s: can't set parent to esc_clk_src. ret=%d\n",
2250                                __func__, ret);
2251                        goto exit;
2252                }
2253        }
2254
2255exit:
2256        return ret;
2257}
2258
2259void msm_dsi_host_reset_phy(struct mipi_dsi_host *host)
2260{
2261        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2262
2263        DBG("");
2264        dsi_write(msm_host, REG_DSI_PHY_RESET, DSI_PHY_RESET_RESET);
2265        /* Make sure fully reset */
2266        wmb();
2267        udelay(1000);
2268        dsi_write(msm_host, REG_DSI_PHY_RESET, 0);
2269        udelay(100);
2270}
2271
2272void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host,
2273                        struct msm_dsi_phy_clk_request *clk_req,
2274                        bool is_dual_dsi)
2275{
2276        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2277        const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
2278        int ret;
2279
2280        ret = cfg_hnd->ops->calc_clk_rate(msm_host, is_dual_dsi);
2281        if (ret) {
2282                pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
2283                return;
2284        }
2285
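            /* The DSI bit clock runs at 8x the byte clock */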
2286        clk_req->bitclk_rate = msm_host->byte_clk_rate * 8;
2287        clk_req->escclk_rate = msm_host->esc_clk_rate;
2288}
2289
2290int msm_dsi_host_enable(struct mipi_dsi_host *host)
2291{
2292        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2293
2294        dsi_op_mode_config(msm_host,
2295                !!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), true);
2296
2297        /* TODO: clock should be turned off for command mode,
2298         * and only turned on before MDP START.
2299         * This part of the code should be enabled once the mdp driver supports it.
2300         */
2301        /* if (msm_panel->mode == MSM_DSI_CMD_MODE) {
2302         *      dsi_link_clk_disable(msm_host);
2303         *      pm_runtime_put_autosuspend(&msm_host->pdev->dev);
2304         * }
2305         */
2306        msm_host->enabled = true;
2307        return 0;
2308}
2309
2310int msm_dsi_host_disable(struct mipi_dsi_host *host)
2311{
2312        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2313
2314        msm_host->enabled = false;
2315        dsi_op_mode_config(msm_host,
2316                !!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), false);
2317
2318        /* Since we have disabled INTF, the video engine won't stop on its
2319         * own, which leaves the cmd engine blocked.
2320         * Reset to disable the video engine so that we can send off commands.
2321         */
2322        dsi_sw_reset(msm_host);
2323
2324        return 0;
2325}
2326
2327static void msm_dsi_sfpb_config(struct msm_dsi_host *msm_host, bool enable)
2328{
2329        enum sfpb_ahb_arb_master_port_en en;
2330
2331        if (!msm_host->sfpb)
2332                return;
2333
2334        en = enable ? SFPB_MASTER_PORT_ENABLE : SFPB_MASTER_PORT_DISABLE;
2335
2336        regmap_update_bits(msm_host->sfpb, REG_SFPB_GPREG,
2337                        SFPB_GPREG_MASTER_PORT_EN__MASK,
2338                        SFPB_GPREG_MASTER_PORT_EN(en));
2339}
2340
2341int msm_dsi_host_power_on(struct mipi_dsi_host *host,
2342                        struct msm_dsi_phy_shared_timings *phy_shared_timings,
2343                        bool is_dual_dsi)
2344{
2345        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2346        const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
2347        int ret = 0;
2348
2349        mutex_lock(&msm_host->dev_mutex);
2350        if (msm_host->power_on) {
2351                DBG("dsi host already on");
2352                goto unlock_ret;
2353        }
2354
2355        msm_dsi_sfpb_config(msm_host, true);
2356
2357        ret = dsi_host_regulator_enable(msm_host);
2358        if (ret) {
2359                pr_err("%s: Failed to enable vregs, ret=%d\n",
2360                        __func__, ret);
2361                goto unlock_ret;
2362        }
2363
2364        pm_runtime_get_sync(&msm_host->pdev->dev);
2365        ret = cfg_hnd->ops->link_clk_set_rate(msm_host);
2366        if (!ret)
2367                ret = cfg_hnd->ops->link_clk_enable(msm_host);
2368        if (ret) {
2369                pr_err("%s: failed to enable link clocks. ret=%d\n",
2370                       __func__, ret);
2371                goto fail_disable_reg;
2372        }
2373
2374        ret = pinctrl_pm_select_default_state(&msm_host->pdev->dev);
2375        if (ret) {
2376                pr_err("%s: failed to set pinctrl default state, %d\n",
2377                        __func__, ret);
2378                goto fail_disable_clk;
2379        }
2380
2381        dsi_timing_setup(msm_host, is_dual_dsi);
2382        dsi_sw_reset(msm_host);
2383        dsi_ctrl_config(msm_host, true, phy_shared_timings);
2384
2385        if (msm_host->disp_en_gpio)
2386                gpiod_set_value(msm_host->disp_en_gpio, 1);
2387
2388        msm_host->power_on = true;
2389        mutex_unlock(&msm_host->dev_mutex);
2390
2391        return 0;
2392
2393fail_disable_clk:
2394        cfg_hnd->ops->link_clk_disable(msm_host);
2395        pm_runtime_put_autosuspend(&msm_host->pdev->dev);
2396fail_disable_reg:
2397        dsi_host_regulator_disable(msm_host);
2398unlock_ret:
2399        mutex_unlock(&msm_host->dev_mutex);
2400        return ret;
2401}
2402
2403int msm_dsi_host_power_off(struct mipi_dsi_host *host)
2404{
2405        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2406        const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
2407
2408        mutex_lock(&msm_host->dev_mutex);
2409        if (!msm_host->power_on) {
2410                DBG("dsi host already off");
2411                goto unlock_ret;
2412        }
2413
2414        dsi_ctrl_config(msm_host, false, NULL);
2415
2416        if (msm_host->disp_en_gpio)
2417                gpiod_set_value(msm_host->disp_en_gpio, 0);
2418
2419        pinctrl_pm_select_sleep_state(&msm_host->pdev->dev);
2420
2421        cfg_hnd->ops->link_clk_disable(msm_host);
2422        pm_runtime_put_autosuspend(&msm_host->pdev->dev);
2423
2424        dsi_host_regulator_disable(msm_host);
2425
2426        msm_dsi_sfpb_config(msm_host, false);
2427
2428        DBG("-");
2429
2430        msm_host->power_on = false;
2431
2432unlock_ret:
2433        mutex_unlock(&msm_host->dev_mutex);
2434        return 0;
2435}
2436
2437int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
2438                                  const struct drm_display_mode *mode)
2439{
2440        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2441
2442        if (msm_host->mode) {
2443                drm_mode_destroy(msm_host->dev, msm_host->mode);
2444                msm_host->mode = NULL;
2445        }
2446
2447        msm_host->mode = drm_mode_duplicate(msm_host->dev, mode);
2448        if (!msm_host->mode) {
2449                pr_err("%s: cannot duplicate mode\n", __func__);
2450                return -ENOMEM;
2451        }
2452
2453        return 0;
2454}
2455
2456struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host)
2457{
2458        return of_drm_find_panel(to_msm_dsi_host(host)->device_node);
2459}
2460
2461unsigned long msm_dsi_host_get_mode_flags(struct mipi_dsi_host *host)
2462{
2463        return to_msm_dsi_host(host)->mode_flags;
2464}
2465
2466struct drm_bridge *msm_dsi_host_get_bridge(struct mipi_dsi_host *host)
2467{
2468        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2469
2470        return of_drm_find_bridge(msm_host->device_node);
2471}
2472