linux/drivers/gpu/drm/msm/dsi/dsi_host.c
   1/*
   2 * Copyright (c) 2015, The Linux Foundation. All rights reserved.
   3 *
   4 * This program is free software; you can redistribute it and/or modify
   5 * it under the terms of the GNU General Public License version 2 and
   6 * only version 2 as published by the Free Software Foundation.
   7 *
   8 * This program is distributed in the hope that it will be useful,
   9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  11 * GNU General Public License for more details.
  12 */
  13
  14#include <linux/clk.h>
  15#include <linux/delay.h>
  16#include <linux/err.h>
  17#include <linux/gpio.h>
  18#include <linux/gpio/consumer.h>
  19#include <linux/interrupt.h>
  20#include <linux/of_device.h>
  21#include <linux/of_gpio.h>
  22#include <linux/of_irq.h>
  23#include <linux/pinctrl/consumer.h>
  24#include <linux/of_graph.h>
  25#include <linux/regulator/consumer.h>
  26#include <linux/spinlock.h>
  27#include <linux/mfd/syscon.h>
  28#include <linux/regmap.h>
  29#include <video/mipi_display.h>
  30
  31#include "dsi.h"
  32#include "dsi.xml.h"
  33#include "sfpb.xml.h"
  34#include "dsi_cfg.h"
  35#include "msm_kms.h"
  36
  37static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor)
  38{
  39        u32 ver;
  40
  41        if (!major || !minor)
  42                return -EINVAL;
  43
  44        /*
  45         * From DSI6G(v3), addition of a 6G_HW_VERSION register at offset 0
  46         * makes all other registers 4-byte shifted down.
  47         *
   48         * To distinguish DSI6G(v3) and beyond from DSIv2 and older, we
   49         * read the DSI_VERSION register without any shift (offset
   50         * 0x1f0). In the case of DSIv2, this has to be a non-zero value. In
  51         * the case of DSI6G, this has to be zero (the offset points to a
  52         * scratch register which we never touch)
  53         */
  54
  55        ver = msm_readl(base + REG_DSI_VERSION);
  56        if (ver) {
  57                /* older dsi host, there is no register shift */
  58                ver = FIELD(ver, DSI_VERSION_MAJOR);
  59                if (ver <= MSM_DSI_VER_MAJOR_V2) {
  60                        /* old versions */
  61                        *major = ver;
  62                        *minor = 0;
  63                        return 0;
  64                } else {
  65                        return -EINVAL;
  66                }
  67        } else {
  68                /*
  69                 * newer host, offset 0 has 6G_HW_VERSION, the rest of the
  70                 * registers are shifted down, read DSI_VERSION again with
  71                 * the shifted offset
  72                 */
  73                ver = msm_readl(base + DSI_6G_REG_SHIFT + REG_DSI_VERSION);
  74                ver = FIELD(ver, DSI_VERSION_MAJOR);
  75                if (ver == MSM_DSI_VER_MAJOR_6G) {
  76                        /* 6G version */
  77                        *major = ver;
  78                        *minor = msm_readl(base + REG_DSI_6G_HW_VERSION);
  79                        return 0;
  80                } else {
  81                        return -EINVAL;
  82                }
  83        }
  84}
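/*
 * Illustrative walk-through (annotation, not part of the original file):
 * on a DSIv2-class controller the unshifted DSI_VERSION read returns a
 * non-zero value whose MAJOR field is at most MSM_DSI_VER_MAJOR_V2, so
 * *major comes from that field and *minor stays 0. On a 6G controller the
 * unshifted read hits the scratch register and returns 0, so the function
 * re-reads DSI_VERSION at the shifted offset and takes *minor from
 * REG_DSI_6G_HW_VERSION, which msm_dsi_cfg_get() later matches against
 * its version table.
 */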
  85
  86#define DSI_ERR_STATE_ACK                       0x0000
  87#define DSI_ERR_STATE_TIMEOUT                   0x0001
  88#define DSI_ERR_STATE_DLN0_PHY                  0x0002
  89#define DSI_ERR_STATE_FIFO                      0x0004
  90#define DSI_ERR_STATE_MDP_FIFO_UNDERFLOW        0x0008
  91#define DSI_ERR_STATE_INTERLEAVE_OP_CONTENTION  0x0010
  92#define DSI_ERR_STATE_PLL_UNLOCKED              0x0020
  93
  94#define DSI_CLK_CTRL_ENABLE_CLKS        \
  95                (DSI_CLK_CTRL_AHBS_HCLK_ON | DSI_CLK_CTRL_AHBM_SCLK_ON | \
  96                DSI_CLK_CTRL_PCLK_ON | DSI_CLK_CTRL_DSICLK_ON | \
  97                DSI_CLK_CTRL_BYTECLK_ON | DSI_CLK_CTRL_ESCCLK_ON | \
  98                DSI_CLK_CTRL_FORCE_ON_DYN_AHBM_HCLK)
  99
 100struct msm_dsi_host {
 101        struct mipi_dsi_host base;
 102
 103        struct platform_device *pdev;
 104        struct drm_device *dev;
 105
 106        int id;
 107
 108        void __iomem *ctrl_base;
 109        struct regulator_bulk_data supplies[DSI_DEV_REGULATOR_MAX];
 110
 111        struct clk *bus_clks[DSI_BUS_CLK_MAX];
 112
 113        struct clk *byte_clk;
 114        struct clk *esc_clk;
 115        struct clk *pixel_clk;
 116        struct clk *byte_clk_src;
 117        struct clk *pixel_clk_src;
 118        struct clk *byte_intf_clk;
 119
 120        u32 byte_clk_rate;
 121        u32 pixel_clk_rate;
 122        u32 esc_clk_rate;
 123
 124        /* DSI v2 specific clocks */
 125        struct clk *src_clk;
 126        struct clk *esc_clk_src;
 127        struct clk *dsi_clk_src;
 128
 129        u32 src_clk_rate;
 130
 131        struct gpio_desc *disp_en_gpio;
 132        struct gpio_desc *te_gpio;
 133
 134        const struct msm_dsi_cfg_handler *cfg_hnd;
 135
 136        struct completion dma_comp;
 137        struct completion video_comp;
 138        struct mutex dev_mutex;
 139        struct mutex cmd_mutex;
 140        spinlock_t intr_lock; /* Protect interrupt ctrl register */
 141
 142        u32 err_work_state;
 143        struct work_struct err_work;
 144        struct work_struct hpd_work;
 145        struct workqueue_struct *workqueue;
 146
  147        /* DSI 6G TX buffer */
 148        struct drm_gem_object *tx_gem_obj;
 149
 150        /* DSI v2 TX buffer */
 151        void *tx_buf;
 152        dma_addr_t tx_buf_paddr;
 153
 154        int tx_size;
 155
 156        u8 *rx_buf;
 157
 158        struct regmap *sfpb;
 159
 160        struct drm_display_mode *mode;
 161
 162        /* connected device info */
 163        struct device_node *device_node;
 164        unsigned int channel;
 165        unsigned int lanes;
 166        enum mipi_dsi_pixel_format format;
 167        unsigned long mode_flags;
 168
 169        /* lane data parsed via DT */
 170        int dlane_swap;
 171        int num_data_lanes;
 172
 173        u32 dma_cmd_ctrl_restore;
 174
 175        bool registered;
 176        bool power_on;
 177        bool enabled;
 178        int irq;
 179};
 180
 181static u32 dsi_get_bpp(const enum mipi_dsi_pixel_format fmt)
 182{
 183        switch (fmt) {
 184        case MIPI_DSI_FMT_RGB565:               return 16;
 185        case MIPI_DSI_FMT_RGB666_PACKED:        return 18;
 186        case MIPI_DSI_FMT_RGB666:
 187        case MIPI_DSI_FMT_RGB888:
 188        default:                                return 24;
 189        }
 190}
 191
 192static inline u32 dsi_read(struct msm_dsi_host *msm_host, u32 reg)
 193{
 194        return msm_readl(msm_host->ctrl_base + reg);
 195}
 196static inline void dsi_write(struct msm_dsi_host *msm_host, u32 reg, u32 data)
 197{
 198        msm_writel(data, msm_host->ctrl_base + reg);
 199}
 200
 201static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host);
 202static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host);
 203
 204static const struct msm_dsi_cfg_handler *dsi_get_config(
 205                                                struct msm_dsi_host *msm_host)
 206{
 207        const struct msm_dsi_cfg_handler *cfg_hnd = NULL;
 208        struct device *dev = &msm_host->pdev->dev;
 209        struct regulator *gdsc_reg;
 210        struct clk *ahb_clk;
 211        int ret;
 212        u32 major = 0, minor = 0;
 213
 214        gdsc_reg = regulator_get(dev, "gdsc");
 215        if (IS_ERR(gdsc_reg)) {
 216                pr_err("%s: cannot get gdsc\n", __func__);
 217                goto exit;
 218        }
 219
 220        ahb_clk = msm_clk_get(msm_host->pdev, "iface");
 221        if (IS_ERR(ahb_clk)) {
 222                pr_err("%s: cannot get interface clock\n", __func__);
 223                goto put_gdsc;
 224        }
 225
 226        pm_runtime_get_sync(dev);
 227
 228        ret = regulator_enable(gdsc_reg);
 229        if (ret) {
 230                pr_err("%s: unable to enable gdsc\n", __func__);
 231                goto put_gdsc;
 232        }
 233
 234        ret = clk_prepare_enable(ahb_clk);
 235        if (ret) {
 236                pr_err("%s: unable to enable ahb_clk\n", __func__);
 237                goto disable_gdsc;
 238        }
 239
 240        ret = dsi_get_version(msm_host->ctrl_base, &major, &minor);
 241        if (ret) {
 242                pr_err("%s: Invalid version\n", __func__);
 243                goto disable_clks;
 244        }
 245
 246        cfg_hnd = msm_dsi_cfg_get(major, minor);
 247
 248        DBG("%s: Version %x:%x\n", __func__, major, minor);
 249
 250disable_clks:
 251        clk_disable_unprepare(ahb_clk);
 252disable_gdsc:
 253        regulator_disable(gdsc_reg);
 254        pm_runtime_put_sync(dev);
 255put_gdsc:
 256        regulator_put(gdsc_reg);
 257exit:
 258        return cfg_hnd;
 259}
 260
 261static inline struct msm_dsi_host *to_msm_dsi_host(struct mipi_dsi_host *host)
 262{
 263        return container_of(host, struct msm_dsi_host, base);
 264}
 265
 266static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host)
 267{
 268        struct regulator_bulk_data *s = msm_host->supplies;
 269        const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs;
 270        int num = msm_host->cfg_hnd->cfg->reg_cfg.num;
 271        int i;
 272
 273        DBG("");
 274        for (i = num - 1; i >= 0; i--)
 275                if (regs[i].disable_load >= 0)
 276                        regulator_set_load(s[i].consumer,
 277                                           regs[i].disable_load);
 278
 279        regulator_bulk_disable(num, s);
 280}
 281
 282static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host)
 283{
 284        struct regulator_bulk_data *s = msm_host->supplies;
 285        const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs;
 286        int num = msm_host->cfg_hnd->cfg->reg_cfg.num;
 287        int ret, i;
 288
 289        DBG("");
 290        for (i = 0; i < num; i++) {
 291                if (regs[i].enable_load >= 0) {
 292                        ret = regulator_set_load(s[i].consumer,
 293                                                 regs[i].enable_load);
 294                        if (ret < 0) {
 295                                pr_err("regulator %d set op mode failed, %d\n",
 296                                        i, ret);
 297                                goto fail;
 298                        }
 299                }
 300        }
 301
 302        ret = regulator_bulk_enable(num, s);
 303        if (ret < 0) {
 304                pr_err("regulator enable failed, %d\n", ret);
 305                goto fail;
 306        }
 307
 308        return 0;
 309
 310fail:
 311        for (i--; i >= 0; i--)
 312                regulator_set_load(s[i].consumer, regs[i].disable_load);
 313        return ret;
 314}
 315
 316static int dsi_regulator_init(struct msm_dsi_host *msm_host)
 317{
 318        struct regulator_bulk_data *s = msm_host->supplies;
 319        const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs;
 320        int num = msm_host->cfg_hnd->cfg->reg_cfg.num;
 321        int i, ret;
 322
 323        for (i = 0; i < num; i++)
 324                s[i].supply = regs[i].name;
 325
 326        ret = devm_regulator_bulk_get(&msm_host->pdev->dev, num, s);
 327        if (ret < 0) {
 328                pr_err("%s: failed to init regulator, ret=%d\n",
 329                                                __func__, ret);
 330                return ret;
 331        }
 332
 333        return 0;
 334}
 335
 336int dsi_clk_init_v2(struct msm_dsi_host *msm_host)
 337{
 338        struct platform_device *pdev = msm_host->pdev;
 339        int ret = 0;
 340
 341        msm_host->src_clk = msm_clk_get(pdev, "src");
 342
 343        if (IS_ERR(msm_host->src_clk)) {
 344                ret = PTR_ERR(msm_host->src_clk);
 345                pr_err("%s: can't find src clock. ret=%d\n",
 346                        __func__, ret);
 347                msm_host->src_clk = NULL;
 348                return ret;
 349        }
 350
 351        msm_host->esc_clk_src = clk_get_parent(msm_host->esc_clk);
 352        if (!msm_host->esc_clk_src) {
 353                ret = -ENODEV;
 354                pr_err("%s: can't get esc clock parent. ret=%d\n",
 355                        __func__, ret);
 356                return ret;
 357        }
 358
 359        msm_host->dsi_clk_src = clk_get_parent(msm_host->src_clk);
 360        if (!msm_host->dsi_clk_src) {
 361                ret = -ENODEV;
 362                pr_err("%s: can't get src clock parent. ret=%d\n",
 363                        __func__, ret);
 364        }
 365
 366        return ret;
 367}
 368
 369int dsi_clk_init_6g_v2(struct msm_dsi_host *msm_host)
 370{
 371        struct platform_device *pdev = msm_host->pdev;
 372        int ret = 0;
 373
 374        msm_host->byte_intf_clk = msm_clk_get(pdev, "byte_intf");
 375        if (IS_ERR(msm_host->byte_intf_clk)) {
 376                ret = PTR_ERR(msm_host->byte_intf_clk);
 377                pr_err("%s: can't find byte_intf clock. ret=%d\n",
 378                        __func__, ret);
 379        }
 380
 381        return ret;
 382}
 383
 384static int dsi_clk_init(struct msm_dsi_host *msm_host)
 385{
 386        struct platform_device *pdev = msm_host->pdev;
 387        const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
 388        const struct msm_dsi_config *cfg = cfg_hnd->cfg;
 389        int i, ret = 0;
 390
 391        /* get bus clocks */
 392        for (i = 0; i < cfg->num_bus_clks; i++) {
 393                msm_host->bus_clks[i] = msm_clk_get(pdev,
 394                                                cfg->bus_clk_names[i]);
 395                if (IS_ERR(msm_host->bus_clks[i])) {
 396                        ret = PTR_ERR(msm_host->bus_clks[i]);
 397                        pr_err("%s: Unable to get %s clock, ret = %d\n",
 398                                __func__, cfg->bus_clk_names[i], ret);
 399                        goto exit;
 400                }
 401        }
 402
 403        /* get link and source clocks */
 404        msm_host->byte_clk = msm_clk_get(pdev, "byte");
 405        if (IS_ERR(msm_host->byte_clk)) {
 406                ret = PTR_ERR(msm_host->byte_clk);
 407                pr_err("%s: can't find dsi_byte clock. ret=%d\n",
 408                        __func__, ret);
 409                msm_host->byte_clk = NULL;
 410                goto exit;
 411        }
 412
 413        msm_host->pixel_clk = msm_clk_get(pdev, "pixel");
 414        if (IS_ERR(msm_host->pixel_clk)) {
 415                ret = PTR_ERR(msm_host->pixel_clk);
 416                pr_err("%s: can't find dsi_pixel clock. ret=%d\n",
 417                        __func__, ret);
 418                msm_host->pixel_clk = NULL;
 419                goto exit;
 420        }
 421
 422        msm_host->esc_clk = msm_clk_get(pdev, "core");
 423        if (IS_ERR(msm_host->esc_clk)) {
 424                ret = PTR_ERR(msm_host->esc_clk);
 425                pr_err("%s: can't find dsi_esc clock. ret=%d\n",
 426                        __func__, ret);
 427                msm_host->esc_clk = NULL;
 428                goto exit;
 429        }
 430
 431        msm_host->byte_clk_src = clk_get_parent(msm_host->byte_clk);
 432        if (!msm_host->byte_clk_src) {
 433                ret = -ENODEV;
 434                pr_err("%s: can't find byte_clk clock. ret=%d\n", __func__, ret);
 435                goto exit;
 436        }
 437
 438        msm_host->pixel_clk_src = clk_get_parent(msm_host->pixel_clk);
 439        if (!msm_host->pixel_clk_src) {
 440                ret = -ENODEV;
 441                pr_err("%s: can't find pixel_clk clock. ret=%d\n", __func__, ret);
 442                goto exit;
 443        }
 444
 445        if (cfg_hnd->ops->clk_init_ver)
 446                ret = cfg_hnd->ops->clk_init_ver(msm_host);
 447exit:
 448        return ret;
 449}
 450
 451static int dsi_bus_clk_enable(struct msm_dsi_host *msm_host)
 452{
 453        const struct msm_dsi_config *cfg = msm_host->cfg_hnd->cfg;
 454        int i, ret;
 455
 456        DBG("id=%d", msm_host->id);
 457
 458        for (i = 0; i < cfg->num_bus_clks; i++) {
 459                ret = clk_prepare_enable(msm_host->bus_clks[i]);
 460                if (ret) {
 461                        pr_err("%s: failed to enable bus clock %d ret %d\n",
 462                                __func__, i, ret);
 463                        goto err;
 464                }
 465        }
 466
 467        return 0;
 468err:
  469        while (--i >= 0)
  470                clk_disable_unprepare(msm_host->bus_clks[i]);
 471
 472        return ret;
 473}
 474
 475static void dsi_bus_clk_disable(struct msm_dsi_host *msm_host)
 476{
 477        const struct msm_dsi_config *cfg = msm_host->cfg_hnd->cfg;
 478        int i;
 479
 480        DBG("");
 481
 482        for (i = cfg->num_bus_clks - 1; i >= 0; i--)
 483                clk_disable_unprepare(msm_host->bus_clks[i]);
 484}
 485
 486int msm_dsi_runtime_suspend(struct device *dev)
 487{
 488        struct platform_device *pdev = to_platform_device(dev);
 489        struct msm_dsi *msm_dsi = platform_get_drvdata(pdev);
 490        struct mipi_dsi_host *host = msm_dsi->host;
 491        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
 492
 493        if (!msm_host->cfg_hnd)
 494                return 0;
 495
 496        dsi_bus_clk_disable(msm_host);
 497
 498        return 0;
 499}
 500
 501int msm_dsi_runtime_resume(struct device *dev)
 502{
 503        struct platform_device *pdev = to_platform_device(dev);
 504        struct msm_dsi *msm_dsi = platform_get_drvdata(pdev);
 505        struct mipi_dsi_host *host = msm_dsi->host;
 506        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
 507
 508        if (!msm_host->cfg_hnd)
 509                return 0;
 510
 511        return dsi_bus_clk_enable(msm_host);
 512}
 513
 514int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
 515{
 516        int ret;
 517
 518        DBG("Set clk rates: pclk=%d, byteclk=%d",
 519                msm_host->mode->clock, msm_host->byte_clk_rate);
 520
 521        ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate);
 522        if (ret) {
 523                pr_err("%s: Failed to set rate byte clk, %d\n", __func__, ret);
 524                goto error;
 525        }
 526
 527        ret = clk_set_rate(msm_host->pixel_clk, msm_host->pixel_clk_rate);
 528        if (ret) {
 529                pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret);
 530                goto error;
 531        }
 532
 533        if (msm_host->byte_intf_clk) {
 534                ret = clk_set_rate(msm_host->byte_intf_clk,
 535                                   msm_host->byte_clk_rate / 2);
 536                if (ret) {
 537                        pr_err("%s: Failed to set rate byte intf clk, %d\n",
 538                               __func__, ret);
 539                        goto error;
 540                }
 541        }
 542
 543        ret = clk_prepare_enable(msm_host->esc_clk);
 544        if (ret) {
 545                pr_err("%s: Failed to enable dsi esc clk\n", __func__);
 546                goto error;
 547        }
 548
 549        ret = clk_prepare_enable(msm_host->byte_clk);
 550        if (ret) {
 551                pr_err("%s: Failed to enable dsi byte clk\n", __func__);
 552                goto byte_clk_err;
 553        }
 554
 555        ret = clk_prepare_enable(msm_host->pixel_clk);
 556        if (ret) {
 557                pr_err("%s: Failed to enable dsi pixel clk\n", __func__);
 558                goto pixel_clk_err;
 559        }
 560
 561        if (msm_host->byte_intf_clk) {
 562                ret = clk_prepare_enable(msm_host->byte_intf_clk);
 563                if (ret) {
 564                        pr_err("%s: Failed to enable byte intf clk\n",
 565                               __func__);
 566                        goto byte_intf_clk_err;
 567                }
 568        }
 569
 570        return 0;
 571
 572byte_intf_clk_err:
 573        clk_disable_unprepare(msm_host->pixel_clk);
 574pixel_clk_err:
 575        clk_disable_unprepare(msm_host->byte_clk);
 576byte_clk_err:
 577        clk_disable_unprepare(msm_host->esc_clk);
 578error:
 579        return ret;
 580}
 581
 582int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host)
 583{
 584        int ret;
 585
 586        DBG("Set clk rates: pclk=%d, byteclk=%d, esc_clk=%d, dsi_src_clk=%d",
 587                msm_host->mode->clock, msm_host->byte_clk_rate,
 588                msm_host->esc_clk_rate, msm_host->src_clk_rate);
 589
 590        ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate);
 591        if (ret) {
 592                pr_err("%s: Failed to set rate byte clk, %d\n", __func__, ret);
 593                goto error;
 594        }
 595
 596        ret = clk_set_rate(msm_host->esc_clk, msm_host->esc_clk_rate);
 597        if (ret) {
 598                pr_err("%s: Failed to set rate esc clk, %d\n", __func__, ret);
 599                goto error;
 600        }
 601
 602        ret = clk_set_rate(msm_host->src_clk, msm_host->src_clk_rate);
 603        if (ret) {
 604                pr_err("%s: Failed to set rate src clk, %d\n", __func__, ret);
 605                goto error;
 606        }
 607
 608        ret = clk_set_rate(msm_host->pixel_clk, msm_host->pixel_clk_rate);
 609        if (ret) {
 610                pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret);
 611                goto error;
 612        }
 613
 614        ret = clk_prepare_enable(msm_host->byte_clk);
 615        if (ret) {
 616                pr_err("%s: Failed to enable dsi byte clk\n", __func__);
 617                goto error;
 618        }
 619
 620        ret = clk_prepare_enable(msm_host->esc_clk);
 621        if (ret) {
 622                pr_err("%s: Failed to enable dsi esc clk\n", __func__);
 623                goto esc_clk_err;
 624        }
 625
 626        ret = clk_prepare_enable(msm_host->src_clk);
 627        if (ret) {
 628                pr_err("%s: Failed to enable dsi src clk\n", __func__);
 629                goto src_clk_err;
 630        }
 631
 632        ret = clk_prepare_enable(msm_host->pixel_clk);
 633        if (ret) {
 634                pr_err("%s: Failed to enable dsi pixel clk\n", __func__);
 635                goto pixel_clk_err;
 636        }
 637
 638        return 0;
 639
 640pixel_clk_err:
 641        clk_disable_unprepare(msm_host->src_clk);
 642src_clk_err:
 643        clk_disable_unprepare(msm_host->esc_clk);
 644esc_clk_err:
 645        clk_disable_unprepare(msm_host->byte_clk);
 646error:
 647        return ret;
 648}
 649
 650void dsi_link_clk_disable_6g(struct msm_dsi_host *msm_host)
 651{
 652        clk_disable_unprepare(msm_host->esc_clk);
 653        clk_disable_unprepare(msm_host->pixel_clk);
 654        if (msm_host->byte_intf_clk)
 655                clk_disable_unprepare(msm_host->byte_intf_clk);
 656        clk_disable_unprepare(msm_host->byte_clk);
 657}
 658
 659void dsi_link_clk_disable_v2(struct msm_dsi_host *msm_host)
 660{
 661        clk_disable_unprepare(msm_host->pixel_clk);
 662        clk_disable_unprepare(msm_host->src_clk);
 663        clk_disable_unprepare(msm_host->esc_clk);
 664        clk_disable_unprepare(msm_host->byte_clk);
 665}
 666
 667static u32 dsi_get_pclk_rate(struct msm_dsi_host *msm_host, bool is_dual_dsi)
 668{
 669        struct drm_display_mode *mode = msm_host->mode;
 670        u32 pclk_rate;
 671
 672        pclk_rate = mode->clock * 1000;
 673
 674        /*
  675         * For dual DSI mode, the current DRM mode has the complete width of the
  676         * panel. Since the complete panel is driven by two DSI controllers,
  677         * the clock rates have to be split between them. Adjust the byte and
  678         * pixel clock rates for each DSI host accordingly.
 679         */
 680        if (is_dual_dsi)
 681                pclk_rate /= 2;
 682
 683        return pclk_rate;
 684}
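/*
 * Worked example (annotation, not part of the original file, numbers are
 * made up): a dual DSI panel whose combined DRM mode reports
 * mode->clock = 267000 kHz gives pclk_rate = 267000000 Hz, and the
 * divide-by-two above leaves each of the two hosts with 133500000 Hz.
 */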
 685
 686static void dsi_calc_pclk(struct msm_dsi_host *msm_host, bool is_dual_dsi)
 687{
 688        u8 lanes = msm_host->lanes;
 689        u32 bpp = dsi_get_bpp(msm_host->format);
 690        u32 pclk_rate = dsi_get_pclk_rate(msm_host, is_dual_dsi);
 691        u64 pclk_bpp = (u64)pclk_rate * bpp;
 692
 693        if (lanes == 0) {
 694                pr_err("%s: forcing mdss_dsi lanes to 1\n", __func__);
 695                lanes = 1;
 696        }
 697
 698        do_div(pclk_bpp, (8 * lanes));
 699
 700        msm_host->pixel_clk_rate = pclk_rate;
 701        msm_host->byte_clk_rate = pclk_bpp;
 702
 703        DBG("pclk=%d, bclk=%d", msm_host->pixel_clk_rate,
 704                                msm_host->byte_clk_rate);
 705
 706}
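/*
 * Worked example (annotation, not part of the original file, numbers are
 * made up): a single-DSI RGB888 panel (bpp = 24) on 4 lanes with a
 * 148500 kHz pixel clock gives
 *
 *     byte_clk_rate = 148500000 * 24 / (8 * 4) = 111375000 Hz
 *
 * i.e. the byte clock is bpp / (8 * lanes) times the pixel clock.
 */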
 707
 708int dsi_calc_clk_rate_6g(struct msm_dsi_host *msm_host, bool is_dual_dsi)
 709{
 710        if (!msm_host->mode) {
 711                pr_err("%s: mode not set\n", __func__);
 712                return -EINVAL;
 713        }
 714
 715        dsi_calc_pclk(msm_host, is_dual_dsi);
 716        msm_host->esc_clk_rate = clk_get_rate(msm_host->esc_clk);
 717        return 0;
 718}
 719
 720int dsi_calc_clk_rate_v2(struct msm_dsi_host *msm_host, bool is_dual_dsi)
 721{
 722        u32 bpp = dsi_get_bpp(msm_host->format);
 723        u64 pclk_bpp;
 724        unsigned int esc_mhz, esc_div;
 725        unsigned long byte_mhz;
 726
 727        dsi_calc_pclk(msm_host, is_dual_dsi);
 728
 729        pclk_bpp = (u64)dsi_get_pclk_rate(msm_host, is_dual_dsi) * bpp;
 730        do_div(pclk_bpp, 8);
 731        msm_host->src_clk_rate = pclk_bpp;
 732
  733        /*
  734         * The esc clock is derived from the byte clock through a 4 bit
  735         * divider, so we need to find an escape clock frequency that is
  736         * both within the MIPI DSI spec range and reachable with that
  737         * divider. We iterate over escape clock frequencies from 20 MHz
  738         * down to 5 MHz and pick the first one that our divider can
  739         * produce.
  740         */
 741
 742        byte_mhz = msm_host->byte_clk_rate / 1000000;
 743
 744        for (esc_mhz = 20; esc_mhz >= 5; esc_mhz--) {
 745                esc_div = DIV_ROUND_UP(byte_mhz, esc_mhz);
 746
 747                /*
 748                 * TODO: Ideally, we shouldn't know what sort of divider
 749                 * is available in mmss_cc, we're just assuming that
 750                 * it'll always be a 4 bit divider. Need to come up with
 751                 * a better way here.
 752                 */
 753                if (esc_div >= 1 && esc_div <= 16)
 754                        break;
 755        }
 756
 757        if (esc_mhz < 5)
 758                return -EINVAL;
 759
 760        msm_host->esc_clk_rate = msm_host->byte_clk_rate / esc_div;
 761
 762        DBG("esc=%d, src=%d", msm_host->esc_clk_rate,
 763                msm_host->src_clk_rate);
 764
 765        return 0;
 766}
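/*
 * Worked example (annotation, not part of the original file): continuing
 * the made-up 111375000 Hz byte clock from above, byte_mhz = 111 and the
 * loop first tries esc_mhz = 20, giving esc_div = DIV_ROUND_UP(111, 20)
 * = 6. That fits the assumed 4 bit divider range (1..16), so
 * esc_clk_rate = 111375000 / 6 = 18562500 Hz, comfortably inside the
 * 5-20 MHz window the loop searches.
 */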
 767
 768static void dsi_intr_ctrl(struct msm_dsi_host *msm_host, u32 mask, int enable)
 769{
 770        u32 intr;
 771        unsigned long flags;
 772
 773        spin_lock_irqsave(&msm_host->intr_lock, flags);
 774        intr = dsi_read(msm_host, REG_DSI_INTR_CTRL);
 775
 776        if (enable)
 777                intr |= mask;
 778        else
 779                intr &= ~mask;
 780
 781        DBG("intr=%x enable=%d", intr, enable);
 782
 783        dsi_write(msm_host, REG_DSI_INTR_CTRL, intr);
 784        spin_unlock_irqrestore(&msm_host->intr_lock, flags);
 785}
 786
 787static inline enum dsi_traffic_mode dsi_get_traffic_mode(const u32 mode_flags)
 788{
 789        if (mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
 790                return BURST_MODE;
 791        else if (mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
 792                return NON_BURST_SYNCH_PULSE;
 793
 794        return NON_BURST_SYNCH_EVENT;
 795}
 796
 797static inline enum dsi_vid_dst_format dsi_get_vid_fmt(
 798                                const enum mipi_dsi_pixel_format mipi_fmt)
 799{
 800        switch (mipi_fmt) {
 801        case MIPI_DSI_FMT_RGB888:       return VID_DST_FORMAT_RGB888;
 802        case MIPI_DSI_FMT_RGB666:       return VID_DST_FORMAT_RGB666_LOOSE;
 803        case MIPI_DSI_FMT_RGB666_PACKED:        return VID_DST_FORMAT_RGB666;
 804        case MIPI_DSI_FMT_RGB565:       return VID_DST_FORMAT_RGB565;
 805        default:                        return VID_DST_FORMAT_RGB888;
 806        }
 807}
 808
 809static inline enum dsi_cmd_dst_format dsi_get_cmd_fmt(
 810                                const enum mipi_dsi_pixel_format mipi_fmt)
 811{
 812        switch (mipi_fmt) {
 813        case MIPI_DSI_FMT_RGB888:       return CMD_DST_FORMAT_RGB888;
 814        case MIPI_DSI_FMT_RGB666_PACKED:
 815        case MIPI_DSI_FMT_RGB666:       return CMD_DST_FORMAT_RGB666;
 816        case MIPI_DSI_FMT_RGB565:       return CMD_DST_FORMAT_RGB565;
 817        default:                        return CMD_DST_FORMAT_RGB888;
 818        }
 819}
 820
 821static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
 822                        struct msm_dsi_phy_shared_timings *phy_shared_timings)
 823{
 824        u32 flags = msm_host->mode_flags;
 825        enum mipi_dsi_pixel_format mipi_fmt = msm_host->format;
 826        const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
 827        u32 data = 0;
 828
 829        if (!enable) {
 830                dsi_write(msm_host, REG_DSI_CTRL, 0);
 831                return;
 832        }
 833
 834        if (flags & MIPI_DSI_MODE_VIDEO) {
 835                if (flags & MIPI_DSI_MODE_VIDEO_HSE)
 836                        data |= DSI_VID_CFG0_PULSE_MODE_HSA_HE;
 837                if (flags & MIPI_DSI_MODE_VIDEO_HFP)
 838                        data |= DSI_VID_CFG0_HFP_POWER_STOP;
 839                if (flags & MIPI_DSI_MODE_VIDEO_HBP)
 840                        data |= DSI_VID_CFG0_HBP_POWER_STOP;
 841                if (flags & MIPI_DSI_MODE_VIDEO_HSA)
 842                        data |= DSI_VID_CFG0_HSA_POWER_STOP;
 843                /* Always set low power stop mode for BLLP
 844                 * to let command engine send packets
 845                 */
 846                data |= DSI_VID_CFG0_EOF_BLLP_POWER_STOP |
 847                        DSI_VID_CFG0_BLLP_POWER_STOP;
 848                data |= DSI_VID_CFG0_TRAFFIC_MODE(dsi_get_traffic_mode(flags));
 849                data |= DSI_VID_CFG0_DST_FORMAT(dsi_get_vid_fmt(mipi_fmt));
 850                data |= DSI_VID_CFG0_VIRT_CHANNEL(msm_host->channel);
 851                dsi_write(msm_host, REG_DSI_VID_CFG0, data);
 852
 853                /* Do not swap RGB colors */
 854                data = DSI_VID_CFG1_RGB_SWAP(SWAP_RGB);
  855                dsi_write(msm_host, REG_DSI_VID_CFG1, data);
 856        } else {
 857                /* Do not swap RGB colors */
 858                data = DSI_CMD_CFG0_RGB_SWAP(SWAP_RGB);
 859                data |= DSI_CMD_CFG0_DST_FORMAT(dsi_get_cmd_fmt(mipi_fmt));
 860                dsi_write(msm_host, REG_DSI_CMD_CFG0, data);
 861
 862                data = DSI_CMD_CFG1_WR_MEM_START(MIPI_DCS_WRITE_MEMORY_START) |
 863                        DSI_CMD_CFG1_WR_MEM_CONTINUE(
 864                                        MIPI_DCS_WRITE_MEMORY_CONTINUE);
 865                /* Always insert DCS command */
 866                data |= DSI_CMD_CFG1_INSERT_DCS_COMMAND;
 867                dsi_write(msm_host, REG_DSI_CMD_CFG1, data);
 868        }
 869
 870        dsi_write(msm_host, REG_DSI_CMD_DMA_CTRL,
 871                        DSI_CMD_DMA_CTRL_FROM_FRAME_BUFFER |
 872                        DSI_CMD_DMA_CTRL_LOW_POWER);
 873
 874        data = 0;
 875        /* Always assume dedicated TE pin */
 876        data |= DSI_TRIG_CTRL_TE;
 877        data |= DSI_TRIG_CTRL_MDP_TRIGGER(TRIGGER_NONE);
 878        data |= DSI_TRIG_CTRL_DMA_TRIGGER(TRIGGER_SW);
 879        data |= DSI_TRIG_CTRL_STREAM(msm_host->channel);
 880        if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) &&
 881                (cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V1_2))
 882                data |= DSI_TRIG_CTRL_BLOCK_DMA_WITHIN_FRAME;
 883        dsi_write(msm_host, REG_DSI_TRIG_CTRL, data);
 884
 885        data = DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(phy_shared_timings->clk_post) |
 886                DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE(phy_shared_timings->clk_pre);
 887        dsi_write(msm_host, REG_DSI_CLKOUT_TIMING_CTRL, data);
 888
 889        if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) &&
 890            (cfg_hnd->minor > MSM_DSI_6G_VER_MINOR_V1_0) &&
 891            phy_shared_timings->clk_pre_inc_by_2)
 892                dsi_write(msm_host, REG_DSI_T_CLK_PRE_EXTEND,
 893                          DSI_T_CLK_PRE_EXTEND_INC_BY_2_BYTECLK);
 894
 895        data = 0;
 896        if (!(flags & MIPI_DSI_MODE_EOT_PACKET))
 897                data |= DSI_EOT_PACKET_CTRL_TX_EOT_APPEND;
 898        dsi_write(msm_host, REG_DSI_EOT_PACKET_CTRL, data);
 899
 900        /* allow only ack-err-status to generate interrupt */
 901        dsi_write(msm_host, REG_DSI_ERR_INT_MASK0, 0x13ff3fe0);
 902
 903        dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 1);
 904
 905        dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
 906
 907        data = DSI_CTRL_CLK_EN;
 908
 909        DBG("lane number=%d", msm_host->lanes);
 910        data |= ((DSI_CTRL_LANE0 << msm_host->lanes) - DSI_CTRL_LANE0);
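        /*
         * Annotation (not part of the original file): the line above turns
         * the lane count into a contiguous mask of lane-enable bits. For
         * example, with 4 lanes (DSI_CTRL_LANE0 << 4) - DSI_CTRL_LANE0
         * equals 15 * DSI_CTRL_LANE0, i.e. LANE0 | LANE1 | LANE2 | LANE3,
         * assuming the four lane bits are consecutive (as dsi.xml.h
         * defines them).
         */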
 911
 912        dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL,
 913                  DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(msm_host->dlane_swap));
 914
 915        if (!(flags & MIPI_DSI_CLOCK_NON_CONTINUOUS))
 916                dsi_write(msm_host, REG_DSI_LANE_CTRL,
 917                        DSI_LANE_CTRL_CLKLN_HS_FORCE_REQUEST);
 918
 919        data |= DSI_CTRL_ENABLE;
 920
 921        dsi_write(msm_host, REG_DSI_CTRL, data);
 922}
 923
 924static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_dual_dsi)
 925{
 926        struct drm_display_mode *mode = msm_host->mode;
 927        u32 hs_start = 0, vs_start = 0; /* take sync start as 0 */
 928        u32 h_total = mode->htotal;
 929        u32 v_total = mode->vtotal;
 930        u32 hs_end = mode->hsync_end - mode->hsync_start;
 931        u32 vs_end = mode->vsync_end - mode->vsync_start;
 932        u32 ha_start = h_total - mode->hsync_start;
 933        u32 ha_end = ha_start + mode->hdisplay;
 934        u32 va_start = v_total - mode->vsync_start;
 935        u32 va_end = va_start + mode->vdisplay;
 936        u32 hdisplay = mode->hdisplay;
 937        u32 wc;
 938
 939        DBG("");
 940
 941        /*
 942         * For dual DSI mode, the current DRM mode has
  943         * the complete width of the panel. Since the complete
  944         * panel is driven by two DSI controllers, the horizontal
  945         * timings have to be split between the two controllers.
 946         * Adjust the DSI host timing values accordingly.
 947         */
 948        if (is_dual_dsi) {
 949                h_total /= 2;
 950                hs_end /= 2;
 951                ha_start /= 2;
 952                ha_end /= 2;
 953                hdisplay /= 2;
 954        }
 955
 956        if (msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) {
 957                dsi_write(msm_host, REG_DSI_ACTIVE_H,
 958                        DSI_ACTIVE_H_START(ha_start) |
 959                        DSI_ACTIVE_H_END(ha_end));
 960                dsi_write(msm_host, REG_DSI_ACTIVE_V,
 961                        DSI_ACTIVE_V_START(va_start) |
 962                        DSI_ACTIVE_V_END(va_end));
 963                dsi_write(msm_host, REG_DSI_TOTAL,
 964                        DSI_TOTAL_H_TOTAL(h_total - 1) |
 965                        DSI_TOTAL_V_TOTAL(v_total - 1));
 966
 967                dsi_write(msm_host, REG_DSI_ACTIVE_HSYNC,
 968                        DSI_ACTIVE_HSYNC_START(hs_start) |
 969                        DSI_ACTIVE_HSYNC_END(hs_end));
 970                dsi_write(msm_host, REG_DSI_ACTIVE_VSYNC_HPOS, 0);
 971                dsi_write(msm_host, REG_DSI_ACTIVE_VSYNC_VPOS,
 972                        DSI_ACTIVE_VSYNC_VPOS_START(vs_start) |
 973                        DSI_ACTIVE_VSYNC_VPOS_END(vs_end));
 974        } else {                /* command mode */
 975                /* image data and 1 byte write_memory_start cmd */
 976                wc = hdisplay * dsi_get_bpp(msm_host->format) / 8 + 1;
 977
 978                dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM_CTRL,
 979                        DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT(wc) |
 980                        DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL(
 981                                        msm_host->channel) |
 982                        DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE(
 983                                        MIPI_DSI_DCS_LONG_WRITE));
 984
 985                dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM_TOTAL,
 986                        DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL(hdisplay) |
 987                        DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL(mode->vdisplay));
 988        }
 989}
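/*
 * Worked example (annotation, not part of the original file, numbers are
 * made up): for a dual DSI panel whose combined mode is 2160 pixels wide
 * with htotal = 2300, the divide-by-two above programs each host with
 * hdisplay = 1080 and h_total = 1150, while the vertical timings stay
 * untouched because each controller drives a full-height half of the
 * panel.
 */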
 990
 991static void dsi_sw_reset(struct msm_dsi_host *msm_host)
 992{
 993        dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
 994        wmb(); /* clocks need to be enabled before reset */
 995
 996        dsi_write(msm_host, REG_DSI_RESET, 1);
 997        wmb(); /* make sure reset happen */
 998        dsi_write(msm_host, REG_DSI_RESET, 0);
 999}
1000
1001static void dsi_op_mode_config(struct msm_dsi_host *msm_host,
1002                                        bool video_mode, bool enable)
1003{
1004        u32 dsi_ctrl;
1005
1006        dsi_ctrl = dsi_read(msm_host, REG_DSI_CTRL);
1007
1008        if (!enable) {
1009                dsi_ctrl &= ~(DSI_CTRL_ENABLE | DSI_CTRL_VID_MODE_EN |
1010                                DSI_CTRL_CMD_MODE_EN);
1011                dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_MDP_DONE |
1012                                        DSI_IRQ_MASK_VIDEO_DONE, 0);
1013        } else {
1014                if (video_mode) {
1015                        dsi_ctrl |= DSI_CTRL_VID_MODE_EN;
1016                } else {                /* command mode */
1017                        dsi_ctrl |= DSI_CTRL_CMD_MODE_EN;
1018                        dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_MDP_DONE, 1);
1019                }
1020                dsi_ctrl |= DSI_CTRL_ENABLE;
1021        }
1022
1023        dsi_write(msm_host, REG_DSI_CTRL, dsi_ctrl);
1024}
1025
1026static void dsi_set_tx_power_mode(int mode, struct msm_dsi_host *msm_host)
1027{
1028        u32 data;
1029
1030        data = dsi_read(msm_host, REG_DSI_CMD_DMA_CTRL);
1031
1032        if (mode == 0)
1033                data &= ~DSI_CMD_DMA_CTRL_LOW_POWER;
1034        else
1035                data |= DSI_CMD_DMA_CTRL_LOW_POWER;
1036
1037        dsi_write(msm_host, REG_DSI_CMD_DMA_CTRL, data);
1038}
1039
1040static void dsi_wait4video_done(struct msm_dsi_host *msm_host)
1041{
1042        u32 ret = 0;
1043        struct device *dev = &msm_host->pdev->dev;
1044
1045        dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 1);
1046
1047        reinit_completion(&msm_host->video_comp);
1048
1049        ret = wait_for_completion_timeout(&msm_host->video_comp,
1050                        msecs_to_jiffies(70));
1051
1052        if (ret <= 0)
1053                DRM_DEV_ERROR(dev, "wait for video done timed out\n");
1054
1055        dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 0);
1056}
1057
1058static void dsi_wait4video_eng_busy(struct msm_dsi_host *msm_host)
1059{
1060        if (!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO))
1061                return;
1062
1063        if (msm_host->power_on && msm_host->enabled) {
1064                dsi_wait4video_done(msm_host);
 1065                /* delay 2 to 4 ms to skip the BLLP */
1066                usleep_range(2000, 4000);
1067        }
1068}
1069
1070int dsi_tx_buf_alloc_6g(struct msm_dsi_host *msm_host, int size)
1071{
1072        struct drm_device *dev = msm_host->dev;
1073        struct msm_drm_private *priv = dev->dev_private;
1074        uint64_t iova;
1075        u8 *data;
1076
1077        data = msm_gem_kernel_new(dev, size, MSM_BO_UNCACHED,
1078                                        priv->kms->aspace,
1079                                        &msm_host->tx_gem_obj, &iova);
1080
1081        if (IS_ERR(data)) {
1082                msm_host->tx_gem_obj = NULL;
1083                return PTR_ERR(data);
1084        }
1085
1086        msm_gem_object_set_name(msm_host->tx_gem_obj, "tx_gem");
1087
1088        msm_host->tx_size = msm_host->tx_gem_obj->size;
1089
1090        return 0;
1091}
1092
1093int dsi_tx_buf_alloc_v2(struct msm_dsi_host *msm_host, int size)
1094{
1095        struct drm_device *dev = msm_host->dev;
1096
1097        msm_host->tx_buf = dma_alloc_coherent(dev->dev, size,
1098                                        &msm_host->tx_buf_paddr, GFP_KERNEL);
1099        if (!msm_host->tx_buf)
1100                return -ENOMEM;
1101
1102        msm_host->tx_size = size;
1103
1104        return 0;
1105}
1106
1107static void dsi_tx_buf_free(struct msm_dsi_host *msm_host)
1108{
1109        struct drm_device *dev = msm_host->dev;
1110        struct msm_drm_private *priv;
1111
1112        /*
1113         * This is possible if we're tearing down before we've had a chance to
1114         * fully initialize. A very real possibility if our probe is deferred,
1115         * in which case we'll hit msm_dsi_host_destroy() without having run
1116         * through the dsi_tx_buf_alloc().
1117         */
1118        if (!dev)
1119                return;
1120
1121        priv = dev->dev_private;
1122        if (msm_host->tx_gem_obj) {
1123                msm_gem_unpin_iova(msm_host->tx_gem_obj, priv->kms->aspace);
1124                drm_gem_object_put_unlocked(msm_host->tx_gem_obj);
1125                msm_host->tx_gem_obj = NULL;
1126        }
1127
1128        if (msm_host->tx_buf)
1129                dma_free_coherent(dev->dev, msm_host->tx_size, msm_host->tx_buf,
1130                        msm_host->tx_buf_paddr);
1131}
1132
1133void *dsi_tx_buf_get_6g(struct msm_dsi_host *msm_host)
1134{
1135        return msm_gem_get_vaddr(msm_host->tx_gem_obj);
1136}
1137
1138void *dsi_tx_buf_get_v2(struct msm_dsi_host *msm_host)
1139{
1140        return msm_host->tx_buf;
1141}
1142
1143void dsi_tx_buf_put_6g(struct msm_dsi_host *msm_host)
1144{
1145        msm_gem_put_vaddr(msm_host->tx_gem_obj);
1146}
1147
1148/*
1149 * prepare cmd buffer to be txed
1150 */
1151static int dsi_cmd_dma_add(struct msm_dsi_host *msm_host,
1152                           const struct mipi_dsi_msg *msg)
1153{
1154        const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
1155        struct mipi_dsi_packet packet;
1156        int len;
1157        int ret;
1158        u8 *data;
1159
1160        ret = mipi_dsi_create_packet(&packet, msg);
1161        if (ret) {
1162                pr_err("%s: create packet failed, %d\n", __func__, ret);
1163                return ret;
1164        }
1165        len = (packet.size + 3) & (~0x3);
1166
1167        if (len > msm_host->tx_size) {
1168                pr_err("%s: packet size is too big\n", __func__);
1169                return -EINVAL;
1170        }
1171
1172        data = cfg_hnd->ops->tx_buf_get(msm_host);
1173        if (IS_ERR(data)) {
1174                ret = PTR_ERR(data);
1175                pr_err("%s: get vaddr failed, %d\n", __func__, ret);
1176                return ret;
1177        }
1178
1179        /* MSM specific command format in memory */
1180        data[0] = packet.header[1];
1181        data[1] = packet.header[2];
1182        data[2] = packet.header[0];
1183        data[3] = BIT(7); /* Last packet */
1184        if (mipi_dsi_packet_format_is_long(msg->type))
1185                data[3] |= BIT(6);
1186        if (msg->rx_buf && msg->rx_len)
1187                data[3] |= BIT(5);
1188
1189        /* Long packet */
1190        if (packet.payload && packet.payload_length)
1191                memcpy(data + 4, packet.payload, packet.payload_length);
1192
1193        /* Append 0xff to the end */
1194        if (packet.size < len)
1195                memset(data + packet.size, 0xff, len - packet.size);
1196
1197        if (cfg_hnd->ops->tx_buf_put)
1198                cfg_hnd->ops->tx_buf_put(msm_host);
1199
1200        return len;
1201}
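/*
 * Illustrative layout (annotation, not part of the original file): for a
 * DCS short write with no parameter, e.g. MIPI_DCS_SET_DISPLAY_ON (0x29)
 * on virtual channel 0, mipi_dsi_create_packet() builds the header
 * { 0x05, 0x29, 0x00 }, so the MSM-ordered buffer above becomes
 *
 *     data[0..3] = { 0x29, 0x00, 0x05, 0x80 }
 *
 * i.e. payload byte, second byte, data type, then the "last packet"
 * flag; BIT(6) is clear because it is a short packet and BIT(5) is clear
 * because nothing is read back.
 */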
1202
1203/*
1204 * dsi_short_read1_resp: 1 parameter
1205 */
1206static int dsi_short_read1_resp(u8 *buf, const struct mipi_dsi_msg *msg)
1207{
1208        u8 *data = msg->rx_buf;
1209        if (data && (msg->rx_len >= 1)) {
1210                *data = buf[1]; /* strip out dcs type */
1211                return 1;
1212        } else {
1213                pr_err("%s: read data does not match with rx_buf len %zu\n",
1214                        __func__, msg->rx_len);
1215                return -EINVAL;
1216        }
1217}
1218
1219/*
1220 * dsi_short_read2_resp: 2 parameter
1221 */
1222static int dsi_short_read2_resp(u8 *buf, const struct mipi_dsi_msg *msg)
1223{
1224        u8 *data = msg->rx_buf;
1225        if (data && (msg->rx_len >= 2)) {
1226                data[0] = buf[1]; /* strip out dcs type */
1227                data[1] = buf[2];
1228                return 2;
1229        } else {
1230                pr_err("%s: read data does not match with rx_buf len %zu\n",
1231                        __func__, msg->rx_len);
1232                return -EINVAL;
1233        }
1234}
1235
1236static int dsi_long_read_resp(u8 *buf, const struct mipi_dsi_msg *msg)
1237{
1238        /* strip out 4 byte dcs header */
1239        if (msg->rx_buf && msg->rx_len)
1240                memcpy(msg->rx_buf, buf + 4, msg->rx_len);
1241
1242        return msg->rx_len;
1243}
1244
1245int dsi_dma_base_get_6g(struct msm_dsi_host *msm_host, uint64_t *dma_base)
1246{
1247        struct drm_device *dev = msm_host->dev;
1248        struct msm_drm_private *priv = dev->dev_private;
1249
1250        if (!dma_base)
1251                return -EINVAL;
1252
1253        return msm_gem_get_and_pin_iova(msm_host->tx_gem_obj,
1254                                priv->kms->aspace, dma_base);
1255}
1256
1257int dsi_dma_base_get_v2(struct msm_dsi_host *msm_host, uint64_t *dma_base)
1258{
1259        if (!dma_base)
1260                return -EINVAL;
1261
1262        *dma_base = msm_host->tx_buf_paddr;
1263        return 0;
1264}
1265
1266static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
1267{
1268        const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
1269        int ret;
1270        uint64_t dma_base;
1271        bool triggered;
1272
1273        ret = cfg_hnd->ops->dma_base_get(msm_host, &dma_base);
1274        if (ret) {
1275                pr_err("%s: failed to get iova: %d\n", __func__, ret);
1276                return ret;
1277        }
1278
1279        reinit_completion(&msm_host->dma_comp);
1280
1281        dsi_wait4video_eng_busy(msm_host);
1282
1283        triggered = msm_dsi_manager_cmd_xfer_trigger(
1284                                                msm_host->id, dma_base, len);
1285        if (triggered) {
1286                ret = wait_for_completion_timeout(&msm_host->dma_comp,
1287                                        msecs_to_jiffies(200));
1288                DBG("ret=%d", ret);
1289                if (ret == 0)
1290                        ret = -ETIMEDOUT;
1291                else
1292                        ret = len;
1293        } else
1294                ret = len;
1295
1296        return ret;
1297}
1298
1299static int dsi_cmd_dma_rx(struct msm_dsi_host *msm_host,
1300                        u8 *buf, int rx_byte, int pkt_size)
1301{
1302        u32 *lp, *temp, data;
1303        int i, j = 0, cnt;
1304        u32 read_cnt;
1305        u8 reg[16];
1306        int repeated_bytes = 0;
1307        int buf_offset = buf - msm_host->rx_buf;
1308
1309        lp = (u32 *)buf;
1310        temp = (u32 *)reg;
1311        cnt = (rx_byte + 3) >> 2;
1312        if (cnt > 4)
1313                cnt = 4; /* 4 x 32 bits registers only */
1314
1315        if (rx_byte == 4)
1316                read_cnt = 4;
1317        else
1318                read_cnt = pkt_size + 6;
1319
1320        /*
 1321         * In case of multiple reads from the panel, after the first read there
 1322         * is a possibility that some bytes of the payload repeat in the
 1323         * RDBK_DATA registers, since we read all the parameters from the
 1324         * panel starting from the first byte on every pass. We need to skip the
 1325         * repeated bytes and then append the new parameters to the rx buffer.
1326         */
1327        if (read_cnt > 16) {
1328                int bytes_shifted;
1329                /* Any data more than 16 bytes will be shifted out.
1330                 * The temp read buffer should already contain these bytes.
1331                 * The remaining bytes in read buffer are the repeated bytes.
1332                 */
1333                bytes_shifted = read_cnt - 16;
1334                repeated_bytes = buf_offset - bytes_shifted;
1335        }
1336
1337        for (i = cnt - 1; i >= 0; i--) {
1338                data = dsi_read(msm_host, REG_DSI_RDBK_DATA(i));
1339                *temp++ = ntohl(data); /* to host byte order */
1340                DBG("data = 0x%x and ntohl(data) = 0x%x", data, ntohl(data));
1341        }
1342
1343        for (i = repeated_bytes; i < 16; i++)
1344                buf[j++] = reg[i];
1345
1346        return j;
1347}
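/*
 * Worked example (annotation, not part of the original file): for a long
 * read where the caller used pkt_size = 10 and rx_byte = 16, cnt = 4 and
 * read_cnt = pkt_size + 6 = 16, so all four RDBK_DATA registers are
 * drained in reverse order and converted to host byte order with
 * ntohl(); read_cnt does not exceed 16, so repeated_bytes stays 0 and
 * all 16 bytes are copied into the caller's buffer.
 */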
1348
1349static int dsi_cmds2buf_tx(struct msm_dsi_host *msm_host,
1350                                const struct mipi_dsi_msg *msg)
1351{
1352        int len, ret;
1353        int bllp_len = msm_host->mode->hdisplay *
1354                        dsi_get_bpp(msm_host->format) / 8;
1355
1356        len = dsi_cmd_dma_add(msm_host, msg);
 1357        if (len <= 0) {
1358                pr_err("%s: failed to add cmd type = 0x%x\n",
1359                        __func__,  msg->type);
1360                return -EINVAL;
1361        }
1362
 1363        /* For video mode, do not send cmds longer than
 1364         * one pixel line, since they can only be transmitted
 1365         * during the BLLP.
 1366         */
 1367        /* TODO: if the command is sent in LP mode, the bit rate is only
 1368         * half of the esc clk rate. In this case, if video is already
 1369         * actively streaming, we need to check more carefully whether the
 1370         * command can fit into one BLLP.
 1371         */
1372        if ((msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) && (len > bllp_len)) {
1373                pr_err("%s: cmd cannot fit into BLLP period, len=%d\n",
1374                        __func__, len);
1375                return -EINVAL;
1376        }
1377
1378        ret = dsi_cmd_dma_tx(msm_host, len);
1379        if (ret < len) {
1380                pr_err("%s: cmd dma tx failed, type=0x%x, data0=0x%x, len=%d\n",
1381                        __func__, msg->type, (*(u8 *)(msg->tx_buf)), len);
1382                return -ECOMM;
1383        }
1384
1385        return len;
1386}
1387
1388static void dsi_sw_reset_restore(struct msm_dsi_host *msm_host)
1389{
1390        u32 data0, data1;
1391
1392        data0 = dsi_read(msm_host, REG_DSI_CTRL);
1393        data1 = data0;
1394        data1 &= ~DSI_CTRL_ENABLE;
1395        dsi_write(msm_host, REG_DSI_CTRL, data1);
1396        /*
 1397         * The DSI controller needs to be disabled before
 1398         * the clocks are turned on
1399         */
1400        wmb();
1401
1402        dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
1403        wmb();  /* make sure clocks enabled */
1404
1405        /* dsi controller can only be reset while clocks are running */
1406        dsi_write(msm_host, REG_DSI_RESET, 1);
1407        wmb();  /* make sure reset happen */
1408        dsi_write(msm_host, REG_DSI_RESET, 0);
1409        wmb();  /* controller out of reset */
1410        dsi_write(msm_host, REG_DSI_CTRL, data0);
1411        wmb();  /* make sure dsi controller enabled again */
1412}
1413
1414static void dsi_hpd_worker(struct work_struct *work)
1415{
1416        struct msm_dsi_host *msm_host =
1417                container_of(work, struct msm_dsi_host, hpd_work);
1418
1419        drm_helper_hpd_irq_event(msm_host->dev);
1420}
1421
1422static void dsi_err_worker(struct work_struct *work)
1423{
1424        struct msm_dsi_host *msm_host =
1425                container_of(work, struct msm_dsi_host, err_work);
1426        u32 status = msm_host->err_work_state;
1427
1428        pr_err_ratelimited("%s: status=%x\n", __func__, status);
1429        if (status & DSI_ERR_STATE_MDP_FIFO_UNDERFLOW)
1430                dsi_sw_reset_restore(msm_host);
1431
1432        /* It is safe to clear here because error irq is disabled. */
1433        msm_host->err_work_state = 0;
1434
1435        /* enable dsi error interrupt */
1436        dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 1);
1437}
1438
1439static void dsi_ack_err_status(struct msm_dsi_host *msm_host)
1440{
1441        u32 status;
1442
1443        status = dsi_read(msm_host, REG_DSI_ACK_ERR_STATUS);
1444
1445        if (status) {
1446                dsi_write(msm_host, REG_DSI_ACK_ERR_STATUS, status);
1447                /* Writing of an extra 0 needed to clear error bits */
1448                dsi_write(msm_host, REG_DSI_ACK_ERR_STATUS, 0);
1449                msm_host->err_work_state |= DSI_ERR_STATE_ACK;
1450        }
1451}
1452
1453static void dsi_timeout_status(struct msm_dsi_host *msm_host)
1454{
1455        u32 status;
1456
1457        status = dsi_read(msm_host, REG_DSI_TIMEOUT_STATUS);
1458
1459        if (status) {
1460                dsi_write(msm_host, REG_DSI_TIMEOUT_STATUS, status);
1461                msm_host->err_work_state |= DSI_ERR_STATE_TIMEOUT;
1462        }
1463}
1464
1465static void dsi_dln0_phy_err(struct msm_dsi_host *msm_host)
1466{
1467        u32 status;
1468
1469        status = dsi_read(msm_host, REG_DSI_DLN0_PHY_ERR);
1470
1471        if (status & (DSI_DLN0_PHY_ERR_DLN0_ERR_ESC |
1472                        DSI_DLN0_PHY_ERR_DLN0_ERR_SYNC_ESC |
1473                        DSI_DLN0_PHY_ERR_DLN0_ERR_CONTROL |
1474                        DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP0 |
1475                        DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP1)) {
1476                dsi_write(msm_host, REG_DSI_DLN0_PHY_ERR, status);
1477                msm_host->err_work_state |= DSI_ERR_STATE_DLN0_PHY;
1478        }
1479}
1480
1481static void dsi_fifo_status(struct msm_dsi_host *msm_host)
1482{
1483        u32 status;
1484
1485        status = dsi_read(msm_host, REG_DSI_FIFO_STATUS);
1486
1487        /* fifo underflow, overflow */
1488        if (status) {
1489                dsi_write(msm_host, REG_DSI_FIFO_STATUS, status);
1490                msm_host->err_work_state |= DSI_ERR_STATE_FIFO;
1491                if (status & DSI_FIFO_STATUS_CMD_MDP_FIFO_UNDERFLOW)
1492                        msm_host->err_work_state |=
1493                                        DSI_ERR_STATE_MDP_FIFO_UNDERFLOW;
1494        }
1495}
1496
1497static void dsi_status(struct msm_dsi_host *msm_host)
1498{
1499        u32 status;
1500
1501        status = dsi_read(msm_host, REG_DSI_STATUS0);
1502
1503        if (status & DSI_STATUS0_INTERLEAVE_OP_CONTENTION) {
1504                dsi_write(msm_host, REG_DSI_STATUS0, status);
1505                msm_host->err_work_state |=
1506                        DSI_ERR_STATE_INTERLEAVE_OP_CONTENTION;
1507        }
1508}
1509
1510static void dsi_clk_status(struct msm_dsi_host *msm_host)
1511{
1512        u32 status;
1513
1514        status = dsi_read(msm_host, REG_DSI_CLK_STATUS);
1515
1516        if (status & DSI_CLK_STATUS_PLL_UNLOCKED) {
1517                dsi_write(msm_host, REG_DSI_CLK_STATUS, status);
1518                msm_host->err_work_state |= DSI_ERR_STATE_PLL_UNLOCKED;
1519        }
1520}
1521
1522static void dsi_error(struct msm_dsi_host *msm_host)
1523{
1524        /* disable dsi error interrupt */
1525        dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 0);
1526
1527        dsi_clk_status(msm_host);
1528        dsi_fifo_status(msm_host);
1529        dsi_ack_err_status(msm_host);
1530        dsi_timeout_status(msm_host);
1531        dsi_status(msm_host);
1532        dsi_dln0_phy_err(msm_host);
1533
1534        queue_work(msm_host->workqueue, &msm_host->err_work);
1535}
1536
1537static irqreturn_t dsi_host_irq(int irq, void *ptr)
1538{
1539        struct msm_dsi_host *msm_host = ptr;
1540        u32 isr;
1541        unsigned long flags;
1542
1543        if (!msm_host->ctrl_base)
1544                return IRQ_HANDLED;
1545
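            /*
             * Read and ack the latched status bits under intr_lock, since the
             * same INTR_CTRL register also carries the interrupt enable bits
             * written by dsi_intr_ctrl().
             */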
1546        spin_lock_irqsave(&msm_host->intr_lock, flags);
1547        isr = dsi_read(msm_host, REG_DSI_INTR_CTRL);
1548        dsi_write(msm_host, REG_DSI_INTR_CTRL, isr);
1549        spin_unlock_irqrestore(&msm_host->intr_lock, flags);
1550
1551        DBG("isr=0x%x, id=%d", isr, msm_host->id);
1552
1553        if (isr & DSI_IRQ_ERROR)
1554                dsi_error(msm_host);
1555
1556        if (isr & DSI_IRQ_VIDEO_DONE)
1557                complete(&msm_host->video_comp);
1558
1559        if (isr & DSI_IRQ_CMD_DMA_DONE)
1560                complete(&msm_host->dma_comp);
1561
1562        return IRQ_HANDLED;
1563}
1564
1565static int dsi_host_init_panel_gpios(struct msm_dsi_host *msm_host,
1566                        struct device *panel_device)
1567{
1568        msm_host->disp_en_gpio = devm_gpiod_get_optional(panel_device,
1569                                                         "disp-enable",
1570                                                         GPIOD_OUT_LOW);
1571        if (IS_ERR(msm_host->disp_en_gpio)) {
1572                DBG("cannot get disp-enable-gpios %ld",
1573                                PTR_ERR(msm_host->disp_en_gpio));
1574                return PTR_ERR(msm_host->disp_en_gpio);
1575        }
1576
1577        msm_host->te_gpio = devm_gpiod_get_optional(panel_device, "disp-te",
1578                                                                GPIOD_IN);
1579        if (IS_ERR(msm_host->te_gpio)) {
1580                DBG("cannot get disp-te-gpios %ld", PTR_ERR(msm_host->te_gpio));
1581                return PTR_ERR(msm_host->te_gpio);
1582        }
1583
1584        return 0;
1585}
1586
1587static int dsi_host_attach(struct mipi_dsi_host *host,
1588                                        struct mipi_dsi_device *dsi)
1589{
1590        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1591        int ret;
1592
1593        if (dsi->lanes > msm_host->num_data_lanes)
1594                return -EINVAL;
1595
1596        msm_host->channel = dsi->channel;
1597        msm_host->lanes = dsi->lanes;
1598        msm_host->format = dsi->format;
1599        msm_host->mode_flags = dsi->mode_flags;
1600
1601        msm_dsi_manager_attach_dsi_device(msm_host->id, dsi->mode_flags);
1602
1603        /* Some gpios defined in panel DT need to be controlled by host */
1604        ret = dsi_host_init_panel_gpios(msm_host, &dsi->dev);
1605        if (ret)
1606                return ret;
1607
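            /*
             * If the DRM device is already up, kick the hpd worker so the
             * change in attached devices is reported to the connector.
             */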
1608        DBG("id=%d", msm_host->id);
1609        if (msm_host->dev)
1610                queue_work(msm_host->workqueue, &msm_host->hpd_work);
1611
1612        return 0;
1613}
1614
1615static int dsi_host_detach(struct mipi_dsi_host *host,
1616                                        struct mipi_dsi_device *dsi)
1617{
1618        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1619
1620        msm_host->device_node = NULL;
1621
1622        DBG("id=%d", msm_host->id);
1623        if (msm_host->dev)
1624                queue_work(msm_host->workqueue, &msm_host->hpd_work);
1625
1626        return 0;
1627}
1628
1629static ssize_t dsi_host_transfer(struct mipi_dsi_host *host,
1630                                        const struct mipi_dsi_msg *msg)
1631{
1632        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1633        int ret;
1634
1635        if (!msg || !msm_host->power_on)
1636                return -EINVAL;
1637
1638        mutex_lock(&msm_host->cmd_mutex);
1639        ret = msm_dsi_manager_cmd_xfer(msm_host->id, msg);
1640        mutex_unlock(&msm_host->cmd_mutex);
1641
1642        return ret;
1643}
1644
1645static const struct mipi_dsi_host_ops dsi_host_ops = {
1646        .attach = dsi_host_attach,
1647        .detach = dsi_host_detach,
1648        .transfer = dsi_host_transfer,
1649};
1650
1651/*
1652 * List of supported physical to logical lane mappings.
1653 * For example, the 2nd entry represents the following mapping:
1654 *
1655 * "3012": Logic 3->Phys 0; Logic 0->Phys 1; Logic 1->Phys 2; Logic 2->Phys 3;
1656 */
1657static const int supported_data_lane_swaps[][4] = {
1658        { 0, 1, 2, 3 },
1659        { 3, 0, 1, 2 },
1660        { 2, 3, 0, 1 },
1661        { 1, 2, 3, 0 },
1662        { 0, 3, 2, 1 },
1663        { 1, 0, 3, 2 },
1664        { 2, 1, 0, 3 },
1665        { 3, 2, 1, 0 },
1666};
1667
1668static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
1669                                    struct device_node *ep)
1670{
1671        struct device *dev = &msm_host->pdev->dev;
1672        struct property *prop;
1673        u32 lane_map[4];
1674        int ret, i, len, num_lanes;
1675
1676        prop = of_find_property(ep, "data-lanes", &len);
1677        if (!prop) {
1678                DRM_DEV_DEBUG(dev,
1679                        "failed to find data lane mapping, using default\n");
1680                return 0;
1681        }
1682
1683        num_lanes = len / sizeof(u32);
1684
1685        if (num_lanes < 1 || num_lanes > 4) {
1686                DRM_DEV_ERROR(dev, "bad number of data lanes\n");
1687                return -EINVAL;
1688        }
1689
1690        msm_host->num_data_lanes = num_lanes;
1691
1692        ret = of_property_read_u32_array(ep, "data-lanes", lane_map,
1693                                         num_lanes);
1694        if (ret) {
1695                DRM_DEV_ERROR(dev, "failed to read lane data\n");
1696                return ret;
1697        }
1698
1699        /*
1700         * compare the DT-specified logical-to-physical lane mapping with
1701         * the mappings supported by the hardware
1702         */
1703        for (i = 0; i < ARRAY_SIZE(supported_data_lane_swaps); i++) {
1704                const int *swap = supported_data_lane_swaps[i];
1705                int j;
1706
1707                /*
1708                 * the data-lanes array we get from DT has a logical->physical
1709                 * mapping. The "data lane swap" register field represents
1710                 * supported configurations in a physical->logical mapping.
1711                 * Translate the DT mapping to what we understand and find a
1712                 * configuration that works.
1713                 */
1714                for (j = 0; j < num_lanes; j++) {
1715                        if (lane_map[j] > 3) {
1716                                DRM_DEV_ERROR(dev, "bad physical lane entry %u\n",
1717                                        lane_map[j]);
                                    return -EINVAL;
                            }
1718
1719                        if (swap[lane_map[j]] != j)
1720                                break;
1721                }
1722
1723                if (j == num_lanes) {
1724                        msm_host->dlane_swap = i;
1725                        return 0;
1726                }
1727        }
1728
1729        return -EINVAL;
1730}
1731
1732static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
1733{
1734        struct device *dev = &msm_host->pdev->dev;
1735        struct device_node *np = dev->of_node;
1736        struct device_node *endpoint, *device_node;
1737        int ret = 0;
1738
1739        /*
1740         * Get the endpoint of the output port of the DSI host. In our case,
1741         * this is the port with reg = 1. Don't return an error if the
1742         * remote endpoint isn't defined: it's possible that nothing is
1743         * connected to the DSI output.
1744         */
1745        endpoint = of_graph_get_endpoint_by_regs(np, 1, -1);
1746        if (!endpoint) {
1747                DRM_DEV_DEBUG(dev, "%s: no endpoint\n", __func__);
1748                return 0;
1749        }
1750
1751        ret = dsi_host_parse_lane_data(msm_host, endpoint);
1752        if (ret) {
1753                DRM_DEV_ERROR(dev, "%s: invalid lane configuration %d\n",
1754                        __func__, ret);
1755                ret = -EINVAL;
1756                goto err;
1757        }
1758
1759        /* Get panel node from the output port's endpoint data */
1760        device_node = of_graph_get_remote_node(np, 1, 0);
1761        if (!device_node) {
1762                DRM_DEV_DEBUG(dev, "%s: no valid device\n", __func__);
1763                ret = -ENODEV;
1764                goto err;
1765        }
1766
1767        msm_host->device_node = device_node;
1768
1769        if (of_property_read_bool(np, "syscon-sfpb")) {
1770                msm_host->sfpb = syscon_regmap_lookup_by_phandle(np,
1771                                        "syscon-sfpb");
1772                if (IS_ERR(msm_host->sfpb)) {
1773                        DRM_DEV_ERROR(dev, "%s: failed to get sfpb regmap\n",
1774                                __func__);
1775                        ret = PTR_ERR(msm_host->sfpb);
1776                }
1777        }
1778
1779        of_node_put(device_node);
1780
1781err:
1782        of_node_put(endpoint);
1783
1784        return ret;
1785}
1786
1787static int dsi_host_get_id(struct msm_dsi_host *msm_host)
1788{
1789        struct platform_device *pdev = msm_host->pdev;
1790        const struct msm_dsi_config *cfg = msm_host->cfg_hnd->cfg;
1791        struct resource *res;
1792        int i;
1793
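            /*
             * Identify which DSI controller this is by matching the "dsi_ctrl"
             * register base against the per-SoC list of controller base
             * addresses in the config.
             */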
1794        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dsi_ctrl");
1795        if (!res)
1796                return -EINVAL;
1797
1798        for (i = 0; i < cfg->num_dsi; i++) {
1799                if (cfg->io_start[i] == res->start)
1800                        return i;
1801        }
1802
1803        return -EINVAL;
1804}
1805
1806int msm_dsi_host_init(struct msm_dsi *msm_dsi)
1807{
1808        struct msm_dsi_host *msm_host = NULL;
1809        struct platform_device *pdev = msm_dsi->pdev;
1810        int ret;
1811
1812        msm_host = devm_kzalloc(&pdev->dev, sizeof(*msm_host), GFP_KERNEL);
1813        if (!msm_host) {
1814                pr_err("%s: FAILED: cannot alloc dsi host\n",
1815                       __func__);
1816                ret = -ENOMEM;
1817                goto fail;
1818        }
1819
1820        msm_host->pdev = pdev;
1821        msm_dsi->host = &msm_host->base;
1822
1823        ret = dsi_host_parse_dt(msm_host);
1824        if (ret) {
1825                pr_err("%s: failed to parse dt\n", __func__);
1826                goto fail;
1827        }
1828
1829        msm_host->ctrl_base = msm_ioremap(pdev, "dsi_ctrl", "DSI CTRL");
1830        if (IS_ERR(msm_host->ctrl_base)) {
1831                pr_err("%s: unable to map Dsi ctrl base\n", __func__);
1832                ret = PTR_ERR(msm_host->ctrl_base);
1833                goto fail;
1834        }
1835
1836        pm_runtime_enable(&pdev->dev);
1837
1838        msm_host->cfg_hnd = dsi_get_config(msm_host);
1839        if (!msm_host->cfg_hnd) {
1840                ret = -EINVAL;
1841                pr_err("%s: get config failed\n", __func__);
1842                goto fail;
1843        }
1844
1845        msm_host->id = dsi_host_get_id(msm_host);
1846        if (msm_host->id < 0) {
1847                ret = msm_host->id;
1848                pr_err("%s: unable to identify DSI host index\n", __func__);
1849                goto fail;
1850        }
1851
1852        /* fixup base address by io offset */
1853        msm_host->ctrl_base += msm_host->cfg_hnd->cfg->io_offset;
1854
1855        ret = dsi_regulator_init(msm_host);
1856        if (ret) {
1857                pr_err("%s: regulator init failed\n", __func__);
1858                goto fail;
1859        }
1860
1861        ret = dsi_clk_init(msm_host);
1862        if (ret) {
1863                pr_err("%s: unable to initialize dsi clks\n", __func__);
1864                goto fail;
1865        }
1866
1867        msm_host->rx_buf = devm_kzalloc(&pdev->dev, SZ_4K, GFP_KERNEL);
1868        if (!msm_host->rx_buf) {
1869                ret = -ENOMEM;
1870                pr_err("%s: alloc rx temp buf failed\n", __func__);
1871                goto fail;
1872        }
1873
1874        init_completion(&msm_host->dma_comp);
1875        init_completion(&msm_host->video_comp);
1876        mutex_init(&msm_host->dev_mutex);
1877        mutex_init(&msm_host->cmd_mutex);
1878        spin_lock_init(&msm_host->intr_lock);
1879
1880        /* setup workqueue */
1881        msm_host->workqueue = alloc_ordered_workqueue("dsi_drm_work", 0);
            if (!msm_host->workqueue) {
                    ret = -ENOMEM;
                    goto fail;
            }
1882        INIT_WORK(&msm_host->err_work, dsi_err_worker);
1883        INIT_WORK(&msm_host->hpd_work, dsi_hpd_worker);
1884
1885        msm_dsi->id = msm_host->id;
1886
1887        DBG("Dsi Host %d initialized", msm_host->id);
1888        return 0;
1889
1890fail:
1891        return ret;
1892}
1893
1894void msm_dsi_host_destroy(struct mipi_dsi_host *host)
1895{
1896        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1897
1898        DBG("");
1899        dsi_tx_buf_free(msm_host);
1900        if (msm_host->workqueue) {
1901                flush_workqueue(msm_host->workqueue);
1902                destroy_workqueue(msm_host->workqueue);
1903                msm_host->workqueue = NULL;
1904        }
1905
1906        mutex_destroy(&msm_host->cmd_mutex);
1907        mutex_destroy(&msm_host->dev_mutex);
1908
1909        pm_runtime_disable(&msm_host->pdev->dev);
1910}
1911
1912int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
1913                                        struct drm_device *dev)
1914{
1915        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1916        const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
1917        struct platform_device *pdev = msm_host->pdev;
1918        int ret;
1919
1920        msm_host->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
1921        if (!msm_host->irq) {
1922                /* irq_of_parse_and_map() returns 0 on failure */
1923                DRM_DEV_ERROR(dev->dev, "failed to get irq\n");
1924                return -EINVAL;
1925        }
1926
1927        ret = devm_request_irq(&pdev->dev, msm_host->irq,
1928                        dsi_host_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
1929                        "dsi_isr", msm_host);
1930        if (ret < 0) {
1931                DRM_DEV_ERROR(&pdev->dev, "failed to request IRQ%u: %d\n",
1932                                msm_host->irq, ret);
1933                return ret;
1934        }
1935
1936        msm_host->dev = dev;
1937        ret = cfg_hnd->ops->tx_buf_alloc(msm_host, SZ_4K);
1938        if (ret) {
1939                pr_err("%s: alloc tx gem obj failed, %d\n", __func__, ret);
1940                return ret;
1941        }
1942
1943        return 0;
1944}
1945
1946int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer)
1947{
1948        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1949        int ret;
1950
1951        /* Register mipi dsi host */
1952        if (!msm_host->registered) {
1953                host->dev = &msm_host->pdev->dev;
1954                host->ops = &dsi_host_ops;
1955                ret = mipi_dsi_host_register(host);
1956                if (ret)
1957                        return ret;
1958
1959                msm_host->registered = true;
1960
1961                /* If the panel driver has not been probed by the time the
1962                 * host is registered, defer the host's probe. This makes
1963                 * sure the panel is connected when fbcon checks the
1964                 * connector status and needs a valid display mode to
1965                 * create the framebuffer.
1966                 * Don't defer if there is nothing connected to the DSI
1967                 * output.
1968                 */
1969                if (check_defer && msm_host->device_node) {
1970                        if (IS_ERR(of_drm_find_panel(msm_host->device_node)) &&
1971                            !of_drm_find_bridge(msm_host->device_node))
1972                                return -EPROBE_DEFER;
1973                }
1974        }
1975
1976        return 0;
1977}
1978
1979void msm_dsi_host_unregister(struct mipi_dsi_host *host)
1980{
1981        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1982
1983        if (msm_host->registered) {
1984                mipi_dsi_host_unregister(host);
1985                host->dev = NULL;
1986                host->ops = NULL;
1987                msm_host->registered = false;
1988        }
1989}
1990
1991int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host,
1992                                const struct mipi_dsi_msg *msg)
1993{
1994        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1995        const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
1996
1997        /* TODO: make sure dsi_cmd_mdp is idle.
1998         * Since DSI6G v1.2.0, we can set DSI_TRIG_CTRL.BLOCK_DMA_WITHIN_FRAME
1999         * to ask H/W to wait until cmd mdp is idle. S/W wait is not needed.
2000         * How to handle the old versions? Wait for mdp cmd done?
2001         */
2002
2003        /*
2004         * The MDSS interrupt is generated in the MDP core clock domain, so
2005         * the MDP clock needs to be enabled to receive the DSI interrupt.
2006         */
2007        pm_runtime_get_sync(&msm_host->pdev->dev);
2008        cfg_hnd->ops->link_clk_enable(msm_host);
2009
2010        /* TODO: vote for bus bandwidth */
2011
2012        if (!(msg->flags & MIPI_DSI_MSG_USE_LPM))
2013                dsi_set_tx_power_mode(0, msm_host);
2014
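            /*
             * Save DSI_CTRL and force command mode + controller enable for the
             * transfer; msm_dsi_host_xfer_restore() writes the saved value back.
             */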
2015        msm_host->dma_cmd_ctrl_restore = dsi_read(msm_host, REG_DSI_CTRL);
2016        dsi_write(msm_host, REG_DSI_CTRL,
2017                msm_host->dma_cmd_ctrl_restore |
2018                DSI_CTRL_CMD_MODE_EN |
2019                DSI_CTRL_ENABLE);
2020        dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_DMA_DONE, 1);
2021
2022        return 0;
2023}
2024
2025void msm_dsi_host_xfer_restore(struct mipi_dsi_host *host,
2026                                const struct mipi_dsi_msg *msg)
2027{
2028        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2029        const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
2030
2031        dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_DMA_DONE, 0);
2032        dsi_write(msm_host, REG_DSI_CTRL, msm_host->dma_cmd_ctrl_restore);
2033
2034        if (!(msg->flags & MIPI_DSI_MSG_USE_LPM))
2035                dsi_set_tx_power_mode(1, msm_host);
2036
2037        /* TODO: unvote for bus bandwidth */
2038
2039        cfg_hnd->ops->link_clk_disable(msm_host);
2040        pm_runtime_put_autosuspend(&msm_host->pdev->dev);
2041}
2042
2043int msm_dsi_host_cmd_tx(struct mipi_dsi_host *host,
2044                                const struct mipi_dsi_msg *msg)
2045{
2046        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2047
2048        return dsi_cmds2buf_tx(msm_host, msg);
2049}
2050
2051int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
2052                                const struct mipi_dsi_msg *msg)
2053{
2054        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2055        const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
2056        int data_byte, rx_byte, dlen, end;
2057        int short_response, diff, pkt_size, ret = 0;
2058        char cmd;
2059        int rlen = msg->rx_len;
2060        u8 *buf;
2061
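            /*
             * Reads of up to 2 bytes come back in a short response packet;
             * longer reads are split across multiple transfers. The rx fifo is
             * 16 bytes, so the first transfer carries at most 10 payload bytes
             * alongside the 4-byte header and 2-byte crc.
             */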
2062        if (rlen <= 2) {
2063                short_response = 1;
2064                pkt_size = rlen;
2065                rx_byte = 4;
2066        } else {
2067                short_response = 0;
2068                data_byte = 10; /* first read */
2069                if (rlen < data_byte)
2070                        pkt_size = rlen;
2071                else
2072                        pkt_size = data_byte;
2073                rx_byte = data_byte + 6; /* 4 header + 2 crc */
2074        }
2075
2076        buf = msm_host->rx_buf;
2077        end = 0;
2078        while (!end) {
2079                u8 tx[2] = {pkt_size & 0xff, pkt_size >> 8};
2080                struct mipi_dsi_msg max_pkt_size_msg = {
2081                        .channel = msg->channel,
2082                        .type = MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE,
2083                        .tx_len = 2,
2084                        .tx_buf = tx,
2085                };
2086
2087                DBG("rlen=%d pkt_size=%d rx_byte=%d",
2088                        rlen, pkt_size, rx_byte);
2089
2090                ret = dsi_cmds2buf_tx(msm_host, &max_pkt_size_msg);
2091                if (ret < 2) {
2092                        pr_err("%s: Set max pkt size failed, %d\n",
2093                                __func__, ret);
2094                        return -EINVAL;
2095                }
2096
2097                if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) &&
2098                        (cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V1_1)) {
2099                        /* Clear the RDBK_DATA registers */
2100                        dsi_write(msm_host, REG_DSI_RDBK_DATA_CTRL,
2101                                        DSI_RDBK_DATA_CTRL_CLR);
2102                        wmb(); /* make sure the RDBK registers are cleared */
2103                        dsi_write(msm_host, REG_DSI_RDBK_DATA_CTRL, 0);
2104                        wmb(); /* release cleared status before transfer */
2105                }
2106
2107                ret = dsi_cmds2buf_tx(msm_host, msg);
2108                if (ret < msg->tx_len) {
2109                        pr_err("%s: Read cmd Tx failed, %d\n", __func__, ret);
2110                        return ret;
2111                }
2112
2113                /*
2114                 * Once the cmd_dma_done interrupt is received, the return
2115                 * data from the client is already stored in the RDBK_DATA
2116                 * registers. Since the rx fifo is 16 bytes, the dcs header
2117                 * is kept only on the first iteration; afterwards it is
2118                 * lost as the data shifts through the registers.
2119                 */
2120                dlen = dsi_cmd_dma_rx(msm_host, buf, rx_byte, pkt_size);
2121
2122                if (dlen <= 0)
2123                        return 0;
2124
2125                if (short_response)
2126                        break;
2127
2128                if (rlen <= data_byte) {
2129                        diff = data_byte - rlen;
2130                        end = 1;
2131                } else {
2132                        diff = 0;
2133                        rlen -= data_byte;
2134                }
2135
2136                if (!end) {
2137                        dlen -= 2; /* 2 crc */
2138                        dlen -= diff;
2139                        buf += dlen;    /* next start position */
2140                        data_byte = 14; /* NOT first read */
2141                        if (rlen < data_byte)
2142                                pkt_size += rlen;
2143                        else
2144                                pkt_size += data_byte;
2145                        DBG("buf=%p dlen=%d diff=%d", buf, dlen, diff);
2146                }
2147        }
2148
2149        /*
2150         * For a single long read, if the requested rlen < 10,
2151         * we need to shift the start position of the rx
2152         * data buffer to skip the bytes that were not
2153         * updated.
2154         */
2155        if (pkt_size < 10 && !short_response)
2156                buf = msm_host->rx_buf + (10 - rlen);
2157        else
2158                buf = msm_host->rx_buf;
2159
2160        cmd = buf[0];
2161        switch (cmd) {
2162        case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
2163                pr_err("%s: rx ACK_ERR_PACKAGE\n", __func__);
2164                ret = 0;
2165                break;
2166        case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
2167        case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
2168                ret = dsi_short_read1_resp(buf, msg);
2169                break;
2170        case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
2171        case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
2172                ret = dsi_short_read2_resp(buf, msg);
2173                break;
2174        case MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE:
2175        case MIPI_DSI_RX_DCS_LONG_READ_RESPONSE:
2176                ret = dsi_long_read_resp(buf, msg);
2177                break;
2178        default:
2179                pr_warn("%s: invalid response cmd\n", __func__);
2180                ret = 0;
2181        }
2182
2183        return ret;
2184}
2185
2186void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, u32 dma_base,
2187                                  u32 len)
2188{
2189        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2190
2191        dsi_write(msm_host, REG_DSI_DMA_BASE, dma_base);
2192        dsi_write(msm_host, REG_DSI_DMA_LEN, len);
2193        dsi_write(msm_host, REG_DSI_TRIG_DMA, 1);
2194
2195        /* Make sure trigger happens */
2196        wmb();
2197}
2198
2199int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
2200        struct msm_dsi_pll *src_pll)
2201{
2202        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2203        struct clk *byte_clk_provider, *pixel_clk_provider;
2204        int ret;
2205
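            /*
             * Reparent the host's byte and pixel clock sources (and, where
             * present, dsi_clk_src/esc_clk_src) to the byte and pixel clocks
             * provided by the selected PLL.
             */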
2206        ret = msm_dsi_pll_get_clk_provider(src_pll,
2207                                &byte_clk_provider, &pixel_clk_provider);
2208        if (ret) {
2209                pr_info("%s: can't get provider from pll, don't set parent\n",
2210                        __func__);
2211                return 0;
2212        }
2213
2214        ret = clk_set_parent(msm_host->byte_clk_src, byte_clk_provider);
2215        if (ret) {
2216                pr_err("%s: can't set parent to byte_clk_src. ret=%d\n",
2217                        __func__, ret);
2218                goto exit;
2219        }
2220
2221        ret = clk_set_parent(msm_host->pixel_clk_src, pixel_clk_provider);
2222        if (ret) {
2223                pr_err("%s: can't set parent to pixel_clk_src. ret=%d\n",
2224                        __func__, ret);
2225                goto exit;
2226        }
2227
2228        if (msm_host->dsi_clk_src) {
2229                ret = clk_set_parent(msm_host->dsi_clk_src, pixel_clk_provider);
2230                if (ret) {
2231                        pr_err("%s: can't set parent to dsi_clk_src. ret=%d\n",
2232                                __func__, ret);
2233                        goto exit;
2234                }
2235        }
2236
2237        if (msm_host->esc_clk_src) {
2238                ret = clk_set_parent(msm_host->esc_clk_src, byte_clk_provider);
2239                if (ret) {
2240                        pr_err("%s: can't set parent to esc_clk_src. ret=%d\n",
2241                                __func__, ret);
2242                        goto exit;
2243                }
2244        }
2245
2246exit:
2247        return ret;
2248}
2249
2250void msm_dsi_host_reset_phy(struct mipi_dsi_host *host)
2251{
2252        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2253
2254        DBG("");
2255        dsi_write(msm_host, REG_DSI_PHY_RESET, DSI_PHY_RESET_RESET);
2256        /* Make sure fully reset */
2257        wmb();
2258        udelay(1000);
2259        dsi_write(msm_host, REG_DSI_PHY_RESET, 0);
2260        udelay(100);
2261}
2262
2263void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host,
2264                        struct msm_dsi_phy_clk_request *clk_req,
2265                        bool is_dual_dsi)
2266{
2267        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2268        const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
2269        int ret;
2270
2271        ret = cfg_hnd->ops->calc_clk_rate(msm_host, is_dual_dsi);
2272        if (ret) {
2273                pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
2274                return;
2275        }
2276
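            /*
             * The bit clock is 8x the byte clock: one byte per lane is sent
             * per byte clock cycle.
             */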
2277        clk_req->bitclk_rate = msm_host->byte_clk_rate * 8;
2278        clk_req->escclk_rate = msm_host->esc_clk_rate;
2279}
2280
2281int msm_dsi_host_enable(struct mipi_dsi_host *host)
2282{
2283        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2284
2285        dsi_op_mode_config(msm_host,
2286                !!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), true);
2287
2288        /* TODO: the clock should be turned off for command mode and only
2289         * turned on before MDP START. This part of the code should be
2290         * enabled once the mdp driver supports it.
2291         */
2292        /* if (msm_panel->mode == MSM_DSI_CMD_MODE) {
2293         *      dsi_link_clk_disable(msm_host);
2294         *      pm_runtime_put_autosuspend(&msm_host->pdev->dev);
2295         * }
2296         */
2297        msm_host->enabled = true;
2298        return 0;
2299}
2300
2301int msm_dsi_host_disable(struct mipi_dsi_host *host)
2302{
2303        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2304
2305        msm_host->enabled = false;
2306        dsi_op_mode_config(msm_host,
2307                !!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), false);
2308
2309        /* Even though the INTF has been disabled, the video engine keeps
2310         * running and blocks the cmd engine. Reset the controller to stop
2311         * the video engine so that commands can still be sent.
2312         */
2313        dsi_sw_reset(msm_host);
2314
2315        return 0;
2316}
2317
2318static void msm_dsi_sfpb_config(struct msm_dsi_host *msm_host, bool enable)
2319{
2320        enum sfpb_ahb_arb_master_port_en en;
2321
2322        if (!msm_host->sfpb)
2323                return;
2324
2325        en = enable ? SFPB_MASTER_PORT_ENABLE : SFPB_MASTER_PORT_DISABLE;
2326
2327        regmap_update_bits(msm_host->sfpb, REG_SFPB_GPREG,
2328                        SFPB_GPREG_MASTER_PORT_EN__MASK,
2329                        SFPB_GPREG_MASTER_PORT_EN(en));
2330}
2331
2332int msm_dsi_host_power_on(struct mipi_dsi_host *host,
2333                        struct msm_dsi_phy_shared_timings *phy_shared_timings,
2334                        bool is_dual_dsi)
2335{
2336        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2337        const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
2338        int ret = 0;
2339
2340        mutex_lock(&msm_host->dev_mutex);
2341        if (msm_host->power_on) {
2342                DBG("dsi host already on");
2343                goto unlock_ret;
2344        }
2345
2346        msm_dsi_sfpb_config(msm_host, true);
2347
2348        ret = dsi_host_regulator_enable(msm_host);
2349        if (ret) {
2350                pr_err("%s: failed to enable vregs, ret=%d\n",
2351                        __func__, ret);
2352                goto unlock_ret;
2353        }
2354
2355        pm_runtime_get_sync(&msm_host->pdev->dev);
2356        ret = cfg_hnd->ops->link_clk_enable(msm_host);
2357        if (ret) {
2358                pr_err("%s: failed to enable link clocks. ret=%d\n",
2359                       __func__, ret);
2360                goto fail_disable_reg;
2361        }
2362
2363        ret = pinctrl_pm_select_default_state(&msm_host->pdev->dev);
2364        if (ret) {
2365                pr_err("%s: failed to set pinctrl default state, %d\n",
2366                        __func__, ret);
2367                goto fail_disable_clk;
2368        }
2369
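            /*
             * With regulators, link clocks and pinctrl set up, program the
             * display timing, soft-reset the controller and enable it using
             * the shared timings provided by the PHY.
             */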
2370        dsi_timing_setup(msm_host, is_dual_dsi);
2371        dsi_sw_reset(msm_host);
2372        dsi_ctrl_config(msm_host, true, phy_shared_timings);
2373
2374        if (msm_host->disp_en_gpio)
2375                gpiod_set_value(msm_host->disp_en_gpio, 1);
2376
2377        msm_host->power_on = true;
2378        mutex_unlock(&msm_host->dev_mutex);
2379
2380        return 0;
2381
2382fail_disable_clk:
2383        cfg_hnd->ops->link_clk_disable(msm_host);
2384        pm_runtime_put_autosuspend(&msm_host->pdev->dev);
2385fail_disable_reg:
2386        dsi_host_regulator_disable(msm_host);
2387unlock_ret:
2388        mutex_unlock(&msm_host->dev_mutex);
2389        return ret;
2390}
2391
2392int msm_dsi_host_power_off(struct mipi_dsi_host *host)
2393{
2394        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2395        const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
2396
2397        mutex_lock(&msm_host->dev_mutex);
2398        if (!msm_host->power_on) {
2399                DBG("dsi host already off");
2400                goto unlock_ret;
2401        }
2402
2403        dsi_ctrl_config(msm_host, false, NULL);
2404
2405        if (msm_host->disp_en_gpio)
2406                gpiod_set_value(msm_host->disp_en_gpio, 0);
2407
2408        pinctrl_pm_select_sleep_state(&msm_host->pdev->dev);
2409
2410        cfg_hnd->ops->link_clk_disable(msm_host);
2411        pm_runtime_put_autosuspend(&msm_host->pdev->dev);
2412
2413        dsi_host_regulator_disable(msm_host);
2414
2415        msm_dsi_sfpb_config(msm_host, false);
2416
2417        DBG("-");
2418
2419        msm_host->power_on = false;
2420
2421unlock_ret:
2422        mutex_unlock(&msm_host->dev_mutex);
2423        return 0;
2424}
2425
2426int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
2427                                  const struct drm_display_mode *mode)
2428{
2429        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2430
2431        if (msm_host->mode) {
2432                drm_mode_destroy(msm_host->dev, msm_host->mode);
2433                msm_host->mode = NULL;
2434        }
2435
2436        msm_host->mode = drm_mode_duplicate(msm_host->dev, mode);
2437        if (!msm_host->mode) {
2438                pr_err("%s: cannot duplicate mode\n", __func__);
2439                return -ENOMEM;
2440        }
2441
2442        return 0;
2443}
2444
2445struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host,
2446                                unsigned long *panel_flags)
2447{
2448        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2449        struct drm_panel *panel;
2450
2451        panel = of_drm_find_panel(msm_host->device_node);
2452        if (panel_flags)
2453                *panel_flags = msm_host->mode_flags;
2454
2455        return panel;
2456}
2457
2458struct drm_bridge *msm_dsi_host_get_bridge(struct mipi_dsi_host *host)
2459{
2460        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2461
2462        return of_drm_find_bridge(msm_host->device_node);
2463}
2464