linux/drivers/gpu/drm/msm/dsi/dsi_host.c
   1/*
   2 * Copyright (c) 2015, The Linux Foundation. All rights reserved.
   3 *
   4 * This program is free software; you can redistribute it and/or modify
   5 * it under the terms of the GNU General Public License version 2 and
   6 * only version 2 as published by the Free Software Foundation.
   7 *
   8 * This program is distributed in the hope that it will be useful,
   9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  11 * GNU General Public License for more details.
  12 */
  13
  14#include <linux/clk.h>
  15#include <linux/delay.h>
  16#include <linux/err.h>
  17#include <linux/gpio.h>
  18#include <linux/gpio/consumer.h>
  19#include <linux/interrupt.h>
  20#include <linux/of_device.h>
  21#include <linux/of_gpio.h>
  22#include <linux/of_irq.h>
  23#include <linux/pinctrl/consumer.h>
  24#include <linux/of_graph.h>
  25#include <linux/regulator/consumer.h>
  26#include <linux/spinlock.h>
  27#include <linux/mfd/syscon.h>
  28#include <linux/regmap.h>
  29#include <video/mipi_display.h>
  30
  31#include "dsi.h"
  32#include "dsi.xml.h"
  33#include "sfpb.xml.h"
  34#include "dsi_cfg.h"
  35#include "msm_kms.h"
  36
  37static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor)
  38{
  39        u32 ver;
  40
  41        if (!major || !minor)
  42                return -EINVAL;
  43
  44        /*
  45         * From DSI6G(v3), addition of a 6G_HW_VERSION register at offset 0
  46         * makes all other registers 4-byte shifted down.
  47         *
   48         * In order to distinguish between DSI6G(v3) and beyond, and DSIv2 and
   49         * older, we read the DSI_VERSION register without any shift (offset
   50         * 0x1f0). In the case of DSIv2, this has to be a non-zero value. In
  51         * the case of DSI6G, this has to be zero (the offset points to a
  52         * scratch register which we never touch)
  53         */
  54
  55        ver = msm_readl(base + REG_DSI_VERSION);
  56        if (ver) {
  57                /* older dsi host, there is no register shift */
  58                ver = FIELD(ver, DSI_VERSION_MAJOR);
  59                if (ver <= MSM_DSI_VER_MAJOR_V2) {
  60                        /* old versions */
  61                        *major = ver;
  62                        *minor = 0;
  63                        return 0;
  64                } else {
  65                        return -EINVAL;
  66                }
  67        } else {
  68                /*
  69                 * newer host, offset 0 has 6G_HW_VERSION, the rest of the
  70                 * registers are shifted down, read DSI_VERSION again with
  71                 * the shifted offset
  72                 */
  73                ver = msm_readl(base + DSI_6G_REG_SHIFT + REG_DSI_VERSION);
  74                ver = FIELD(ver, DSI_VERSION_MAJOR);
  75                if (ver == MSM_DSI_VER_MAJOR_6G) {
  76                        /* 6G version */
  77                        *major = ver;
  78                        *minor = msm_readl(base + REG_DSI_6G_HW_VERSION);
  79                        return 0;
  80                } else {
  81                        return -EINVAL;
  82                }
  83        }
  84}
  85
  86#define DSI_ERR_STATE_ACK                       0x0000
  87#define DSI_ERR_STATE_TIMEOUT                   0x0001
  88#define DSI_ERR_STATE_DLN0_PHY                  0x0002
  89#define DSI_ERR_STATE_FIFO                      0x0004
  90#define DSI_ERR_STATE_MDP_FIFO_UNDERFLOW        0x0008
  91#define DSI_ERR_STATE_INTERLEAVE_OP_CONTENTION  0x0010
  92#define DSI_ERR_STATE_PLL_UNLOCKED              0x0020
  93
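/* All controller clock enable bits, written to REG_DSI_CLK_CTRL as one value */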
  94#define DSI_CLK_CTRL_ENABLE_CLKS        \
  95                (DSI_CLK_CTRL_AHBS_HCLK_ON | DSI_CLK_CTRL_AHBM_SCLK_ON | \
  96                DSI_CLK_CTRL_PCLK_ON | DSI_CLK_CTRL_DSICLK_ON | \
  97                DSI_CLK_CTRL_BYTECLK_ON | DSI_CLK_CTRL_ESCCLK_ON | \
  98                DSI_CLK_CTRL_FORCE_ON_DYN_AHBM_HCLK)
  99
 100struct msm_dsi_host {
 101        struct mipi_dsi_host base;
 102
 103        struct platform_device *pdev;
 104        struct drm_device *dev;
 105
 106        int id;
 107
 108        void __iomem *ctrl_base;
 109        struct regulator_bulk_data supplies[DSI_DEV_REGULATOR_MAX];
 110
 111        struct clk *bus_clks[DSI_BUS_CLK_MAX];
 112
 113        struct clk *byte_clk;
 114        struct clk *esc_clk;
 115        struct clk *pixel_clk;
 116        struct clk *byte_clk_src;
 117        struct clk *pixel_clk_src;
 118
 119        u32 byte_clk_rate;
 120        u32 esc_clk_rate;
 121
 122        /* DSI v2 specific clocks */
 123        struct clk *src_clk;
 124        struct clk *esc_clk_src;
 125        struct clk *dsi_clk_src;
 126
 127        u32 src_clk_rate;
 128
 129        struct gpio_desc *disp_en_gpio;
 130        struct gpio_desc *te_gpio;
 131
 132        const struct msm_dsi_cfg_handler *cfg_hnd;
 133
 134        struct completion dma_comp;
 135        struct completion video_comp;
 136        struct mutex dev_mutex;
 137        struct mutex cmd_mutex;
 138        struct mutex clk_mutex;
 139        spinlock_t intr_lock; /* Protect interrupt ctrl register */
 140
 141        u32 err_work_state;
 142        struct work_struct err_work;
 143        struct work_struct hpd_work;
 144        struct workqueue_struct *workqueue;
 145
  146        /* DSI 6G TX buffer */
 147        struct drm_gem_object *tx_gem_obj;
 148
 149        /* DSI v2 TX buffer */
 150        void *tx_buf;
 151        dma_addr_t tx_buf_paddr;
 152
 153        int tx_size;
 154
 155        u8 *rx_buf;
 156
 157        struct regmap *sfpb;
 158
 159        struct drm_display_mode *mode;
 160
 161        /* connected device info */
 162        struct device_node *device_node;
 163        unsigned int channel;
 164        unsigned int lanes;
 165        enum mipi_dsi_pixel_format format;
 166        unsigned long mode_flags;
 167
 168        /* lane data parsed via DT */
 169        int dlane_swap;
 170        int num_data_lanes;
 171
 172        u32 dma_cmd_ctrl_restore;
 173
 174        bool registered;
 175        bool power_on;
 176        int irq;
 177};
 178
 179static u32 dsi_get_bpp(const enum mipi_dsi_pixel_format fmt)
 180{
 181        switch (fmt) {
 182        case MIPI_DSI_FMT_RGB565:               return 16;
 183        case MIPI_DSI_FMT_RGB666_PACKED:        return 18;
 184        case MIPI_DSI_FMT_RGB666:
 185        case MIPI_DSI_FMT_RGB888:
 186        default:                                return 24;
 187        }
 188}
 189
 190static inline u32 dsi_read(struct msm_dsi_host *msm_host, u32 reg)
 191{
 192        return msm_readl(msm_host->ctrl_base + reg);
 193}
 194static inline void dsi_write(struct msm_dsi_host *msm_host, u32 reg, u32 data)
 195{
 196        msm_writel(data, msm_host->ctrl_base + reg);
 197}
 198
 199static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host);
 200static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host);
 201
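/*
 * Briefly power the controller (GDSC regulator + AHB interface clock) so the
 * version registers can be read, then look up the configuration matching this
 * IP revision.
 */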
 202static const struct msm_dsi_cfg_handler *dsi_get_config(
 203                                                struct msm_dsi_host *msm_host)
 204{
 205        const struct msm_dsi_cfg_handler *cfg_hnd = NULL;
 206        struct device *dev = &msm_host->pdev->dev;
 207        struct regulator *gdsc_reg;
 208        struct clk *ahb_clk;
 209        int ret;
 210        u32 major = 0, minor = 0;
 211
 212        gdsc_reg = regulator_get(dev, "gdsc");
 213        if (IS_ERR(gdsc_reg)) {
 214                pr_err("%s: cannot get gdsc\n", __func__);
 215                goto exit;
 216        }
 217
 218        ahb_clk = clk_get(dev, "iface_clk");
 219        if (IS_ERR(ahb_clk)) {
 220                pr_err("%s: cannot get interface clock\n", __func__);
 221                goto put_gdsc;
 222        }
 223
 224        ret = regulator_enable(gdsc_reg);
 225        if (ret) {
 226                pr_err("%s: unable to enable gdsc\n", __func__);
 227                goto put_clk;
 228        }
 229
 230        ret = clk_prepare_enable(ahb_clk);
 231        if (ret) {
 232                pr_err("%s: unable to enable ahb_clk\n", __func__);
 233                goto disable_gdsc;
 234        }
 235
 236        ret = dsi_get_version(msm_host->ctrl_base, &major, &minor);
 237        if (ret) {
 238                pr_err("%s: Invalid version\n", __func__);
 239                goto disable_clks;
 240        }
 241
 242        cfg_hnd = msm_dsi_cfg_get(major, minor);
 243
 244        DBG("%s: Version %x:%x\n", __func__, major, minor);
 245
 246disable_clks:
 247        clk_disable_unprepare(ahb_clk);
 248disable_gdsc:
 249        regulator_disable(gdsc_reg);
 250put_clk:
 251        clk_put(ahb_clk);
 252put_gdsc:
 253        regulator_put(gdsc_reg);
 254exit:
 255        return cfg_hnd;
 256}
 257
 258static inline struct msm_dsi_host *to_msm_dsi_host(struct mipi_dsi_host *host)
 259{
 260        return container_of(host, struct msm_dsi_host, base);
 261}
 262
 263static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host)
 264{
 265        struct regulator_bulk_data *s = msm_host->supplies;
 266        const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs;
 267        int num = msm_host->cfg_hnd->cfg->reg_cfg.num;
 268        int i;
 269
 270        DBG("");
 271        for (i = num - 1; i >= 0; i--)
 272                if (regs[i].disable_load >= 0)
 273                        regulator_set_load(s[i].consumer,
 274                                           regs[i].disable_load);
 275
 276        regulator_bulk_disable(num, s);
 277}
 278
 279static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host)
 280{
 281        struct regulator_bulk_data *s = msm_host->supplies;
 282        const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs;
 283        int num = msm_host->cfg_hnd->cfg->reg_cfg.num;
 284        int ret, i;
 285
 286        DBG("");
 287        for (i = 0; i < num; i++) {
 288                if (regs[i].enable_load >= 0) {
 289                        ret = regulator_set_load(s[i].consumer,
 290                                                 regs[i].enable_load);
 291                        if (ret < 0) {
 292                                pr_err("regulator %d set op mode failed, %d\n",
 293                                        i, ret);
 294                                goto fail;
 295                        }
 296                }
 297        }
 298
 299        ret = regulator_bulk_enable(num, s);
 300        if (ret < 0) {
 301                pr_err("regulator enable failed, %d\n", ret);
 302                goto fail;
 303        }
 304
 305        return 0;
 306
 307fail:
 308        for (i--; i >= 0; i--)
 309                regulator_set_load(s[i].consumer, regs[i].disable_load);
 310        return ret;
 311}
 312
 313static int dsi_regulator_init(struct msm_dsi_host *msm_host)
 314{
 315        struct regulator_bulk_data *s = msm_host->supplies;
 316        const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs;
 317        int num = msm_host->cfg_hnd->cfg->reg_cfg.num;
 318        int i, ret;
 319
 320        for (i = 0; i < num; i++)
 321                s[i].supply = regs[i].name;
 322
 323        ret = devm_regulator_bulk_get(&msm_host->pdev->dev, num, s);
 324        if (ret < 0) {
 325                pr_err("%s: failed to init regulator, ret=%d\n",
 326                                                __func__, ret);
 327                return ret;
 328        }
 329
 330        return 0;
 331}
 332
 333static int dsi_clk_init(struct msm_dsi_host *msm_host)
 334{
 335        struct device *dev = &msm_host->pdev->dev;
 336        const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
 337        const struct msm_dsi_config *cfg = cfg_hnd->cfg;
 338        int i, ret = 0;
 339
 340        /* get bus clocks */
 341        for (i = 0; i < cfg->num_bus_clks; i++) {
 342                msm_host->bus_clks[i] = devm_clk_get(dev,
 343                                                cfg->bus_clk_names[i]);
 344                if (IS_ERR(msm_host->bus_clks[i])) {
 345                        ret = PTR_ERR(msm_host->bus_clks[i]);
 346                        pr_err("%s: Unable to get %s, ret = %d\n",
 347                                __func__, cfg->bus_clk_names[i], ret);
 348                        goto exit;
 349                }
 350        }
 351
 352        /* get link and source clocks */
 353        msm_host->byte_clk = devm_clk_get(dev, "byte_clk");
 354        if (IS_ERR(msm_host->byte_clk)) {
 355                ret = PTR_ERR(msm_host->byte_clk);
 356                pr_err("%s: can't find dsi_byte_clk. ret=%d\n",
 357                        __func__, ret);
 358                msm_host->byte_clk = NULL;
 359                goto exit;
 360        }
 361
 362        msm_host->pixel_clk = devm_clk_get(dev, "pixel_clk");
 363        if (IS_ERR(msm_host->pixel_clk)) {
 364                ret = PTR_ERR(msm_host->pixel_clk);
 365                pr_err("%s: can't find dsi_pixel_clk. ret=%d\n",
 366                        __func__, ret);
 367                msm_host->pixel_clk = NULL;
 368                goto exit;
 369        }
 370
 371        msm_host->esc_clk = devm_clk_get(dev, "core_clk");
 372        if (IS_ERR(msm_host->esc_clk)) {
 373                ret = PTR_ERR(msm_host->esc_clk);
 374                pr_err("%s: can't find dsi_esc_clk. ret=%d\n",
 375                        __func__, ret);
 376                msm_host->esc_clk = NULL;
 377                goto exit;
 378        }
 379
 380        msm_host->byte_clk_src = clk_get_parent(msm_host->byte_clk);
 381        if (!msm_host->byte_clk_src) {
 382                ret = -ENODEV;
 383                pr_err("%s: can't find byte_clk_src. ret=%d\n", __func__, ret);
 384                goto exit;
 385        }
 386
 387        msm_host->pixel_clk_src = clk_get_parent(msm_host->pixel_clk);
 388        if (!msm_host->pixel_clk_src) {
 389                ret = -ENODEV;
 390                pr_err("%s: can't find pixel_clk_src. ret=%d\n", __func__, ret);
 391                goto exit;
 392        }
 393
 394        if (cfg_hnd->major == MSM_DSI_VER_MAJOR_V2) {
 395                msm_host->src_clk = devm_clk_get(dev, "src_clk");
 396                if (IS_ERR(msm_host->src_clk)) {
 397                        ret = PTR_ERR(msm_host->src_clk);
 398                        pr_err("%s: can't find dsi_src_clk. ret=%d\n",
 399                                __func__, ret);
 400                        msm_host->src_clk = NULL;
 401                        goto exit;
 402                }
 403
 404                msm_host->esc_clk_src = clk_get_parent(msm_host->esc_clk);
 405                if (!msm_host->esc_clk_src) {
 406                        ret = -ENODEV;
 407                        pr_err("%s: can't get esc_clk_src. ret=%d\n",
 408                                __func__, ret);
 409                        goto exit;
 410                }
 411
 412                msm_host->dsi_clk_src = clk_get_parent(msm_host->src_clk);
 413                if (!msm_host->dsi_clk_src) {
 414                        ret = -ENODEV;
 415                        pr_err("%s: can't get dsi_clk_src. ret=%d\n",
 416                                __func__, ret);
 417                }
 418        }
 419exit:
 420        return ret;
 421}
 422
 423static int dsi_bus_clk_enable(struct msm_dsi_host *msm_host)
 424{
 425        const struct msm_dsi_config *cfg = msm_host->cfg_hnd->cfg;
 426        int i, ret;
 427
 428        DBG("id=%d", msm_host->id);
 429
 430        for (i = 0; i < cfg->num_bus_clks; i++) {
 431                ret = clk_prepare_enable(msm_host->bus_clks[i]);
 432                if (ret) {
 433                        pr_err("%s: failed to enable bus clock %d ret %d\n",
 434                                __func__, i, ret);
 435                        goto err;
 436                }
 437        }
 438
 439        return 0;
 440err:
  441        while (--i >= 0)
  442                clk_disable_unprepare(msm_host->bus_clks[i]);
 443
 444        return ret;
 445}
 446
 447static void dsi_bus_clk_disable(struct msm_dsi_host *msm_host)
 448{
 449        const struct msm_dsi_config *cfg = msm_host->cfg_hnd->cfg;
 450        int i;
 451
 452        DBG("");
 453
 454        for (i = cfg->num_bus_clks - 1; i >= 0; i--)
 455                clk_disable_unprepare(msm_host->bus_clks[i]);
 456}
 457
 458static int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
 459{
 460        int ret;
 461
 462        DBG("Set clk rates: pclk=%d, byteclk=%d",
 463                msm_host->mode->clock, msm_host->byte_clk_rate);
 464
 465        ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate);
 466        if (ret) {
 467                pr_err("%s: Failed to set rate byte clk, %d\n", __func__, ret);
 468                goto error;
 469        }
 470
 471        ret = clk_set_rate(msm_host->pixel_clk, msm_host->mode->clock * 1000);
 472        if (ret) {
 473                pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret);
 474                goto error;
 475        }
 476
 477        ret = clk_prepare_enable(msm_host->esc_clk);
 478        if (ret) {
 479                pr_err("%s: Failed to enable dsi esc clk\n", __func__);
 480                goto error;
 481        }
 482
 483        ret = clk_prepare_enable(msm_host->byte_clk);
 484        if (ret) {
 485                pr_err("%s: Failed to enable dsi byte clk\n", __func__);
 486                goto byte_clk_err;
 487        }
 488
 489        ret = clk_prepare_enable(msm_host->pixel_clk);
 490        if (ret) {
 491                pr_err("%s: Failed to enable dsi pixel clk\n", __func__);
 492                goto pixel_clk_err;
 493        }
 494
 495        return 0;
 496
 497pixel_clk_err:
 498        clk_disable_unprepare(msm_host->byte_clk);
 499byte_clk_err:
 500        clk_disable_unprepare(msm_host->esc_clk);
 501error:
 502        return ret;
 503}
 504
 505static int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host)
 506{
 507        int ret;
 508
 509        DBG("Set clk rates: pclk=%d, byteclk=%d, esc_clk=%d, dsi_src_clk=%d",
 510                msm_host->mode->clock, msm_host->byte_clk_rate,
 511                msm_host->esc_clk_rate, msm_host->src_clk_rate);
 512
 513        ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate);
 514        if (ret) {
 515                pr_err("%s: Failed to set rate byte clk, %d\n", __func__, ret);
 516                goto error;
 517        }
 518
 519        ret = clk_set_rate(msm_host->esc_clk, msm_host->esc_clk_rate);
 520        if (ret) {
 521                pr_err("%s: Failed to set rate esc clk, %d\n", __func__, ret);
 522                goto error;
 523        }
 524
 525        ret = clk_set_rate(msm_host->src_clk, msm_host->src_clk_rate);
 526        if (ret) {
 527                pr_err("%s: Failed to set rate src clk, %d\n", __func__, ret);
 528                goto error;
 529        }
 530
 531        ret = clk_set_rate(msm_host->pixel_clk, msm_host->mode->clock * 1000);
 532        if (ret) {
 533                pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret);
 534                goto error;
 535        }
 536
 537        ret = clk_prepare_enable(msm_host->byte_clk);
 538        if (ret) {
 539                pr_err("%s: Failed to enable dsi byte clk\n", __func__);
 540                goto error;
 541        }
 542
 543        ret = clk_prepare_enable(msm_host->esc_clk);
 544        if (ret) {
 545                pr_err("%s: Failed to enable dsi esc clk\n", __func__);
 546                goto esc_clk_err;
 547        }
 548
 549        ret = clk_prepare_enable(msm_host->src_clk);
 550        if (ret) {
 551                pr_err("%s: Failed to enable dsi src clk\n", __func__);
 552                goto src_clk_err;
 553        }
 554
 555        ret = clk_prepare_enable(msm_host->pixel_clk);
 556        if (ret) {
 557                pr_err("%s: Failed to enable dsi pixel clk\n", __func__);
 558                goto pixel_clk_err;
 559        }
 560
 561        return 0;
 562
 563pixel_clk_err:
 564        clk_disable_unprepare(msm_host->src_clk);
 565src_clk_err:
 566        clk_disable_unprepare(msm_host->esc_clk);
 567esc_clk_err:
 568        clk_disable_unprepare(msm_host->byte_clk);
 569error:
 570        return ret;
 571}
 572
 573static int dsi_link_clk_enable(struct msm_dsi_host *msm_host)
 574{
 575        const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
 576
 577        if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G)
 578                return dsi_link_clk_enable_6g(msm_host);
 579        else
 580                return dsi_link_clk_enable_v2(msm_host);
 581}
 582
 583static void dsi_link_clk_disable(struct msm_dsi_host *msm_host)
 584{
 585        const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
 586
 587        if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
 588                clk_disable_unprepare(msm_host->esc_clk);
 589                clk_disable_unprepare(msm_host->pixel_clk);
 590                clk_disable_unprepare(msm_host->byte_clk);
 591        } else {
 592                clk_disable_unprepare(msm_host->pixel_clk);
 593                clk_disable_unprepare(msm_host->src_clk);
 594                clk_disable_unprepare(msm_host->esc_clk);
 595                clk_disable_unprepare(msm_host->byte_clk);
 596        }
 597}
 598
 599static int dsi_clk_ctrl(struct msm_dsi_host *msm_host, bool enable)
 600{
 601        int ret = 0;
 602
 603        mutex_lock(&msm_host->clk_mutex);
 604        if (enable) {
 605                ret = dsi_bus_clk_enable(msm_host);
 606                if (ret) {
 607                        pr_err("%s: Can not enable bus clk, %d\n",
 608                                __func__, ret);
 609                        goto unlock_ret;
 610                }
 611                ret = dsi_link_clk_enable(msm_host);
 612                if (ret) {
 613                        pr_err("%s: Can not enable link clk, %d\n",
 614                                __func__, ret);
 615                        dsi_bus_clk_disable(msm_host);
 616                        goto unlock_ret;
 617                }
 618        } else {
 619                dsi_link_clk_disable(msm_host);
 620                dsi_bus_clk_disable(msm_host);
 621        }
 622
 623unlock_ret:
 624        mutex_unlock(&msm_host->clk_mutex);
 625        return ret;
 626}
 627
 628static int dsi_calc_clk_rate(struct msm_dsi_host *msm_host)
 629{
 630        struct drm_display_mode *mode = msm_host->mode;
 631        const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
 632        u8 lanes = msm_host->lanes;
 633        u32 bpp = dsi_get_bpp(msm_host->format);
 634        u32 pclk_rate;
 635
 636        if (!mode) {
 637                pr_err("%s: mode not set\n", __func__);
 638                return -EINVAL;
 639        }
 640
 641        pclk_rate = mode->clock * 1000;
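        /*
         * The byte clock is the total link bit rate (pclk * bpp) spread
         * across the data lanes, divided by 8 bits per byte on each lane.
         */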
 642        if (lanes > 0) {
 643                msm_host->byte_clk_rate = (pclk_rate * bpp) / (8 * lanes);
 644        } else {
 645                pr_err("%s: forcing mdss_dsi lanes to 1\n", __func__);
 646                msm_host->byte_clk_rate = (pclk_rate * bpp) / 8;
 647        }
 648
 649        DBG("pclk=%d, bclk=%d", pclk_rate, msm_host->byte_clk_rate);
 650
 651        msm_host->esc_clk_rate = clk_get_rate(msm_host->esc_clk);
 652
 653        if (cfg_hnd->major == MSM_DSI_VER_MAJOR_V2) {
 654                unsigned int esc_mhz, esc_div;
 655                unsigned long byte_mhz;
 656
 657                msm_host->src_clk_rate = (pclk_rate * bpp) / 8;
 658
 659                /*
  660                 * The esc clock is the byte clock followed by a 4 bit
  661                 * divider, so we need an escape clock frequency that lies
  662                 * within the MIPI DSI spec range and is reachable with
  663                 * that divider. We iterate over escape clock frequencies
  664                 * from 20 MHz down to 5 MHz and pick the first one our
  665                 * divider can support.
 666                 */
 667
 668                byte_mhz = msm_host->byte_clk_rate / 1000000;
 669
 670                for (esc_mhz = 20; esc_mhz >= 5; esc_mhz--) {
 671                        esc_div = DIV_ROUND_UP(byte_mhz, esc_mhz);
 672
 673                        /*
 674                         * TODO: Ideally, we shouldn't know what sort of divider
 675                         * is available in mmss_cc, we're just assuming that
 676                         * it'll always be a 4 bit divider. Need to come up with
 677                         * a better way here.
 678                         */
 679                        if (esc_div >= 1 && esc_div <= 16)
 680                                break;
 681                }
 682
 683                if (esc_mhz < 5)
 684                        return -EINVAL;
 685
 686                msm_host->esc_clk_rate = msm_host->byte_clk_rate / esc_div;
 687
 688                DBG("esc=%d, src=%d", msm_host->esc_clk_rate,
 689                        msm_host->src_clk_rate);
 690        }
 691
 692        return 0;
 693}
 694
 695static void dsi_intr_ctrl(struct msm_dsi_host *msm_host, u32 mask, int enable)
 696{
 697        u32 intr;
 698        unsigned long flags;
 699
 700        spin_lock_irqsave(&msm_host->intr_lock, flags);
 701        intr = dsi_read(msm_host, REG_DSI_INTR_CTRL);
 702
 703        if (enable)
 704                intr |= mask;
 705        else
 706                intr &= ~mask;
 707
 708        DBG("intr=%x enable=%d", intr, enable);
 709
 710        dsi_write(msm_host, REG_DSI_INTR_CTRL, intr);
 711        spin_unlock_irqrestore(&msm_host->intr_lock, flags);
 712}
 713
 714static inline enum dsi_traffic_mode dsi_get_traffic_mode(const u32 mode_flags)
 715{
 716        if (mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
 717                return BURST_MODE;
 718        else if (mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
 719                return NON_BURST_SYNCH_PULSE;
 720
 721        return NON_BURST_SYNCH_EVENT;
 722}
 723
 724static inline enum dsi_vid_dst_format dsi_get_vid_fmt(
 725                                const enum mipi_dsi_pixel_format mipi_fmt)
 726{
 727        switch (mipi_fmt) {
 728        case MIPI_DSI_FMT_RGB888:       return VID_DST_FORMAT_RGB888;
 729        case MIPI_DSI_FMT_RGB666:       return VID_DST_FORMAT_RGB666_LOOSE;
 730        case MIPI_DSI_FMT_RGB666_PACKED:        return VID_DST_FORMAT_RGB666;
 731        case MIPI_DSI_FMT_RGB565:       return VID_DST_FORMAT_RGB565;
 732        default:                        return VID_DST_FORMAT_RGB888;
 733        }
 734}
 735
 736static inline enum dsi_cmd_dst_format dsi_get_cmd_fmt(
 737                                const enum mipi_dsi_pixel_format mipi_fmt)
 738{
 739        switch (mipi_fmt) {
 740        case MIPI_DSI_FMT_RGB888:       return CMD_DST_FORMAT_RGB888;
 741        case MIPI_DSI_FMT_RGB666_PACKED:
  742        case MIPI_DSI_FMT_RGB666:       return CMD_DST_FORMAT_RGB666;
 743        case MIPI_DSI_FMT_RGB565:       return CMD_DST_FORMAT_RGB565;
 744        default:                        return CMD_DST_FORMAT_RGB888;
 745        }
 746}
 747
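/*
 * Program the per-mode controller configuration: video/command mode packet
 * layout, trigger sources, clock lane behaviour and data lane enables.
 */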
 748static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
 749                        struct msm_dsi_phy_shared_timings *phy_shared_timings)
 750{
 751        u32 flags = msm_host->mode_flags;
 752        enum mipi_dsi_pixel_format mipi_fmt = msm_host->format;
 753        const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
 754        u32 data = 0;
 755
 756        if (!enable) {
 757                dsi_write(msm_host, REG_DSI_CTRL, 0);
 758                return;
 759        }
 760
 761        if (flags & MIPI_DSI_MODE_VIDEO) {
 762                if (flags & MIPI_DSI_MODE_VIDEO_HSE)
 763                        data |= DSI_VID_CFG0_PULSE_MODE_HSA_HE;
 764                if (flags & MIPI_DSI_MODE_VIDEO_HFP)
 765                        data |= DSI_VID_CFG0_HFP_POWER_STOP;
 766                if (flags & MIPI_DSI_MODE_VIDEO_HBP)
 767                        data |= DSI_VID_CFG0_HBP_POWER_STOP;
 768                if (flags & MIPI_DSI_MODE_VIDEO_HSA)
 769                        data |= DSI_VID_CFG0_HSA_POWER_STOP;
 770                /* Always set low power stop mode for BLLP
 771                 * to let command engine send packets
 772                 */
 773                data |= DSI_VID_CFG0_EOF_BLLP_POWER_STOP |
 774                        DSI_VID_CFG0_BLLP_POWER_STOP;
 775                data |= DSI_VID_CFG0_TRAFFIC_MODE(dsi_get_traffic_mode(flags));
 776                data |= DSI_VID_CFG0_DST_FORMAT(dsi_get_vid_fmt(mipi_fmt));
 777                data |= DSI_VID_CFG0_VIRT_CHANNEL(msm_host->channel);
 778                dsi_write(msm_host, REG_DSI_VID_CFG0, data);
 779
 780                /* Do not swap RGB colors */
 781                data = DSI_VID_CFG1_RGB_SWAP(SWAP_RGB);
  782                dsi_write(msm_host, REG_DSI_VID_CFG1, data);
 783        } else {
 784                /* Do not swap RGB colors */
 785                data = DSI_CMD_CFG0_RGB_SWAP(SWAP_RGB);
 786                data |= DSI_CMD_CFG0_DST_FORMAT(dsi_get_cmd_fmt(mipi_fmt));
 787                dsi_write(msm_host, REG_DSI_CMD_CFG0, data);
 788
 789                data = DSI_CMD_CFG1_WR_MEM_START(MIPI_DCS_WRITE_MEMORY_START) |
 790                        DSI_CMD_CFG1_WR_MEM_CONTINUE(
 791                                        MIPI_DCS_WRITE_MEMORY_CONTINUE);
 792                /* Always insert DCS command */
 793                data |= DSI_CMD_CFG1_INSERT_DCS_COMMAND;
 794                dsi_write(msm_host, REG_DSI_CMD_CFG1, data);
 795        }
 796
 797        dsi_write(msm_host, REG_DSI_CMD_DMA_CTRL,
 798                        DSI_CMD_DMA_CTRL_FROM_FRAME_BUFFER |
 799                        DSI_CMD_DMA_CTRL_LOW_POWER);
 800
 801        data = 0;
 802        /* Always assume dedicated TE pin */
 803        data |= DSI_TRIG_CTRL_TE;
 804        data |= DSI_TRIG_CTRL_MDP_TRIGGER(TRIGGER_NONE);
 805        data |= DSI_TRIG_CTRL_DMA_TRIGGER(TRIGGER_SW);
 806        data |= DSI_TRIG_CTRL_STREAM(msm_host->channel);
 807        if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) &&
 808                (cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V1_2))
 809                data |= DSI_TRIG_CTRL_BLOCK_DMA_WITHIN_FRAME;
 810        dsi_write(msm_host, REG_DSI_TRIG_CTRL, data);
 811
 812        data = DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(phy_shared_timings->clk_post) |
 813                DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE(phy_shared_timings->clk_pre);
 814        dsi_write(msm_host, REG_DSI_CLKOUT_TIMING_CTRL, data);
 815
 816        if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) &&
 817            (cfg_hnd->minor > MSM_DSI_6G_VER_MINOR_V1_0) &&
 818            phy_shared_timings->clk_pre_inc_by_2)
 819                dsi_write(msm_host, REG_DSI_T_CLK_PRE_EXTEND,
 820                          DSI_T_CLK_PRE_EXTEND_INC_BY_2_BYTECLK);
 821
 822        data = 0;
 823        if (!(flags & MIPI_DSI_MODE_EOT_PACKET))
 824                data |= DSI_EOT_PACKET_CTRL_TX_EOT_APPEND;
 825        dsi_write(msm_host, REG_DSI_EOT_PACKET_CTRL, data);
 826
 827        /* allow only ack-err-status to generate interrupt */
 828        dsi_write(msm_host, REG_DSI_ERR_INT_MASK0, 0x13ff3fe0);
 829
 830        dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 1);
 831
 832        dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
 833
 834        data = DSI_CTRL_CLK_EN;
 835
 836        DBG("lane number=%d", msm_host->lanes);
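        /* build a contiguous enable mask covering DSI_CTRL_LANE0..LANE(n-1) */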
 837        data |= ((DSI_CTRL_LANE0 << msm_host->lanes) - DSI_CTRL_LANE0);
 838
 839        dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL,
 840                  DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(msm_host->dlane_swap));
 841
 842        if (!(flags & MIPI_DSI_CLOCK_NON_CONTINUOUS))
 843                dsi_write(msm_host, REG_DSI_LANE_CTRL,
 844                        DSI_LANE_CTRL_CLKLN_HS_FORCE_REQUEST);
 845
 846        data |= DSI_CTRL_ENABLE;
 847
 848        dsi_write(msm_host, REG_DSI_CTRL, data);
 849}
 850
 851static void dsi_timing_setup(struct msm_dsi_host *msm_host)
 852{
 853        struct drm_display_mode *mode = msm_host->mode;
 854        u32 hs_start = 0, vs_start = 0; /* take sync start as 0 */
 855        u32 h_total = mode->htotal;
 856        u32 v_total = mode->vtotal;
 857        u32 hs_end = mode->hsync_end - mode->hsync_start;
 858        u32 vs_end = mode->vsync_end - mode->vsync_start;
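        /*
         * The controller counts from the start of the sync pulse
         * (hs_start/vs_start = 0), so the active region begins at
         * total - sync_start, i.e. after the sync width plus back porch.
         */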
 859        u32 ha_start = h_total - mode->hsync_start;
 860        u32 ha_end = ha_start + mode->hdisplay;
 861        u32 va_start = v_total - mode->vsync_start;
 862        u32 va_end = va_start + mode->vdisplay;
 863        u32 wc;
 864
 865        DBG("");
 866
 867        if (msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) {
 868                dsi_write(msm_host, REG_DSI_ACTIVE_H,
 869                        DSI_ACTIVE_H_START(ha_start) |
 870                        DSI_ACTIVE_H_END(ha_end));
 871                dsi_write(msm_host, REG_DSI_ACTIVE_V,
 872                        DSI_ACTIVE_V_START(va_start) |
 873                        DSI_ACTIVE_V_END(va_end));
 874                dsi_write(msm_host, REG_DSI_TOTAL,
 875                        DSI_TOTAL_H_TOTAL(h_total - 1) |
 876                        DSI_TOTAL_V_TOTAL(v_total - 1));
 877
 878                dsi_write(msm_host, REG_DSI_ACTIVE_HSYNC,
 879                        DSI_ACTIVE_HSYNC_START(hs_start) |
 880                        DSI_ACTIVE_HSYNC_END(hs_end));
 881                dsi_write(msm_host, REG_DSI_ACTIVE_VSYNC_HPOS, 0);
 882                dsi_write(msm_host, REG_DSI_ACTIVE_VSYNC_VPOS,
 883                        DSI_ACTIVE_VSYNC_VPOS_START(vs_start) |
 884                        DSI_ACTIVE_VSYNC_VPOS_END(vs_end));
 885        } else {                /* command mode */
 886                /* image data and 1 byte write_memory_start cmd */
 887                wc = mode->hdisplay * dsi_get_bpp(msm_host->format) / 8 + 1;
 888
 889                dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM_CTRL,
 890                        DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT(wc) |
 891                        DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL(
 892                                        msm_host->channel) |
 893                        DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE(
 894                                        MIPI_DSI_DCS_LONG_WRITE));
 895
 896                dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM_TOTAL,
 897                        DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL(mode->hdisplay) |
 898                        DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL(mode->vdisplay));
 899        }
 900}
 901
 902static void dsi_sw_reset(struct msm_dsi_host *msm_host)
 903{
 904        dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
 905        wmb(); /* clocks need to be enabled before reset */
 906
 907        dsi_write(msm_host, REG_DSI_RESET, 1);
  908        wmb(); /* make sure reset happens */
 909        dsi_write(msm_host, REG_DSI_RESET, 0);
 910}
 911
 912static void dsi_op_mode_config(struct msm_dsi_host *msm_host,
 913                                        bool video_mode, bool enable)
 914{
 915        u32 dsi_ctrl;
 916
 917        dsi_ctrl = dsi_read(msm_host, REG_DSI_CTRL);
 918
 919        if (!enable) {
 920                dsi_ctrl &= ~(DSI_CTRL_ENABLE | DSI_CTRL_VID_MODE_EN |
 921                                DSI_CTRL_CMD_MODE_EN);
 922                dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_MDP_DONE |
 923                                        DSI_IRQ_MASK_VIDEO_DONE, 0);
 924        } else {
 925                if (video_mode) {
 926                        dsi_ctrl |= DSI_CTRL_VID_MODE_EN;
 927                } else {                /* command mode */
 928                        dsi_ctrl |= DSI_CTRL_CMD_MODE_EN;
 929                        dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_MDP_DONE, 1);
 930                }
 931                dsi_ctrl |= DSI_CTRL_ENABLE;
 932        }
 933
 934        dsi_write(msm_host, REG_DSI_CTRL, dsi_ctrl);
 935}
 936
 937static void dsi_set_tx_power_mode(int mode, struct msm_dsi_host *msm_host)
 938{
 939        u32 data;
 940
 941        data = dsi_read(msm_host, REG_DSI_CMD_DMA_CTRL);
 942
 943        if (mode == 0)
 944                data &= ~DSI_CMD_DMA_CTRL_LOW_POWER;
 945        else
 946                data |= DSI_CMD_DMA_CTRL_LOW_POWER;
 947
 948        dsi_write(msm_host, REG_DSI_CMD_DMA_CTRL, data);
 949}
 950
 951static void dsi_wait4video_done(struct msm_dsi_host *msm_host)
 952{
 953        dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 1);
 954
 955        reinit_completion(&msm_host->video_comp);
 956
 957        wait_for_completion_timeout(&msm_host->video_comp,
 958                        msecs_to_jiffies(70));
 959
 960        dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 0);
 961}
 962
 963static void dsi_wait4video_eng_busy(struct msm_dsi_host *msm_host)
 964{
 965        if (!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO))
 966                return;
 967
 968        if (msm_host->power_on) {
 969                dsi_wait4video_done(msm_host);
  970                /* delay 2-4 ms to skip BLLP */
 971                usleep_range(2000, 4000);
 972        }
 973}
 974
 975/* dsi_cmd */
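/*
 * Command DMA TX buffer: DSI 6G hosts fetch from a GEM buffer mapped through
 * the KMS address space, while DSIv2 hosts use a coherent DMA allocation.
 */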
 976static int dsi_tx_buf_alloc(struct msm_dsi_host *msm_host, int size)
 977{
 978        struct drm_device *dev = msm_host->dev;
 979        struct msm_drm_private *priv = dev->dev_private;
 980        const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
 981        int ret;
 982        uint64_t iova;
 983
 984        if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
 985                msm_host->tx_gem_obj = msm_gem_new(dev, size, MSM_BO_UNCACHED);
 986                if (IS_ERR(msm_host->tx_gem_obj)) {
 987                        ret = PTR_ERR(msm_host->tx_gem_obj);
 988                        pr_err("%s: failed to allocate gem, %d\n",
 989                                __func__, ret);
 990                        msm_host->tx_gem_obj = NULL;
 991                        return ret;
 992                }
 993
 994                ret = msm_gem_get_iova(msm_host->tx_gem_obj,
 995                                priv->kms->aspace, &iova);
 997                if (ret) {
 998                        pr_err("%s: failed to get iova, %d\n", __func__, ret);
 999                        return ret;
1000                }
1001
1002                if (iova & 0x07) {
1003                        pr_err("%s: buf NOT 8 bytes aligned\n", __func__);
1004                        return -EINVAL;
1005                }
1006
1007                msm_host->tx_size = msm_host->tx_gem_obj->size;
1008        } else {
1009                msm_host->tx_buf = dma_alloc_coherent(dev->dev, size,
1010                                        &msm_host->tx_buf_paddr, GFP_KERNEL);
1011                if (!msm_host->tx_buf) {
1012                        ret = -ENOMEM;
1013                        pr_err("%s: failed to allocate tx buf, %d\n",
1014                                __func__, ret);
1015                        return ret;
1016                }
1017
1018                msm_host->tx_size = size;
1019        }
1020
1021        return 0;
1022}
1023
1024static void dsi_tx_buf_free(struct msm_dsi_host *msm_host)
1025{
1026        struct drm_device *dev = msm_host->dev;
1027
1028        if (msm_host->tx_gem_obj) {
1029                msm_gem_put_iova(msm_host->tx_gem_obj, 0);
1030                mutex_lock(&dev->struct_mutex);
1031                msm_gem_free_object(msm_host->tx_gem_obj);
1032                msm_host->tx_gem_obj = NULL;
1033                mutex_unlock(&dev->struct_mutex);
1034        }
1035
1036        if (msm_host->tx_buf)
1037                dma_free_coherent(dev->dev, msm_host->tx_size, msm_host->tx_buf,
1038                        msm_host->tx_buf_paddr);
1039}
1040
1041/*
1042 * prepare cmd buffer to be txed
1043 */
1044static int dsi_cmd_dma_add(struct msm_dsi_host *msm_host,
1045                           const struct mipi_dsi_msg *msg)
1046{
1047        const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
1048        struct mipi_dsi_packet packet;
1049        int len;
1050        int ret;
1051        u8 *data;
1052
1053        ret = mipi_dsi_create_packet(&packet, msg);
1054        if (ret) {
1055                pr_err("%s: create packet failed, %d\n", __func__, ret);
1056                return ret;
1057        }
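        /* the DMA transfer length must be dword aligned, so round up */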
1058        len = (packet.size + 3) & (~0x3);
1059
1060        if (len > msm_host->tx_size) {
1061                pr_err("%s: packet size is too big\n", __func__);
1062                return -EINVAL;
1063        }
1064
1065        if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
1066                data = msm_gem_get_vaddr(msm_host->tx_gem_obj);
1067                if (IS_ERR(data)) {
1068                        ret = PTR_ERR(data);
1069                        pr_err("%s: get vaddr failed, %d\n", __func__, ret);
1070                        return ret;
1071                }
1072        } else {
1073                data = msm_host->tx_buf;
1074        }
1075
1076        /* MSM specific command format in memory */
1077        data[0] = packet.header[1];
1078        data[1] = packet.header[2];
1079        data[2] = packet.header[0];
1080        data[3] = BIT(7); /* Last packet */
1081        if (mipi_dsi_packet_format_is_long(msg->type))
1082                data[3] |= BIT(6);
1083        if (msg->rx_buf && msg->rx_len)
1084                data[3] |= BIT(5);
1085
1086        /* Long packet */
1087        if (packet.payload && packet.payload_length)
1088                memcpy(data + 4, packet.payload, packet.payload_length);
1089
1090        /* Append 0xff to the end */
1091        if (packet.size < len)
1092                memset(data + packet.size, 0xff, len - packet.size);
1093
1094        if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G)
1095                msm_gem_put_vaddr(msm_host->tx_gem_obj);
1096
1097        return len;
1098}
1099
1100/*
1101 * dsi_short_read1_resp: 1 parameter
1102 */
1103static int dsi_short_read1_resp(u8 *buf, const struct mipi_dsi_msg *msg)
1104{
1105        u8 *data = msg->rx_buf;
1106        if (data && (msg->rx_len >= 1)) {
1107                *data = buf[1]; /* strip out dcs type */
1108                return 1;
1109        } else {
1110                pr_err("%s: read data does not match with rx_buf len %zu\n",
1111                        __func__, msg->rx_len);
1112                return -EINVAL;
1113        }
1114}
1115
1116/*
 1117 * dsi_short_read2_resp: 2 parameters
1118 */
1119static int dsi_short_read2_resp(u8 *buf, const struct mipi_dsi_msg *msg)
1120{
1121        u8 *data = msg->rx_buf;
1122        if (data && (msg->rx_len >= 2)) {
1123                data[0] = buf[1]; /* strip out dcs type */
1124                data[1] = buf[2];
1125                return 2;
1126        } else {
1127                pr_err("%s: read data does not match with rx_buf len %zu\n",
1128                        __func__, msg->rx_len);
1129                return -EINVAL;
1130        }
1131}
1132
1133static int dsi_long_read_resp(u8 *buf, const struct mipi_dsi_msg *msg)
1134{
1135        /* strip out 4 byte dcs header */
1136        if (msg->rx_buf && msg->rx_len)
1137                memcpy(msg->rx_buf, buf + 4, msg->rx_len);
1138
1139        return msg->rx_len;
1140}
1141
1142static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
1143{
1144        const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
1145        struct drm_device *dev = msm_host->dev;
1146        struct msm_drm_private *priv = dev->dev_private;
1147        int ret;
1148        uint64_t dma_base;
1149        bool triggered;
1150
1151        if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
1152                ret = msm_gem_get_iova(msm_host->tx_gem_obj,
1153                                priv->kms->aspace, &dma_base);
1154                if (ret) {
1155                        pr_err("%s: failed to get iova: %d\n", __func__, ret);
1156                        return ret;
1157                }
1158        } else {
1159                dma_base = msm_host->tx_buf_paddr;
1160        }
1161
1162        reinit_completion(&msm_host->dma_comp);
1163
1164        dsi_wait4video_eng_busy(msm_host);
1165
1166        triggered = msm_dsi_manager_cmd_xfer_trigger(
1167                                                msm_host->id, dma_base, len);
1168        if (triggered) {
1169                ret = wait_for_completion_timeout(&msm_host->dma_comp,
1170                                        msecs_to_jiffies(200));
1171                DBG("ret=%d", ret);
1172                if (ret == 0)
1173                        ret = -ETIMEDOUT;
1174                else
1175                        ret = len;
1176        } else
1177                ret = len;
1178
1179        return ret;
1180}
1181
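/*
 * Copy a read response out of the RDBK_DATA registers (at most 16 bytes per
 * pass). 'buf' points at the current append position inside msm_host->rx_buf;
 * the return value is the number of bytes written to it.
 */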
1182static int dsi_cmd_dma_rx(struct msm_dsi_host *msm_host,
1183                        u8 *buf, int rx_byte, int pkt_size)
1184{
1185        u32 *lp, *temp, data;
1186        int i, j = 0, cnt;
1187        u32 read_cnt;
1188        u8 reg[16];
1189        int repeated_bytes = 0;
1190        int buf_offset = buf - msm_host->rx_buf;
1191
1192        lp = (u32 *)buf;
1193        temp = (u32 *)reg;
1194        cnt = (rx_byte + 3) >> 2;
1195        if (cnt > 4)
1196                cnt = 4; /* 4 x 32 bits registers only */
1197
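        /*
         * Bytes clocked into the read FIFO: a short response is 4 bytes, a
         * long response carries the requested pkt_size payload plus a 4 byte
         * packet header and 2 byte checksum.
         */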
1198        if (rx_byte == 4)
1199                read_cnt = 4;
1200        else
1201                read_cnt = pkt_size + 6;
1202
1203        /*
1204         * In case of multiple reads from the panel, after the first read, there
 1205         * is a possibility that some bytes of the payload are repeated in
 1206         * the RDBK_DATA registers. Since we read all the parameters from the
 1207         * panel right from the first byte for every pass, we need to skip the
 1208         * repeated bytes and then append the new parameters to the rx buffer.
1209         */
1210        if (read_cnt > 16) {
1211                int bytes_shifted;
1212                /* Any data more than 16 bytes will be shifted out.
1213                 * The temp read buffer should already contain these bytes.
1214                 * The remaining bytes in read buffer are the repeated bytes.
1215                 */
1216                bytes_shifted = read_cnt - 16;
1217                repeated_bytes = buf_offset - bytes_shifted;
1218        }
1219
1220        for (i = cnt - 1; i >= 0; i--) {
1221                data = dsi_read(msm_host, REG_DSI_RDBK_DATA(i));
1222                *temp++ = ntohl(data); /* to host byte order */
1223                DBG("data = 0x%x and ntohl(data) = 0x%x", data, ntohl(data));
1224        }
1225
1226        for (i = repeated_bytes; i < 16; i++)
1227                buf[j++] = reg[i];
1228
1229        return j;
1230}
1231
1232static int dsi_cmds2buf_tx(struct msm_dsi_host *msm_host,
1233                                const struct mipi_dsi_msg *msg)
1234{
1235        int len, ret;
1236        int bllp_len = msm_host->mode->hdisplay *
1237                        dsi_get_bpp(msm_host->format) / 8;
1238
1239        len = dsi_cmd_dma_add(msm_host, msg);
 1240        if (len < 0) {
 1241                pr_err("%s: failed to add cmd type = 0x%x\n",
 1242                        __func__,  msg->type);
 1243                return len;
 1244        }
1245
 1246        /* for video mode, do not send cmds more than
 1247         * one pixel line, since they are only transmitted
 1248         * during BLLP.
 1249         */
1250        /* TODO: if the command is sent in LP mode, the bit rate is only
1251         * half of esc clk rate. In this case, if the video is already
1252         * actively streaming, we need to check more carefully if the
1253         * command can be fit into one BLLP.
1254         */
1255        if ((msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) && (len > bllp_len)) {
1256                pr_err("%s: cmd cannot fit into BLLP period, len=%d\n",
1257                        __func__, len);
1258                return -EINVAL;
1259        }
1260
1261        ret = dsi_cmd_dma_tx(msm_host, len);
1262        if (ret < len) {
1263                pr_err("%s: cmd dma tx failed, type=0x%x, data0=0x%x, len=%d\n",
1264                        __func__, msg->type, (*(u8 *)(msg->tx_buf)), len);
1265                return -ECOMM;
1266        }
1267
1268        return len;
1269}
1270
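/*
 * Soft reset the controller while preserving DSI_CTRL; used by the error
 * worker to recover from an MDP FIFO underflow.
 */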
1271static void dsi_sw_reset_restore(struct msm_dsi_host *msm_host)
1272{
1273        u32 data0, data1;
1274
1275        data0 = dsi_read(msm_host, REG_DSI_CTRL);
1276        data1 = data0;
1277        data1 &= ~DSI_CTRL_ENABLE;
1278        dsi_write(msm_host, REG_DSI_CTRL, data1);
1279        /*
 1280         * the dsi controller needs to be disabled before the
 1281         * clocks are turned on
1282         */
1283        wmb();
1284
1285        dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
1286        wmb();  /* make sure clocks enabled */
1287
1288        /* dsi controller can only be reset while clocks are running */
1289        dsi_write(msm_host, REG_DSI_RESET, 1);
 1290        wmb();  /* make sure reset happens */
1291        dsi_write(msm_host, REG_DSI_RESET, 0);
1292        wmb();  /* controller out of reset */
1293        dsi_write(msm_host, REG_DSI_CTRL, data0);
1294        wmb();  /* make sure dsi controller enabled again */
1295}
1296
1297static void dsi_hpd_worker(struct work_struct *work)
1298{
1299        struct msm_dsi_host *msm_host =
1300                container_of(work, struct msm_dsi_host, hpd_work);
1301
1302        drm_helper_hpd_irq_event(msm_host->dev);
1303}
1304
1305static void dsi_err_worker(struct work_struct *work)
1306{
1307        struct msm_dsi_host *msm_host =
1308                container_of(work, struct msm_dsi_host, err_work);
1309        u32 status = msm_host->err_work_state;
1310
1311        pr_err_ratelimited("%s: status=%x\n", __func__, status);
1312        if (status & DSI_ERR_STATE_MDP_FIFO_UNDERFLOW)
1313                dsi_sw_reset_restore(msm_host);
1314
1315        /* It is safe to clear here because error irq is disabled. */
1316        msm_host->err_work_state = 0;
1317
1318        /* enable dsi error interrupt */
1319        dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 1);
1320}
1321
1322static void dsi_ack_err_status(struct msm_dsi_host *msm_host)
1323{
1324        u32 status;
1325
1326        status = dsi_read(msm_host, REG_DSI_ACK_ERR_STATUS);
1327
1328        if (status) {
1329                dsi_write(msm_host, REG_DSI_ACK_ERR_STATUS, status);
 1330                /* Writing an extra 0 is needed to clear the error bits */
1331                dsi_write(msm_host, REG_DSI_ACK_ERR_STATUS, 0);
1332                msm_host->err_work_state |= DSI_ERR_STATE_ACK;
1333        }
1334}
1335
1336static void dsi_timeout_status(struct msm_dsi_host *msm_host)
1337{
1338        u32 status;
1339
1340        status = dsi_read(msm_host, REG_DSI_TIMEOUT_STATUS);
1341
1342        if (status) {
1343                dsi_write(msm_host, REG_DSI_TIMEOUT_STATUS, status);
1344                msm_host->err_work_state |= DSI_ERR_STATE_TIMEOUT;
1345        }
1346}
1347
1348static void dsi_dln0_phy_err(struct msm_dsi_host *msm_host)
1349{
1350        u32 status;
1351
1352        status = dsi_read(msm_host, REG_DSI_DLN0_PHY_ERR);
1353
1354        if (status & (DSI_DLN0_PHY_ERR_DLN0_ERR_ESC |
1355                        DSI_DLN0_PHY_ERR_DLN0_ERR_SYNC_ESC |
1356                        DSI_DLN0_PHY_ERR_DLN0_ERR_CONTROL |
1357                        DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP0 |
1358                        DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP1)) {
1359                dsi_write(msm_host, REG_DSI_DLN0_PHY_ERR, status);
1360                msm_host->err_work_state |= DSI_ERR_STATE_DLN0_PHY;
1361        }
1362}
1363
1364static void dsi_fifo_status(struct msm_dsi_host *msm_host)
1365{
1366        u32 status;
1367
1368        status = dsi_read(msm_host, REG_DSI_FIFO_STATUS);
1369
1370        /* fifo underflow, overflow */
1371        if (status) {
1372                dsi_write(msm_host, REG_DSI_FIFO_STATUS, status);
1373                msm_host->err_work_state |= DSI_ERR_STATE_FIFO;
1374                if (status & DSI_FIFO_STATUS_CMD_MDP_FIFO_UNDERFLOW)
1375                        msm_host->err_work_state |=
1376                                        DSI_ERR_STATE_MDP_FIFO_UNDERFLOW;
1377        }
1378}
1379
1380static void dsi_status(struct msm_dsi_host *msm_host)
1381{
1382        u32 status;
1383
1384        status = dsi_read(msm_host, REG_DSI_STATUS0);
1385
1386        if (status & DSI_STATUS0_INTERLEAVE_OP_CONTENTION) {
1387                dsi_write(msm_host, REG_DSI_STATUS0, status);
1388                msm_host->err_work_state |=
1389                        DSI_ERR_STATE_INTERLEAVE_OP_CONTENTION;
1390        }
1391}
1392
1393static void dsi_clk_status(struct msm_dsi_host *msm_host)
1394{
1395        u32 status;
1396
1397        status = dsi_read(msm_host, REG_DSI_CLK_STATUS);
1398
1399        if (status & DSI_CLK_STATUS_PLL_UNLOCKED) {
1400                dsi_write(msm_host, REG_DSI_CLK_STATUS, status);
1401                msm_host->err_work_state |= DSI_ERR_STATE_PLL_UNLOCKED;
1402        }
1403}
1404
1405static void dsi_error(struct msm_dsi_host *msm_host)
1406{
1407        /* disable dsi error interrupt */
1408        dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 0);
1409
1410        dsi_clk_status(msm_host);
1411        dsi_fifo_status(msm_host);
1412        dsi_ack_err_status(msm_host);
1413        dsi_timeout_status(msm_host);
1414        dsi_status(msm_host);
1415        dsi_dln0_phy_err(msm_host);
1416
1417        queue_work(msm_host->workqueue, &msm_host->err_work);
1418}
1419
1420static irqreturn_t dsi_host_irq(int irq, void *ptr)
1421{
1422        struct msm_dsi_host *msm_host = ptr;
1423        u32 isr;
1424        unsigned long flags;
1425
1426        if (!msm_host->ctrl_base)
1427                return IRQ_HANDLED;
1428
1429        spin_lock_irqsave(&msm_host->intr_lock, flags);
1430        isr = dsi_read(msm_host, REG_DSI_INTR_CTRL);
1431        dsi_write(msm_host, REG_DSI_INTR_CTRL, isr);
1432        spin_unlock_irqrestore(&msm_host->intr_lock, flags);
1433
1434        DBG("isr=0x%x, id=%d", isr, msm_host->id);
1435
1436        if (isr & DSI_IRQ_ERROR)
1437                dsi_error(msm_host);
1438
1439        if (isr & DSI_IRQ_VIDEO_DONE)
1440                complete(&msm_host->video_comp);
1441
1442        if (isr & DSI_IRQ_CMD_DMA_DONE)
1443                complete(&msm_host->dma_comp);
1444
1445        return IRQ_HANDLED;
1446}
1447
1448static int dsi_host_init_panel_gpios(struct msm_dsi_host *msm_host,
1449                        struct device *panel_device)
1450{
1451        msm_host->disp_en_gpio = devm_gpiod_get_optional(panel_device,
1452                                                         "disp-enable",
1453                                                         GPIOD_OUT_LOW);
1454        if (IS_ERR(msm_host->disp_en_gpio)) {
1455                DBG("cannot get disp-enable-gpios %ld",
1456                                PTR_ERR(msm_host->disp_en_gpio));
1457                return PTR_ERR(msm_host->disp_en_gpio);
1458        }
1459
1460        msm_host->te_gpio = devm_gpiod_get_optional(panel_device, "disp-te",
1461                                                                GPIOD_IN);
1462        if (IS_ERR(msm_host->te_gpio)) {
1463                DBG("cannot get disp-te-gpios %ld", PTR_ERR(msm_host->te_gpio));
1464                return PTR_ERR(msm_host->te_gpio);
1465        }
1466
1467        return 0;
1468}
1469
1470static int dsi_host_attach(struct mipi_dsi_host *host,
1471                                        struct mipi_dsi_device *dsi)
1472{
1473        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1474        int ret;
1475
1476        if (dsi->lanes > msm_host->num_data_lanes)
1477                return -EINVAL;
1478
1479        msm_host->channel = dsi->channel;
1480        msm_host->lanes = dsi->lanes;
1481        msm_host->format = dsi->format;
1482        msm_host->mode_flags = dsi->mode_flags;
1483
1484        msm_dsi_manager_attach_dsi_device(msm_host->id, dsi->mode_flags);
1485
1486        /* Some gpios defined in panel DT need to be controlled by host */
1487        ret = dsi_host_init_panel_gpios(msm_host, &dsi->dev);
1488        if (ret)
1489                return ret;
1490
1491        DBG("id=%d", msm_host->id);
1492        if (msm_host->dev)
1493                queue_work(msm_host->workqueue, &msm_host->hpd_work);
1494
1495        return 0;
1496}
1497
1498static int dsi_host_detach(struct mipi_dsi_host *host,
1499                                        struct mipi_dsi_device *dsi)
1500{
1501        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1502
1503        msm_host->device_node = NULL;
1504
1505        DBG("id=%d", msm_host->id);
1506        if (msm_host->dev)
1507                queue_work(msm_host->workqueue, &msm_host->hpd_work);
1508
1509        return 0;
1510}
1511
1512static ssize_t dsi_host_transfer(struct mipi_dsi_host *host,
1513                                        const struct mipi_dsi_msg *msg)
1514{
1515        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1516        int ret;
1517
1518        if (!msg || !msm_host->power_on)
1519                return -EINVAL;
1520
1521        mutex_lock(&msm_host->cmd_mutex);
1522        ret = msm_dsi_manager_cmd_xfer(msm_host->id, msg);
1523        mutex_unlock(&msm_host->cmd_mutex);
1524
1525        return ret;
1526}
1527
1528static const struct mipi_dsi_host_ops dsi_host_ops = {
1529        .attach = dsi_host_attach,
1530        .detach = dsi_host_detach,
1531        .transfer = dsi_host_transfer,
1532};
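
    /*
     * Illustrative only: a panel driver bound to this host reaches
     * dsi_host_transfer() through the standard drm_mipi_dsi helpers, e.g.
     *
     *        mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
     *        mipi_dsi_dcs_read(dsi, MIPI_DCS_GET_POWER_MODE, &mode, 1);
     *
     * both of which end up in msm_dsi_manager_cmd_xfer() via .transfer above.
     */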
1533
1534/*
1535 * List of supported physical to logical lane mappings.
1536 * For example, the 2nd entry represents the following mapping:
1537 *
1538 * "3012": Logic 3->Phys 0; Logic 0->Phys 1; Logic 1->Phys 2; Logic 2->Phys 3;
1539 */
1540static const int supported_data_lane_swaps[][4] = {
1541        { 0, 1, 2, 3 },
1542        { 3, 0, 1, 2 },
1543        { 2, 3, 0, 1 },
1544        { 1, 2, 3, 0 },
1545        { 0, 3, 2, 1 },
1546        { 1, 0, 3, 2 },
1547        { 2, 1, 0, 3 },
1548        { 3, 2, 1, 0 },
1549};
1550
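    /*
     * For illustration, a hypothetical endpoint property of
     *
     *        data-lanes = <1 2 3 0>;
     *
     * is a logical->physical description (logical lane 0 on physical lane 1,
     * and so on); dsi_host_parse_lane_data() below inverts it and matches it
     * to the 2nd entry of the table above, i.e. dlane_swap = 1.
     */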
1551static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
1552                                    struct device_node *ep)
1553{
1554        struct device *dev = &msm_host->pdev->dev;
1555        struct property *prop;
1556        u32 lane_map[4];
1557        int ret, i, len, num_lanes;
1558
1559        prop = of_find_property(ep, "data-lanes", &len);
1560        if (!prop) {
1561                dev_dbg(dev,
1562                        "failed to find data lane mapping, using default\n");
1563                return 0;
1564        }
1565
1566        num_lanes = len / sizeof(u32);
1567
1568        if (num_lanes < 1 || num_lanes > 4) {
1569                dev_err(dev, "bad number of data lanes\n");
1570                return -EINVAL;
1571        }
1572
1573        msm_host->num_data_lanes = num_lanes;
1574
1575        ret = of_property_read_u32_array(ep, "data-lanes", lane_map,
1576                                         num_lanes);
1577        if (ret) {
1578                dev_err(dev, "failed to read lane data\n");
1579                return ret;
1580        }
1581
1582        /*
1583         * compare DT specified physical-logical lane mappings with the ones
1584         * supported by hardware
1585         */
1586        for (i = 0; i < ARRAY_SIZE(supported_data_lane_swaps); i++) {
1587                const int *swap = supported_data_lane_swaps[i];
1588                int j;
1589
1590                /*
1591                 * the data-lanes array we get from DT has a logical->physical
1592                 * mapping. The "data lane swap" register field represents
1593                 * supported configurations in a physical->logical mapping.
1594                 * Translate the DT mapping to what we understand and find a
1595                 * configuration that works.
1596                 */
1597                for (j = 0; j < num_lanes; j++) {
1598                        if (lane_map[j] > 3)
1599                                dev_err(dev, "bad physical lane entry %u\n",
1600                                        lane_map[j]);
1601
1602                        if (swap[lane_map[j]] != j)
1603                                break;
1604                }
1605
1606                if (j == num_lanes) {
1607                        msm_host->dlane_swap = i;
1608                        return 0;
1609                }
1610        }
1611
1612        return -EINVAL;
1613}
1614
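    /*
     * Hypothetical DSI node fragment matching the graph walk done here
     * (node and label names are only examples):
     *
     *        ports {
     *                port@1 {
     *                        reg = <1>;
     *                        dsi0_out: endpoint {
     *                                remote-endpoint = <&panel_in>;
     *                                data-lanes = <0 1 2 3>;
     *                        };
     *                };
     *        };
     */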
1615static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
1616{
1617        struct device *dev = &msm_host->pdev->dev;
1618        struct device_node *np = dev->of_node;
1619        struct device_node *endpoint, *device_node;
1620        int ret = 0;
1621
1622        /*
1623         * Get the endpoint of the output port of the DSI host. In our case,
1624         * this is the port with reg = 1. Don't return an error if the
1625         * remote endpoint isn't defined; it's possible that there is
1626         * nothing connected to the dsi output.
1627         */
1628        endpoint = of_graph_get_endpoint_by_regs(np, 1, -1);
1629        if (!endpoint) {
1630                dev_dbg(dev, "%s: no endpoint\n", __func__);
1631                return 0;
1632        }
1633
1634        ret = dsi_host_parse_lane_data(msm_host, endpoint);
1635        if (ret) {
1636                dev_err(dev, "%s: invalid lane configuration %d\n",
1637                        __func__, ret);
1638                goto err;
1639        }
1640
1641        /* Get panel node from the output port's endpoint data */
1642        device_node = of_graph_get_remote_node(np, 1, 0);
1643        if (!device_node) {
1644                dev_dbg(dev, "%s: no valid device\n", __func__);
1645                goto err;
1646        }
1647
1648        msm_host->device_node = device_node;
1649
1650        if (of_property_read_bool(np, "syscon-sfpb")) {
1651                msm_host->sfpb = syscon_regmap_lookup_by_phandle(np,
1652                                        "syscon-sfpb");
1653                if (IS_ERR(msm_host->sfpb)) {
1654                        dev_err(dev, "%s: failed to get sfpb regmap\n",
1655                                __func__);
1656                        ret = PTR_ERR(msm_host->sfpb);
1657                }
1658        }
1659
1660        of_node_put(device_node);
1661
1662err:
1663        of_node_put(endpoint);
1664
1665        return ret;
1666}
1667
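    /*
     * Identify which DSI controller instance this is by matching the start
     * of the "dsi_ctrl" MMIO resource against the per-SoC list of controller
     * base addresses in cfg->io_start[].
     */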
1668static int dsi_host_get_id(struct msm_dsi_host *msm_host)
1669{
1670        struct platform_device *pdev = msm_host->pdev;
1671        const struct msm_dsi_config *cfg = msm_host->cfg_hnd->cfg;
1672        struct resource *res;
1673        int i;
1674
1675        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dsi_ctrl");
1676        if (!res)
1677                return -EINVAL;
1678
1679        for (i = 0; i < cfg->num_dsi; i++) {
1680                if (cfg->io_start[i] == res->start)
1681                        return i;
1682        }
1683
1684        return -EINVAL;
1685}
1686
1687int msm_dsi_host_init(struct msm_dsi *msm_dsi)
1688{
1689        struct msm_dsi_host *msm_host = NULL;
1690        struct platform_device *pdev = msm_dsi->pdev;
1691        int ret;
1692
1693        msm_host = devm_kzalloc(&pdev->dev, sizeof(*msm_host), GFP_KERNEL);
1694        if (!msm_host) {
1695                pr_err("%s: FAILED: cannot alloc dsi host\n",
1696                       __func__);
1697                ret = -ENOMEM;
1698                goto fail;
1699        }
1700
1701        msm_host->pdev = pdev;
1702
1703        ret = dsi_host_parse_dt(msm_host);
1704        if (ret) {
1705                pr_err("%s: failed to parse dt\n", __func__);
1706                goto fail;
1707        }
1708
1709        msm_host->ctrl_base = msm_ioremap(pdev, "dsi_ctrl", "DSI CTRL");
1710        if (IS_ERR(msm_host->ctrl_base)) {
1711                pr_err("%s: unable to map DSI ctrl base\n", __func__);
1712                ret = PTR_ERR(msm_host->ctrl_base);
1713                goto fail;
1714        }
1715
1716        msm_host->cfg_hnd = dsi_get_config(msm_host);
1717        if (!msm_host->cfg_hnd) {
1718                ret = -EINVAL;
1719                pr_err("%s: get config failed\n", __func__);
1720                goto fail;
1721        }
1722
1723        msm_host->id = dsi_host_get_id(msm_host);
1724        if (msm_host->id < 0) {
1725                ret = msm_host->id;
1726                pr_err("%s: unable to identify DSI host index\n", __func__);
1727                goto fail;
1728        }
1729
1730        /* fixup base address by io offset */
1731        msm_host->ctrl_base += msm_host->cfg_hnd->cfg->io_offset;
1732
1733        ret = dsi_regulator_init(msm_host);
1734        if (ret) {
1735                pr_err("%s: regulator init failed\n", __func__);
1736                goto fail;
1737        }
1738
1739        ret = dsi_clk_init(msm_host);
1740        if (ret) {
1741                pr_err("%s: unable to initialize dsi clks\n", __func__);
1742                goto fail;
1743        }
1744
1745        msm_host->rx_buf = devm_kzalloc(&pdev->dev, SZ_4K, GFP_KERNEL);
1746        if (!msm_host->rx_buf) {
1747                ret = -ENOMEM;
1748                pr_err("%s: alloc rx temp buf failed\n", __func__);
1749                goto fail;
1750        }
1751
1752        init_completion(&msm_host->dma_comp);
1753        init_completion(&msm_host->video_comp);
1754        mutex_init(&msm_host->dev_mutex);
1755        mutex_init(&msm_host->cmd_mutex);
1756        mutex_init(&msm_host->clk_mutex);
1757        spin_lock_init(&msm_host->intr_lock);
1758
1759        /* setup workqueue */
1760        msm_host->workqueue = alloc_ordered_workqueue("dsi_drm_work", 0);
1761        INIT_WORK(&msm_host->err_work, dsi_err_worker);
1762        INIT_WORK(&msm_host->hpd_work, dsi_hpd_worker);
1763
1764        msm_dsi->host = &msm_host->base;
1765        msm_dsi->id = msm_host->id;
1766
1767        DBG("DSI host %d initialized", msm_host->id);
1768        return 0;
1769
1770fail:
1771        return ret;
1772}
1773
1774void msm_dsi_host_destroy(struct mipi_dsi_host *host)
1775{
1776        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1777
1778        DBG("");
1779        dsi_tx_buf_free(msm_host);
1780        if (msm_host->workqueue) {
1781                flush_workqueue(msm_host->workqueue);
1782                destroy_workqueue(msm_host->workqueue);
1783                msm_host->workqueue = NULL;
1784        }
1785
1786        mutex_destroy(&msm_host->clk_mutex);
1787        mutex_destroy(&msm_host->cmd_mutex);
1788        mutex_destroy(&msm_host->dev_mutex);
1789}
1790
1791int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
1792                                        struct drm_device *dev)
1793{
1794        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1795        struct platform_device *pdev = msm_host->pdev;
1796        int ret;
1797
1798        msm_host->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
1799        if (!msm_host->irq) {
1800                /* irq_of_parse_and_map() returns 0 on failure */
1801                dev_err(dev->dev, "failed to get irq\n");
1802                return -EINVAL;
1803        }
1804
1805        ret = devm_request_irq(&pdev->dev, msm_host->irq,
1806                        dsi_host_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
1807                        "dsi_isr", msm_host);
1808        if (ret < 0) {
1809                dev_err(&pdev->dev, "failed to request IRQ%u: %d\n",
1810                                msm_host->irq, ret);
1811                return ret;
1812        }
1813
1814        msm_host->dev = dev;
1815        ret = dsi_tx_buf_alloc(msm_host, SZ_4K);
1816        if (ret) {
1817                pr_err("%s: alloc tx gem obj failed, %d\n", __func__, ret);
1818                return ret;
1819        }
1820
1821        return 0;
1822}
1823
1824int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer)
1825{
1826        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1827        int ret;
1828
1829        /* Register mipi dsi host */
1830        if (!msm_host->registered) {
1831                host->dev = &msm_host->pdev->dev;
1832                host->ops = &dsi_host_ops;
1833                ret = mipi_dsi_host_register(host);
1834                if (ret)
1835                        return ret;
1836
1837                msm_host->registered = true;
1838
1839                /* If the panel driver has not been probed by the time the
1840                 * host registers, defer the host's probe.
1841                 * This makes sure the panel is connected when fbcon detects
1842                 * the connector status and gets the proper display mode to
1843                 * create the framebuffer.
1844                 * Don't try to defer if there is nothing connected to the
1845                 * dsi output.
1846                 */
1847                if (check_defer && msm_host->device_node) {
1848                        if (!of_drm_find_panel(msm_host->device_node))
1849                                if (!of_drm_find_bridge(msm_host->device_node))
1850                                        return -EPROBE_DEFER;
1851                }
1852        }
1853
1854        return 0;
1855}
1856
1857void msm_dsi_host_unregister(struct mipi_dsi_host *host)
1858{
1859        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1860
1861        if (msm_host->registered) {
1862                mipi_dsi_host_unregister(host);
1863                host->dev = NULL;
1864                host->ops = NULL;
1865                msm_host->registered = false;
1866        }
1867}
1868
1869int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host,
1870                                const struct mipi_dsi_msg *msg)
1871{
1872        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1873
1874        /* TODO: make sure dsi_cmd_mdp is idle.
1875         * Since DSI6G v1.2.0, we can set DSI_TRIG_CTRL.BLOCK_DMA_WITHIN_FRAME
1876         * to ask H/W to wait until cmd mdp is idle. S/W wait is not needed.
1877         * How to handle the old versions? Wait for mdp cmd done?
1878         */
1879
1880        /*
1881         * The MDSS interrupt is generated in the MDP core clock domain, so
1882         * the MDP clock must be enabled to receive the DSI interrupt.
1883         */
1884        dsi_clk_ctrl(msm_host, 1);
1885
1886        /* TODO: vote for bus bandwidth */
1887
1888        if (!(msg->flags & MIPI_DSI_MSG_USE_LPM))
1889                dsi_set_tx_power_mode(0, msm_host);
1890
1891        msm_host->dma_cmd_ctrl_restore = dsi_read(msm_host, REG_DSI_CTRL);
1892        dsi_write(msm_host, REG_DSI_CTRL,
1893                msm_host->dma_cmd_ctrl_restore |
1894                DSI_CTRL_CMD_MODE_EN |
1895                DSI_CTRL_ENABLE);
1896        dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_DMA_DONE, 1);
1897
1898        return 0;
1899}
1900
1901void msm_dsi_host_xfer_restore(struct mipi_dsi_host *host,
1902                                const struct mipi_dsi_msg *msg)
1903{
1904        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1905
1906        dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_DMA_DONE, 0);
1907        dsi_write(msm_host, REG_DSI_CTRL, msm_host->dma_cmd_ctrl_restore);
1908
1909        if (!(msg->flags & MIPI_DSI_MSG_USE_LPM))
1910                dsi_set_tx_power_mode(1, msm_host);
1911
1912        /* TODO: unvote for bus bandwidth */
1913
1914        dsi_clk_ctrl(msm_host, 0);
1915}
1916
1917int msm_dsi_host_cmd_tx(struct mipi_dsi_host *host,
1918                                const struct mipi_dsi_msg *msg)
1919{
1920        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1921
1922        return dsi_cmds2buf_tx(msm_host, msg);
1923}
1924
1925int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
1926                                const struct mipi_dsi_msg *msg)
1927{
1928        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1929        const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
1930        int data_byte, rx_byte, dlen, end;
1931        int short_response, diff, pkt_size, ret = 0;
1932        char cmd;
1933        int rlen = msg->rx_len;
1934        u8 *buf;
1935
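            /*
             * Size the reads around the 16 byte rx fifo: a short response
             * fits in 4 bytes; for a long response the first read returns
             * 4 header bytes, up to 10 data bytes and 2 CRC bytes, while
             * later reads lose the header and can carry up to 14 data bytes.
             */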
1936        if (rlen <= 2) {
1937                short_response = 1;
1938                pkt_size = rlen;
1939                rx_byte = 4;
1940        } else {
1941                short_response = 0;
1942                data_byte = 10; /* first read */
1943                if (rlen < data_byte)
1944                        pkt_size = rlen;
1945                else
1946                        pkt_size = data_byte;
1947                rx_byte = data_byte + 6; /* 4 header + 2 crc */
1948        }
1949
1950        buf = msm_host->rx_buf;
1951        end = 0;
1952        while (!end) {
1953                u8 tx[2] = {pkt_size & 0xff, pkt_size >> 8};
1954                struct mipi_dsi_msg max_pkt_size_msg = {
1955                        .channel = msg->channel,
1956                        .type = MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE,
1957                        .tx_len = 2,
1958                        .tx_buf = tx,
1959                };
1960
1961                DBG("rlen=%d pkt_size=%d rx_byte=%d",
1962                        rlen, pkt_size, rx_byte);
1963
1964                ret = dsi_cmds2buf_tx(msm_host, &max_pkt_size_msg);
1965                if (ret < 2) {
1966                        pr_err("%s: Set max pkt size failed, %d\n",
1967                                __func__, ret);
1968                        return -EINVAL;
1969                }
1970
1971                if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) &&
1972                        (cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V1_1)) {
1973                        /* Clear the RDBK_DATA registers */
1974                        dsi_write(msm_host, REG_DSI_RDBK_DATA_CTRL,
1975                                        DSI_RDBK_DATA_CTRL_CLR);
1976                        wmb(); /* make sure the RDBK registers are cleared */
1977                        dsi_write(msm_host, REG_DSI_RDBK_DATA_CTRL, 0);
1978                        wmb(); /* release cleared status before transfer */
1979                }
1980
1981                ret = dsi_cmds2buf_tx(msm_host, msg);
1982                if (ret < msg->tx_len) {
1983                        pr_err("%s: Read cmd Tx failed, %d\n", __func__, ret);
1984                        return ret;
1985                }
1986
1987                /*
1988                 * Once the cmd_dma_done interrupt is received, the return
1989                 * data from the client is ready and already stored in the
1990                 * RDBK_DATA registers. Since the rx fifo is 16 bytes, the
1991                 * dcs header is kept only on the first iteration; later
1992                 * iterations lose it while shifting data into the registers.
1993                 */
1994                dlen = dsi_cmd_dma_rx(msm_host, buf, rx_byte, pkt_size);
1995
1996                if (dlen <= 0)
1997                        return 0;
1998
1999                if (short_response)
2000                        break;
2001
2002                if (rlen <= data_byte) {
2003                        diff = data_byte - rlen;
2004                        end = 1;
2005                } else {
2006                        diff = 0;
2007                        rlen -= data_byte;
2008                }
2009
2010                if (!end) {
2011                        dlen -= 2; /* 2 crc */
2012                        dlen -= diff;
2013                        buf += dlen;    /* next start position */
2014                        data_byte = 14; /* NOT first read */
2015                        if (rlen < data_byte)
2016                                pkt_size += rlen;
2017                        else
2018                                pkt_size += data_byte;
2019                        DBG("buf=%p dlen=%d diff=%d", buf, dlen, diff);
2020                }
2021        }
2022
2023        /*
2024         * For a single long read with a requested rlen < 10,
2025         * shift the start position of the rx data buffer to
2026         * skip the leading bytes which were not updated by
2027         * the response.
2028         */
2029        if (pkt_size < 10 && !short_response)
2030                buf = msm_host->rx_buf + (10 - rlen);
2031        else
2032                buf = msm_host->rx_buf;
2033
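            /* the first byte of the response is the RX packet data type */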
2034        cmd = buf[0];
2035        switch (cmd) {
2036        case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
2037                pr_err("%s: rx ACK_ERR_PACKAGE\n", __func__);
2038                ret = 0;
2039                break;
2040        case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
2041        case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
2042                ret = dsi_short_read1_resp(buf, msg);
2043                break;
2044        case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
2045        case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
2046                ret = dsi_short_read2_resp(buf, msg);
2047                break;
2048        case MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE:
2049        case MIPI_DSI_RX_DCS_LONG_READ_RESPONSE:
2050                ret = dsi_long_read_resp(buf, msg);
2051                break;
2052        default:
2053                pr_warn("%s: Invalid response cmd\n", __func__);
2054                ret = 0;
2055        }
2056
2057        return ret;
2058}
2059
2060void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, u32 dma_base,
2061                                  u32 len)
2062{
2063        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2064
2065        dsi_write(msm_host, REG_DSI_DMA_BASE, dma_base);
2066        dsi_write(msm_host, REG_DSI_DMA_LEN, len);
2067        dsi_write(msm_host, REG_DSI_TRIG_DMA, 1);
2068
2069        /* Make sure trigger happens */
2070        wmb();
2071}
2072
2073int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
2074        struct msm_dsi_pll *src_pll)
2075{
2076        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2077        const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
2078        struct clk *byte_clk_provider, *pixel_clk_provider;
2079        int ret;
2080
2081        ret = msm_dsi_pll_get_clk_provider(src_pll,
2082                                &byte_clk_provider, &pixel_clk_provider);
2083        if (ret) {
2084                pr_info("%s: can't get provider from pll, don't set parent\n",
2085                        __func__);
2086                return 0;
2087        }
2088
2089        ret = clk_set_parent(msm_host->byte_clk_src, byte_clk_provider);
2090        if (ret) {
2091                pr_err("%s: can't set parent to byte_clk_src. ret=%d\n",
2092                        __func__, ret);
2093                goto exit;
2094        }
2095
2096        ret = clk_set_parent(msm_host->pixel_clk_src, pixel_clk_provider);
2097        if (ret) {
2098                pr_err("%s: can't set parent to pixel_clk_src. ret=%d\n",
2099                        __func__, ret);
2100                goto exit;
2101        }
2102
2103        if (cfg_hnd->major == MSM_DSI_VER_MAJOR_V2) {
2104                ret = clk_set_parent(msm_host->dsi_clk_src, pixel_clk_provider);
2105                if (ret) {
2106                        pr_err("%s: can't set parent to dsi_clk_src. ret=%d\n",
2107                                __func__, ret);
2108                        goto exit;
2109                }
2110
2111                ret = clk_set_parent(msm_host->esc_clk_src, byte_clk_provider);
2112                if (ret) {
2113                        pr_err("%s: can't set parent to esc_clk_src. ret=%d\n",
2114                                __func__, ret);
2115                        goto exit;
2116                }
2117        }
2118
2119exit:
2120        return ret;
2121}
2122
2123void msm_dsi_host_reset_phy(struct mipi_dsi_host *host)
2124{
2125        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2126
2127        DBG("");
2128        dsi_write(msm_host, REG_DSI_PHY_RESET, DSI_PHY_RESET_RESET);
2129        /* Make sure fully reset */
2130        wmb();
2131        udelay(1000);
2132        dsi_write(msm_host, REG_DSI_PHY_RESET, 0);
2133        udelay(100);
2134}
2135
2136void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host,
2137        struct msm_dsi_phy_clk_request *clk_req)
2138{
2139        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2140        int ret;
2141
2142        ret = dsi_calc_clk_rate(msm_host);
2143        if (ret) {
2144                pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
2145                return;
2146        }
2147
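            /*
             * Each DSI data lane serializes one byte per byte clock cycle,
             * so the link bit clock is 8x the byte clock.
             */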
2148        clk_req->bitclk_rate = msm_host->byte_clk_rate * 8;
2149        clk_req->escclk_rate = msm_host->esc_clk_rate;
2150}
2151
2152int msm_dsi_host_enable(struct mipi_dsi_host *host)
2153{
2154        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2155
2156        dsi_op_mode_config(msm_host,
2157                !!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), true);
2158
2159        /* TODO: clock should be turned off for command mode,
2160         * and only turned on before MDP START.
2161         * This part of code should be enabled once mdp driver support it.
2162         */
2163        /* if (msm_panel->mode == MSM_DSI_CMD_MODE)
2164                dsi_clk_ctrl(msm_host, 0); */
2165
2166        return 0;
2167}
2168
2169int msm_dsi_host_disable(struct mipi_dsi_host *host)
2170{
2171        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2172
2173        dsi_op_mode_config(msm_host,
2174                !!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), false);
2175
2176        /* Since we have disabled INTF, the video engine won't stop, so
2177         * the cmd engine will stay blocked.
2178         * Reset to disable the video engine so that we can send off cmds.
2179         */
2180        dsi_sw_reset(msm_host);
2181
2182        return 0;
2183}
2184
2185static void msm_dsi_sfpb_config(struct msm_dsi_host *msm_host, bool enable)
2186{
2187        enum sfpb_ahb_arb_master_port_en en;
2188
2189        if (!msm_host->sfpb)
2190                return;
2191
2192        en = enable ? SFPB_MASTER_PORT_ENABLE : SFPB_MASTER_PORT_DISABLE;
2193
2194        regmap_update_bits(msm_host->sfpb, REG_SFPB_GPREG,
2195                        SFPB_GPREG_MASTER_PORT_EN__MASK,
2196                        SFPB_GPREG_MASTER_PORT_EN(en));
2197}
2198
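    /*
     * Power-on sequence: SFPB master port -> regulators -> link clocks ->
     * pinctrl default state -> timing setup + SW reset -> controller
     * enable -> panel enable gpio. msm_dsi_host_power_off() below undoes
     * these steps.
     */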
2199int msm_dsi_host_power_on(struct mipi_dsi_host *host,
2200                        struct msm_dsi_phy_shared_timings *phy_shared_timings)
2201{
2202        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2203        int ret = 0;
2204
2205        mutex_lock(&msm_host->dev_mutex);
2206        if (msm_host->power_on) {
2207                DBG("dsi host already on");
2208                goto unlock_ret;
2209        }
2210
2211        msm_dsi_sfpb_config(msm_host, true);
2212
2213        ret = dsi_host_regulator_enable(msm_host);
2214        if (ret) {
2215                pr_err("%s: Failed to enable vregs. ret=%d\n",
2216                        __func__, ret);
2217                goto unlock_ret;
2218        }
2219
2220        ret = dsi_clk_ctrl(msm_host, 1);
2221        if (ret) {
2222                pr_err("%s: failed to enable clocks. ret=%d\n", __func__, ret);
2223                goto fail_disable_reg;
2224        }
2225
2226        ret = pinctrl_pm_select_default_state(&msm_host->pdev->dev);
2227        if (ret) {
2228                pr_err("%s: failed to set pinctrl default state, %d\n",
2229                        __func__, ret);
2230                goto fail_disable_clk;
2231        }
2232
2233        dsi_timing_setup(msm_host);
2234        dsi_sw_reset(msm_host);
2235        dsi_ctrl_config(msm_host, true, phy_shared_timings);
2236
2237        if (msm_host->disp_en_gpio)
2238                gpiod_set_value(msm_host->disp_en_gpio, 1);
2239
2240        msm_host->power_on = true;
2241        mutex_unlock(&msm_host->dev_mutex);
2242
2243        return 0;
2244
2245fail_disable_clk:
2246        dsi_clk_ctrl(msm_host, 0);
2247fail_disable_reg:
2248        dsi_host_regulator_disable(msm_host);
2249unlock_ret:
2250        mutex_unlock(&msm_host->dev_mutex);
2251        return ret;
2252}
2253
2254int msm_dsi_host_power_off(struct mipi_dsi_host *host)
2255{
2256        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2257
2258        mutex_lock(&msm_host->dev_mutex);
2259        if (!msm_host->power_on) {
2260                DBG("dsi host already off");
2261                goto unlock_ret;
2262        }
2263
2264        dsi_ctrl_config(msm_host, false, NULL);
2265
2266        if (msm_host->disp_en_gpio)
2267                gpiod_set_value(msm_host->disp_en_gpio, 0);
2268
2269        pinctrl_pm_select_sleep_state(&msm_host->pdev->dev);
2270
2271        dsi_clk_ctrl(msm_host, 0);
2272
2273        dsi_host_regulator_disable(msm_host);
2274
2275        msm_dsi_sfpb_config(msm_host, false);
2276
2277        DBG("-");
2278
2279        msm_host->power_on = false;
2280
2281unlock_ret:
2282        mutex_unlock(&msm_host->dev_mutex);
2283        return 0;
2284}
2285
2286int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
2287                                        struct drm_display_mode *mode)
2288{
2289        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2290
2291        if (msm_host->mode) {
2292                drm_mode_destroy(msm_host->dev, msm_host->mode);
2293                msm_host->mode = NULL;
2294        }
2295
2296        msm_host->mode = drm_mode_duplicate(msm_host->dev, mode);
2297        if (!msm_host->mode) {
2298                pr_err("%s: cannot duplicate mode\n", __func__);
2299                return -ENOMEM;
2300        }
2301
2302        return 0;
2303}
2304
2305struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host,
2306                                unsigned long *panel_flags)
2307{
2308        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2309        struct drm_panel *panel;
2310
2311        panel = of_drm_find_panel(msm_host->device_node);
2312        if (panel_flags)
2313                *panel_flags = msm_host->mode_flags;
2314
2315        return panel;
2316}
2317
2318struct drm_bridge *msm_dsi_host_get_bridge(struct mipi_dsi_host *host)
2319{
2320        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2321
2322        return of_drm_find_bridge(msm_host->device_node);
2323}
2324