linux/drivers/gpu/drm/omapdrm/dss/dsi.c
   1/*
   2 * linux/drivers/video/omap2/dss/dsi.c
   3 *
   4 * Copyright (C) 2009 Nokia Corporation
   5 * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
   6 *
   7 * This program is free software; you can redistribute it and/or modify it
   8 * under the terms of the GNU General Public License version 2 as published by
   9 * the Free Software Foundation.
  10 *
  11 * This program is distributed in the hope that it will be useful, but WITHOUT
  12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  14 * more details.
  15 *
  16 * You should have received a copy of the GNU General Public License along with
  17 * this program.  If not, see <http://www.gnu.org/licenses/>.
  18 */
  19
  20#define DSS_SUBSYS_NAME "DSI"
  21
  22#include <linux/kernel.h>
  23#include <linux/io.h>
  24#include <linux/clk.h>
  25#include <linux/device.h>
  26#include <linux/err.h>
  27#include <linux/interrupt.h>
  28#include <linux/delay.h>
  29#include <linux/mutex.h>
  30#include <linux/module.h>
  31#include <linux/semaphore.h>
  32#include <linux/seq_file.h>
  33#include <linux/platform_device.h>
  34#include <linux/regulator/consumer.h>
  35#include <linux/wait.h>
  36#include <linux/workqueue.h>
  37#include <linux/sched.h>
  38#include <linux/slab.h>
  39#include <linux/debugfs.h>
  40#include <linux/pm_runtime.h>
  41#include <linux/of.h>
  42#include <linux/of_platform.h>
  43#include <linux/component.h>
  44
  45#include <video/omapdss.h>
  46#include <video/mipi_display.h>
  47
  48#include "dss.h"
  49#include "dss_features.h"
  50
  51#define DSI_CATCH_MISSING_TE
  52
  53struct dsi_reg { u16 module; u16 idx; };
  54
  55#define DSI_REG(mod, idx)               ((const struct dsi_reg) { mod, idx })
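/*
 * Registers are addressed as (module, offset) pairs: the module selects one
 * of the three ioremapped register blocks (protocol engine, PHY or PLL) and
 * the offset is relative to that block.  dsi_read_reg()/dsi_write_reg()
 * below resolve the pair against proto_base/phy_base/pll_base.
 */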
  56
  57/* DSI Protocol Engine */
  58
  59#define DSI_PROTO                       0
  60#define DSI_PROTO_SZ                    0x200
  61
  62#define DSI_REVISION                    DSI_REG(DSI_PROTO, 0x0000)
  63#define DSI_SYSCONFIG                   DSI_REG(DSI_PROTO, 0x0010)
  64#define DSI_SYSSTATUS                   DSI_REG(DSI_PROTO, 0x0014)
  65#define DSI_IRQSTATUS                   DSI_REG(DSI_PROTO, 0x0018)
  66#define DSI_IRQENABLE                   DSI_REG(DSI_PROTO, 0x001C)
  67#define DSI_CTRL                        DSI_REG(DSI_PROTO, 0x0040)
  68#define DSI_GNQ                         DSI_REG(DSI_PROTO, 0x0044)
  69#define DSI_COMPLEXIO_CFG1              DSI_REG(DSI_PROTO, 0x0048)
  70#define DSI_COMPLEXIO_IRQ_STATUS        DSI_REG(DSI_PROTO, 0x004C)
  71#define DSI_COMPLEXIO_IRQ_ENABLE        DSI_REG(DSI_PROTO, 0x0050)
  72#define DSI_CLK_CTRL                    DSI_REG(DSI_PROTO, 0x0054)
  73#define DSI_TIMING1                     DSI_REG(DSI_PROTO, 0x0058)
  74#define DSI_TIMING2                     DSI_REG(DSI_PROTO, 0x005C)
  75#define DSI_VM_TIMING1                  DSI_REG(DSI_PROTO, 0x0060)
  76#define DSI_VM_TIMING2                  DSI_REG(DSI_PROTO, 0x0064)
  77#define DSI_VM_TIMING3                  DSI_REG(DSI_PROTO, 0x0068)
  78#define DSI_CLK_TIMING                  DSI_REG(DSI_PROTO, 0x006C)
  79#define DSI_TX_FIFO_VC_SIZE             DSI_REG(DSI_PROTO, 0x0070)
  80#define DSI_RX_FIFO_VC_SIZE             DSI_REG(DSI_PROTO, 0x0074)
  81#define DSI_COMPLEXIO_CFG2              DSI_REG(DSI_PROTO, 0x0078)
  82#define DSI_RX_FIFO_VC_FULLNESS         DSI_REG(DSI_PROTO, 0x007C)
  83#define DSI_VM_TIMING4                  DSI_REG(DSI_PROTO, 0x0080)
  84#define DSI_TX_FIFO_VC_EMPTINESS        DSI_REG(DSI_PROTO, 0x0084)
  85#define DSI_VM_TIMING5                  DSI_REG(DSI_PROTO, 0x0088)
  86#define DSI_VM_TIMING6                  DSI_REG(DSI_PROTO, 0x008C)
  87#define DSI_VM_TIMING7                  DSI_REG(DSI_PROTO, 0x0090)
  88#define DSI_STOPCLK_TIMING              DSI_REG(DSI_PROTO, 0x0094)
  89#define DSI_VC_CTRL(n)                  DSI_REG(DSI_PROTO, 0x0100 + (n * 0x20))
  90#define DSI_VC_TE(n)                    DSI_REG(DSI_PROTO, 0x0104 + (n * 0x20))
  91#define DSI_VC_LONG_PACKET_HEADER(n)    DSI_REG(DSI_PROTO, 0x0108 + (n * 0x20))
  92#define DSI_VC_LONG_PACKET_PAYLOAD(n)   DSI_REG(DSI_PROTO, 0x010C + (n * 0x20))
  93#define DSI_VC_SHORT_PACKET_HEADER(n)   DSI_REG(DSI_PROTO, 0x0110 + (n * 0x20))
  94#define DSI_VC_IRQSTATUS(n)             DSI_REG(DSI_PROTO, 0x0118 + (n * 0x20))
  95#define DSI_VC_IRQENABLE(n)             DSI_REG(DSI_PROTO, 0x011C + (n * 0x20))
  96
  97/* DSIPHY_SCP */
  98
  99#define DSI_PHY                         1
 100#define DSI_PHY_OFFSET                  0x200
 101#define DSI_PHY_SZ                      0x40
 102
 103#define DSI_DSIPHY_CFG0                 DSI_REG(DSI_PHY, 0x0000)
 104#define DSI_DSIPHY_CFG1                 DSI_REG(DSI_PHY, 0x0004)
 105#define DSI_DSIPHY_CFG2                 DSI_REG(DSI_PHY, 0x0008)
 106#define DSI_DSIPHY_CFG5                 DSI_REG(DSI_PHY, 0x0014)
 107#define DSI_DSIPHY_CFG10                DSI_REG(DSI_PHY, 0x0028)
 108
 109/* DSI_PLL_CTRL_SCP */
 110
 111#define DSI_PLL                         2
 112#define DSI_PLL_OFFSET                  0x300
 113#define DSI_PLL_SZ                      0x20
 114
 115#define DSI_PLL_CONTROL                 DSI_REG(DSI_PLL, 0x0000)
 116#define DSI_PLL_STATUS                  DSI_REG(DSI_PLL, 0x0004)
 117#define DSI_PLL_GO                      DSI_REG(DSI_PLL, 0x0008)
 118#define DSI_PLL_CONFIGURATION1          DSI_REG(DSI_PLL, 0x000C)
 119#define DSI_PLL_CONFIGURATION2          DSI_REG(DSI_PLL, 0x0010)
 120
 121#define REG_GET(dsidev, idx, start, end) \
 122        FLD_GET(dsi_read_reg(dsidev, idx), start, end)
 123
 124#define REG_FLD_MOD(dsidev, idx, val, start, end) \
 125        dsi_write_reg(dsidev, idx, FLD_MOD(dsi_read_reg(dsidev, idx), val, start, end))
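/*
 * REG_GET() returns bits [start:end] of a register; REG_FLD_MOD() does a
 * read-modify-write of the same field (FLD_GET()/FLD_MOD() are the generic
 * bitfield helpers from dss.h).  For example,
 *
 *	REG_FLD_MOD(dsidev, DSI_CTRL, 1, 0, 0);
 *
 * sets only bit 0 (IF_EN) of DSI_CTRL and leaves the other bits untouched.
 */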
 126
 127/* Global interrupts */
 128#define DSI_IRQ_VC0             (1 << 0)
 129#define DSI_IRQ_VC1             (1 << 1)
 130#define DSI_IRQ_VC2             (1 << 2)
 131#define DSI_IRQ_VC3             (1 << 3)
 132#define DSI_IRQ_WAKEUP          (1 << 4)
 133#define DSI_IRQ_RESYNC          (1 << 5)
 134#define DSI_IRQ_PLL_LOCK        (1 << 7)
 135#define DSI_IRQ_PLL_UNLOCK      (1 << 8)
 136#define DSI_IRQ_PLL_RECALL      (1 << 9)
 137#define DSI_IRQ_COMPLEXIO_ERR   (1 << 10)
 138#define DSI_IRQ_HS_TX_TIMEOUT   (1 << 14)
 139#define DSI_IRQ_LP_RX_TIMEOUT   (1 << 15)
 140#define DSI_IRQ_TE_TRIGGER      (1 << 16)
 141#define DSI_IRQ_ACK_TRIGGER     (1 << 17)
 142#define DSI_IRQ_SYNC_LOST       (1 << 18)
 143#define DSI_IRQ_LDO_POWER_GOOD  (1 << 19)
 144#define DSI_IRQ_TA_TIMEOUT      (1 << 20)
 145#define DSI_IRQ_ERROR_MASK \
 146        (DSI_IRQ_HS_TX_TIMEOUT | DSI_IRQ_LP_RX_TIMEOUT | DSI_IRQ_SYNC_LOST | \
 147        DSI_IRQ_TA_TIMEOUT)
 148#define DSI_IRQ_CHANNEL_MASK    0xf
 149
 150/* Virtual channel interrupts */
 151#define DSI_VC_IRQ_CS           (1 << 0)
 152#define DSI_VC_IRQ_ECC_CORR     (1 << 1)
 153#define DSI_VC_IRQ_PACKET_SENT  (1 << 2)
 154#define DSI_VC_IRQ_FIFO_TX_OVF  (1 << 3)
 155#define DSI_VC_IRQ_FIFO_RX_OVF  (1 << 4)
 156#define DSI_VC_IRQ_BTA          (1 << 5)
 157#define DSI_VC_IRQ_ECC_NO_CORR  (1 << 6)
 158#define DSI_VC_IRQ_FIFO_TX_UDF  (1 << 7)
 159#define DSI_VC_IRQ_PP_BUSY_CHANGE (1 << 8)
 160#define DSI_VC_IRQ_ERROR_MASK \
 161        (DSI_VC_IRQ_CS | DSI_VC_IRQ_ECC_CORR | DSI_VC_IRQ_FIFO_TX_OVF | \
 162        DSI_VC_IRQ_FIFO_RX_OVF | DSI_VC_IRQ_ECC_NO_CORR | \
 163        DSI_VC_IRQ_FIFO_TX_UDF)
 164
 165/* ComplexIO interrupts */
 166#define DSI_CIO_IRQ_ERRSYNCESC1         (1 << 0)
 167#define DSI_CIO_IRQ_ERRSYNCESC2         (1 << 1)
 168#define DSI_CIO_IRQ_ERRSYNCESC3         (1 << 2)
 169#define DSI_CIO_IRQ_ERRSYNCESC4         (1 << 3)
 170#define DSI_CIO_IRQ_ERRSYNCESC5         (1 << 4)
 171#define DSI_CIO_IRQ_ERRESC1             (1 << 5)
 172#define DSI_CIO_IRQ_ERRESC2             (1 << 6)
 173#define DSI_CIO_IRQ_ERRESC3             (1 << 7)
 174#define DSI_CIO_IRQ_ERRESC4             (1 << 8)
 175#define DSI_CIO_IRQ_ERRESC5             (1 << 9)
 176#define DSI_CIO_IRQ_ERRCONTROL1         (1 << 10)
 177#define DSI_CIO_IRQ_ERRCONTROL2         (1 << 11)
 178#define DSI_CIO_IRQ_ERRCONTROL3         (1 << 12)
 179#define DSI_CIO_IRQ_ERRCONTROL4         (1 << 13)
 180#define DSI_CIO_IRQ_ERRCONTROL5         (1 << 14)
 181#define DSI_CIO_IRQ_STATEULPS1          (1 << 15)
 182#define DSI_CIO_IRQ_STATEULPS2          (1 << 16)
 183#define DSI_CIO_IRQ_STATEULPS3          (1 << 17)
 184#define DSI_CIO_IRQ_STATEULPS4          (1 << 18)
 185#define DSI_CIO_IRQ_STATEULPS5          (1 << 19)
 186#define DSI_CIO_IRQ_ERRCONTENTIONLP0_1  (1 << 20)
 187#define DSI_CIO_IRQ_ERRCONTENTIONLP1_1  (1 << 21)
 188#define DSI_CIO_IRQ_ERRCONTENTIONLP0_2  (1 << 22)
 189#define DSI_CIO_IRQ_ERRCONTENTIONLP1_2  (1 << 23)
 190#define DSI_CIO_IRQ_ERRCONTENTIONLP0_3  (1 << 24)
 191#define DSI_CIO_IRQ_ERRCONTENTIONLP1_3  (1 << 25)
 192#define DSI_CIO_IRQ_ERRCONTENTIONLP0_4  (1 << 26)
 193#define DSI_CIO_IRQ_ERRCONTENTIONLP1_4  (1 << 27)
 194#define DSI_CIO_IRQ_ERRCONTENTIONLP0_5  (1 << 28)
 195#define DSI_CIO_IRQ_ERRCONTENTIONLP1_5  (1 << 29)
 196#define DSI_CIO_IRQ_ULPSACTIVENOT_ALL0  (1 << 30)
 197#define DSI_CIO_IRQ_ULPSACTIVENOT_ALL1  (1 << 31)
 198#define DSI_CIO_IRQ_ERROR_MASK \
 199        (DSI_CIO_IRQ_ERRSYNCESC1 | DSI_CIO_IRQ_ERRSYNCESC2 | \
 200         DSI_CIO_IRQ_ERRSYNCESC3 | DSI_CIO_IRQ_ERRSYNCESC4 | \
 201         DSI_CIO_IRQ_ERRSYNCESC5 | \
 202         DSI_CIO_IRQ_ERRESC1 | DSI_CIO_IRQ_ERRESC2 | \
 203         DSI_CIO_IRQ_ERRESC3 | DSI_CIO_IRQ_ERRESC4 | \
 204         DSI_CIO_IRQ_ERRESC5 | \
 205         DSI_CIO_IRQ_ERRCONTROL1 | DSI_CIO_IRQ_ERRCONTROL2 | \
 206         DSI_CIO_IRQ_ERRCONTROL3 | DSI_CIO_IRQ_ERRCONTROL4 | \
 207         DSI_CIO_IRQ_ERRCONTROL5 | \
 208         DSI_CIO_IRQ_ERRCONTENTIONLP0_1 | DSI_CIO_IRQ_ERRCONTENTIONLP1_1 | \
 209         DSI_CIO_IRQ_ERRCONTENTIONLP0_2 | DSI_CIO_IRQ_ERRCONTENTIONLP1_2 | \
 210         DSI_CIO_IRQ_ERRCONTENTIONLP0_3 | DSI_CIO_IRQ_ERRCONTENTIONLP1_3 | \
 211         DSI_CIO_IRQ_ERRCONTENTIONLP0_4 | DSI_CIO_IRQ_ERRCONTENTIONLP1_4 | \
 212         DSI_CIO_IRQ_ERRCONTENTIONLP0_5 | DSI_CIO_IRQ_ERRCONTENTIONLP1_5)
 213
 214typedef void (*omap_dsi_isr_t) (void *arg, u32 mask);
 215
 216static int dsi_display_init_dispc(struct platform_device *dsidev,
 217        enum omap_channel channel);
 218static void dsi_display_uninit_dispc(struct platform_device *dsidev,
 219        enum omap_channel channel);
 220
 221static int dsi_vc_send_null(struct omap_dss_device *dssdev, int channel);
 222
 223/* DSI PLL HSDIV indices */
 224#define HSDIV_DISPC     0
 225#define HSDIV_DSI       1
 226
 227#define DSI_MAX_NR_ISRS                2
 228#define DSI_MAX_NR_LANES        5
 229
 230enum dsi_lane_function {
 231        DSI_LANE_UNUSED = 0,
 232        DSI_LANE_CLK,
 233        DSI_LANE_DATA1,
 234        DSI_LANE_DATA2,
 235        DSI_LANE_DATA3,
 236        DSI_LANE_DATA4,
 237};
 238
 239struct dsi_lane_config {
 240        enum dsi_lane_function function;
 241        u8 polarity;
 242};
 243
 244struct dsi_isr_data {
 245        omap_dsi_isr_t  isr;
 246        void            *arg;
 247        u32             mask;
 248};
 249
 250enum fifo_size {
 251        DSI_FIFO_SIZE_0         = 0,
 252        DSI_FIFO_SIZE_32        = 1,
 253        DSI_FIFO_SIZE_64        = 2,
 254        DSI_FIFO_SIZE_96        = 3,
 255        DSI_FIFO_SIZE_128       = 4,
 256};
 257
 258enum dsi_vc_source {
 259        DSI_VC_SOURCE_L4 = 0,
 260        DSI_VC_SOURCE_VP,
 261};
 262
 263struct dsi_irq_stats {
 264        unsigned long last_reset;
 265        unsigned irq_count;
 266        unsigned dsi_irqs[32];
 267        unsigned vc_irqs[4][32];
 268        unsigned cio_irqs[32];
 269};
 270
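/*
 * Registered ISRs are kept in three fixed-size tables: one for the global
 * DSI interrupts, one per virtual channel and one for ComplexIO.  Each table
 * has DSI_MAX_NR_ISRS slots.  The interrupt handler works on a copy of these
 * tables (isr_tables_copy in struct dsi_data) taken under irq_lock, so an
 * ISR may safely unregister itself while it runs.
 */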
 271struct dsi_isr_tables {
 272        struct dsi_isr_data isr_table[DSI_MAX_NR_ISRS];
 273        struct dsi_isr_data isr_table_vc[4][DSI_MAX_NR_ISRS];
 274        struct dsi_isr_data isr_table_cio[DSI_MAX_NR_ISRS];
 275};
 276
 277struct dsi_clk_calc_ctx {
 278        struct platform_device *dsidev;
 279        struct dss_pll *pll;
 280
 281        /* inputs */
 282
 283        const struct omap_dss_dsi_config *config;
 284
 285        unsigned long req_pck_min, req_pck_nom, req_pck_max;
 286
 287        /* outputs */
 288
 289        struct dss_pll_clock_info dsi_cinfo;
 290        struct dispc_clock_info dispc_cinfo;
 291
 292        struct omap_video_timings dispc_vm;
 293        struct omap_dss_dsi_videomode_timings dsi_vm;
 294};
 295
 296struct dsi_lp_clock_info {
 297        unsigned long lp_clk;
 298        u16 lp_clk_div;
 299};
 300
 301struct dsi_data {
 302        struct platform_device *pdev;
 303        void __iomem *proto_base;
 304        void __iomem *phy_base;
 305        void __iomem *pll_base;
 306
 307        int module_id;
 308
 309        int irq;
 310
 311        bool is_enabled;
 312
 313        struct clk *dss_clk;
 314
 315        struct dispc_clock_info user_dispc_cinfo;
 316        struct dss_pll_clock_info user_dsi_cinfo;
 317
 318        struct dsi_lp_clock_info user_lp_cinfo;
 319        struct dsi_lp_clock_info current_lp_cinfo;
 320
 321        struct dss_pll pll;
 322
 323        bool vdds_dsi_enabled;
 324        struct regulator *vdds_dsi_reg;
 325
 326        struct {
 327                enum dsi_vc_source source;
 328                struct omap_dss_device *dssdev;
 329                enum fifo_size tx_fifo_size;
 330                enum fifo_size rx_fifo_size;
 331                int vc_id;
 332        } vc[4];
 333
 334        struct mutex lock;
 335        struct semaphore bus_lock;
 336
 337        spinlock_t irq_lock;
 338        struct dsi_isr_tables isr_tables;
 339        /* space for a copy used by the interrupt handler */
 340        struct dsi_isr_tables isr_tables_copy;
 341
 342        int update_channel;
 343#ifdef DSI_PERF_MEASURE
 344        unsigned update_bytes;
 345#endif
 346
 347        bool te_enabled;
 348        bool ulps_enabled;
 349
 350        void (*framedone_callback)(int, void *);
 351        void *framedone_data;
 352
 353        struct delayed_work framedone_timeout_work;
 354
 355#ifdef DSI_CATCH_MISSING_TE
 356        struct timer_list te_timer;
 357#endif
 358
 359        unsigned long cache_req_pck;
 360        unsigned long cache_clk_freq;
 361        struct dss_pll_clock_info cache_cinfo;
 362
 363        u32             errors;
 364        spinlock_t      errors_lock;
 365#ifdef DSI_PERF_MEASURE
 366        ktime_t perf_setup_time;
 367        ktime_t perf_start_time;
 368#endif
 369        int debug_read;
 370        int debug_write;
 371
 372#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
 373        spinlock_t irq_stats_lock;
 374        struct dsi_irq_stats irq_stats;
 375#endif
 376
 377        unsigned num_lanes_supported;
 378        unsigned line_buffer_size;
 379
 380        struct dsi_lane_config lanes[DSI_MAX_NR_LANES];
 381        unsigned num_lanes_used;
 382
 383        unsigned scp_clk_refcount;
 384
 385        struct dss_lcd_mgr_config mgr_config;
 386        struct omap_video_timings timings;
 387        enum omap_dss_dsi_pixel_format pix_fmt;
 388        enum omap_dss_dsi_mode mode;
 389        struct omap_dss_dsi_videomode_timings vm_timings;
 390
 391        struct omap_dss_device output;
 392};
 393
 394struct dsi_packet_sent_handler_data {
 395        struct platform_device *dsidev;
 396        struct completion *completion;
 397};
 398
 399struct dsi_module_id_data {
 400        u32 address;
 401        int id;
 402};
 403
 404static const struct of_device_id dsi_of_match[];
 405
 406#ifdef DSI_PERF_MEASURE
 407static bool dsi_perf;
 408module_param(dsi_perf, bool, 0644);
 409#endif
 410
 411static inline struct dsi_data *dsi_get_dsidrv_data(struct platform_device *dsidev)
 412{
 413        return dev_get_drvdata(&dsidev->dev);
 414}
 415
 416static inline struct platform_device *dsi_get_dsidev_from_dssdev(struct omap_dss_device *dssdev)
 417{
 418        return to_platform_device(dssdev->dev);
 419}
 420
 421static struct platform_device *dsi_get_dsidev_from_id(int module)
 422{
 423        struct omap_dss_device *out;
 424        enum omap_dss_output_id id;
 425
 426        switch (module) {
 427        case 0:
 428                id = OMAP_DSS_OUTPUT_DSI1;
 429                break;
 430        case 1:
 431                id = OMAP_DSS_OUTPUT_DSI2;
 432                break;
 433        default:
 434                return NULL;
 435        }
 436
 437        out = omap_dss_get_output(id);
 438
 439        return out ? to_platform_device(out->dev) : NULL;
 440}
 441
 442static inline void dsi_write_reg(struct platform_device *dsidev,
 443                const struct dsi_reg idx, u32 val)
 444{
 445        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
 446        void __iomem *base;
 447
  448        switch (idx.module) {
  449        case DSI_PROTO: base = dsi->proto_base; break;
  450        case DSI_PHY: base = dsi->phy_base; break;
  451        case DSI_PLL: base = dsi->pll_base; break;
  452        default: return;
  453        }
 454
 455        __raw_writel(val, base + idx.idx);
 456}
 457
 458static inline u32 dsi_read_reg(struct platform_device *dsidev,
 459                const struct dsi_reg idx)
 460{
 461        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
 462        void __iomem *base;
 463
  464        switch (idx.module) {
  465        case DSI_PROTO: base = dsi->proto_base; break;
  466        case DSI_PHY: base = dsi->phy_base; break;
  467        case DSI_PLL: base = dsi->pll_base; break;
  468        default: return 0;
  469        }
 470
 471        return __raw_readl(base + idx.idx);
 472}
 473
 474static void dsi_bus_lock(struct omap_dss_device *dssdev)
 475{
 476        struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
 477        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
 478
 479        down(&dsi->bus_lock);
 480}
 481
 482static void dsi_bus_unlock(struct omap_dss_device *dssdev)
 483{
 484        struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
 485        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
 486
 487        up(&dsi->bus_lock);
 488}
 489
 490static bool dsi_bus_is_locked(struct platform_device *dsidev)
 491{
 492        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
 493
 494        return dsi->bus_lock.count == 0;
 495}
 496
 497static void dsi_completion_handler(void *data, u32 mask)
 498{
 499        complete((struct completion *)data);
 500}
 501
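/*
 * Poll a single register bit until it has the requested value: first a short
 * busy loop, then up to 500 ms with 1 ms sleeps in between.  Returns the
 * requested value on success and its complement on timeout, so callers
 * compare the return value against 'value' (see dsi_if_enable()).
 */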
 502static inline int wait_for_bit_change(struct platform_device *dsidev,
 503                const struct dsi_reg idx, int bitnum, int value)
 504{
 505        unsigned long timeout;
 506        ktime_t wait;
 507        int t;
 508
 509        /* first busyloop to see if the bit changes right away */
 510        t = 100;
 511        while (t-- > 0) {
 512                if (REG_GET(dsidev, idx, bitnum, bitnum) == value)
 513                        return value;
 514        }
 515
 516        /* then loop for 500ms, sleeping for 1ms in between */
 517        timeout = jiffies + msecs_to_jiffies(500);
 518        while (time_before(jiffies, timeout)) {
 519                if (REG_GET(dsidev, idx, bitnum, bitnum) == value)
 520                        return value;
 521
 522                wait = ns_to_ktime(1000 * 1000);
 523                set_current_state(TASK_UNINTERRUPTIBLE);
 524                schedule_hrtimeout(&wait, HRTIMER_MODE_REL);
 525        }
 526
 527        return !value;
 528}
 529
 530u8 dsi_get_pixel_size(enum omap_dss_dsi_pixel_format fmt)
 531{
 532        switch (fmt) {
 533        case OMAP_DSS_DSI_FMT_RGB888:
 534        case OMAP_DSS_DSI_FMT_RGB666:
 535                return 24;
 536        case OMAP_DSS_DSI_FMT_RGB666_PACKED:
 537                return 18;
 538        case OMAP_DSS_DSI_FMT_RGB565:
 539                return 16;
 540        default:
 541                BUG();
 542                return 0;
 543        }
 544}
 545
 546#ifdef DSI_PERF_MEASURE
 547static void dsi_perf_mark_setup(struct platform_device *dsidev)
 548{
 549        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
 550        dsi->perf_setup_time = ktime_get();
 551}
 552
 553static void dsi_perf_mark_start(struct platform_device *dsidev)
 554{
 555        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
 556        dsi->perf_start_time = ktime_get();
 557}
 558
 559static void dsi_perf_show(struct platform_device *dsidev, const char *name)
 560{
 561        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
 562        ktime_t t, setup_time, trans_time;
 563        u32 total_bytes;
 564        u32 setup_us, trans_us, total_us;
 565
 566        if (!dsi_perf)
 567                return;
 568
 569        t = ktime_get();
 570
 571        setup_time = ktime_sub(dsi->perf_start_time, dsi->perf_setup_time);
 572        setup_us = (u32)ktime_to_us(setup_time);
 573        if (setup_us == 0)
 574                setup_us = 1;
 575
 576        trans_time = ktime_sub(t, dsi->perf_start_time);
 577        trans_us = (u32)ktime_to_us(trans_time);
 578        if (trans_us == 0)
 579                trans_us = 1;
 580
 581        total_us = setup_us + trans_us;
 582
 583        total_bytes = dsi->update_bytes;
 584
 585        printk(KERN_INFO "DSI(%s): %u us + %u us = %u us (%uHz), "
 586                        "%u bytes, %u kbytes/sec\n",
 587                        name,
 588                        setup_us,
 589                        trans_us,
 590                        total_us,
 591                        1000*1000 / total_us,
 592                        total_bytes,
 593                        total_bytes * 1000 / total_us);
 594}
 595#else
 596static inline void dsi_perf_mark_setup(struct platform_device *dsidev)
 597{
 598}
 599
 600static inline void dsi_perf_mark_start(struct platform_device *dsidev)
 601{
 602}
 603
 604static inline void dsi_perf_show(struct platform_device *dsidev,
 605                const char *name)
 606{
 607}
 608#endif
 609
 610static int verbose_irq;
 611
 612static void print_irq_status(u32 status)
 613{
 614        if (status == 0)
 615                return;
 616
 617        if (!verbose_irq && (status & ~DSI_IRQ_CHANNEL_MASK) == 0)
 618                return;
 619
 620#define PIS(x) (status & DSI_IRQ_##x) ? (#x " ") : ""
 621
 622        pr_debug("DSI IRQ: 0x%x: %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
 623                status,
 624                verbose_irq ? PIS(VC0) : "",
 625                verbose_irq ? PIS(VC1) : "",
 626                verbose_irq ? PIS(VC2) : "",
 627                verbose_irq ? PIS(VC3) : "",
 628                PIS(WAKEUP),
 629                PIS(RESYNC),
 630                PIS(PLL_LOCK),
 631                PIS(PLL_UNLOCK),
 632                PIS(PLL_RECALL),
 633                PIS(COMPLEXIO_ERR),
 634                PIS(HS_TX_TIMEOUT),
 635                PIS(LP_RX_TIMEOUT),
 636                PIS(TE_TRIGGER),
 637                PIS(ACK_TRIGGER),
 638                PIS(SYNC_LOST),
 639                PIS(LDO_POWER_GOOD),
 640                PIS(TA_TIMEOUT));
 641#undef PIS
 642}
 643
 644static void print_irq_status_vc(int channel, u32 status)
 645{
 646        if (status == 0)
 647                return;
 648
 649        if (!verbose_irq && (status & ~DSI_VC_IRQ_PACKET_SENT) == 0)
 650                return;
 651
 652#define PIS(x) (status & DSI_VC_IRQ_##x) ? (#x " ") : ""
 653
 654        pr_debug("DSI VC(%d) IRQ 0x%x: %s%s%s%s%s%s%s%s%s\n",
 655                channel,
 656                status,
 657                PIS(CS),
 658                PIS(ECC_CORR),
 659                PIS(ECC_NO_CORR),
 660                verbose_irq ? PIS(PACKET_SENT) : "",
 661                PIS(BTA),
 662                PIS(FIFO_TX_OVF),
 663                PIS(FIFO_RX_OVF),
 664                PIS(FIFO_TX_UDF),
 665                PIS(PP_BUSY_CHANGE));
 666#undef PIS
 667}
 668
 669static void print_irq_status_cio(u32 status)
 670{
 671        if (status == 0)
 672                return;
 673
 674#define PIS(x) (status & DSI_CIO_IRQ_##x) ? (#x " ") : ""
 675
 676        pr_debug("DSI CIO IRQ 0x%x: %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
 677                status,
 678                PIS(ERRSYNCESC1),
 679                PIS(ERRSYNCESC2),
 680                PIS(ERRSYNCESC3),
 681                PIS(ERRESC1),
 682                PIS(ERRESC2),
 683                PIS(ERRESC3),
 684                PIS(ERRCONTROL1),
 685                PIS(ERRCONTROL2),
 686                PIS(ERRCONTROL3),
 687                PIS(STATEULPS1),
 688                PIS(STATEULPS2),
 689                PIS(STATEULPS3),
 690                PIS(ERRCONTENTIONLP0_1),
 691                PIS(ERRCONTENTIONLP1_1),
 692                PIS(ERRCONTENTIONLP0_2),
 693                PIS(ERRCONTENTIONLP1_2),
 694                PIS(ERRCONTENTIONLP0_3),
 695                PIS(ERRCONTENTIONLP1_3),
 696                PIS(ULPSACTIVENOT_ALL0),
 697                PIS(ULPSACTIVENOT_ALL1));
 698#undef PIS
 699}
 700
 701#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
 702static void dsi_collect_irq_stats(struct platform_device *dsidev, u32 irqstatus,
 703                u32 *vcstatus, u32 ciostatus)
 704{
 705        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
 706        int i;
 707
 708        spin_lock(&dsi->irq_stats_lock);
 709
 710        dsi->irq_stats.irq_count++;
 711        dss_collect_irq_stats(irqstatus, dsi->irq_stats.dsi_irqs);
 712
 713        for (i = 0; i < 4; ++i)
 714                dss_collect_irq_stats(vcstatus[i], dsi->irq_stats.vc_irqs[i]);
 715
 716        dss_collect_irq_stats(ciostatus, dsi->irq_stats.cio_irqs);
 717
 718        spin_unlock(&dsi->irq_stats_lock);
 719}
 720#else
 721#define dsi_collect_irq_stats(dsidev, irqstatus, vcstatus, ciostatus)
 722#endif
 723
 724static int debug_irq;
 725
 726static void dsi_handle_irq_errors(struct platform_device *dsidev, u32 irqstatus,
 727                u32 *vcstatus, u32 ciostatus)
 728{
 729        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
 730        int i;
 731
 732        if (irqstatus & DSI_IRQ_ERROR_MASK) {
 733                DSSERR("DSI error, irqstatus %x\n", irqstatus);
 734                print_irq_status(irqstatus);
 735                spin_lock(&dsi->errors_lock);
 736                dsi->errors |= irqstatus & DSI_IRQ_ERROR_MASK;
 737                spin_unlock(&dsi->errors_lock);
 738        } else if (debug_irq) {
 739                print_irq_status(irqstatus);
 740        }
 741
 742        for (i = 0; i < 4; ++i) {
 743                if (vcstatus[i] & DSI_VC_IRQ_ERROR_MASK) {
 744                        DSSERR("DSI VC(%d) error, vc irqstatus %x\n",
 745                                       i, vcstatus[i]);
 746                        print_irq_status_vc(i, vcstatus[i]);
 747                } else if (debug_irq) {
 748                        print_irq_status_vc(i, vcstatus[i]);
 749                }
 750        }
 751
 752        if (ciostatus & DSI_CIO_IRQ_ERROR_MASK) {
 753                DSSERR("DSI CIO error, cio irqstatus %x\n", ciostatus);
 754                print_irq_status_cio(ciostatus);
 755        } else if (debug_irq) {
 756                print_irq_status_cio(ciostatus);
 757        }
 758}
 759
 760static void dsi_call_isrs(struct dsi_isr_data *isr_array,
 761                unsigned isr_array_size, u32 irqstatus)
 762{
 763        struct dsi_isr_data *isr_data;
 764        int i;
 765
 766        for (i = 0; i < isr_array_size; i++) {
 767                isr_data = &isr_array[i];
 768                if (isr_data->isr && isr_data->mask & irqstatus)
 769                        isr_data->isr(isr_data->arg, irqstatus);
 770        }
 771}
 772
 773static void dsi_handle_isrs(struct dsi_isr_tables *isr_tables,
 774                u32 irqstatus, u32 *vcstatus, u32 ciostatus)
 775{
 776        int i;
 777
 778        dsi_call_isrs(isr_tables->isr_table,
 779                        ARRAY_SIZE(isr_tables->isr_table),
 780                        irqstatus);
 781
 782        for (i = 0; i < 4; ++i) {
 783                if (vcstatus[i] == 0)
 784                        continue;
 785                dsi_call_isrs(isr_tables->isr_table_vc[i],
 786                                ARRAY_SIZE(isr_tables->isr_table_vc[i]),
 787                                vcstatus[i]);
 788        }
 789
 790        if (ciostatus != 0)
 791                dsi_call_isrs(isr_tables->isr_table_cio,
 792                                ARRAY_SIZE(isr_tables->isr_table_cio),
 793                                ciostatus);
 794}
 795
 796static irqreturn_t omap_dsi_irq_handler(int irq, void *arg)
 797{
 798        struct platform_device *dsidev;
 799        struct dsi_data *dsi;
 800        u32 irqstatus, vcstatus[4], ciostatus;
 801        int i;
 802
 803        dsidev = (struct platform_device *) arg;
 804        dsi = dsi_get_dsidrv_data(dsidev);
 805
 806        if (!dsi->is_enabled)
 807                return IRQ_NONE;
 808
 809        spin_lock(&dsi->irq_lock);
 810
 811        irqstatus = dsi_read_reg(dsidev, DSI_IRQSTATUS);
 812
 813        /* IRQ is not for us */
 814        if (!irqstatus) {
 815                spin_unlock(&dsi->irq_lock);
 816                return IRQ_NONE;
 817        }
 818
 819        dsi_write_reg(dsidev, DSI_IRQSTATUS, irqstatus & ~DSI_IRQ_CHANNEL_MASK);
 820        /* flush posted write */
 821        dsi_read_reg(dsidev, DSI_IRQSTATUS);
 822
 823        for (i = 0; i < 4; ++i) {
 824                if ((irqstatus & (1 << i)) == 0) {
 825                        vcstatus[i] = 0;
 826                        continue;
 827                }
 828
 829                vcstatus[i] = dsi_read_reg(dsidev, DSI_VC_IRQSTATUS(i));
 830
 831                dsi_write_reg(dsidev, DSI_VC_IRQSTATUS(i), vcstatus[i]);
 832                /* flush posted write */
 833                dsi_read_reg(dsidev, DSI_VC_IRQSTATUS(i));
 834        }
 835
 836        if (irqstatus & DSI_IRQ_COMPLEXIO_ERR) {
 837                ciostatus = dsi_read_reg(dsidev, DSI_COMPLEXIO_IRQ_STATUS);
 838
 839                dsi_write_reg(dsidev, DSI_COMPLEXIO_IRQ_STATUS, ciostatus);
 840                /* flush posted write */
 841                dsi_read_reg(dsidev, DSI_COMPLEXIO_IRQ_STATUS);
 842        } else {
 843                ciostatus = 0;
 844        }
 845
 846#ifdef DSI_CATCH_MISSING_TE
 847        if (irqstatus & DSI_IRQ_TE_TRIGGER)
 848                del_timer(&dsi->te_timer);
 849#endif
 850
 851        /* make a copy and unlock, so that isrs can unregister
 852         * themselves */
 853        memcpy(&dsi->isr_tables_copy, &dsi->isr_tables,
 854                sizeof(dsi->isr_tables));
 855
 856        spin_unlock(&dsi->irq_lock);
 857
 858        dsi_handle_isrs(&dsi->isr_tables_copy, irqstatus, vcstatus, ciostatus);
 859
 860        dsi_handle_irq_errors(dsidev, irqstatus, vcstatus, ciostatus);
 861
 862        dsi_collect_irq_stats(dsidev, irqstatus, vcstatus, ciostatus);
 863
 864        return IRQ_HANDLED;
 865}
 866
 867/* dsi->irq_lock has to be locked by the caller */
 868static void _omap_dsi_configure_irqs(struct platform_device *dsidev,
 869                struct dsi_isr_data *isr_array,
 870                unsigned isr_array_size, u32 default_mask,
 871                const struct dsi_reg enable_reg,
 872                const struct dsi_reg status_reg)
 873{
 874        struct dsi_isr_data *isr_data;
 875        u32 mask;
 876        u32 old_mask;
 877        int i;
 878
 879        mask = default_mask;
 880
 881        for (i = 0; i < isr_array_size; i++) {
 882                isr_data = &isr_array[i];
 883
 884                if (isr_data->isr == NULL)
 885                        continue;
 886
 887                mask |= isr_data->mask;
 888        }
 889
 890        old_mask = dsi_read_reg(dsidev, enable_reg);
 891        /* clear the irqstatus for newly enabled irqs */
 892        dsi_write_reg(dsidev, status_reg, (mask ^ old_mask) & mask);
 893        dsi_write_reg(dsidev, enable_reg, mask);
 894
 895        /* flush posted writes */
 896        dsi_read_reg(dsidev, enable_reg);
 897        dsi_read_reg(dsidev, status_reg);
 898}
 899
 900/* dsi->irq_lock has to be locked by the caller */
 901static void _omap_dsi_set_irqs(struct platform_device *dsidev)
 902{
 903        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
 904        u32 mask = DSI_IRQ_ERROR_MASK;
 905#ifdef DSI_CATCH_MISSING_TE
 906        mask |= DSI_IRQ_TE_TRIGGER;
 907#endif
 908        _omap_dsi_configure_irqs(dsidev, dsi->isr_tables.isr_table,
 909                        ARRAY_SIZE(dsi->isr_tables.isr_table), mask,
 910                        DSI_IRQENABLE, DSI_IRQSTATUS);
 911}
 912
 913/* dsi->irq_lock has to be locked by the caller */
 914static void _omap_dsi_set_irqs_vc(struct platform_device *dsidev, int vc)
 915{
 916        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
 917
 918        _omap_dsi_configure_irqs(dsidev, dsi->isr_tables.isr_table_vc[vc],
 919                        ARRAY_SIZE(dsi->isr_tables.isr_table_vc[vc]),
 920                        DSI_VC_IRQ_ERROR_MASK,
 921                        DSI_VC_IRQENABLE(vc), DSI_VC_IRQSTATUS(vc));
 922}
 923
 924/* dsi->irq_lock has to be locked by the caller */
 925static void _omap_dsi_set_irqs_cio(struct platform_device *dsidev)
 926{
 927        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
 928
 929        _omap_dsi_configure_irqs(dsidev, dsi->isr_tables.isr_table_cio,
 930                        ARRAY_SIZE(dsi->isr_tables.isr_table_cio),
 931                        DSI_CIO_IRQ_ERROR_MASK,
 932                        DSI_COMPLEXIO_IRQ_ENABLE, DSI_COMPLEXIO_IRQ_STATUS);
 933}
 934
 935static void _dsi_initialize_irq(struct platform_device *dsidev)
 936{
 937        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
 938        unsigned long flags;
 939        int vc;
 940
 941        spin_lock_irqsave(&dsi->irq_lock, flags);
 942
 943        memset(&dsi->isr_tables, 0, sizeof(dsi->isr_tables));
 944
 945        _omap_dsi_set_irqs(dsidev);
 946        for (vc = 0; vc < 4; ++vc)
 947                _omap_dsi_set_irqs_vc(dsidev, vc);
 948        _omap_dsi_set_irqs_cio(dsidev);
 949
 950        spin_unlock_irqrestore(&dsi->irq_lock, flags);
 951}
 952
 953static int _dsi_register_isr(omap_dsi_isr_t isr, void *arg, u32 mask,
 954                struct dsi_isr_data *isr_array, unsigned isr_array_size)
 955{
 956        struct dsi_isr_data *isr_data;
 957        int free_idx;
 958        int i;
 959
 960        BUG_ON(isr == NULL);
 961
 962        /* check for duplicate entry and find a free slot */
 963        free_idx = -1;
 964        for (i = 0; i < isr_array_size; i++) {
 965                isr_data = &isr_array[i];
 966
 967                if (isr_data->isr == isr && isr_data->arg == arg &&
 968                                isr_data->mask == mask) {
 969                        return -EINVAL;
 970                }
 971
 972                if (isr_data->isr == NULL && free_idx == -1)
 973                        free_idx = i;
 974        }
 975
 976        if (free_idx == -1)
 977                return -EBUSY;
 978
 979        isr_data = &isr_array[free_idx];
 980        isr_data->isr = isr;
 981        isr_data->arg = arg;
 982        isr_data->mask = mask;
 983
 984        return 0;
 985}
 986
 987static int _dsi_unregister_isr(omap_dsi_isr_t isr, void *arg, u32 mask,
 988                struct dsi_isr_data *isr_array, unsigned isr_array_size)
 989{
 990        struct dsi_isr_data *isr_data;
 991        int i;
 992
 993        for (i = 0; i < isr_array_size; i++) {
 994                isr_data = &isr_array[i];
 995                if (isr_data->isr != isr || isr_data->arg != arg ||
 996                                isr_data->mask != mask)
 997                        continue;
 998
 999                isr_data->isr = NULL;
1000                isr_data->arg = NULL;
1001                isr_data->mask = 0;
1002
1003                return 0;
1004        }
1005
1006        return -EINVAL;
1007}
1008
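/*
 * Register/unregister an ISR for the global DSI interrupts.  A minimal
 * illustrative usage sketch (not taken verbatim from this file) pairs an ISR
 * with a completion via dsi_completion_handler():
 *
 *	DECLARE_COMPLETION_ONSTACK(completion);
 *
 *	r = dsi_register_isr(dsidev, dsi_completion_handler, &completion,
 *			DSI_IRQ_TE_TRIGGER);
 *	if (r)
 *		return r;
 *	...
 *	wait_for_completion_timeout(&completion, msecs_to_jiffies(250));
 *	dsi_unregister_isr(dsidev, dsi_completion_handler, &completion,
 *			DSI_IRQ_TE_TRIGGER);
 */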
1009static int dsi_register_isr(struct platform_device *dsidev, omap_dsi_isr_t isr,
1010                void *arg, u32 mask)
1011{
1012        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1013        unsigned long flags;
1014        int r;
1015
1016        spin_lock_irqsave(&dsi->irq_lock, flags);
1017
1018        r = _dsi_register_isr(isr, arg, mask, dsi->isr_tables.isr_table,
1019                        ARRAY_SIZE(dsi->isr_tables.isr_table));
1020
1021        if (r == 0)
1022                _omap_dsi_set_irqs(dsidev);
1023
1024        spin_unlock_irqrestore(&dsi->irq_lock, flags);
1025
1026        return r;
1027}
1028
1029static int dsi_unregister_isr(struct platform_device *dsidev,
1030                omap_dsi_isr_t isr, void *arg, u32 mask)
1031{
1032        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1033        unsigned long flags;
1034        int r;
1035
1036        spin_lock_irqsave(&dsi->irq_lock, flags);
1037
1038        r = _dsi_unregister_isr(isr, arg, mask, dsi->isr_tables.isr_table,
1039                        ARRAY_SIZE(dsi->isr_tables.isr_table));
1040
1041        if (r == 0)
1042                _omap_dsi_set_irqs(dsidev);
1043
1044        spin_unlock_irqrestore(&dsi->irq_lock, flags);
1045
1046        return r;
1047}
1048
1049static int dsi_register_isr_vc(struct platform_device *dsidev, int channel,
1050                omap_dsi_isr_t isr, void *arg, u32 mask)
1051{
1052        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1053        unsigned long flags;
1054        int r;
1055
1056        spin_lock_irqsave(&dsi->irq_lock, flags);
1057
1058        r = _dsi_register_isr(isr, arg, mask,
1059                        dsi->isr_tables.isr_table_vc[channel],
1060                        ARRAY_SIZE(dsi->isr_tables.isr_table_vc[channel]));
1061
1062        if (r == 0)
1063                _omap_dsi_set_irqs_vc(dsidev, channel);
1064
1065        spin_unlock_irqrestore(&dsi->irq_lock, flags);
1066
1067        return r;
1068}
1069
1070static int dsi_unregister_isr_vc(struct platform_device *dsidev, int channel,
1071                omap_dsi_isr_t isr, void *arg, u32 mask)
1072{
1073        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1074        unsigned long flags;
1075        int r;
1076
1077        spin_lock_irqsave(&dsi->irq_lock, flags);
1078
1079        r = _dsi_unregister_isr(isr, arg, mask,
1080                        dsi->isr_tables.isr_table_vc[channel],
1081                        ARRAY_SIZE(dsi->isr_tables.isr_table_vc[channel]));
1082
1083        if (r == 0)
1084                _omap_dsi_set_irqs_vc(dsidev, channel);
1085
1086        spin_unlock_irqrestore(&dsi->irq_lock, flags);
1087
1088        return r;
1089}
1090
1091static int dsi_register_isr_cio(struct platform_device *dsidev,
1092                omap_dsi_isr_t isr, void *arg, u32 mask)
1093{
1094        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1095        unsigned long flags;
1096        int r;
1097
1098        spin_lock_irqsave(&dsi->irq_lock, flags);
1099
1100        r = _dsi_register_isr(isr, arg, mask, dsi->isr_tables.isr_table_cio,
1101                        ARRAY_SIZE(dsi->isr_tables.isr_table_cio));
1102
1103        if (r == 0)
1104                _omap_dsi_set_irqs_cio(dsidev);
1105
1106        spin_unlock_irqrestore(&dsi->irq_lock, flags);
1107
1108        return r;
1109}
1110
1111static int dsi_unregister_isr_cio(struct platform_device *dsidev,
1112                omap_dsi_isr_t isr, void *arg, u32 mask)
1113{
1114        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1115        unsigned long flags;
1116        int r;
1117
1118        spin_lock_irqsave(&dsi->irq_lock, flags);
1119
1120        r = _dsi_unregister_isr(isr, arg, mask, dsi->isr_tables.isr_table_cio,
1121                        ARRAY_SIZE(dsi->isr_tables.isr_table_cio));
1122
1123        if (r == 0)
1124                _omap_dsi_set_irqs_cio(dsidev);
1125
1126        spin_unlock_irqrestore(&dsi->irq_lock, flags);
1127
1128        return r;
1129}
1130
1131static u32 dsi_get_errors(struct platform_device *dsidev)
1132{
1133        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1134        unsigned long flags;
1135        u32 e;
1136        spin_lock_irqsave(&dsi->errors_lock, flags);
1137        e = dsi->errors;
1138        dsi->errors = 0;
1139        spin_unlock_irqrestore(&dsi->errors_lock, flags);
1140        return e;
1141}
1142
1143static int dsi_runtime_get(struct platform_device *dsidev)
1144{
1145        int r;
1146        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1147
1148        DSSDBG("dsi_runtime_get\n");
1149
1150        r = pm_runtime_get_sync(&dsi->pdev->dev);
1151        WARN_ON(r < 0);
1152        return r < 0 ? r : 0;
1153}
1154
1155static void dsi_runtime_put(struct platform_device *dsidev)
1156{
1157        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1158        int r;
1159
1160        DSSDBG("dsi_runtime_put\n");
1161
1162        r = pm_runtime_put_sync(&dsi->pdev->dev);
1163        WARN_ON(r < 0 && r != -ENOSYS);
1164}
1165
1166static int dsi_regulator_init(struct platform_device *dsidev)
1167{
1168        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1169        struct regulator *vdds_dsi;
1170        int r;
1171
1172        if (dsi->vdds_dsi_reg != NULL)
1173                return 0;
1174
1175        vdds_dsi = devm_regulator_get(&dsi->pdev->dev, "vdd");
1176
1177        if (IS_ERR(vdds_dsi)) {
1178                if (PTR_ERR(vdds_dsi) != -EPROBE_DEFER)
1179                        DSSERR("can't get DSI VDD regulator\n");
1180                return PTR_ERR(vdds_dsi);
1181        }
1182
1183        if (regulator_can_change_voltage(vdds_dsi)) {
1184                r = regulator_set_voltage(vdds_dsi, 1800000, 1800000);
1185                if (r) {
1186                        devm_regulator_put(vdds_dsi);
1187                        DSSERR("can't set the DSI regulator voltage\n");
1188                        return r;
1189                }
1190        }
1191
1192        dsi->vdds_dsi_reg = vdds_dsi;
1193
1194        return 0;
1195}
1196
1197static void _dsi_print_reset_status(struct platform_device *dsidev)
1198{
1199        u32 l;
1200        int b0, b1, b2;
1201
1202        /* A dummy read using the SCP interface to any DSIPHY register is
1203         * required after DSIPHY reset to complete the reset of the DSI complex
1204         * I/O. */
1205        l = dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);
1206
1207        if (dss_has_feature(FEAT_DSI_REVERSE_TXCLKESC)) {
1208                b0 = 28;
1209                b1 = 27;
1210                b2 = 26;
1211        } else {
1212                b0 = 24;
1213                b1 = 25;
1214                b2 = 26;
1215        }
1216
1217#define DSI_FLD_GET(fld, start, end)\
1218        FLD_GET(dsi_read_reg(dsidev, DSI_##fld), start, end)
1219
1220        pr_debug("DSI resets: PLL (%d) CIO (%d) PHY (%x%x%x, %d, %d, %d)\n",
1221                DSI_FLD_GET(PLL_STATUS, 0, 0),
1222                DSI_FLD_GET(COMPLEXIO_CFG1, 29, 29),
1223                DSI_FLD_GET(DSIPHY_CFG5, b0, b0),
1224                DSI_FLD_GET(DSIPHY_CFG5, b1, b1),
1225                DSI_FLD_GET(DSIPHY_CFG5, b2, b2),
1226                DSI_FLD_GET(DSIPHY_CFG5, 29, 29),
1227                DSI_FLD_GET(DSIPHY_CFG5, 30, 30),
1228                DSI_FLD_GET(DSIPHY_CFG5, 31, 31));
1229
1230#undef DSI_FLD_GET
1231}
1232
1233static inline int dsi_if_enable(struct platform_device *dsidev, bool enable)
1234{
1235        DSSDBG("dsi_if_enable(%d)\n", enable);
1236
1237        enable = enable ? 1 : 0;
1238        REG_FLD_MOD(dsidev, DSI_CTRL, enable, 0, 0); /* IF_EN */
1239
1240        if (wait_for_bit_change(dsidev, DSI_CTRL, 0, enable) != enable) {
 1241                DSSERR("Failed to set dsi_if_enable to %d\n", enable);
 1242                return -EIO;
1243        }
1244
1245        return 0;
1246}
1247
1248static unsigned long dsi_get_pll_hsdiv_dispc_rate(struct platform_device *dsidev)
1249{
1250        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1251
1252        return dsi->pll.cinfo.clkout[HSDIV_DISPC];
1253}
1254
1255static unsigned long dsi_get_pll_hsdiv_dsi_rate(struct platform_device *dsidev)
1256{
1257        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1258
1259        return dsi->pll.cinfo.clkout[HSDIV_DSI];
1260}
1261
1262static unsigned long dsi_get_txbyteclkhs(struct platform_device *dsidev)
1263{
1264        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1265
1266        return dsi->pll.cinfo.clkdco / 16;
1267}
1268
1269static unsigned long dsi_fclk_rate(struct platform_device *dsidev)
1270{
1271        unsigned long r;
1272        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1273
1274        if (dss_get_dsi_clk_source(dsi->module_id) == OMAP_DSS_CLK_SRC_FCK) {
1275                /* DSI FCLK source is DSS_CLK_FCK */
1276                r = clk_get_rate(dsi->dss_clk);
1277        } else {
1278                /* DSI FCLK source is dsi_pll_hsdiv_dsi_clk */
1279                r = dsi_get_pll_hsdiv_dsi_rate(dsidev);
1280        }
1281
1282        return r;
1283}
1284
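/*
 * Pick the smallest LP clock divider that keeps the LP clock at or below
 * lp_clk_max; the LP clock is derived from dsi_fclk / 2.  Illustrative
 * numbers (not from any specific board): with dsi_fclk = 38.4 MHz and
 * lp_clk_max = 10 MHz, lp_clk_div = DIV_ROUND_UP(38400000, 20000000) = 2
 * and lp_clk = 38400000 / 2 / 2 = 9.6 MHz.
 */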
1285static int dsi_lp_clock_calc(unsigned long dsi_fclk,
1286                unsigned long lp_clk_min, unsigned long lp_clk_max,
1287                struct dsi_lp_clock_info *lp_cinfo)
1288{
1289        unsigned lp_clk_div;
1290        unsigned long lp_clk;
1291
1292        lp_clk_div = DIV_ROUND_UP(dsi_fclk, lp_clk_max * 2);
1293        lp_clk = dsi_fclk / 2 / lp_clk_div;
1294
1295        if (lp_clk < lp_clk_min || lp_clk > lp_clk_max)
1296                return -EINVAL;
1297
1298        lp_cinfo->lp_clk_div = lp_clk_div;
1299        lp_cinfo->lp_clk = lp_clk;
1300
1301        return 0;
1302}
1303
1304static int dsi_set_lp_clk_divisor(struct platform_device *dsidev)
1305{
1306        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1307        unsigned long dsi_fclk;
1308        unsigned lp_clk_div;
1309        unsigned long lp_clk;
1310        unsigned lpdiv_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_LPDIV);
1311
1312
1313        lp_clk_div = dsi->user_lp_cinfo.lp_clk_div;
1314
1315        if (lp_clk_div == 0 || lp_clk_div > lpdiv_max)
1316                return -EINVAL;
1317
1318        dsi_fclk = dsi_fclk_rate(dsidev);
1319
1320        lp_clk = dsi_fclk / 2 / lp_clk_div;
1321
1322        DSSDBG("LP_CLK_DIV %u, LP_CLK %lu\n", lp_clk_div, lp_clk);
1323        dsi->current_lp_cinfo.lp_clk = lp_clk;
1324        dsi->current_lp_cinfo.lp_clk_div = lp_clk_div;
1325
1326        /* LP_CLK_DIVISOR */
1327        REG_FLD_MOD(dsidev, DSI_CLK_CTRL, lp_clk_div, 12, 0);
1328
1329        /* LP_RX_SYNCHRO_ENABLE */
1330        REG_FLD_MOD(dsidev, DSI_CLK_CTRL, dsi_fclk > 30000000 ? 1 : 0, 21, 21);
1331
1332        return 0;
1333}
1334
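/*
 * Reference-counted gating of the SCP interface clock (CIO_CLK_ICG in
 * DSI_CLK_CTRL).  The clock is held while the DSI PLL is in use
 * (dsi_pll_enable()/dsi_pll_uninit()) and around register dumps
 * (dsi_dump_dsidev_regs()); see also the OMAP3/OMAP4 note in
 * dsi_pll_enable().
 */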
1335static void dsi_enable_scp_clk(struct platform_device *dsidev)
1336{
1337        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1338
1339        if (dsi->scp_clk_refcount++ == 0)
1340                REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 1, 14, 14); /* CIO_CLK_ICG */
1341}
1342
1343static void dsi_disable_scp_clk(struct platform_device *dsidev)
1344{
1345        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1346
1347        WARN_ON(dsi->scp_clk_refcount == 0);
1348        if (--dsi->scp_clk_refcount == 0)
1349                REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 0, 14, 14); /* CIO_CLK_ICG */
1350}
1351
1352enum dsi_pll_power_state {
1353        DSI_PLL_POWER_OFF       = 0x0,
1354        DSI_PLL_POWER_ON_HSCLK  = 0x1,
1355        DSI_PLL_POWER_ON_ALL    = 0x2,
1356        DSI_PLL_POWER_ON_DIV    = 0x3,
1357};
1358
1359static int dsi_pll_power(struct platform_device *dsidev,
1360                enum dsi_pll_power_state state)
1361{
1362        int t = 0;
1363
1364        /* DSI-PLL power command 0x3 is not working */
1365        if (dss_has_feature(FEAT_DSI_PLL_PWR_BUG) &&
1366                        state == DSI_PLL_POWER_ON_DIV)
1367                state = DSI_PLL_POWER_ON_ALL;
1368
1369        /* PLL_PWR_CMD */
1370        REG_FLD_MOD(dsidev, DSI_CLK_CTRL, state, 31, 30);
1371
1372        /* PLL_PWR_STATUS */
1373        while (FLD_GET(dsi_read_reg(dsidev, DSI_CLK_CTRL), 29, 28) != state) {
1374                if (++t > 1000) {
1375                        DSSERR("Failed to set DSI PLL power mode to %d\n",
1376                                        state);
1377                        return -ENODEV;
1378                }
1379                udelay(1);
1380        }
1381
1382        return 0;
1383}
1384
1385
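/*
 * Choose the smallest HSDIV_DSI divider (mX) that keeps the DSI functional
 * clock under the platform maximum.  Purely illustrative numbers: with
 * clkdco = 1800 MHz and a 170 MHz limit, mX = DIV_ROUND_UP(1800, 170) = 11
 * and clkout = 1800 / 11 ~= 163.6 MHz.
 */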
1386static void dsi_pll_calc_dsi_fck(struct dss_pll_clock_info *cinfo)
1387{
1388        unsigned long max_dsi_fck;
1389
1390        max_dsi_fck = dss_feat_get_param_max(FEAT_PARAM_DSI_FCK);
1391
1392        cinfo->mX[HSDIV_DSI] = DIV_ROUND_UP(cinfo->clkdco, max_dsi_fck);
1393        cinfo->clkout[HSDIV_DSI] = cinfo->clkdco / cinfo->mX[HSDIV_DSI];
1394}
1395
1396static int dsi_pll_enable(struct dss_pll *pll)
1397{
1398        struct dsi_data *dsi = container_of(pll, struct dsi_data, pll);
1399        struct platform_device *dsidev = dsi->pdev;
1400        int r = 0;
1401
1402        DSSDBG("PLL init\n");
1403
1404        r = dsi_regulator_init(dsidev);
1405        if (r)
1406                return r;
1407
1408        r = dsi_runtime_get(dsidev);
1409        if (r)
1410                return r;
1411
1412        /*
1413         * Note: SCP CLK is not required on OMAP3, but it is required on OMAP4.
1414         */
1415        dsi_enable_scp_clk(dsidev);
1416
1417        if (!dsi->vdds_dsi_enabled) {
1418                r = regulator_enable(dsi->vdds_dsi_reg);
1419                if (r)
1420                        goto err0;
1421                dsi->vdds_dsi_enabled = true;
1422        }
1423
1424        /* XXX PLL does not come out of reset without this... */
1425        dispc_pck_free_enable(1);
1426
1427        if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 0, 1) != 1) {
1428                DSSERR("PLL not coming out of reset.\n");
1429                r = -ENODEV;
1430                dispc_pck_free_enable(0);
1431                goto err1;
1432        }
1433
1434        /* XXX ... but if left on, we get problems when planes do not
1435         * fill the whole display. No idea about this */
1436        dispc_pck_free_enable(0);
1437
1438        r = dsi_pll_power(dsidev, DSI_PLL_POWER_ON_ALL);
1439
1440        if (r)
1441                goto err1;
1442
1443        DSSDBG("PLL init done\n");
1444
1445        return 0;
1446err1:
1447        if (dsi->vdds_dsi_enabled) {
1448                regulator_disable(dsi->vdds_dsi_reg);
1449                dsi->vdds_dsi_enabled = false;
1450        }
1451err0:
1452        dsi_disable_scp_clk(dsidev);
1453        dsi_runtime_put(dsidev);
1454        return r;
1455}
1456
1457static void dsi_pll_uninit(struct platform_device *dsidev, bool disconnect_lanes)
1458{
1459        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1460
1461        dsi_pll_power(dsidev, DSI_PLL_POWER_OFF);
1462        if (disconnect_lanes) {
1463                WARN_ON(!dsi->vdds_dsi_enabled);
1464                regulator_disable(dsi->vdds_dsi_reg);
1465                dsi->vdds_dsi_enabled = false;
1466        }
1467
1468        dsi_disable_scp_clk(dsidev);
1469        dsi_runtime_put(dsidev);
1470
1471        DSSDBG("PLL uninit done\n");
1472}
1473
1474static void dsi_pll_disable(struct dss_pll *pll)
1475{
1476        struct dsi_data *dsi = container_of(pll, struct dsi_data, pll);
1477        struct platform_device *dsidev = dsi->pdev;
1478
1479        dsi_pll_uninit(dsidev, true);
1480}
1481
1482static void dsi_dump_dsidev_clocks(struct platform_device *dsidev,
1483                struct seq_file *s)
1484{
1485        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1486        struct dss_pll_clock_info *cinfo = &dsi->pll.cinfo;
1487        enum omap_dss_clk_source dispc_clk_src, dsi_clk_src;
1488        int dsi_module = dsi->module_id;
1489        struct dss_pll *pll = &dsi->pll;
1490
1491        dispc_clk_src = dss_get_dispc_clk_source();
1492        dsi_clk_src = dss_get_dsi_clk_source(dsi_module);
1493
1494        if (dsi_runtime_get(dsidev))
1495                return;
1496
1497        seq_printf(s,   "- DSI%d PLL -\n", dsi_module + 1);
1498
1499        seq_printf(s,   "dsi pll clkin\t%lu\n", clk_get_rate(pll->clkin));
1500
1501        seq_printf(s,   "Fint\t\t%-16lun %u\n", cinfo->fint, cinfo->n);
1502
1503        seq_printf(s,   "CLKIN4DDR\t%-16lum %u\n",
1504                        cinfo->clkdco, cinfo->m);
1505
1506        seq_printf(s,   "DSI_PLL_HSDIV_DISPC (%s)\t%-16lum_dispc %u\t(%s)\n",
1507                        dss_feat_get_clk_source_name(dsi_module == 0 ?
1508                                OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC :
1509                                OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC),
1510                        cinfo->clkout[HSDIV_DISPC],
1511                        cinfo->mX[HSDIV_DISPC],
1512                        dispc_clk_src == OMAP_DSS_CLK_SRC_FCK ?
1513                        "off" : "on");
1514
1515        seq_printf(s,   "DSI_PLL_HSDIV_DSI (%s)\t%-16lum_dsi %u\t(%s)\n",
1516                        dss_feat_get_clk_source_name(dsi_module == 0 ?
1517                                OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI :
1518                                OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI),
1519                        cinfo->clkout[HSDIV_DSI],
1520                        cinfo->mX[HSDIV_DSI],
1521                        dsi_clk_src == OMAP_DSS_CLK_SRC_FCK ?
1522                        "off" : "on");
1523
1524        seq_printf(s,   "- DSI%d -\n", dsi_module + 1);
1525
1526        seq_printf(s,   "dsi fclk source = %s (%s)\n",
1527                        dss_get_generic_clk_source_name(dsi_clk_src),
1528                        dss_feat_get_clk_source_name(dsi_clk_src));
1529
1530        seq_printf(s,   "DSI_FCLK\t%lu\n", dsi_fclk_rate(dsidev));
1531
1532        seq_printf(s,   "DDR_CLK\t\t%lu\n",
1533                        cinfo->clkdco / 4);
1534
1535        seq_printf(s,   "TxByteClkHS\t%lu\n", dsi_get_txbyteclkhs(dsidev));
1536
1537        seq_printf(s,   "LP_CLK\t\t%lu\n", dsi->current_lp_cinfo.lp_clk);
1538
1539        dsi_runtime_put(dsidev);
1540}
1541
1542void dsi_dump_clocks(struct seq_file *s)
1543{
1544        struct platform_device *dsidev;
1545        int i;
1546
1547        for  (i = 0; i < MAX_NUM_DSI; i++) {
1548                dsidev = dsi_get_dsidev_from_id(i);
1549                if (dsidev)
1550                        dsi_dump_dsidev_clocks(dsidev, s);
1551        }
1552}
1553
1554#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
1555static void dsi_dump_dsidev_irqs(struct platform_device *dsidev,
1556                struct seq_file *s)
1557{
1558        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1559        unsigned long flags;
1560        struct dsi_irq_stats stats;
1561
1562        spin_lock_irqsave(&dsi->irq_stats_lock, flags);
1563
1564        stats = dsi->irq_stats;
1565        memset(&dsi->irq_stats, 0, sizeof(dsi->irq_stats));
1566        dsi->irq_stats.last_reset = jiffies;
1567
1568        spin_unlock_irqrestore(&dsi->irq_stats_lock, flags);
1569
1570        seq_printf(s, "period %u ms\n",
1571                        jiffies_to_msecs(jiffies - stats.last_reset));
1572
1573        seq_printf(s, "irqs %d\n", stats.irq_count);
1574#define PIS(x) \
1575        seq_printf(s, "%-20s %10d\n", #x, stats.dsi_irqs[ffs(DSI_IRQ_##x)-1]);
1576
1577        seq_printf(s, "-- DSI%d interrupts --\n", dsi->module_id + 1);
1578        PIS(VC0);
1579        PIS(VC1);
1580        PIS(VC2);
1581        PIS(VC3);
1582        PIS(WAKEUP);
1583        PIS(RESYNC);
1584        PIS(PLL_LOCK);
1585        PIS(PLL_UNLOCK);
1586        PIS(PLL_RECALL);
1587        PIS(COMPLEXIO_ERR);
1588        PIS(HS_TX_TIMEOUT);
1589        PIS(LP_RX_TIMEOUT);
1590        PIS(TE_TRIGGER);
1591        PIS(ACK_TRIGGER);
1592        PIS(SYNC_LOST);
1593        PIS(LDO_POWER_GOOD);
1594        PIS(TA_TIMEOUT);
1595#undef PIS
1596
1597#define PIS(x) \
1598        seq_printf(s, "%-20s %10d %10d %10d %10d\n", #x, \
1599                        stats.vc_irqs[0][ffs(DSI_VC_IRQ_##x)-1], \
1600                        stats.vc_irqs[1][ffs(DSI_VC_IRQ_##x)-1], \
1601                        stats.vc_irqs[2][ffs(DSI_VC_IRQ_##x)-1], \
1602                        stats.vc_irqs[3][ffs(DSI_VC_IRQ_##x)-1]);
1603
1604        seq_printf(s, "-- VC interrupts --\n");
1605        PIS(CS);
1606        PIS(ECC_CORR);
1607        PIS(PACKET_SENT);
1608        PIS(FIFO_TX_OVF);
1609        PIS(FIFO_RX_OVF);
1610        PIS(BTA);
1611        PIS(ECC_NO_CORR);
1612        PIS(FIFO_TX_UDF);
1613        PIS(PP_BUSY_CHANGE);
1614#undef PIS
1615
1616#define PIS(x) \
1617        seq_printf(s, "%-20s %10d\n", #x, \
1618                        stats.cio_irqs[ffs(DSI_CIO_IRQ_##x)-1]);
1619
1620        seq_printf(s, "-- CIO interrupts --\n");
1621        PIS(ERRSYNCESC1);
1622        PIS(ERRSYNCESC2);
1623        PIS(ERRSYNCESC3);
1624        PIS(ERRESC1);
1625        PIS(ERRESC2);
1626        PIS(ERRESC3);
1627        PIS(ERRCONTROL1);
1628        PIS(ERRCONTROL2);
1629        PIS(ERRCONTROL3);
1630        PIS(STATEULPS1);
1631        PIS(STATEULPS2);
1632        PIS(STATEULPS3);
1633        PIS(ERRCONTENTIONLP0_1);
1634        PIS(ERRCONTENTIONLP1_1);
1635        PIS(ERRCONTENTIONLP0_2);
1636        PIS(ERRCONTENTIONLP1_2);
1637        PIS(ERRCONTENTIONLP0_3);
1638        PIS(ERRCONTENTIONLP1_3);
1639        PIS(ULPSACTIVENOT_ALL0);
1640        PIS(ULPSACTIVENOT_ALL1);
1641#undef PIS
1642}
1643
1644static void dsi1_dump_irqs(struct seq_file *s)
1645{
1646        struct platform_device *dsidev = dsi_get_dsidev_from_id(0);
1647
1648        dsi_dump_dsidev_irqs(dsidev, s);
1649}
1650
1651static void dsi2_dump_irqs(struct seq_file *s)
1652{
1653        struct platform_device *dsidev = dsi_get_dsidev_from_id(1);
1654
1655        dsi_dump_dsidev_irqs(dsidev, s);
1656}
1657#endif
1658
1659static void dsi_dump_dsidev_regs(struct platform_device *dsidev,
1660                struct seq_file *s)
1661{
1662#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dsi_read_reg(dsidev, r))
1663
1664        if (dsi_runtime_get(dsidev))
1665                return;
1666        dsi_enable_scp_clk(dsidev);
1667
1668        DUMPREG(DSI_REVISION);
1669        DUMPREG(DSI_SYSCONFIG);
1670        DUMPREG(DSI_SYSSTATUS);
1671        DUMPREG(DSI_IRQSTATUS);
1672        DUMPREG(DSI_IRQENABLE);
1673        DUMPREG(DSI_CTRL);
1674        DUMPREG(DSI_COMPLEXIO_CFG1);
1675        DUMPREG(DSI_COMPLEXIO_IRQ_STATUS);
1676        DUMPREG(DSI_COMPLEXIO_IRQ_ENABLE);
1677        DUMPREG(DSI_CLK_CTRL);
1678        DUMPREG(DSI_TIMING1);
1679        DUMPREG(DSI_TIMING2);
1680        DUMPREG(DSI_VM_TIMING1);
1681        DUMPREG(DSI_VM_TIMING2);
1682        DUMPREG(DSI_VM_TIMING3);
1683        DUMPREG(DSI_CLK_TIMING);
1684        DUMPREG(DSI_TX_FIFO_VC_SIZE);
1685        DUMPREG(DSI_RX_FIFO_VC_SIZE);
1686        DUMPREG(DSI_COMPLEXIO_CFG2);
1687        DUMPREG(DSI_RX_FIFO_VC_FULLNESS);
1688        DUMPREG(DSI_VM_TIMING4);
1689        DUMPREG(DSI_TX_FIFO_VC_EMPTINESS);
1690        DUMPREG(DSI_VM_TIMING5);
1691        DUMPREG(DSI_VM_TIMING6);
1692        DUMPREG(DSI_VM_TIMING7);
1693        DUMPREG(DSI_STOPCLK_TIMING);
1694
1695        DUMPREG(DSI_VC_CTRL(0));
1696        DUMPREG(DSI_VC_TE(0));
1697        DUMPREG(DSI_VC_LONG_PACKET_HEADER(0));
1698        DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(0));
1699        DUMPREG(DSI_VC_SHORT_PACKET_HEADER(0));
1700        DUMPREG(DSI_VC_IRQSTATUS(0));
1701        DUMPREG(DSI_VC_IRQENABLE(0));
1702
1703        DUMPREG(DSI_VC_CTRL(1));
1704        DUMPREG(DSI_VC_TE(1));
1705        DUMPREG(DSI_VC_LONG_PACKET_HEADER(1));
1706        DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(1));
1707        DUMPREG(DSI_VC_SHORT_PACKET_HEADER(1));
1708        DUMPREG(DSI_VC_IRQSTATUS(1));
1709        DUMPREG(DSI_VC_IRQENABLE(1));
1710
1711        DUMPREG(DSI_VC_CTRL(2));
1712        DUMPREG(DSI_VC_TE(2));
1713        DUMPREG(DSI_VC_LONG_PACKET_HEADER(2));
1714        DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(2));
1715        DUMPREG(DSI_VC_SHORT_PACKET_HEADER(2));
1716        DUMPREG(DSI_VC_IRQSTATUS(2));
1717        DUMPREG(DSI_VC_IRQENABLE(2));
1718
1719        DUMPREG(DSI_VC_CTRL(3));
1720        DUMPREG(DSI_VC_TE(3));
1721        DUMPREG(DSI_VC_LONG_PACKET_HEADER(3));
1722        DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(3));
1723        DUMPREG(DSI_VC_SHORT_PACKET_HEADER(3));
1724        DUMPREG(DSI_VC_IRQSTATUS(3));
1725        DUMPREG(DSI_VC_IRQENABLE(3));
1726
1727        DUMPREG(DSI_DSIPHY_CFG0);
1728        DUMPREG(DSI_DSIPHY_CFG1);
1729        DUMPREG(DSI_DSIPHY_CFG2);
1730        DUMPREG(DSI_DSIPHY_CFG5);
1731
1732        DUMPREG(DSI_PLL_CONTROL);
1733        DUMPREG(DSI_PLL_STATUS);
1734        DUMPREG(DSI_PLL_GO);
1735        DUMPREG(DSI_PLL_CONFIGURATION1);
1736        DUMPREG(DSI_PLL_CONFIGURATION2);
1737
1738        dsi_disable_scp_clk(dsidev);
1739        dsi_runtime_put(dsidev);
1740#undef DUMPREG
1741}
1742
1743static void dsi1_dump_regs(struct seq_file *s)
1744{
1745        struct platform_device *dsidev = dsi_get_dsidev_from_id(0);
1746
1747        dsi_dump_dsidev_regs(dsidev, s);
1748}
1749
1750static void dsi2_dump_regs(struct seq_file *s)
1751{
1752        struct platform_device *dsidev = dsi_get_dsidev_from_id(1);
1753
1754        dsi_dump_dsidev_regs(dsidev, s);
1755}
1756
1757enum dsi_cio_power_state {
1758        DSI_COMPLEXIO_POWER_OFF         = 0x0,
1759        DSI_COMPLEXIO_POWER_ON          = 0x1,
1760        DSI_COMPLEXIO_POWER_ULPS        = 0x2,
1761};
1762
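    /*
     * Request a ComplexIO power state via the PWR_CMD field and busy-wait
     * (up to roughly 1 ms) for PWR_STATUS to report the new state.
     */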
1763static int dsi_cio_power(struct platform_device *dsidev,
1764                enum dsi_cio_power_state state)
1765{
1766        int t = 0;
1767
1768        /* PWR_CMD */
1769        REG_FLD_MOD(dsidev, DSI_COMPLEXIO_CFG1, state, 28, 27);
1770
1771        /* PWR_STATUS */
1772        while (FLD_GET(dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG1),
1773                        26, 25) != state) {
1774                if (++t > 1000) {
1775                        DSSERR("failed to set complexio power state to "
1776                                        "%d\n", state);
1777                        return -ENODEV;
1778                }
1779                udelay(1);
1780        }
1781
1782        return 0;
1783}
1784
1785static unsigned dsi_get_line_buf_size(struct platform_device *dsidev)
1786{
1787        int val;
1788
1789        /* line buffer on OMAP3 is 1024 x 24bits */
1790        /* XXX: for some reason using full buffer size causes
1791         * considerable TX slowdown with update sizes that fill the
1792         * whole buffer */
1793        if (!dss_has_feature(FEAT_DSI_GNQ))
1794                return 1023 * 3;
1795
1796        val = REG_GET(dsidev, DSI_GNQ, 14, 12); /* VP1_LINE_BUFFER_SIZE */
1797
1798        switch (val) {
1799        case 1:
1800                return 512 * 3;         /* 512x24 bits */
1801        case 2:
1802                return 682 * 3;         /* 682x24 bits */
1803        case 3:
1804                return 853 * 3;         /* 853x24 bits */
1805        case 4:
1806                return 1024 * 3;        /* 1024x24 bits */
1807        case 5:
1808                return 1194 * 3;        /* 1194x24 bits */
1809        case 6:
1810                return 1365 * 3;        /* 1365x24 bits */
1811        case 7:
1812                return 1920 * 3;        /* 1920x24 bits */
1813        default:
1814                BUG();
1815                return 0;
1816        }
1817}
1818
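    /*
     * Program DSI_COMPLEXIO_CFG1: map each used lane function (clock,
     * data1..4) to its physical lane position and polarity, and clear the
     * positions of any unused lanes.
     */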
1819static int dsi_set_lane_config(struct platform_device *dsidev)
1820{
1821        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1822        static const u8 offsets[] = { 0, 4, 8, 12, 16 };
1823        static const enum dsi_lane_function functions[] = {
1824                DSI_LANE_CLK,
1825                DSI_LANE_DATA1,
1826                DSI_LANE_DATA2,
1827                DSI_LANE_DATA3,
1828                DSI_LANE_DATA4,
1829        };
1830        u32 r;
1831        int i;
1832
1833        r = dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG1);
1834
1835        for (i = 0; i < dsi->num_lanes_used; ++i) {
1836                unsigned offset = offsets[i];
1837                unsigned polarity, lane_number;
1838                unsigned t;
1839
1840                for (t = 0; t < dsi->num_lanes_supported; ++t)
1841                        if (dsi->lanes[t].function == functions[i])
1842                                break;
1843
1844                if (t == dsi->num_lanes_supported)
1845                        return -EINVAL;
1846
1847                lane_number = t;
1848                polarity = dsi->lanes[t].polarity;
1849
1850                r = FLD_MOD(r, lane_number + 1, offset + 2, offset);
1851                r = FLD_MOD(r, polarity, offset + 3, offset + 3);
1852        }
1853
1854        /* clear the unused lanes */
1855        for (; i < dsi->num_lanes_supported; ++i) {
1856                unsigned offset = offsets[i];
1857
1858                r = FLD_MOD(r, 0, offset + 2, offset);
1859                r = FLD_MOD(r, 0, offset + 3, offset + 3);
1860        }
1861
1862        dsi_write_reg(dsidev, DSI_COMPLEXIO_CFG1, r);
1863
1864        return 0;
1865}
1866
1867static inline unsigned ns2ddr(struct platform_device *dsidev, unsigned ns)
1868{
1869        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1870
1871        /* convert time in ns to ddr ticks, rounding up */
1872        unsigned long ddr_clk = dsi->pll.cinfo.clkdco / 4;
1873        return (ns * (ddr_clk / 1000 / 1000) + 999) / 1000;
1874}
1875
1876static inline unsigned ddr2ns(struct platform_device *dsidev, unsigned ddr)
1877{
1878        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1879
1880        unsigned long ddr_clk = dsi->pll.cinfo.clkdco / 4;
1881        return ddr * 1000 * 1000 / (ddr_clk / 1000);
1882}
1883
1884static void dsi_cio_timings(struct platform_device *dsidev)
1885{
1886        u32 r;
1887        u32 ths_prepare, ths_prepare_ths_zero, ths_trail, ths_exit;
1888        u32 tlpx_half, tclk_trail, tclk_zero;
1889        u32 tclk_prepare;
1890
1891        /* calculate timings */
1892
1893        /* 1 * DDR_CLK = 2 * UI */
1894
1895        /* min 40ns + 4*UI      max 85ns + 6*UI */
1896        ths_prepare = ns2ddr(dsidev, 70) + 2;
1897
1898        /* min 145ns + 10*UI */
1899        ths_prepare_ths_zero = ns2ddr(dsidev, 175) + 2;
1900
1901        /* min max(8*UI, 60ns+4*UI) */
1902        ths_trail = ns2ddr(dsidev, 60) + 5;
1903
1904        /* min 100ns */
1905        ths_exit = ns2ddr(dsidev, 145);
1906
1907        /* tlpx min 50ns */
1908        tlpx_half = ns2ddr(dsidev, 25);
1909
1910        /* min 60ns */
1911        tclk_trail = ns2ddr(dsidev, 60) + 2;
1912
1913        /* min 38ns, max 95ns */
1914        tclk_prepare = ns2ddr(dsidev, 65);
1915
1916        /* min tclk-prepare + tclk-zero = 300ns */
1917        tclk_zero = ns2ddr(dsidev, 260);
1918
1919        DSSDBG("ths_prepare %u (%uns), ths_prepare_ths_zero %u (%uns)\n",
1920                ths_prepare, ddr2ns(dsidev, ths_prepare),
1921                ths_prepare_ths_zero, ddr2ns(dsidev, ths_prepare_ths_zero));
1922        DSSDBG("ths_trail %u (%uns), ths_exit %u (%uns)\n",
1923                        ths_trail, ddr2ns(dsidev, ths_trail),
1924                        ths_exit, ddr2ns(dsidev, ths_exit));
1925
1926        DSSDBG("tlpx_half %u (%uns), tclk_trail %u (%uns), "
1927                        "tclk_zero %u (%uns)\n",
1928                        tlpx_half, ddr2ns(dsidev, tlpx_half),
1929                        tclk_trail, ddr2ns(dsidev, tclk_trail),
1930                        tclk_zero, ddr2ns(dsidev, tclk_zero));
1931        DSSDBG("tclk_prepare %u (%uns)\n",
1932                        tclk_prepare, ddr2ns(dsidev, tclk_prepare));
1933
1934        /* program timings */
1935
1936        r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG0);
1937        r = FLD_MOD(r, ths_prepare, 31, 24);
1938        r = FLD_MOD(r, ths_prepare_ths_zero, 23, 16);
1939        r = FLD_MOD(r, ths_trail, 15, 8);
1940        r = FLD_MOD(r, ths_exit, 7, 0);
1941        dsi_write_reg(dsidev, DSI_DSIPHY_CFG0, r);
1942
1943        r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG1);
1944        r = FLD_MOD(r, tlpx_half, 20, 16);
1945        r = FLD_MOD(r, tclk_trail, 15, 8);
1946        r = FLD_MOD(r, tclk_zero, 7, 0);
1947
1948        if (dss_has_feature(FEAT_DSI_PHY_DCC)) {
1949                r = FLD_MOD(r, 0, 21, 21);      /* DCCEN = disable */
1950                r = FLD_MOD(r, 1, 22, 22);      /* CLKINP_DIVBY2EN = enable */
1951                r = FLD_MOD(r, 1, 23, 23);      /* CLKINP_SEL = enable */
1952        }
1953
1954        dsi_write_reg(dsidev, DSI_DSIPHY_CFG1, r);
1955
1956        r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG2);
1957        r = FLD_MOD(r, tclk_prepare, 7, 0);
1958        dsi_write_reg(dsidev, DSI_DSIPHY_CFG2, r);
1959}
1960
1961/* lane masks have lane 0 at lsb. mask_p for positive lines, n for negative */
1962static void dsi_cio_enable_lane_override(struct platform_device *dsidev,
1963                unsigned mask_p, unsigned mask_n)
1964{
1965        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1966        int i;
1967        u32 l;
1968        u8 lptxscp_start = dsi->num_lanes_supported == 3 ? 22 : 26;
1969
1970        l = 0;
1971
1972        for (i = 0; i < dsi->num_lanes_supported; ++i) {
1973                unsigned p = dsi->lanes[i].polarity;
1974
1975                if (mask_p & (1 << i))
1976                        l |= 1 << (i * 2 + (p ? 0 : 1));
1977
1978                if (mask_n & (1 << i))
1979                        l |= 1 << (i * 2 + (p ? 1 : 0));
1980        }
1981
1982        /*
1983         * Bits in REGLPTXSCPDAT4TO0DXDY:
1984         * 17: DY0 18: DX0
1985         * 19: DY1 20: DX1
1986         * 21: DY2 22: DX2
1987         * 23: DY3 24: DX3
1988         * 25: DY4 26: DX4
1989         */
1990
1991        /* Set the lane override configuration */
1992
1993        /* REGLPTXSCPDAT4TO0DXDY */
1994        REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, l, lptxscp_start, 17);
1995
1996        /* Enable lane override */
1997
1998        /* ENLPTXSCPDAT */
1999        REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, 1, 27, 27);
2000}
2001
2002static void dsi_cio_disable_lane_override(struct platform_device *dsidev)
2003{
2004        /* Disable lane override */
2005        REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, 0, 27, 27); /* ENLPTXSCPDAT */
2006        /* Reset the lane override configuration */
2007        /* REGLPTXSCPDAT4TO0DXDY */
2008        REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, 0, 22, 17);
2009}
2010
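    /*
     * Poll DSI_DSIPHY_CFG5 until the status bit for every lane in use shows
     * its TXCLKESC domain has come out of reset. Hardware with
     * FEAT_DSI_REVERSE_TXCLKESC uses a different (reversed) bit layout.
     */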
2011static int dsi_cio_wait_tx_clk_esc_reset(struct platform_device *dsidev)
2012{
2013        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2014        int t, i;
2015        bool in_use[DSI_MAX_NR_LANES];
2016        static const u8 offsets_old[] = { 28, 27, 26 };
2017        static const u8 offsets_new[] = { 24, 25, 26, 27, 28 };
2018        const u8 *offsets;
2019
2020        if (dss_has_feature(FEAT_DSI_REVERSE_TXCLKESC))
2021                offsets = offsets_old;
2022        else
2023                offsets = offsets_new;
2024
2025        for (i = 0; i < dsi->num_lanes_supported; ++i)
2026                in_use[i] = dsi->lanes[i].function != DSI_LANE_UNUSED;
2027
2028        t = 100000;
2029        while (true) {
2030                u32 l;
2031                int ok;
2032
2033                l = dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);
2034
2035                ok = 0;
2036                for (i = 0; i < dsi->num_lanes_supported; ++i) {
2037                        if (!in_use[i] || (l & (1 << offsets[i])))
2038                                ok++;
2039                }
2040
2041                if (ok == dsi->num_lanes_supported)
2042                        break;
2043
2044                if (--t == 0) {
2045                        for (i = 0; i < dsi->num_lanes_supported; ++i) {
2046                                if (!in_use[i] || (l & (1 << offsets[i])))
2047                                        continue;
2048
2049                                DSSERR("CIO TXCLKESC%d domain not coming "
2050                                                "out of reset\n", i);
2051                        }
2052                        return -EIO;
2053                }
2054        }
2055
2056        return 0;
2057}
2058
2059/* return bitmask of enabled lanes, lane0 being the lsb */
2060static unsigned dsi_get_lane_mask(struct platform_device *dsidev)
2061{
2062        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2063        unsigned mask = 0;
2064        int i;
2065
2066        for (i = 0; i < dsi->num_lanes_supported; ++i) {
2067                if (dsi->lanes[i].function != DSI_LANE_UNUSED)
2068                        mask |= 1 << i;
2069        }
2070
2071        return mask;
2072}
2073
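    /*
     * Bring up the DSI complex I/O: enable the pads and the SCP clock,
     * configure the lane mapping, force TX stop mode, perform a manual ULPS
     * exit if needed, power the CIO on and program the PHY timings.
     */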
2074static int dsi_cio_init(struct platform_device *dsidev)
2075{
2076        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2077        int r;
2078        u32 l;
2079
2080        DSSDBG("DSI CIO init starts\n");
2081
2082        r = dss_dsi_enable_pads(dsi->module_id, dsi_get_lane_mask(dsidev));
2083        if (r)
2084                return r;
2085
2086        dsi_enable_scp_clk(dsidev);
2087
2088        /* A dummy read using the SCP interface to any DSIPHY register is
2089         * required after DSIPHY reset to complete the reset of the DSI complex
2090         * I/O. */
2091        dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);
2092
2093        if (wait_for_bit_change(dsidev, DSI_DSIPHY_CFG5, 30, 1) != 1) {
2094                DSSERR("CIO SCP Clock domain not coming out of reset.\n");
2095                r = -EIO;
2096                goto err_scp_clk_dom;
2097        }
2098
2099        r = dsi_set_lane_config(dsidev);
2100        if (r)
2101                goto err_scp_clk_dom;
2102
2103        /* set TX STOP MODE timer to maximum for this operation */
2104        l = dsi_read_reg(dsidev, DSI_TIMING1);
2105        l = FLD_MOD(l, 1, 15, 15);      /* FORCE_TX_STOP_MODE_IO */
2106        l = FLD_MOD(l, 1, 14, 14);      /* STOP_STATE_X16_IO */
2107        l = FLD_MOD(l, 1, 13, 13);      /* STOP_STATE_X4_IO */
2108        l = FLD_MOD(l, 0x1fff, 12, 0);  /* STOP_STATE_COUNTER_IO */
2109        dsi_write_reg(dsidev, DSI_TIMING1, l);
2110
2111        if (dsi->ulps_enabled) {
2112                unsigned mask_p;
2113                int i;
2114
2115                DSSDBG("manual ulps exit\n");
2116
2117                /* ULPS is exited by Mark-1 state for 1ms, followed by
2118                 * stop state. DSS HW cannot do this via the normal
2119                 * ULPS exit sequence, as after reset the DSS HW thinks
2120                 * that we are not in ULPS mode, and refuses to send the
2121                 * sequence. So we need to send the ULPS exit sequence
2122                 * manually by setting positive lines high and negative lines
2123                 * low for 1ms.
2124                 */
2125
2126                mask_p = 0;
2127
2128                for (i = 0; i < dsi->num_lanes_supported; ++i) {
2129                        if (dsi->lanes[i].function == DSI_LANE_UNUSED)
2130                                continue;
2131                        mask_p |= 1 << i;
2132                }
2133
2134                dsi_cio_enable_lane_override(dsidev, mask_p, 0);
2135        }
2136
2137        r = dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_ON);
2138        if (r)
2139                goto err_cio_pwr;
2140
2141        if (wait_for_bit_change(dsidev, DSI_COMPLEXIO_CFG1, 29, 1) != 1) {
2142                DSSERR("CIO PWR clock domain not coming out of reset.\n");
2143                r = -ENODEV;
2144                goto err_cio_pwr_dom;
2145        }
2146
2147        dsi_if_enable(dsidev, true);
2148        dsi_if_enable(dsidev, false);
2149        REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 1, 20, 20); /* LP_CLK_ENABLE */
2150
2151        r = dsi_cio_wait_tx_clk_esc_reset(dsidev);
2152        if (r)
2153                goto err_tx_clk_esc_rst;
2154
2155        if (dsi->ulps_enabled) {
2156                /* Keep Mark-1 state for 1ms (as per DSI spec) */
2157                ktime_t wait = ns_to_ktime(1000 * 1000);
2158                set_current_state(TASK_UNINTERRUPTIBLE);
2159                schedule_hrtimeout(&wait, HRTIMER_MODE_REL);
2160
2161                /* Disable the override. The lanes should be set to the
2162                 * stop state (LP-11) by the HW */
2163                dsi_cio_disable_lane_override(dsidev);
2164        }
2165
2166        /* FORCE_TX_STOP_MODE_IO */
2167        REG_FLD_MOD(dsidev, DSI_TIMING1, 0, 15, 15);
2168
2169        dsi_cio_timings(dsidev);
2170
2171        if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
2172                /* DDR_CLK_ALWAYS_ON */
2173                REG_FLD_MOD(dsidev, DSI_CLK_CTRL,
2174                        dsi->vm_timings.ddr_clk_always_on, 13, 13);
2175        }
2176
2177        dsi->ulps_enabled = false;
2178
2179        DSSDBG("CIO init done\n");
2180
2181        return 0;
2182
2183err_tx_clk_esc_rst:
2184        REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 0, 20, 20); /* LP_CLK_ENABLE */
2185err_cio_pwr_dom:
2186        dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_OFF);
2187err_cio_pwr:
2188        if (dsi->ulps_enabled)
2189                dsi_cio_disable_lane_override(dsidev);
2190err_scp_clk_dom:
2191        dsi_disable_scp_clk(dsidev);
2192        dss_dsi_disable_pads(dsi->module_id, dsi_get_lane_mask(dsidev));
2193        return r;
2194}
2195
2196static void dsi_cio_uninit(struct platform_device *dsidev)
2197{
2198        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2199
2200        /* DDR_CLK_ALWAYS_ON */
2201        REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 0, 13, 13);
2202
2203        dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_OFF);
2204        dsi_disable_scp_clk(dsidev);
2205        dss_dsi_disable_pads(dsi->module_id, dsi_get_lane_mask(dsidev));
2206}
2207
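    /*
     * Partition the shared TX FIFO between the four virtual channels. Sizes
     * are given in FIFO units; each VC starts where the previous one ends,
     * and the total must not exceed 4 units.
     */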
2208static void dsi_config_tx_fifo(struct platform_device *dsidev,
2209                enum fifo_size size1, enum fifo_size size2,
2210                enum fifo_size size3, enum fifo_size size4)
2211{
2212        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2213        u32 r = 0;
2214        int add = 0;
2215        int i;
2216
2217        dsi->vc[0].tx_fifo_size = size1;
2218        dsi->vc[1].tx_fifo_size = size2;
2219        dsi->vc[2].tx_fifo_size = size3;
2220        dsi->vc[3].tx_fifo_size = size4;
2221
2222        for (i = 0; i < 4; i++) {
2223                u8 v;
2224                int size = dsi->vc[i].tx_fifo_size;
2225
2226                if (add + size > 4) {
2227                        DSSERR("Illegal FIFO configuration\n");
2228                        BUG();
2229                        return;
2230                }
2231
2232                v = FLD_VAL(add, 2, 0) | FLD_VAL(size, 7, 4);
2233                r |= v << (8 * i);
2234                /*DSSDBG("TX FIFO vc %d: size %d, add %d\n", i, size, add); */
2235                add += size;
2236        }
2237
2238        dsi_write_reg(dsidev, DSI_TX_FIFO_VC_SIZE, r);
2239}
2240
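    /* RX counterpart of dsi_config_tx_fifo(): same 4-unit FIFO split. */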
2241static void dsi_config_rx_fifo(struct platform_device *dsidev,
2242                enum fifo_size size1, enum fifo_size size2,
2243                enum fifo_size size3, enum fifo_size size4)
2244{
2245        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2246        u32 r = 0;
2247        int add = 0;
2248        int i;
2249
2250        dsi->vc[0].rx_fifo_size = size1;
2251        dsi->vc[1].rx_fifo_size = size2;
2252        dsi->vc[2].rx_fifo_size = size3;
2253        dsi->vc[3].rx_fifo_size = size4;
2254
2255        for (i = 0; i < 4; i++) {
2256                u8 v;
2257                int size = dsi->vc[i].rx_fifo_size;
2258
2259                if (add + size > 4) {
2260                        DSSERR("Illegal FIFO configuration\n");
2261                        BUG();
2262                        return;
2263                }
2264
2265                v = FLD_VAL(add, 2, 0) | FLD_VAL(size, 7, 4);
2266                r |= v << (8 * i);
2267                /*DSSDBG("RX FIFO vc %d: size %d, add %d\n", i, size, add); */
2268                add += size;
2269        }
2270
2271        dsi_write_reg(dsidev, DSI_RX_FIFO_VC_SIZE, r);
2272}
2273
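    /*
     * Set FORCE_TX_STOP_MODE_IO to force the lanes to the stop state and
     * wait for the hardware to clear the bit again.
     */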
2274static int dsi_force_tx_stop_mode_io(struct platform_device *dsidev)
2275{
2276        u32 r;
2277
2278        r = dsi_read_reg(dsidev, DSI_TIMING1);
2279        r = FLD_MOD(r, 1, 15, 15);      /* FORCE_TX_STOP_MODE_IO */
2280        dsi_write_reg(dsidev, DSI_TIMING1, r);
2281
2282        if (wait_for_bit_change(dsidev, DSI_TIMING1, 15, 0) != 0) {
2283                DSSERR("TX_STOP bit not going down\n");
2284                return -EIO;
2285        }
2286
2287        return 0;
2288}
2289
2290static bool dsi_vc_is_enabled(struct platform_device *dsidev, int channel)
2291{
2292        return REG_GET(dsidev, DSI_VC_CTRL(channel), 0, 0);
2293}
2294
2295static void dsi_packet_sent_handler_vp(void *data, u32 mask)
2296{
2297        struct dsi_packet_sent_handler_data *vp_data =
2298                (struct dsi_packet_sent_handler_data *) data;
2299        struct dsi_data *dsi = dsi_get_dsidrv_data(vp_data->dsidev);
2300        const int channel = dsi->update_channel;
2301        u8 bit = dsi->te_enabled ? 30 : 31;
2302
2303        if (REG_GET(vp_data->dsidev, DSI_VC_TE(channel), bit, bit) == 0)
2304                complete(vp_data->completion);
2305}
2306
2307static int dsi_sync_vc_vp(struct platform_device *dsidev, int channel)
2308{
2309        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2310        DECLARE_COMPLETION_ONSTACK(completion);
2311        struct dsi_packet_sent_handler_data vp_data = {
2312                .dsidev = dsidev,
2313                .completion = &completion
2314        };
2315        int r = 0;
2316        u8 bit;
2317
2318        bit = dsi->te_enabled ? 30 : 31;
2319
2320        r = dsi_register_isr_vc(dsidev, channel, dsi_packet_sent_handler_vp,
2321                &vp_data, DSI_VC_IRQ_PACKET_SENT);
2322        if (r)
2323                goto err0;
2324
2325        /* Wait for completion only if TE_EN/TE_START is still set */
2326        if (REG_GET(dsidev, DSI_VC_TE(channel), bit, bit)) {
2327                if (wait_for_completion_timeout(&completion,
2328                                msecs_to_jiffies(10)) == 0) {
2329                        DSSERR("Failed to complete previous frame transfer\n");
2330                        r = -EIO;
2331                        goto err1;
2332                }
2333        }
2334
2335        dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_vp,
2336                &vp_data, DSI_VC_IRQ_PACKET_SENT);
2337
2338        return 0;
2339err1:
2340        dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_vp,
2341                &vp_data, DSI_VC_IRQ_PACKET_SENT);
2342err0:
2343        return r;
2344}
2345
2346static void dsi_packet_sent_handler_l4(void *data, u32 mask)
2347{
2348        struct dsi_packet_sent_handler_data *l4_data =
2349                (struct dsi_packet_sent_handler_data *) data;
2350        struct dsi_data *dsi = dsi_get_dsidrv_data(l4_data->dsidev);
2351        const int channel = dsi->update_channel;
2352
2353        if (REG_GET(l4_data->dsidev, DSI_VC_CTRL(channel), 5, 5) == 0)
2354                complete(l4_data->completion);
2355}
2356
2357static int dsi_sync_vc_l4(struct platform_device *dsidev, int channel)
2358{
2359        DECLARE_COMPLETION_ONSTACK(completion);
2360        struct dsi_packet_sent_handler_data l4_data = {
2361                .dsidev = dsidev,
2362                .completion = &completion
2363        };
2364        int r = 0;
2365
2366        r = dsi_register_isr_vc(dsidev, channel, dsi_packet_sent_handler_l4,
2367                &l4_data, DSI_VC_IRQ_PACKET_SENT);
2368        if (r)
2369                goto err0;
2370
2371        /* Wait for completion only if TX_FIFO_NOT_EMPTY is still set */
2372        if (REG_GET(dsidev, DSI_VC_CTRL(channel), 5, 5)) {
2373                if (wait_for_completion_timeout(&completion,
2374                                msecs_to_jiffies(10)) == 0) {
2375                        DSSERR("Failed to complete previous l4 transfer\n");
2376                        r = -EIO;
2377                        goto err1;
2378                }
2379        }
2380
2381        dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_l4,
2382                &l4_data, DSI_VC_IRQ_PACKET_SENT);
2383
2384        return 0;
2385err1:
2386        dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_l4,
2387                &l4_data, DSI_VC_IRQ_PACKET_SENT);
2388err0:
2389        return r;
2390}
2391
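    /*
     * Wait for any transfer in progress on the virtual channel to finish,
     * using the PACKET_SENT interrupt; dispatches on whether the VC is fed
     * from the video port or from the L4 slave port.
     */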
2392static int dsi_sync_vc(struct platform_device *dsidev, int channel)
2393{
2394        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2395
2396        WARN_ON(!dsi_bus_is_locked(dsidev));
2397
2398        WARN_ON(in_interrupt());
2399
2400        if (!dsi_vc_is_enabled(dsidev, channel))
2401                return 0;
2402
2403        switch (dsi->vc[channel].source) {
2404        case DSI_VC_SOURCE_VP:
2405                return dsi_sync_vc_vp(dsidev, channel);
2406        case DSI_VC_SOURCE_L4:
2407                return dsi_sync_vc_l4(dsidev, channel);
2408        default:
2409                BUG();
2410                return -EINVAL;
2411        }
2412}
2413
2414static int dsi_vc_enable(struct platform_device *dsidev, int channel,
2415                bool enable)
2416{
2417        DSSDBG("dsi_vc_enable channel %d, enable %d\n",
2418                        channel, enable);
2419
2420        enable = enable ? 1 : 0;
2421
2422        REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), enable, 0, 0);
2423
2424        if (wait_for_bit_change(dsidev, DSI_VC_CTRL(channel),
2425                0, enable) != enable) {
2426                DSSERR("Failed to set dsi_vc_enable to %d\n", enable);
2427                return -EIO;
2428        }
2429
2430        return 0;
2431}
2432
2433static void dsi_vc_initial_config(struct platform_device *dsidev, int channel)
2434{
2435        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2436        u32 r;
2437
2438        DSSDBG("Initial config of virtual channel %d\n", channel);
2439
2440        r = dsi_read_reg(dsidev, DSI_VC_CTRL(channel));
2441
2442        if (FLD_GET(r, 15, 15)) /* VC_BUSY */
2443                DSSERR("VC(%d) busy when trying to configure it!\n",
2444                                channel);
2445
2446        r = FLD_MOD(r, 0, 1, 1); /* SOURCE, 0 = L4 */
2447        r = FLD_MOD(r, 0, 2, 2); /* BTA_SHORT_EN  */
2448        r = FLD_MOD(r, 0, 3, 3); /* BTA_LONG_EN */
2449        r = FLD_MOD(r, 0, 4, 4); /* MODE, 0 = command */
2450        r = FLD_MOD(r, 1, 7, 7); /* CS_TX_EN */
2451        r = FLD_MOD(r, 1, 8, 8); /* ECC_TX_EN */
2452        r = FLD_MOD(r, 0, 9, 9); /* MODE_SPEED, high speed on/off */
2453        if (dss_has_feature(FEAT_DSI_VC_OCP_WIDTH))
2454                r = FLD_MOD(r, 3, 11, 10);      /* OCP_WIDTH = 32 bit */
2455
2456        r = FLD_MOD(r, 4, 29, 27); /* DMA_RX_REQ_NB = no dma */
2457        r = FLD_MOD(r, 4, 23, 21); /* DMA_TX_REQ_NB = no dma */
2458
2459        dsi_write_reg(dsidev, DSI_VC_CTRL(channel), r);
2460
2461        dsi->vc[channel].source = DSI_VC_SOURCE_L4;
2462}
2463
2464static int dsi_vc_config_source(struct platform_device *dsidev, int channel,
2465                enum dsi_vc_source source)
2466{
2467        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2468
2469        if (dsi->vc[channel].source == source)
2470                return 0;
2471
2472        DSSDBG("Source config of virtual channel %d\n", channel);
2473
2474        dsi_sync_vc(dsidev, channel);
2475
2476        dsi_vc_enable(dsidev, channel, 0);
2477
2478        /* VC_BUSY */
2479        if (wait_for_bit_change(dsidev, DSI_VC_CTRL(channel), 15, 0) != 0) {
2480                DSSERR("vc(%d) busy when trying to config for VP\n", channel);
2481                return -EIO;
2482        }
2483
2484        /* SOURCE, 0 = L4, 1 = video port */
2485        REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), source, 1, 1);
2486
2487        /* DCS_CMD_ENABLE */
2488        if (dss_has_feature(FEAT_DSI_DCS_CMD_CONFIG_VC)) {
2489                bool enable = source == DSI_VC_SOURCE_VP;
2490                REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), enable, 30, 30);
2491        }
2492
2493        dsi_vc_enable(dsidev, channel, 1);
2494
2495        dsi->vc[channel].source = source;
2496
2497        return 0;
2498}
2499
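    /*
     * Switch the virtual channel between LP and HS transmission (MODE_SPEED).
     * The VC and the DSI interface are briefly disabled around the change,
     * and with ddr_clk_always_on a NULL packet is sent to start the DDR clock.
     */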
2500static void dsi_vc_enable_hs(struct omap_dss_device *dssdev, int channel,
2501                bool enable)
2502{
2503        struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2504        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2505
2506        DSSDBG("dsi_vc_enable_hs(%d, %d)\n", channel, enable);
2507
2508        WARN_ON(!dsi_bus_is_locked(dsidev));
2509
2510        dsi_vc_enable(dsidev, channel, 0);
2511        dsi_if_enable(dsidev, 0);
2512
2513        REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), enable, 9, 9);
2514
2515        dsi_vc_enable(dsidev, channel, 1);
2516        dsi_if_enable(dsidev, 1);
2517
2518        dsi_force_tx_stop_mode_io(dsidev);
2519
2520        /* start the DDR clock by sending a NULL packet */
2521        if (dsi->vm_timings.ddr_clk_always_on && enable)
2522                dsi_vc_send_null(dssdev, channel);
2523}
2524
2525static void dsi_vc_flush_long_data(struct platform_device *dsidev, int channel)
2526{
2527        while (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) {
2528                u32 val;
2529                val = dsi_read_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel));
2530                DSSDBG("\t\tb1 %#02x b2 %#02x b3 %#02x b4 %#02x\n",
2531                                (val >> 0) & 0xff,
2532                                (val >> 8) & 0xff,
2533                                (val >> 16) & 0xff,
2534                                (val >> 24) & 0xff);
2535        }
2536}
2537
2538static void dsi_show_rx_ack_with_err(u16 err)
2539{
2540        DSSERR("\tACK with ERROR (%#x):\n", err);
2541        if (err & (1 << 0))
2542                DSSERR("\t\tSoT Error\n");
2543        if (err & (1 << 1))
2544                DSSERR("\t\tSoT Sync Error\n");
2545        if (err & (1 << 2))
2546                DSSERR("\t\tEoT Sync Error\n");
2547        if (err & (1 << 3))
2548                DSSERR("\t\tEscape Mode Entry Command Error\n");
2549        if (err & (1 << 4))
2550                DSSERR("\t\tLP Transmit Sync Error\n");
2551        if (err & (1 << 5))
2552                DSSERR("\t\tHS Receive Timeout Error\n");
2553        if (err & (1 << 6))
2554                DSSERR("\t\tFalse Control Error\n");
2555        if (err & (1 << 7))
2556                DSSERR("\t\t(reserved7)\n");
2557        if (err & (1 << 8))
2558                DSSERR("\t\tECC Error, single-bit (corrected)\n");
2559        if (err & (1 << 9))
2560                DSSERR("\t\tECC Error, multi-bit (not corrected)\n");
2561        if (err & (1 << 10))
2562                DSSERR("\t\tChecksum Error\n");
2563        if (err & (1 << 11))
2564                DSSERR("\t\tData type not recognized\n");
2565        if (err & (1 << 12))
2566                DSSERR("\t\tInvalid VC ID\n");
2567        if (err & (1 << 13))
2568                DSSERR("\t\tInvalid Transmission Length\n");
2569        if (err & (1 << 14))
2570                DSSERR("\t\t(reserved14)\n");
2571        if (err & (1 << 15))
2572                DSSERR("\t\tDSI Protocol Violation\n");
2573}
2574
2575static u16 dsi_vc_flush_receive_data(struct platform_device *dsidev,
2576                int channel)
2577{
2578        /* RX_FIFO_NOT_EMPTY */
2579        while (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) {
2580                u32 val;
2581                u8 dt;
2582                val = dsi_read_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel));
2583                DSSERR("\trawval %#08x\n", val);
2584                dt = FLD_GET(val, 5, 0);
2585                if (dt == MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT) {
2586                        u16 err = FLD_GET(val, 23, 8);
2587                        dsi_show_rx_ack_with_err(err);
2588                } else if (dt == MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE) {
2589                        DSSERR("\tDCS short response, 1 byte: %#x\n",
2590                                        FLD_GET(val, 23, 8));
2591                } else if (dt == MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE) {
2592                        DSSERR("\tDCS short response, 2 byte: %#x\n",
2593                                        FLD_GET(val, 23, 8));
2594                } else if (dt == MIPI_DSI_RX_DCS_LONG_READ_RESPONSE) {
2595                        DSSERR("\tDCS long response, len %d\n",
2596                                        FLD_GET(val, 23, 8));
2597                        dsi_vc_flush_long_data(dsidev, channel);
2598                } else {
2599                        DSSERR("\tunknown datatype 0x%02x\n", dt);
2600                }
2601        }
2602        return 0;
2603}
2604
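    /*
     * Trigger a bus turn-around on the virtual channel so the peripheral can
     * transmit. Any stale data left in the RX FIFO is dumped first.
     */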
2605static int dsi_vc_send_bta(struct platform_device *dsidev, int channel)
2606{
2607        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2608
2609        if (dsi->debug_write || dsi->debug_read)
2610                DSSDBG("dsi_vc_send_bta %d\n", channel);
2611
2612        WARN_ON(!dsi_bus_is_locked(dsidev));
2613
2614        /* RX_FIFO_NOT_EMPTY */
2615        if (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) {
2616                DSSERR("rx fifo not empty when sending BTA, dumping data:\n");
2617                dsi_vc_flush_receive_data(dsidev, channel);
2618        }
2619
2620        REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 1, 6, 6); /* BTA_EN */
2621
2622        /* flush posted write */
2623        dsi_read_reg(dsidev, DSI_VC_CTRL(channel));
2624
2625        return 0;
2626}
2627
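    /*
     * Synchronous BTA: register completion handlers for the BTA interrupt and
     * for the DSI error interrupts, send the BTA and wait up to 500 ms for
     * completion, then check for errors reported in the meantime.
     */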
2628static int dsi_vc_send_bta_sync(struct omap_dss_device *dssdev, int channel)
2629{
2630        struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2631        DECLARE_COMPLETION_ONSTACK(completion);
2632        int r = 0;
2633        u32 err;
2634
2635        r = dsi_register_isr_vc(dsidev, channel, dsi_completion_handler,
2636                        &completion, DSI_VC_IRQ_BTA);
2637        if (r)
2638                goto err0;
2639
2640        r = dsi_register_isr(dsidev, dsi_completion_handler, &completion,
2641                        DSI_IRQ_ERROR_MASK);
2642        if (r)
2643                goto err1;
2644
2645        r = dsi_vc_send_bta(dsidev, channel);
2646        if (r)
2647                goto err2;
2648
2649        if (wait_for_completion_timeout(&completion,
2650                                msecs_to_jiffies(500)) == 0) {
2651                DSSERR("Failed to receive BTA\n");
2652                r = -EIO;
2653                goto err2;
2654        }
2655
2656        err = dsi_get_errors(dsidev);
2657        if (err) {
2658                DSSERR("Error while sending BTA: %x\n", err);
2659                r = -EIO;
2660                goto err2;
2661        }
2662err2:
2663        dsi_unregister_isr(dsidev, dsi_completion_handler, &completion,
2664                        DSI_IRQ_ERROR_MASK);
2665err1:
2666        dsi_unregister_isr_vc(dsidev, channel, dsi_completion_handler,
2667                        &completion, DSI_VC_IRQ_BTA);
2668err0:
2669        return r;
2670}
2671
2672static inline void dsi_vc_write_long_header(struct platform_device *dsidev,
2673                int channel, u8 data_type, u16 len, u8 ecc)
2674{
2675        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2676        u32 val;
2677        u8 data_id;
2678
2679        WARN_ON(!dsi_bus_is_locked(dsidev));
2680
2681        data_id = data_type | dsi->vc[channel].vc_id << 6;
2682
2683        val = FLD_VAL(data_id, 7, 0) | FLD_VAL(len, 23, 8) |
2684                FLD_VAL(ecc, 31, 24);
2685
2686        dsi_write_reg(dsidev, DSI_VC_LONG_PACKET_HEADER(channel), val);
2687}
2688
2689static inline void dsi_vc_write_long_payload(struct platform_device *dsidev,
2690                int channel, u8 b1, u8 b2, u8 b3, u8 b4)
2691{
2692        u32 val;
2693
2694        val = b4 << 24 | b3 << 16 | b2 << 8  | b1 << 0;
2695
2696/*      DSSDBG("\twriting %02x, %02x, %02x, %02x (%#010x)\n",
2697                        b1, b2, b3, b4, val); */
2698
2699        dsi_write_reg(dsidev, DSI_VC_LONG_PACKET_PAYLOAD(channel), val);
2700}
2701
2702static int dsi_vc_send_long(struct platform_device *dsidev, int channel,
2703                u8 data_type, u8 *data, u16 len, u8 ecc)
2704{
2705        /*u32 val; */
2706        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2707        int i;
2708        u8 *p;
2709        int r = 0;
2710        u8 b1, b2, b3, b4;
2711
2712        if (dsi->debug_write)
2713                DSSDBG("dsi_vc_send_long, %d bytes\n", len);
2714
2715        /* len + header */
2716        if (dsi->vc[channel].tx_fifo_size * 32 * 4 < len + 4) {
2717                DSSERR("unable to send long packet: packet too long.\n");
2718                return -EINVAL;
2719        }
2720
2721        dsi_vc_config_source(dsidev, channel, DSI_VC_SOURCE_L4);
2722
2723        dsi_vc_write_long_header(dsidev, channel, data_type, len, ecc);
2724
2725        p = data;
2726        for (i = 0; i < len >> 2; i++) {
2727                if (dsi->debug_write)
2728                        DSSDBG("\tsending full packet %d\n", i);
2729
2730                b1 = *p++;
2731                b2 = *p++;
2732                b3 = *p++;
2733                b4 = *p++;
2734
2735                dsi_vc_write_long_payload(dsidev, channel, b1, b2, b3, b4);
2736        }
2737
2738        i = len % 4;
2739        if (i) {
2740                b1 = 0; b2 = 0; b3 = 0;
2741
2742                if (dsi->debug_write)
2743                        DSSDBG("\tsending remainder bytes %d\n", i);
2744
2745                switch (i) {
2746                case 3:
2747                        b1 = *p++;
2748                        b2 = *p++;
2749                        b3 = *p++;
2750                        break;
2751                case 2:
2752                        b1 = *p++;
2753                        b2 = *p++;
2754                        break;
2755                case 1:
2756                        b1 = *p++;
2757                        break;
2758                }
2759
2760                dsi_vc_write_long_payload(dsidev, channel, b1, b2, b3, 0);
2761        }
2762
2763        return r;
2764}
2765
2766static int dsi_vc_send_short(struct platform_device *dsidev, int channel,
2767                u8 data_type, u16 data, u8 ecc)
2768{
2769        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2770        u32 r;
2771        u8 data_id;
2772
2773        WARN_ON(!dsi_bus_is_locked(dsidev));
2774
2775        if (dsi->debug_write)
2776                DSSDBG("dsi_vc_send_short(ch%d, dt %#x, b1 %#x, b2 %#x)\n",
2777                                channel,
2778                                data_type, data & 0xff, (data >> 8) & 0xff);
2779
2780        dsi_vc_config_source(dsidev, channel, DSI_VC_SOURCE_L4);
2781
2782        if (FLD_GET(dsi_read_reg(dsidev, DSI_VC_CTRL(channel)), 16, 16)) {
2783                DSSERR("ERROR FIFO FULL, aborting transfer\n");
2784                return -EINVAL;
2785        }
2786
2787        data_id = data_type | dsi->vc[channel].vc_id << 6;
2788
2789        r = (data_id << 0) | (data << 8) | (ecc << 24);
2790
2791        dsi_write_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel), r);
2792
2793        return 0;
2794}
2795
2796static int dsi_vc_send_null(struct omap_dss_device *dssdev, int channel)
2797{
2798        struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2799
2800        return dsi_vc_send_long(dsidev, channel, MIPI_DSI_NULL_PACKET, NULL,
2801                0, 0);
2802}
2803
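    /*
     * Pick the packet type from the payload length: 0, 1 or 2 bytes go out as
     * short packets, anything longer as a long write. A zero-length DCS write
     * is invalid, since DCS always carries at least the command byte.
     */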
2804static int dsi_vc_write_nosync_common(struct platform_device *dsidev,
2805                int channel, u8 *data, int len, enum dss_dsi_content_type type)
2806{
2807        int r;
2808
2809        if (len == 0) {
2810                BUG_ON(type == DSS_DSI_CONTENT_DCS);
2811                r = dsi_vc_send_short(dsidev, channel,
2812                                MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM, 0, 0);
2813        } else if (len == 1) {
2814                r = dsi_vc_send_short(dsidev, channel,
2815                                type == DSS_DSI_CONTENT_GENERIC ?
2816                                MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM :
2817                                MIPI_DSI_DCS_SHORT_WRITE, data[0], 0);
2818        } else if (len == 2) {
2819                r = dsi_vc_send_short(dsidev, channel,
2820                                type == DSS_DSI_CONTENT_GENERIC ?
2821                                MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM :
2822                                MIPI_DSI_DCS_SHORT_WRITE_PARAM,
2823                                data[0] | (data[1] << 8), 0);
2824        } else {
2825                r = dsi_vc_send_long(dsidev, channel,
2826                                type == DSS_DSI_CONTENT_GENERIC ?
2827                                MIPI_DSI_GENERIC_LONG_WRITE :
2828                                MIPI_DSI_DCS_LONG_WRITE, data, len, 0);
2829        }
2830
2831        return r;
2832}
2833
2834static int dsi_vc_dcs_write_nosync(struct omap_dss_device *dssdev, int channel,
2835                u8 *data, int len)
2836{
2837        struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2838
2839        return dsi_vc_write_nosync_common(dsidev, channel, data, len,
2840                        DSS_DSI_CONTENT_DCS);
2841}
2842
2843static int dsi_vc_generic_write_nosync(struct omap_dss_device *dssdev, int channel,
2844                u8 *data, int len)
2845{
2846        struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2847
2848        return dsi_vc_write_nosync_common(dsidev, channel, data, len,
2849                        DSS_DSI_CONTENT_GENERIC);
2850}
2851
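    /*
     * Write followed by a synchronous BTA, so that errors reported by the
     * peripheral are caught. Unexpected data left in the RX FIFO afterwards
     * is treated as an error.
     */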
2852static int dsi_vc_write_common(struct omap_dss_device *dssdev, int channel,
2853                u8 *data, int len, enum dss_dsi_content_type type)
2854{
2855        struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2856        int r;
2857
2858        r = dsi_vc_write_nosync_common(dsidev, channel, data, len, type);
2859        if (r)
2860                goto err;
2861
2862        r = dsi_vc_send_bta_sync(dssdev, channel);
2863        if (r)
2864                goto err;
2865
2866        /* RX_FIFO_NOT_EMPTY */
2867        if (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) {
2868                DSSERR("rx fifo not empty after write, dumping data:\n");
2869                dsi_vc_flush_receive_data(dsidev, channel);
2870                r = -EIO;
2871                goto err;
2872        }
2873
2874        return 0;
2875err:
2876        DSSERR("dsi_vc_write_common(ch %d, cmd 0x%02x, len %d) failed\n",
2877                        channel, data[0], len);
2878        return r;
2879}
2880
2881static int dsi_vc_dcs_write(struct omap_dss_device *dssdev, int channel, u8 *data,
2882                int len)
2883{
2884        return dsi_vc_write_common(dssdev, channel, data, len,
2885                        DSS_DSI_CONTENT_DCS);
2886}
2887
2888static int dsi_vc_generic_write(struct omap_dss_device *dssdev, int channel, u8 *data,
2889                int len)
2890{
2891        return dsi_vc_write_common(dssdev, channel, data, len,
2892                        DSS_DSI_CONTENT_GENERIC);
2893}
2894
2895static int dsi_vc_dcs_send_read_request(struct platform_device *dsidev,
2896                int channel, u8 dcs_cmd)
2897{
2898        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2899        int r;
2900
2901        if (dsi->debug_read)
2902                DSSDBG("dsi_vc_dcs_send_read_request(ch%d, dcs_cmd %x)\n",
2903                        channel, dcs_cmd);
2904
2905        r = dsi_vc_send_short(dsidev, channel, MIPI_DSI_DCS_READ, dcs_cmd, 0);
2906        if (r) {
2907                DSSERR("dsi_vc_dcs_send_read_request(ch %d, cmd 0x%02x)"
2908                        " failed\n", channel, dcs_cmd);
2909                return r;
2910        }
2911
2912        return 0;
2913}
2914
2915static int dsi_vc_generic_send_read_request(struct platform_device *dsidev,
2916                int channel, u8 *reqdata, int reqlen)
2917{
2918        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2919        u16 data;
2920        u8 data_type;
2921        int r;
2922
2923        if (dsi->debug_read)
2924                DSSDBG("dsi_vc_generic_send_read_request(ch %d, reqlen %d)\n",
2925                        channel, reqlen);
2926
2927        if (reqlen == 0) {
2928                data_type = MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM;
2929                data = 0;
2930        } else if (reqlen == 1) {
2931                data_type = MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM;
2932                data = reqdata[0];
2933        } else if (reqlen == 2) {
2934                data_type = MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM;
2935                data = reqdata[0] | (reqdata[1] << 8);
2936        } else {
2937                BUG();
2938                return -EINVAL;
2939        }
2940
2941        r = dsi_vc_send_short(dsidev, channel, data_type, data, 0);
2942        if (r) {
2943                DSSERR("dsi_vc_generic_send_read_request(ch %d, reqlen %d)"
2944                        " failed\n", channel, reqlen);
2945                return r;
2946        }
2947
2948        return 0;
2949}
2950
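    /*
     * Parse the RX FIFO after a read request: handle ACK-with-error reports,
     * 1- and 2-byte short read responses and long read responses (whose
     * trailing 2-byte checksum is discarded). Returns the number of bytes
     * copied to buf, or a negative error code.
     */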
2951static int dsi_vc_read_rx_fifo(struct platform_device *dsidev, int channel,
2952                u8 *buf, int buflen, enum dss_dsi_content_type type)
2953{
2954        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2955        u32 val;
2956        u8 dt;
2957        int r;
2958
2959        /* RX_FIFO_NOT_EMPTY */
2960        if (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20) == 0) {
2961                DSSERR("RX fifo empty when trying to read.\n");
2962                r = -EIO;
2963                goto err;
2964        }
2965
2966        val = dsi_read_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel));
2967        if (dsi->debug_read)
2968                DSSDBG("\theader: %08x\n", val);
2969        dt = FLD_GET(val, 5, 0);
2970        if (dt == MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT) {
2971                u16 err = FLD_GET(val, 23, 8);
2972                dsi_show_rx_ack_with_err(err);
2973                r = -EIO;
2974                goto err;
2975
2976        } else if (dt == (type == DSS_DSI_CONTENT_GENERIC ?
2977                        MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE :
2978                        MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE)) {
2979                u8 data = FLD_GET(val, 15, 8);
2980                if (dsi->debug_read)
2981                        DSSDBG("\t%s short response, 1 byte: %02x\n",
2982                                type == DSS_DSI_CONTENT_GENERIC ? "GENERIC" :
2983                                "DCS", data);
2984
2985                if (buflen < 1) {
2986                        r = -EIO;
2987                        goto err;
2988                }
2989
2990                buf[0] = data;
2991
2992                return 1;
2993        } else if (dt == (type == DSS_DSI_CONTENT_GENERIC ?
2994                        MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE :
2995                        MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE)) {
2996                u16 data = FLD_GET(val, 23, 8);
2997                if (dsi->debug_read)
2998                        DSSDBG("\t%s short response, 2 byte: %04x\n",
2999                                type == DSS_DSI_CONTENT_GENERIC ? "GENERIC" :
3000                                "DCS", data);
3001
3002                if (buflen < 2) {
3003                        r = -EIO;
3004                        goto err;
3005                }
3006
3007                buf[0] = data & 0xff;
3008                buf[1] = (data >> 8) & 0xff;
3009
3010                return 2;
3011        } else if (dt == (type == DSS_DSI_CONTENT_GENERIC ?
3012                        MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE :
3013                        MIPI_DSI_RX_DCS_LONG_READ_RESPONSE)) {
3014                int w;
3015                int len = FLD_GET(val, 23, 8);
3016                if (dsi->debug_read)
3017                        DSSDBG("\t%s long response, len %d\n",
3018                                type == DSS_DSI_CONTENT_GENERIC ? "GENERIC" :
3019                                "DCS", len);
3020
3021                if (len > buflen) {
3022                        r = -EIO;
3023                        goto err;
3024                }
3025
3026                /* two byte checksum ends the packet, not included in len */
3027                for (w = 0; w < len + 2;) {
3028                        int b;
3029                        val = dsi_read_reg(dsidev,
3030                                DSI_VC_SHORT_PACKET_HEADER(channel));
3031                        if (dsi->debug_read)
3032                                DSSDBG("\t\t%02x %02x %02x %02x\n",
3033                                                (val >> 0) & 0xff,
3034                                                (val >> 8) & 0xff,
3035                                                (val >> 16) & 0xff,
3036                                                (val >> 24) & 0xff);
3037
3038                        for (b = 0; b < 4; ++b) {
3039                                if (w < len)
3040                                        buf[w] = (val >> (b * 8)) & 0xff;
3041                                /* we discard the 2 byte checksum */
3042                                ++w;
3043                        }
3044                }
3045
3046                return len;
3047        } else {
3048                DSSERR("\tunknown datatype 0x%02x\n", dt);
3049                r = -EIO;
3050                goto err;
3051        }
3052
3053err:
3054        DSSERR("dsi_vc_read_rx_fifo(ch %d type %s) failed\n", channel,
3055                type == DSS_DSI_CONTENT_GENERIC ? "GENERIC" : "DCS");
3056
3057        return r;
3058}
3059
3060static int dsi_vc_dcs_read(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
3061                u8 *buf, int buflen)
3062{
3063        struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3064        int r;
3065
3066        r = dsi_vc_dcs_send_read_request(dsidev, channel, dcs_cmd);
3067        if (r)
3068                goto err;
3069
3070        r = dsi_vc_send_bta_sync(dssdev, channel);
3071        if (r)
3072                goto err;
3073
3074        r = dsi_vc_read_rx_fifo(dsidev, channel, buf, buflen,
3075                DSS_DSI_CONTENT_DCS);
3076        if (r < 0)
3077                goto err;
3078
3079        if (r != buflen) {
3080                r = -EIO;
3081                goto err;
3082        }
3083
3084        return 0;
3085err:
3086        DSSERR("dsi_vc_dcs_read(ch %d, cmd 0x%02x) failed\n", channel, dcs_cmd);
3087        return r;
3088}
3089
3090static int dsi_vc_generic_read(struct omap_dss_device *dssdev, int channel,
3091                u8 *reqdata, int reqlen, u8 *buf, int buflen)
3092{
3093        struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3094        int r;
3095
3096        r = dsi_vc_generic_send_read_request(dsidev, channel, reqdata, reqlen);
3097        if (r)
3098                return r;
3099
3100        r = dsi_vc_send_bta_sync(dssdev, channel);
3101        if (r)
3102                return r;
3103
3104        r = dsi_vc_read_rx_fifo(dsidev, channel, buf, buflen,
3105                DSS_DSI_CONTENT_GENERIC);
3106        if (r < 0)
3107                return r;
3108
3109        if (r != buflen) {
3110                r = -EIO;
3111                return r;
3112        }
3113
3114        return 0;
3115}
3116
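    /*
     * Tell the peripheral the maximum return packet size, so that subsequent
     * reads may return up to len bytes.
     */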
3117static int dsi_vc_set_max_rx_packet_size(struct omap_dss_device *dssdev, int channel,
3118                u16 len)
3119{
3120        struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3121
3122        return dsi_vc_send_short(dsidev, channel,
3123                        MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE, len, 0);
3124}
3125
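    /*
     * Enter ULPS: quiesce and disable all virtual channels, assert the ULPS
     * request signals on the lanes in use, wait for ULPSACTIVENOT to signal
     * completion and finally put the complex I/O into the ULPS power state.
     */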
3126static int dsi_enter_ulps(struct platform_device *dsidev)
3127{
3128        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3129        DECLARE_COMPLETION_ONSTACK(completion);
3130        int r, i;
3131        unsigned mask;
3132
3133        DSSDBG("Entering ULPS\n");
3134
3135        WARN_ON(!dsi_bus_is_locked(dsidev));
3136
3137        WARN_ON(dsi->ulps_enabled);
3138
3139        if (dsi->ulps_enabled)
3140                return 0;
3141
3142        /* DDR_CLK_ALWAYS_ON */
3143        if (REG_GET(dsidev, DSI_CLK_CTRL, 13, 13)) {
3144                dsi_if_enable(dsidev, 0);
3145                REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 0, 13, 13);
3146                dsi_if_enable(dsidev, 1);
3147        }
3148
3149        dsi_sync_vc(dsidev, 0);
3150        dsi_sync_vc(dsidev, 1);
3151        dsi_sync_vc(dsidev, 2);
3152        dsi_sync_vc(dsidev, 3);
3153
3154        dsi_force_tx_stop_mode_io(dsidev);
3155
3156        dsi_vc_enable(dsidev, 0, false);
3157        dsi_vc_enable(dsidev, 1, false);
3158        dsi_vc_enable(dsidev, 2, false);
3159        dsi_vc_enable(dsidev, 3, false);
3160
3161        if (REG_GET(dsidev, DSI_COMPLEXIO_CFG2, 16, 16)) {      /* HS_BUSY */
3162                DSSERR("HS busy when enabling ULPS\n");
3163                return -EIO;
3164        }
3165
3166        if (REG_GET(dsidev, DSI_COMPLEXIO_CFG2, 17, 17)) {      /* LP_BUSY */
3167                DSSERR("LP busy when enabling ULPS\n");
3168                return -EIO;
3169        }
3170
3171        r = dsi_register_isr_cio(dsidev, dsi_completion_handler, &completion,
3172                        DSI_CIO_IRQ_ULPSACTIVENOT_ALL0);
3173        if (r)
3174                return r;
3175
3176        mask = 0;
3177
3178        for (i = 0; i < dsi->num_lanes_supported; ++i) {
3179                if (dsi->lanes[i].function == DSI_LANE_UNUSED)
3180                        continue;
3181                mask |= 1 << i;
3182        }
3183        /* Assert TxRequestEsc for data lanes and TxUlpsClk for clk lane */
3184        /* LANEx_ULPS_SIG2 */
3185        REG_FLD_MOD(dsidev, DSI_COMPLEXIO_CFG2, mask, 9, 5);
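        /*
         * Illustrative example (hypothetical lane setup): with the clock
         * lane and two data lanes in use and the remaining lanes unused,
         * the loop above builds mask = 0x7, so bits 7..5 of
         * DSI_COMPLEXIO_CFG2 are set and ULPS entry is requested on those
         * three lanes only.
         */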
3186
3187        /* flush posted write and wait for SCP interface to finish the write */
3188        dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG2);
3189
3190        if (wait_for_completion_timeout(&completion,
3191                                msecs_to_jiffies(1000)) == 0) {
3192                DSSERR("ULPS enable timeout\n");
3193                r = -EIO;
3194                goto err;
3195        }
3196
3197        dsi_unregister_isr_cio(dsidev, dsi_completion_handler, &completion,
3198                        DSI_CIO_IRQ_ULPSACTIVENOT_ALL0);
3199
3200        /* Reset LANEx_ULPS_SIG2 */
3201        REG_FLD_MOD(dsidev, DSI_COMPLEXIO_CFG2, 0, 9, 5);
3202
3203        /* flush posted write and wait for SCP interface to finish the write */
3204        dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG2);
3205
3206        dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_ULPS);
3207
3208        dsi_if_enable(dsidev, false);
3209
3210        dsi->ulps_enabled = true;
3211
3212        return 0;
3213
3214err:
3215        dsi_unregister_isr_cio(dsidev, dsi_completion_handler, &completion,
3216                        DSI_CIO_IRQ_ULPSACTIVENOT_ALL0);
3217        return r;
3218}
3219
3220static void dsi_set_lp_rx_timeout(struct platform_device *dsidev,
3221                unsigned ticks, bool x4, bool x16)
3222{
3223        unsigned long fck;
3224        unsigned long total_ticks;
3225        u32 r;
3226
3227        BUG_ON(ticks > 0x1fff);
3228
3229        /* ticks in DSI_FCK */
3230        fck = dsi_fclk_rate(dsidev);
3231
3232        r = dsi_read_reg(dsidev, DSI_TIMING2);
3233        r = FLD_MOD(r, 1, 15, 15);      /* LP_RX_TO */
3234        r = FLD_MOD(r, x16 ? 1 : 0, 14, 14);    /* LP_RX_TO_X16 */
3235        r = FLD_MOD(r, x4 ? 1 : 0, 13, 13);     /* LP_RX_TO_X4 */
3236        r = FLD_MOD(r, ticks, 12, 0);   /* LP_RX_COUNTER */
3237        dsi_write_reg(dsidev, DSI_TIMING2, r);
3238
3239        total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1);
3240
3241        DSSDBG("LP_RX_TO %lu ticks (%#x%s%s) = %lu ns\n",
3242                        total_ticks,
3243                        ticks, x4 ? " x4" : "", x16 ? " x16" : "",
3244                        (total_ticks * 1000) / (fck / 1000 / 1000));
3245}
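/*
 * Worked example for dsi_set_lp_rx_timeout() above (the other timeout
 * helpers below follow the same ticks * prescaler scheme): dsi_proto_config()
 * programs ticks = 0x1fff with both x4 and x16 enabled, i.e.
 * total_ticks = 8191 * 4 * 16 = 524224 DSI_FCK cycles.  Assuming a
 * hypothetical 170 MHz DSI functional clock, that is roughly
 * 524224 / 170 ~= 3084 us (~3.08 ms), which is the value the DSSDBG
 * line reports in nanoseconds.
 */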
3246
3247static void dsi_set_ta_timeout(struct platform_device *dsidev, unsigned ticks,
3248                bool x8, bool x16)
3249{
3250        unsigned long fck;
3251        unsigned long total_ticks;
3252        u32 r;
3253
3254        BUG_ON(ticks > 0x1fff);
3255
3256        /* ticks in DSI_FCK */
3257        fck = dsi_fclk_rate(dsidev);
3258
3259        r = dsi_read_reg(dsidev, DSI_TIMING1);
3260        r = FLD_MOD(r, 1, 31, 31);      /* TA_TO */
3261        r = FLD_MOD(r, x16 ? 1 : 0, 30, 30);    /* TA_TO_X16 */
3262        r = FLD_MOD(r, x8 ? 1 : 0, 29, 29);     /* TA_TO_X8 */
3263        r = FLD_MOD(r, ticks, 28, 16);  /* TA_TO_COUNTER */
3264        dsi_write_reg(dsidev, DSI_TIMING1, r);
3265
3266        total_ticks = ticks * (x16 ? 16 : 1) * (x8 ? 8 : 1);
3267
3268        DSSDBG("TA_TO %lu ticks (%#x%s%s) = %lu ns\n",
3269                        total_ticks,
3270                        ticks, x8 ? " x8" : "", x16 ? " x16" : "",
3271                        (total_ticks * 1000) / (fck / 1000 / 1000));
3272}
3273
3274static void dsi_set_stop_state_counter(struct platform_device *dsidev,
3275                unsigned ticks, bool x4, bool x16)
3276{
3277        unsigned long fck;
3278        unsigned long total_ticks;
3279        u32 r;
3280
3281        BUG_ON(ticks > 0x1fff);
3282
3283        /* ticks in DSI_FCK */
3284        fck = dsi_fclk_rate(dsidev);
3285
3286        r = dsi_read_reg(dsidev, DSI_TIMING1);
3287        r = FLD_MOD(r, 1, 15, 15);      /* FORCE_TX_STOP_MODE_IO */
3288        r = FLD_MOD(r, x16 ? 1 : 0, 14, 14);    /* STOP_STATE_X16_IO */
3289        r = FLD_MOD(r, x4 ? 1 : 0, 13, 13);     /* STOP_STATE_X4_IO */
3290        r = FLD_MOD(r, ticks, 12, 0);   /* STOP_STATE_COUNTER_IO */
3291        dsi_write_reg(dsidev, DSI_TIMING1, r);
3292
3293        total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1);
3294
3295        DSSDBG("STOP_STATE_COUNTER %lu ticks (%#x%s%s) = %lu ns\n",
3296                        total_ticks,
3297                        ticks, x4 ? " x4" : "", x16 ? " x16" : "",
3298                        (total_ticks * 1000) / (fck / 1000 / 1000));
3299}
3300
3301static void dsi_set_hs_tx_timeout(struct platform_device *dsidev,
3302                unsigned ticks, bool x4, bool x16)
3303{
3304        unsigned long fck;
3305        unsigned long total_ticks;
3306        u32 r;
3307
3308        BUG_ON(ticks > 0x1fff);
3309
3310        /* ticks in TxByteClkHS */
3311        fck = dsi_get_txbyteclkhs(dsidev);
3312
3313        r = dsi_read_reg(dsidev, DSI_TIMING2);
3314        r = FLD_MOD(r, 1, 31, 31);      /* HS_TX_TO */
3315        r = FLD_MOD(r, x16 ? 1 : 0, 30, 30);    /* HS_TX_TO_X16 */
3316        r = FLD_MOD(r, x4 ? 1 : 0, 29, 29);     /* HS_TX_TO_X8 (4 really) */
3317        r = FLD_MOD(r, ticks, 28, 16);  /* HS_TX_TO_COUNTER */
3318        dsi_write_reg(dsidev, DSI_TIMING2, r);
3319
3320        total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1);
3321
3322        DSSDBG("HS_TX_TO %lu ticks (%#x%s%s) = %lu ns\n",
3323                        total_ticks,
3324                        ticks, x4 ? " x4" : "", x16 ? " x16" : "",
3325                        (total_ticks * 1000) / (fck / 1000 / 1000));
3326}
3327
3328static void dsi_config_vp_num_line_buffers(struct platform_device *dsidev)
3329{
3330        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3331        int num_line_buffers;
3332
3333        if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
3334                int bpp = dsi_get_pixel_size(dsi->pix_fmt);
3335                struct omap_video_timings *timings = &dsi->timings;
3336                /*
3337                 * Don't use line buffers if width is greater than the video
3338                 * port's line buffer size
3339                 */
3340                if (dsi->line_buffer_size <= timings->x_res * bpp / 8)
3341                        num_line_buffers = 0;
3342                else
3343                        num_line_buffers = 2;
3344        } else {
3345                /* Use maximum number of line buffers in command mode */
3346                num_line_buffers = 2;
3347        }
3348
3349        /* LINE_BUFFER */
3350        REG_FLD_MOD(dsidev, DSI_CTRL, num_line_buffers, 13, 12);
3351}
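/*
 * Illustrative numbers for dsi_config_vp_num_line_buffers() above
 * (hypothetical panel): an 800 pixel wide RGB888 line takes
 * 800 * 24 / 8 = 2400 bytes.  With a 3072 byte video port line buffer,
 * 3072 > 2400 and two line buffers are used; with a 2048 byte buffer the
 * line no longer fits and LINE_BUFFER is programmed to 0.
 */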
3352
3353static void dsi_config_vp_sync_events(struct platform_device *dsidev)
3354{
3355        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3356        bool sync_end;
3357        u32 r;
3358
3359        if (dsi->vm_timings.trans_mode == OMAP_DSS_DSI_PULSE_MODE)
3360                sync_end = true;
3361        else
3362                sync_end = false;
3363
3364        r = dsi_read_reg(dsidev, DSI_CTRL);
3365        r = FLD_MOD(r, 1, 9, 9);                /* VP_DE_POL */
3366        r = FLD_MOD(r, 1, 10, 10);              /* VP_HSYNC_POL */
3367        r = FLD_MOD(r, 1, 11, 11);              /* VP_VSYNC_POL */
3368        r = FLD_MOD(r, 1, 15, 15);              /* VP_VSYNC_START */
3369        r = FLD_MOD(r, sync_end, 16, 16);       /* VP_VSYNC_END */
3370        r = FLD_MOD(r, 1, 17, 17);              /* VP_HSYNC_START */
3371        r = FLD_MOD(r, sync_end, 18, 18);       /* VP_HSYNC_END */
3372        dsi_write_reg(dsidev, DSI_CTRL, r);
3373}
3374
3375static void dsi_config_blanking_modes(struct platform_device *dsidev)
3376{
3377        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3378        int blanking_mode = dsi->vm_timings.blanking_mode;
3379        int hfp_blanking_mode = dsi->vm_timings.hfp_blanking_mode;
3380        int hbp_blanking_mode = dsi->vm_timings.hbp_blanking_mode;
3381        int hsa_blanking_mode = dsi->vm_timings.hsa_blanking_mode;
3382        u32 r;
3383
3384        /*
3385         * 0 = TX FIFO packets sent or LPS in corresponding blanking periods
3386         * 1 = Long blanking packets are sent in corresponding blanking periods
3387         */
3388        r = dsi_read_reg(dsidev, DSI_CTRL);
3389        r = FLD_MOD(r, blanking_mode, 20, 20);          /* BLANKING_MODE */
3390        r = FLD_MOD(r, hfp_blanking_mode, 21, 21);      /* HFP_BLANKING */
3391        r = FLD_MOD(r, hbp_blanking_mode, 22, 22);      /* HBP_BLANKING */
3392        r = FLD_MOD(r, hsa_blanking_mode, 23, 23);      /* HSA_BLANKING */
3393        dsi_write_reg(dsidev, DSI_CTRL, r);
3394}
3395
3396/*
3397 * According to section 'HS Command Mode Interleaving' in OMAP TRM, Scenario 3
3398 * results in maximum transition time for data and clock lanes to enter and
3399 * exit HS mode. Hence, this is the scenario where the least amount of command
3400 * mode data can be interleaved. We program the minimum amount of TXBYTECLKHS
3401 * clock cycles that can be used to interleave command mode data in HS so that
3402 * all scenarios are satisfied.
3403 */
3404static int dsi_compute_interleave_hs(int blank, bool ddr_alwon, int enter_hs,
3405                int exit_hs, int exiths_clk, int ddr_pre, int ddr_post)
3406{
3407        int transition;
3408
3409        /*
3410         * If DDR_CLK_ALWAYS_ON is set, we need to consider HS mode transition
3411         * time of data lanes only, if it isn't set, we need to consider HS
3412         * transition time of both data and clock lanes. HS transition time
3413         * of Scenario 3 is considered.
3414         */
3415        if (ddr_alwon) {
3416                transition = enter_hs + exit_hs + max(enter_hs, 2) + 1;
3417        } else {
3418                int trans1, trans2;
3419                trans1 = ddr_pre + enter_hs + exit_hs + max(enter_hs, 2) + 1;
3420                trans2 = ddr_pre + enter_hs + exiths_clk + ddr_post + ddr_pre +
3421                                enter_hs + 1;
3422                transition = max(trans1, trans2);
3423        }
3424
3425        return blank > transition ? blank - transition : 0;
3426}
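/*
 * Worked example for dsi_compute_interleave_hs() above (hypothetical
 * latencies): with enter_hs = 7, exit_hs = 5 and DDR_CLK_ALWAYS_ON set,
 * transition = 7 + 5 + max(7, 2) + 1 = 20 TXBYTECLKHS.  A blanking period
 * of 50 TXBYTECLKHS then leaves 50 - 20 = 30 cycles for interleaved HS
 * command mode traffic; a blanking period shorter than the transition
 * time leaves 0.
 */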
3427
3428/*
3429 * According to section 'LP Command Mode Interleaving' in OMAP TRM, Scenario 1
3430 * results in maximum transition time for data lanes to enter and exit LP mode.
3431 * Hence, this is the scenario where the least amount of command mode data can
3432 * be interleaved. We program the minimum amount of bytes that can be
3433 * interleaved in LP so that all scenarios are satisfied.
3434 */
3435static int dsi_compute_interleave_lp(int blank, int enter_hs, int exit_hs,
3436                int lp_clk_div, int tdsi_fclk)
3437{
3438        int trans_lp;   /* time required for a LP transition, in TXBYTECLKHS */
3439        int tlp_avail;  /* time left for interleaving commands, in CLKIN4DDR */
3440        int ttxclkesc;  /* period of LP transmit escape clock, in CLKIN4DDR */
3441        int thsbyte_clk = 16;   /* Period of TXBYTECLKHS clock, in CLKIN4DDR */
3442        int lp_inter;   /* cmd mode data that can be interleaved, in bytes */
3443
3444        /* maximum LP transition time according to Scenario 1 */
3445        trans_lp = exit_hs + max(enter_hs, 2) + 1;
3446
3447        /* CLKIN4DDR = 16 * TXBYTECLKHS */
3448        tlp_avail = thsbyte_clk * (blank - trans_lp);
3449
3450        ttxclkesc = tdsi_fclk * lp_clk_div;
3451
3452        lp_inter = ((tlp_avail - 8 * thsbyte_clk - 5 * tdsi_fclk) / ttxclkesc -
3453                        26) / 16;
3454
3455        return max(lp_inter, 0);
3456}
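/*
 * Worked example for dsi_compute_interleave_lp() above (hypothetical
 * values): enter_hs = 7 and exit_hs = 5 give
 * trans_lp = 5 + max(7, 2) + 1 = 13 TXBYTECLKHS.  With blank = 100,
 * tlp_avail = 16 * (100 - 13) = 1392 CLKIN4DDR cycles.  With tdsi_fclk = 4
 * and lp_clk_div = 1, ttxclkesc = 4, so
 * lp_inter = ((1392 - 128 - 20) / 4 - 26) / 16 = (311 - 26) / 16 = 17,
 * i.e. 17 bytes of command mode data can be interleaved in LP during that
 * blanking period.
 */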
3457
3458static void dsi_config_cmd_mode_interleaving(struct platform_device *dsidev)
3459{
3460        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3461        int blanking_mode;
3462        int hfp_blanking_mode, hbp_blanking_mode, hsa_blanking_mode;
3463        int hsa, hfp, hbp, width_bytes, bllp, lp_clk_div;
3464        int ddr_clk_pre, ddr_clk_post, enter_hs_mode_lat, exit_hs_mode_lat;
3465        int tclk_trail, ths_exit, exiths_clk;
3466        bool ddr_alwon;
3467        struct omap_video_timings *timings = &dsi->timings;
3468        int bpp = dsi_get_pixel_size(dsi->pix_fmt);
3469        int ndl = dsi->num_lanes_used - 1;
3470        int dsi_fclk_hsdiv = dsi->user_dsi_cinfo.mX[HSDIV_DSI] + 1;
3471        int hsa_interleave_hs = 0, hsa_interleave_lp = 0;
3472        int hfp_interleave_hs = 0, hfp_interleave_lp = 0;
3473        int hbp_interleave_hs = 0, hbp_interleave_lp = 0;
3474        int bl_interleave_hs = 0, bl_interleave_lp = 0;
3475        u32 r;
3476
3477        r = dsi_read_reg(dsidev, DSI_CTRL);
3478        blanking_mode = FLD_GET(r, 20, 20);
3479        hfp_blanking_mode = FLD_GET(r, 21, 21);
3480        hbp_blanking_mode = FLD_GET(r, 22, 22);
3481        hsa_blanking_mode = FLD_GET(r, 23, 23);
3482
3483        r = dsi_read_reg(dsidev, DSI_VM_TIMING1);
3484        hbp = FLD_GET(r, 11, 0);
3485        hfp = FLD_GET(r, 23, 12);
3486        hsa = FLD_GET(r, 31, 24);
3487
3488        r = dsi_read_reg(dsidev, DSI_CLK_TIMING);
3489        ddr_clk_post = FLD_GET(r, 7, 0);
3490        ddr_clk_pre = FLD_GET(r, 15, 8);
3491
3492        r = dsi_read_reg(dsidev, DSI_VM_TIMING7);
3493        exit_hs_mode_lat = FLD_GET(r, 15, 0);
3494        enter_hs_mode_lat = FLD_GET(r, 31, 16);
3495
3496        r = dsi_read_reg(dsidev, DSI_CLK_CTRL);
3497        lp_clk_div = FLD_GET(r, 12, 0);
3498        ddr_alwon = FLD_GET(r, 13, 13);
3499
3500        r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG0);
3501        ths_exit = FLD_GET(r, 7, 0);
3502
3503        r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG1);
3504        tclk_trail = FLD_GET(r, 15, 8);
3505
3506        exiths_clk = ths_exit + tclk_trail;
3507
3508        width_bytes = DIV_ROUND_UP(timings->x_res * bpp, 8);
3509        bllp = hbp + hfp + hsa + DIV_ROUND_UP(width_bytes + 6, ndl);
3510
3511        if (!hsa_blanking_mode) {
3512                hsa_interleave_hs = dsi_compute_interleave_hs(hsa, ddr_alwon,
3513                                        enter_hs_mode_lat, exit_hs_mode_lat,
3514                                        exiths_clk, ddr_clk_pre, ddr_clk_post);
3515                hsa_interleave_lp = dsi_compute_interleave_lp(hsa,
3516                                        enter_hs_mode_lat, exit_hs_mode_lat,
3517                                        lp_clk_div, dsi_fclk_hsdiv);
3518        }
3519
3520        if (!hfp_blanking_mode) {
3521                hfp_interleave_hs = dsi_compute_interleave_hs(hfp, ddr_alwon,
3522                                        enter_hs_mode_lat, exit_hs_mode_lat,
3523                                        exiths_clk, ddr_clk_pre, ddr_clk_post);
3524                hfp_interleave_lp = dsi_compute_interleave_lp(hfp,
3525                                        enter_hs_mode_lat, exit_hs_mode_lat,
3526                                        lp_clk_div, dsi_fclk_hsdiv);
3527        }
3528
3529        if (!hbp_blanking_mode) {
3530                hbp_interleave_hs = dsi_compute_interleave_hs(hbp, ddr_alwon,
3531                                        enter_hs_mode_lat, exit_hs_mode_lat,
3532                                        exiths_clk, ddr_clk_pre, ddr_clk_post);
3533
3534                hbp_interleave_lp = dsi_compute_interleave_lp(hbp,
3535                                        enter_hs_mode_lat, exit_hs_mode_lat,
3536                                        lp_clk_div, dsi_fclk_hsdiv);
3537        }
3538
3539        if (!blanking_mode) {
3540                bl_interleave_hs = dsi_compute_interleave_hs(bllp, ddr_alwon,
3541                                        enter_hs_mode_lat, exit_hs_mode_lat,
3542                                        exiths_clk, ddr_clk_pre, ddr_clk_post);
3543
3544                bl_interleave_lp = dsi_compute_interleave_lp(bllp,
3545                                        enter_hs_mode_lat, exit_hs_mode_lat,
3546                                        lp_clk_div, dsi_fclk_hsdiv);
3547        }
3548
3549        DSSDBG("DSI HS interleaving(TXBYTECLKHS) HSA %d, HFP %d, HBP %d, BLLP %d\n",
3550                hsa_interleave_hs, hfp_interleave_hs, hbp_interleave_hs,
3551                bl_interleave_hs);
3552
3553        DSSDBG("DSI LP interleaving(bytes) HSA %d, HFP %d, HBP %d, BLLP %d\n",
3554                hsa_interleave_lp, hfp_interleave_lp, hbp_interleave_lp,
3555                bl_interleave_lp);
3556
3557        r = dsi_read_reg(dsidev, DSI_VM_TIMING4);
3558        r = FLD_MOD(r, hsa_interleave_hs, 23, 16);
3559        r = FLD_MOD(r, hfp_interleave_hs, 15, 8);
3560        r = FLD_MOD(r, hbp_interleave_hs, 7, 0);
3561        dsi_write_reg(dsidev, DSI_VM_TIMING4, r);
3562
3563        r = dsi_read_reg(dsidev, DSI_VM_TIMING5);
3564        r = FLD_MOD(r, hsa_interleave_lp, 23, 16);
3565        r = FLD_MOD(r, hfp_interleave_lp, 15, 8);
3566        r = FLD_MOD(r, hbp_interleave_lp, 7, 0);
3567        dsi_write_reg(dsidev, DSI_VM_TIMING5, r);
3568
3569        r = dsi_read_reg(dsidev, DSI_VM_TIMING6);
3570        r = FLD_MOD(r, bl_interleave_hs, 31, 16);
3571        r = FLD_MOD(r, bl_interleave_lp, 15, 0);
3572        dsi_write_reg(dsidev, DSI_VM_TIMING6, r);
3573}
3574
3575static int dsi_proto_config(struct platform_device *dsidev)
3576{
3577        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3578        u32 r;
3579        int buswidth = 0;
3580
3581        dsi_config_tx_fifo(dsidev, DSI_FIFO_SIZE_32,
3582                        DSI_FIFO_SIZE_32,
3583                        DSI_FIFO_SIZE_32,
3584                        DSI_FIFO_SIZE_32);
3585
3586        dsi_config_rx_fifo(dsidev, DSI_FIFO_SIZE_32,
3587                        DSI_FIFO_SIZE_32,
3588                        DSI_FIFO_SIZE_32,
3589                        DSI_FIFO_SIZE_32);
3590
3591        /* XXX what values for the timeouts? */
3592        dsi_set_stop_state_counter(dsidev, 0x1000, false, false);
3593        dsi_set_ta_timeout(dsidev, 0x1fff, true, true);
3594        dsi_set_lp_rx_timeout(dsidev, 0x1fff, true, true);
3595        dsi_set_hs_tx_timeout(dsidev, 0x1fff, true, true);
3596
3597        switch (dsi_get_pixel_size(dsi->pix_fmt)) {
3598        case 16:
3599                buswidth = 0;
3600                break;
3601        case 18:
3602                buswidth = 1;
3603                break;
3604        case 24:
3605                buswidth = 2;
3606                break;
3607        default:
3608                BUG();
3609                return -EINVAL;
3610        }
3611
3612        r = dsi_read_reg(dsidev, DSI_CTRL);
3613        r = FLD_MOD(r, 1, 1, 1);        /* CS_RX_EN */
3614        r = FLD_MOD(r, 1, 2, 2);        /* ECC_RX_EN */
3615        r = FLD_MOD(r, 1, 3, 3);        /* TX_FIFO_ARBITRATION */
3616        r = FLD_MOD(r, 1, 4, 4);        /* VP_CLK_RATIO, always 1, see errata */
3617        r = FLD_MOD(r, buswidth, 7, 6); /* VP_DATA_BUS_WIDTH */
3618        r = FLD_MOD(r, 0, 8, 8);        /* VP_CLK_POL */
3619        r = FLD_MOD(r, 1, 14, 14);      /* TRIGGER_RESET_MODE */
3620        r = FLD_MOD(r, 1, 19, 19);      /* EOT_ENABLE */
3621        if (!dss_has_feature(FEAT_DSI_DCS_CMD_CONFIG_VC)) {
3622                r = FLD_MOD(r, 1, 24, 24);      /* DCS_CMD_ENABLE */
3623                /* DCS_CMD_CODE, 1=start, 0=continue */
3624                r = FLD_MOD(r, 0, 25, 25);
3625        }
3626
3627        dsi_write_reg(dsidev, DSI_CTRL, r);
3628
3629        dsi_config_vp_num_line_buffers(dsidev);
3630
3631        if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
3632                dsi_config_vp_sync_events(dsidev);
3633                dsi_config_blanking_modes(dsidev);
3634                dsi_config_cmd_mode_interleaving(dsidev);
3635        }
3636
3637        dsi_vc_initial_config(dsidev, 0);
3638        dsi_vc_initial_config(dsidev, 1);
3639        dsi_vc_initial_config(dsidev, 2);
3640        dsi_vc_initial_config(dsidev, 3);
3641
3642        return 0;
3643}
3644
3645static void dsi_proto_timings(struct platform_device *dsidev)
3646{
3647        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3648        unsigned tlpx, tclk_zero, tclk_prepare, tclk_trail;
3649        unsigned tclk_pre, tclk_post;
3650        unsigned ths_prepare, ths_prepare_ths_zero, ths_zero;
3651        unsigned ths_trail, ths_exit;
3652        unsigned ddr_clk_pre, ddr_clk_post;
3653        unsigned enter_hs_mode_lat, exit_hs_mode_lat;
3654        unsigned ths_eot;
3655        int ndl = dsi->num_lanes_used - 1;
3656        u32 r;
3657
3658        r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG0);
3659        ths_prepare = FLD_GET(r, 31, 24);
3660        ths_prepare_ths_zero = FLD_GET(r, 23, 16);
3661        ths_zero = ths_prepare_ths_zero - ths_prepare;
3662        ths_trail = FLD_GET(r, 15, 8);
3663        ths_exit = FLD_GET(r, 7, 0);
3664
3665        r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG1);
3666        tlpx = FLD_GET(r, 20, 16) * 2;
3667        tclk_trail = FLD_GET(r, 15, 8);
3668        tclk_zero = FLD_GET(r, 7, 0);
3669
3670        r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG2);
3671        tclk_prepare = FLD_GET(r, 7, 0);
3672
3673        /* min 8*UI */
3674        tclk_pre = 20;
3675        /* min 60ns + 52*UI */
3676        tclk_post = ns2ddr(dsidev, 60) + 26;
3677
3678        ths_eot = DIV_ROUND_UP(4, ndl);
3679
3680        ddr_clk_pre = DIV_ROUND_UP(tclk_pre + tlpx + tclk_zero + tclk_prepare,
3681                        4);
3682        ddr_clk_post = DIV_ROUND_UP(tclk_post + ths_trail, 4) + ths_eot;
3683
3684        BUG_ON(ddr_clk_pre == 0 || ddr_clk_pre > 255);
3685        BUG_ON(ddr_clk_post == 0 || ddr_clk_post > 255);
3686
3687        r = dsi_read_reg(dsidev, DSI_CLK_TIMING);
3688        r = FLD_MOD(r, ddr_clk_pre, 15, 8);
3689        r = FLD_MOD(r, ddr_clk_post, 7, 0);
3690        dsi_write_reg(dsidev, DSI_CLK_TIMING, r);
3691
3692        DSSDBG("ddr_clk_pre %u, ddr_clk_post %u\n",
3693                        ddr_clk_pre,
3694                        ddr_clk_post);
3695
3696        enter_hs_mode_lat = 1 + DIV_ROUND_UP(tlpx, 4) +
3697                DIV_ROUND_UP(ths_prepare, 4) +
3698                DIV_ROUND_UP(ths_zero + 3, 4);
3699
3700        exit_hs_mode_lat = DIV_ROUND_UP(ths_trail + ths_exit, 4) + 1 + ths_eot;
3701
3702        r = FLD_VAL(enter_hs_mode_lat, 31, 16) |
3703                FLD_VAL(exit_hs_mode_lat, 15, 0);
3704        dsi_write_reg(dsidev, DSI_VM_TIMING7, r);
3705
3706        DSSDBG("enter_hs_mode_lat %u, exit_hs_mode_lat %u\n",
3707                        enter_hs_mode_lat, exit_hs_mode_lat);
3708
3709        if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
3710                /* TODO: Implement a video mode check_timings function */
3711                int hsa = dsi->vm_timings.hsa;
3712                int hfp = dsi->vm_timings.hfp;
3713                int hbp = dsi->vm_timings.hbp;
3714                int vsa = dsi->vm_timings.vsa;
3715                int vfp = dsi->vm_timings.vfp;
3716                int vbp = dsi->vm_timings.vbp;
3717                int window_sync = dsi->vm_timings.window_sync;
3718                bool hsync_end;
3719                struct omap_video_timings *timings = &dsi->timings;
3720                int bpp = dsi_get_pixel_size(dsi->pix_fmt);
3721                int tl, t_he, width_bytes;
3722
3723                hsync_end = dsi->vm_timings.trans_mode == OMAP_DSS_DSI_PULSE_MODE;
3724                t_he = hsync_end ?
3725                        ((hsa == 0 && ndl == 3) ? 1 : DIV_ROUND_UP(4, ndl)) : 0;
3726
3727                width_bytes = DIV_ROUND_UP(timings->x_res * bpp, 8);
3728
3729                /* TL = t_HS + HSA + t_HE + HFP + ceil((WC + 6) / NDL) + HBP */
3730                tl = DIV_ROUND_UP(4, ndl) + (hsync_end ? hsa : 0) + t_he + hfp +
3731                        DIV_ROUND_UP(width_bytes + 6, ndl) + hbp;
3732
3733                DSSDBG("HBP: %d, HFP: %d, HSA: %d, TL: %d TXBYTECLKHS\n", hbp,
3734                        hfp, hsync_end ? hsa : 0, tl);
3735                DSSDBG("VBP: %d, VFP: %d, VSA: %d, VACT: %d lines\n", vbp, vfp,
3736                        vsa, timings->y_res);
3737
3738                r = dsi_read_reg(dsidev, DSI_VM_TIMING1);
3739                r = FLD_MOD(r, hbp, 11, 0);     /* HBP */
3740                r = FLD_MOD(r, hfp, 23, 12);    /* HFP */
3741                r = FLD_MOD(r, hsync_end ? hsa : 0, 31, 24);    /* HSA */
3742                dsi_write_reg(dsidev, DSI_VM_TIMING1, r);
3743
3744                r = dsi_read_reg(dsidev, DSI_VM_TIMING2);
3745                r = FLD_MOD(r, vbp, 7, 0);      /* VBP */
3746                r = FLD_MOD(r, vfp, 15, 8);     /* VFP */
3747                r = FLD_MOD(r, vsa, 23, 16);    /* VSA */
3748                r = FLD_MOD(r, window_sync, 27, 24);    /* WINDOW_SYNC */
3749                dsi_write_reg(dsidev, DSI_VM_TIMING2, r);
3750
3751                r = dsi_read_reg(dsidev, DSI_VM_TIMING3);
3752                r = FLD_MOD(r, timings->y_res, 14, 0);  /* VACT */
3753                r = FLD_MOD(r, tl, 31, 16);             /* TL */
3754                dsi_write_reg(dsidev, DSI_VM_TIMING3, r);
3755        }
3756}
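/*
 * Rough TL example for the video mode branch of dsi_proto_timings() above
 * (hypothetical timings): with ndl = 2 data lanes, RGB888 (bpp = 24) and
 * x_res = 320, width_bytes = 320 * 24 / 8 = 960 and the pixel packet takes
 * DIV_ROUND_UP(960 + 6, 2) = 483 TXBYTECLKHS.  In pulse mode with hsa = 4,
 * hfp = 10 and hbp = 8, t_HS = DIV_ROUND_UP(4, 2) = 2 and t_HE = 2, so
 * TL = 2 + 4 + 2 + 10 + 483 + 8 = 509 TXBYTECLKHS per line.
 */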
3757
3758static int dsi_configure_pins(struct omap_dss_device *dssdev,
3759                const struct omap_dsi_pin_config *pin_cfg)
3760{
3761        struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3762        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3763        int num_pins;
3764        const int *pins;
3765        struct dsi_lane_config lanes[DSI_MAX_NR_LANES];
3766        int num_lanes;
3767        int i;
3768
3769        static const enum dsi_lane_function functions[] = {
3770                DSI_LANE_CLK,
3771                DSI_LANE_DATA1,
3772                DSI_LANE_DATA2,
3773                DSI_LANE_DATA3,
3774                DSI_LANE_DATA4,
3775        };
3776
3777        num_pins = pin_cfg->num_pins;
3778        pins = pin_cfg->pins;
3779
3780        if (num_pins < 4 || num_pins > dsi->num_lanes_supported * 2
3781                        || num_pins % 2 != 0)
3782                return -EINVAL;
3783
3784        for (i = 0; i < DSI_MAX_NR_LANES; ++i)
3785                lanes[i].function = DSI_LANE_UNUSED;
3786
3787        num_lanes = 0;
3788
3789        for (i = 0; i < num_pins; i += 2) {
3790                u8 lane, pol;
3791                int dx, dy;
3792
3793                dx = pins[i];
3794                dy = pins[i + 1];
3795
3796                if (dx < 0 || dx >= dsi->num_lanes_supported * 2)
3797                        return -EINVAL;
3798
3799                if (dy < 0 || dy >= dsi->num_lanes_supported * 2)
3800                        return -EINVAL;
3801
3802                if (dx & 1) {
3803                        if (dy != dx - 1)
3804                                return -EINVAL;
3805                        pol = 1;
3806                } else {
3807                        if (dy != dx + 1)
3808                                return -EINVAL;
3809                        pol = 0;
3810                }
3811
3812                lane = dx / 2;
3813
3814                lanes[lane].function = functions[i / 2];
3815                lanes[lane].polarity = pol;
3816                num_lanes++;
3817        }
3818
3819        memcpy(dsi->lanes, lanes, sizeof(dsi->lanes));
3820        dsi->num_lanes_used = num_lanes;
3821
3822        return 0;
3823}
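/*
 * Example pin configuration for dsi_configure_pins() above (hypothetical,
 * assuming the module supports at least three lanes): pins = { 0, 1, 2, 3,
 * 5, 4 } with num_pins = 6 maps pin pair 0/1 to the clock lane with normal
 * polarity, pair 2/3 to DATA1 with normal polarity, and pair 5/4 to DATA2
 * with inverted polarity (the first pin of that pair is odd, so pol = 1).
 */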
3824
3825static int dsi_enable_video_output(struct omap_dss_device *dssdev, int channel)
3826{
3827        struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3828        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3829        enum omap_channel dispc_channel = dssdev->dispc_channel;
3830        int bpp = dsi_get_pixel_size(dsi->pix_fmt);
3831        struct omap_dss_device *out = &dsi->output;
3832        u8 data_type;
3833        u16 word_count;
3834        int r;
3835
3836        if (!out->dispc_channel_connected) {
3837                DSSERR("failed to enable display: no output/manager\n");
3838                return -ENODEV;
3839        }
3840
3841        r = dsi_display_init_dispc(dsidev, dispc_channel);
3842        if (r)
3843                goto err_init_dispc;
3844
3845        if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
3846                switch (dsi->pix_fmt) {
3847                case OMAP_DSS_DSI_FMT_RGB888:
3848                        data_type = MIPI_DSI_PACKED_PIXEL_STREAM_24;
3849                        break;
3850                case OMAP_DSS_DSI_FMT_RGB666:
3851                        data_type = MIPI_DSI_PIXEL_STREAM_3BYTE_18;
3852                        break;
3853                case OMAP_DSS_DSI_FMT_RGB666_PACKED:
3854                        data_type = MIPI_DSI_PACKED_PIXEL_STREAM_18;
3855                        break;
3856                case OMAP_DSS_DSI_FMT_RGB565:
3857                        data_type = MIPI_DSI_PACKED_PIXEL_STREAM_16;
3858                        break;
3859                default:
3860                        r = -EINVAL;
3861                        goto err_pix_fmt;
3862                }
3863
3864                dsi_if_enable(dsidev, false);
3865                dsi_vc_enable(dsidev, channel, false);
3866
3867                /* MODE, 1 = video mode */
3868                REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 1, 4, 4);
3869
3870                word_count = DIV_ROUND_UP(dsi->timings.x_res * bpp, 8);
3871
3872                dsi_vc_write_long_header(dsidev, channel, data_type,
3873                                word_count, 0);
3874
3875                dsi_vc_enable(dsidev, channel, true);
3876                dsi_if_enable(dsidev, true);
3877        }
3878
3879        r = dss_mgr_enable(dispc_channel);
3880        if (r)
3881                goto err_mgr_enable;
3882
3883        return 0;
3884
3885err_mgr_enable:
3886        if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
3887                dsi_if_enable(dsidev, false);
3888                dsi_vc_enable(dsidev, channel, false);
3889        }
3890err_pix_fmt:
3891        dsi_display_uninit_dispc(dsidev, dispc_channel);
3892err_init_dispc:
3893        return r;
3894}
3895
3896static void dsi_disable_video_output(struct omap_dss_device *dssdev, int channel)
3897{
3898        struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3899        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3900        enum omap_channel dispc_channel = dssdev->dispc_channel;
3901
3902        if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
3903                dsi_if_enable(dsidev, false);
3904                dsi_vc_enable(dsidev, channel, false);
3905
3906                /* MODE, 0 = command mode */
3907                REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 0, 4, 4);
3908
3909                dsi_vc_enable(dsidev, channel, true);
3910                dsi_if_enable(dsidev, true);
3911        }
3912
3913        dss_mgr_disable(dispc_channel);
3914
3915        dsi_display_uninit_dispc(dsidev, dispc_channel);
3916}
3917
3918static void dsi_update_screen_dispc(struct platform_device *dsidev)
3919{
3920        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3921        enum omap_channel dispc_channel = dsi->output.dispc_channel;
3922        unsigned bytespp;
3923        unsigned bytespl;
3924        unsigned bytespf;
3925        unsigned total_len;
3926        unsigned packet_payload;
3927        unsigned packet_len;
3928        u32 l;
3929        int r;
3930        const unsigned channel = dsi->update_channel;
3931        const unsigned line_buf_size = dsi->line_buffer_size;
3932        u16 w = dsi->timings.x_res;
3933        u16 h = dsi->timings.y_res;
3934
3935        DSSDBG("dsi_update_screen_dispc(%dx%d)\n", w, h);
3936
3937        dsi_vc_config_source(dsidev, channel, DSI_VC_SOURCE_VP);
3938
3939        bytespp = dsi_get_pixel_size(dsi->pix_fmt) / 8;
3940        bytespl = w * bytespp;
3941        bytespf = bytespl * h;
3942
3943        /* NOTE: packet_payload has to be equal to N * bytespl, where N is
3944         * number of lines in a packet.  See errata about VP_CLK_RATIO */
3945
3946        if (bytespf < line_buf_size)
3947                packet_payload = bytespf;
3948        else
3949                packet_payload = (line_buf_size) / bytespl * bytespl;
3950
3951        packet_len = packet_payload + 1;        /* 1 byte for DCS cmd */
3952        total_len = (bytespf / packet_payload) * packet_len;
3953
3954        if (bytespf % packet_payload)
3955                total_len += (bytespf % packet_payload) + 1;
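        /*
         * Worked example (hypothetical 320x480 RGB888 update): bytespl =
         * 320 * 3 = 960 and bytespf = 960 * 480 = 460800.  With a 3072 byte
         * line buffer, packet_payload = 3072 / 960 * 960 = 2880 (three full
         * lines), packet_len = 2881 and, as 460800 is a multiple of 2880,
         * total_len = (460800 / 2880) * 2881 = 160 * 2881 = 460960 bytes.
         */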
3956
3957        l = FLD_VAL(total_len, 23, 0); /* TE_SIZE */
3958        dsi_write_reg(dsidev, DSI_VC_TE(channel), l);
3959
3960        dsi_vc_write_long_header(dsidev, channel, MIPI_DSI_DCS_LONG_WRITE,
3961                packet_len, 0);
3962
3963        if (dsi->te_enabled)
3964                l = FLD_MOD(l, 1, 30, 30); /* TE_EN */
3965        else
3966                l = FLD_MOD(l, 1, 31, 31); /* TE_START */
3967        dsi_write_reg(dsidev, DSI_VC_TE(channel), l);
3968
3969        /* We put SIDLEMODE to no-idle for the duration of the transfer,
3970         * because DSS interrupts are not capable of waking up the CPU and the
3971         * framedone interrupt could be delayed for quite a long time. I think
3972         * the same goes for any DSS interrupts, but for some reason I have not
3973         * seen the problem anywhere else than here.
3974         */
3975        dispc_disable_sidle();
3976
3977        dsi_perf_mark_start(dsidev);
3978
3979        r = schedule_delayed_work(&dsi->framedone_timeout_work,
3980                msecs_to_jiffies(250));
3981        BUG_ON(r == 0);
3982
3983        dss_mgr_set_timings(dispc_channel, &dsi->timings);
3984
3985        dss_mgr_start_update(dispc_channel);
3986
3987        if (dsi->te_enabled) {
3988                /* disable LP_RX_TO, so that we can receive TE.  Time to wait
3989                 * for TE is longer than the timer allows */
3990                REG_FLD_MOD(dsidev, DSI_TIMING2, 0, 15, 15); /* LP_RX_TO */
3991
3992                dsi_vc_send_bta(dsidev, channel);
3993
3994#ifdef DSI_CATCH_MISSING_TE
3995                mod_timer(&dsi->te_timer, jiffies + msecs_to_jiffies(250));
3996#endif
3997        }
3998}
3999
4000#ifdef DSI_CATCH_MISSING_TE
4001static void dsi_te_timeout(unsigned long arg)
4002{
4003        DSSERR("TE not received for 250ms!\n");
4004}
4005#endif
4006
4007static void dsi_handle_framedone(struct platform_device *dsidev, int error)
4008{
4009        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4010
4011        /* SIDLEMODE back to smart-idle */
4012        dispc_enable_sidle();
4013
4014        if (dsi->te_enabled) {
4015                /* enable LP_RX_TO again after the TE */
4016                REG_FLD_MOD(dsidev, DSI_TIMING2, 1, 15, 15); /* LP_RX_TO */
4017        }
4018
4019        dsi->framedone_callback(error, dsi->framedone_data);
4020
4021        if (!error)
4022                dsi_perf_show(dsidev, "DISPC");
4023}
4024
4025static void dsi_framedone_timeout_work_callback(struct work_struct *work)
4026{
4027        struct dsi_data *dsi = container_of(work, struct dsi_data,
4028                        framedone_timeout_work.work);
4029        /* XXX While extremely unlikely, we could get FRAMEDONE interrupt after
4030         * 250ms which would conflict with this timeout work. What should be
4031         * done is first cancel the transfer on the HW, and then cancel the
4032         * possibly scheduled framedone work. However, cancelling the transfer
4033         * on the HW is buggy, and would probably require resetting the whole
4034         * DSI */
4035
4036        DSSERR("Framedone not received for 250ms!\n");
4037
4038        dsi_handle_framedone(dsi->pdev, -ETIMEDOUT);
4039}
4040
4041static void dsi_framedone_irq_callback(void *data)
4042{
4043        struct platform_device *dsidev = (struct platform_device *) data;
4044        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4045
4046        /* Note: We get FRAMEDONE when DISPC has finished sending pixels and
4047         * turns itself off. However, DSI still has the pixels in its buffers,
4048         * and is sending the data.
4049         */
4050
4051        cancel_delayed_work(&dsi->framedone_timeout_work);
4052
4053        dsi_handle_framedone(dsidev, 0);
4054}
4055
4056static int dsi_update(struct omap_dss_device *dssdev, int channel,
4057                void (*callback)(int, void *), void *data)
4058{
4059        struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4060        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4061        u16 dw, dh;
4062
4063        dsi_perf_mark_setup(dsidev);
4064
4065        dsi->update_channel = channel;
4066
4067        dsi->framedone_callback = callback;
4068        dsi->framedone_data = data;
4069
4070        dw = dsi->timings.x_res;
4071        dh = dsi->timings.y_res;
4072
4073#ifdef DSI_PERF_MEASURE
4074        dsi->update_bytes = dw * dh *
4075                dsi_get_pixel_size(dsi->pix_fmt) / 8;
4076#endif
4077        dsi_update_screen_dispc(dsidev);
4078
4079        return 0;
4080}
4081
4082/* Display funcs */
4083
4084static int dsi_configure_dispc_clocks(struct platform_device *dsidev)
4085{
4086        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4087        struct dispc_clock_info dispc_cinfo;
4088        int r;
4089        unsigned long fck;
4090
4091        fck = dsi_get_pll_hsdiv_dispc_rate(dsidev);
4092
4093        dispc_cinfo.lck_div = dsi->user_dispc_cinfo.lck_div;
4094        dispc_cinfo.pck_div = dsi->user_dispc_cinfo.pck_div;
4095
4096        r = dispc_calc_clock_rates(fck, &dispc_cinfo);
4097        if (r) {
4098                DSSERR("Failed to calc dispc clocks\n");
4099                return r;
4100        }
4101
4102        dsi->mgr_config.clock_info = dispc_cinfo;
4103
4104        return 0;
4105}
4106
4107static int dsi_display_init_dispc(struct platform_device *dsidev,
4108                enum omap_channel channel)
4109{
4110        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4111        int r;
4112
4113        dss_select_lcd_clk_source(channel, dsi->module_id == 0 ?
4114                        OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC :
4115                        OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC);
4116
4117        if (dsi->mode == OMAP_DSS_DSI_CMD_MODE) {
4118                r = dss_mgr_register_framedone_handler(channel,
4119                                dsi_framedone_irq_callback, dsidev);
4120                if (r) {
4121                        DSSERR("can't register FRAMEDONE handler\n");
4122                        goto err;
4123                }
4124
4125                dsi->mgr_config.stallmode = true;
4126                dsi->mgr_config.fifohandcheck = true;
4127        } else {
4128                dsi->mgr_config.stallmode = false;
4129                dsi->mgr_config.fifohandcheck = false;
4130        }
4131
4132        /*
4133         * override interlace, logic level and edge related parameters in
4134         * omap_video_timings with default values
4135         */
4136        dsi->timings.interlace = false;
4137        dsi->timings.hsync_level = OMAPDSS_SIG_ACTIVE_HIGH;
4138        dsi->timings.vsync_level = OMAPDSS_SIG_ACTIVE_HIGH;
4139        dsi->timings.data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
4140        dsi->timings.de_level = OMAPDSS_SIG_ACTIVE_HIGH;
4141        dsi->timings.sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE;
4142
4143        dss_mgr_set_timings(channel, &dsi->timings);
4144
4145        r = dsi_configure_dispc_clocks(dsidev);
4146        if (r)
4147                goto err1;
4148
4149        dsi->mgr_config.io_pad_mode = DSS_IO_PAD_MODE_BYPASS;
4150        dsi->mgr_config.video_port_width =
4151                        dsi_get_pixel_size(dsi->pix_fmt);
4152        dsi->mgr_config.lcden_sig_polarity = 0;
4153
4154        dss_mgr_set_lcd_config(channel, &dsi->mgr_config);
4155
4156        return 0;
4157err1:
4158        if (dsi->mode == OMAP_DSS_DSI_CMD_MODE)
4159                dss_mgr_unregister_framedone_handler(channel,
4160                                dsi_framedone_irq_callback, dsidev);
4161err:
4162        dss_select_lcd_clk_source(channel, OMAP_DSS_CLK_SRC_FCK);
4163        return r;
4164}
4165
4166static void dsi_display_uninit_dispc(struct platform_device *dsidev,
4167                enum omap_channel channel)
4168{
4169        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4170
4171        if (dsi->mode == OMAP_DSS_DSI_CMD_MODE)
4172                dss_mgr_unregister_framedone_handler(channel,
4173                                dsi_framedone_irq_callback, dsidev);
4174
4175        dss_select_lcd_clk_source(channel, OMAP_DSS_CLK_SRC_FCK);
4176}
4177
4178static int dsi_configure_dsi_clocks(struct platform_device *dsidev)
4179{
4180        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4181        struct dss_pll_clock_info cinfo;
4182        int r;
4183
4184        cinfo = dsi->user_dsi_cinfo;
4185
4186        r = dss_pll_set_config(&dsi->pll, &cinfo);
4187        if (r) {
4188                DSSERR("Failed to set dsi clocks\n");
4189                return r;
4190        }
4191
4192        return 0;
4193}
4194
4195static int dsi_display_init_dsi(struct platform_device *dsidev)
4196{
4197        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4198        int r;
4199
4200        r = dss_pll_enable(&dsi->pll);
4201        if (r)
4202                goto err0;
4203
4204        r = dsi_configure_dsi_clocks(dsidev);
4205        if (r)
4206                goto err1;
4207
4208        dss_select_dsi_clk_source(dsi->module_id, dsi->module_id == 0 ?
4209                        OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI :
4210                        OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI);
4211
4212        DSSDBG("PLL OK\n");
4213
4214        r = dsi_cio_init(dsidev);
4215        if (r)
4216                goto err2;
4217
4218        _dsi_print_reset_status(dsidev);
4219
4220        dsi_proto_timings(dsidev);
4221        dsi_set_lp_clk_divisor(dsidev);
4222
4223        _dsi_print_reset_status(dsidev);
4225
4226        r = dsi_proto_config(dsidev);
4227        if (r)
4228                goto err3;
4229
4230        /* enable interface */
4231        dsi_vc_enable(dsidev, 0, 1);
4232        dsi_vc_enable(dsidev, 1, 1);
4233        dsi_vc_enable(dsidev, 2, 1);
4234        dsi_vc_enable(dsidev, 3, 1);
4235        dsi_if_enable(dsidev, 1);
4236        dsi_force_tx_stop_mode_io(dsidev);
4237
4238        return 0;
4239err3:
4240        dsi_cio_uninit(dsidev);
4241err2:
4242        dss_select_dsi_clk_source(dsi->module_id, OMAP_DSS_CLK_SRC_FCK);
4243err1:
4244        dss_pll_disable(&dsi->pll);
4245err0:
4246        return r;
4247}
4248
4249static void dsi_display_uninit_dsi(struct platform_device *dsidev,
4250                bool disconnect_lanes, bool enter_ulps)
4251{
4252        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4253
4254        if (enter_ulps && !dsi->ulps_enabled)
4255                dsi_enter_ulps(dsidev);
4256
4257        /* disable interface */
4258        dsi_if_enable(dsidev, 0);
4259        dsi_vc_enable(dsidev, 0, 0);
4260        dsi_vc_enable(dsidev, 1, 0);
4261        dsi_vc_enable(dsidev, 2, 0);
4262        dsi_vc_enable(dsidev, 3, 0);
4263
4264        dss_select_dsi_clk_source(dsi->module_id, OMAP_DSS_CLK_SRC_FCK);
4265        dsi_cio_uninit(dsidev);
4266        dsi_pll_uninit(dsidev, disconnect_lanes);
4267}
4268
4269static int dsi_display_enable(struct omap_dss_device *dssdev)
4270{
4271        struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4272        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4273        int r = 0;
4274
4275        DSSDBG("dsi_display_enable\n");
4276
4277        WARN_ON(!dsi_bus_is_locked(dsidev));
4278
4279        mutex_lock(&dsi->lock);
4280
4281        r = dsi_runtime_get(dsidev);
4282        if (r)
4283                goto err_get_dsi;
4284
4285        _dsi_initialize_irq(dsidev);
4286
4287        r = dsi_display_init_dsi(dsidev);
4288        if (r)
4289                goto err_init_dsi;
4290
4291        mutex_unlock(&dsi->lock);
4292
4293        return 0;
4294
4295err_init_dsi:
4296        dsi_runtime_put(dsidev);
4297err_get_dsi:
4298        mutex_unlock(&dsi->lock);
4299        DSSDBG("dsi_display_enable FAILED\n");
4300        return r;
4301}
4302
4303static void dsi_display_disable(struct omap_dss_device *dssdev,
4304                bool disconnect_lanes, bool enter_ulps)
4305{
4306        struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4307        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4308
4309        DSSDBG("dsi_display_disable\n");
4310
4311        WARN_ON(!dsi_bus_is_locked(dsidev));
4312
4313        mutex_lock(&dsi->lock);
4314
4315        dsi_sync_vc(dsidev, 0);
4316        dsi_sync_vc(dsidev, 1);
4317        dsi_sync_vc(dsidev, 2);
4318        dsi_sync_vc(dsidev, 3);
4319
4320        dsi_display_uninit_dsi(dsidev, disconnect_lanes, enter_ulps);
4321
4322        dsi_runtime_put(dsidev);
4323
4324        mutex_unlock(&dsi->lock);
4325}
4326
4327static int dsi_enable_te(struct omap_dss_device *dssdev, bool enable)
4328{
4329        struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4330        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4331
4332        dsi->te_enabled = enable;
4333        return 0;
4334}
4335
4336#ifdef PRINT_VERBOSE_VM_TIMINGS
4337static void print_dsi_vm(const char *str,
4338                const struct omap_dss_dsi_videomode_timings *t)
4339{
4340        unsigned long byteclk = t->hsclk / 4;
4341        int bl, wc, pps, tot;
4342
4343        wc = DIV_ROUND_UP(t->hact * t->bitspp, 8);
4344        pps = DIV_ROUND_UP(wc + 6, t->ndl); /* pixel packet size */
4345        bl = t->hss + t->hsa + t->hse + t->hbp + t->hfp;
4346        tot = bl + pps;
4347
4348#define TO_DSI_T(x) ((u32)div64_u64((u64)x * 1000000000llu, byteclk))
4349
4350        pr_debug("%s bck %lu, %u/%u/%u/%u/%u/%u = %u+%u = %u, "
4351                        "%u/%u/%u/%u/%u/%u = %u + %u = %u\n",
4352                        str,
4353                        byteclk,
4354                        t->hss, t->hsa, t->hse, t->hbp, pps, t->hfp,
4355                        bl, pps, tot,
4356                        TO_DSI_T(t->hss),
4357                        TO_DSI_T(t->hsa),
4358                        TO_DSI_T(t->hse),
4359                        TO_DSI_T(t->hbp),
4360                        TO_DSI_T(pps),
4361                        TO_DSI_T(t->hfp),
4362
4363                        TO_DSI_T(bl),
4364                        TO_DSI_T(pps),
4365
4366                        TO_DSI_T(tot));
4367#undef TO_DSI_T
4368}
4369
4370static void print_dispc_vm(const char *str, const struct omap_video_timings *t)
4371{
4372        unsigned long pck = t->pixelclock;
4373        int hact, bl, tot;
4374
4375        hact = t->x_res;
4376        bl = t->hsw + t->hbp + t->hfp;
4377        tot = hact + bl;
4378
4379#define TO_DISPC_T(x) ((u32)div64_u64((u64)x * 1000000000llu, pck))
4380
4381        pr_debug("%s pck %lu, %u/%u/%u/%u = %u+%u = %u, "
4382                        "%u/%u/%u/%u = %u + %u = %u\n",
4383                        str,
4384                        pck,
4385                        t->hsw, t->hbp, hact, t->hfp,
4386                        bl, hact, tot,
4387                        TO_DISPC_T(t->hsw),
4388                        TO_DISPC_T(t->hbp),
4389                        TO_DISPC_T(hact),
4390                        TO_DISPC_T(t->hfp),
4391                        TO_DISPC_T(bl),
4392                        TO_DISPC_T(hact),
4393                        TO_DISPC_T(tot));
4394#undef TO_DISPC_T
4395}
4396
4397/* note: this is not quite accurate */
4398static void print_dsi_dispc_vm(const char *str,
4399                const struct omap_dss_dsi_videomode_timings *t)
4400{
4401        struct omap_video_timings vm = { 0 };
4402        unsigned long byteclk = t->hsclk / 4;
4403        unsigned long pck;
4404        u64 dsi_tput;
4405        int dsi_hact, dsi_htot;
4406
4407        dsi_tput = (u64)byteclk * t->ndl * 8;
4408        pck = (u32)div64_u64(dsi_tput, t->bitspp);
4409        dsi_hact = DIV_ROUND_UP(DIV_ROUND_UP(t->hact * t->bitspp, 8) + 6, t->ndl);
4410        dsi_htot = t->hss + t->hsa + t->hse + t->hbp + dsi_hact + t->hfp;
4411
4412        vm.pixelclock = pck;
4413        vm.hsw = div64_u64((u64)(t->hsa + t->hse) * pck, byteclk);
4414        vm.hbp = div64_u64((u64)t->hbp * pck, byteclk);
4415        vm.hfp = div64_u64((u64)t->hfp * pck, byteclk);
4416        vm.x_res = t->hact;
4417
4418        print_dispc_vm(str, &vm);
4419}
4420#endif /* PRINT_VERBOSE_VM_TIMINGS */
4421
4422static bool dsi_cm_calc_dispc_cb(int lckd, int pckd, unsigned long lck,
4423                unsigned long pck, void *data)
4424{
4425        struct dsi_clk_calc_ctx *ctx = data;
4426        struct omap_video_timings *t = &ctx->dispc_vm;
4427
4428        ctx->dispc_cinfo.lck_div = lckd;
4429        ctx->dispc_cinfo.pck_div = pckd;
4430        ctx->dispc_cinfo.lck = lck;
4431        ctx->dispc_cinfo.pck = pck;
4432
4433        *t = *ctx->config->timings;
4434        t->pixelclock = pck;
4435        t->x_res = ctx->config->timings->x_res;
4436        t->y_res = ctx->config->timings->y_res;
4437        t->hsw = t->hfp = t->hbp = t->vsw = 1;
4438        t->vfp = t->vbp = 0;
4439
4440        return true;
4441}
4442
4443static bool dsi_cm_calc_hsdiv_cb(int m_dispc, unsigned long dispc,
4444                void *data)
4445{
4446        struct dsi_clk_calc_ctx *ctx = data;
4447
4448        ctx->dsi_cinfo.mX[HSDIV_DISPC] = m_dispc;
4449        ctx->dsi_cinfo.clkout[HSDIV_DISPC] = dispc;
4450
4451        return dispc_div_calc(dispc, ctx->req_pck_min, ctx->req_pck_max,
4452                        dsi_cm_calc_dispc_cb, ctx);
4453}
4454
4455static bool dsi_cm_calc_pll_cb(int n, int m, unsigned long fint,
4456                unsigned long clkdco, void *data)
4457{
4458        struct dsi_clk_calc_ctx *ctx = data;
4459
4460        ctx->dsi_cinfo.n = n;
4461        ctx->dsi_cinfo.m = m;
4462        ctx->dsi_cinfo.fint = fint;
4463        ctx->dsi_cinfo.clkdco = clkdco;
4464
4465        return dss_pll_hsdiv_calc(ctx->pll, clkdco, ctx->req_pck_min,
4466                        dss_feat_get_param_max(FEAT_PARAM_DSS_FCK),
4467                        dsi_cm_calc_hsdiv_cb, ctx);
4468}
4469
4470static bool dsi_cm_calc(struct dsi_data *dsi,
4471                const struct omap_dss_dsi_config *cfg,
4472                struct dsi_clk_calc_ctx *ctx)
4473{
4474        unsigned long clkin;
4475        int bitspp, ndl;
4476        unsigned long pll_min, pll_max;
4477        unsigned long pck, txbyteclk;
4478
4479        clkin = clk_get_rate(dsi->pll.clkin);
4480        bitspp = dsi_get_pixel_size(cfg->pixel_format);
4481        ndl = dsi->num_lanes_used - 1;
4482
4483        /*
4484         * Here we should calculate minimum txbyteclk to be able to send the
4485         * frame in time, and also to handle TE. That's not very simple, though,
4486         * especially as we go to LP between each pixel packet due to HW
4487         * "feature". So let's just estimate very roughly and multiply by 1.5.
4488         */
4489        pck = cfg->timings->pixelclock;
4490        pck = pck * 3 / 2;
4491        txbyteclk = pck * bitspp / 8 / ndl;
4492
4493        memset(ctx, 0, sizeof(*ctx));
4494        ctx->dsidev = dsi->pdev;
4495        ctx->pll = &dsi->pll;
4496        ctx->config = cfg;
4497        ctx->req_pck_min = pck;
4498        ctx->req_pck_nom = pck;
4499        ctx->req_pck_max = pck * 3 / 2;
4500
4501        pll_min = max(cfg->hs_clk_min * 4, txbyteclk * 4 * 4);
4502        pll_max = cfg->hs_clk_max * 4;
4503
4504        return dss_pll_calc(ctx->pll, clkin,
4505                        pll_min, pll_max,
4506                        dsi_cm_calc_pll_cb, ctx);
4507}
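/*
 * Rough example of the dsi_cm_calc() estimate above (hypothetical panel):
 * for a nominal pixel clock of 30 MHz, pck is bumped by 1.5x to 45 MHz.
 * With RGB888 (bitspp = 24) and ndl = 2 data lanes,
 * txbyteclk = 45 MHz * 24 / 8 / 2 = 67.5 MHz, so the PLL search window
 * becomes pll_min = max(hs_clk_min * 4, 67.5 MHz * 16 = 1.08 GHz) and
 * pll_max = hs_clk_max * 4.
 */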
4508
4509static bool dsi_vm_calc_blanking(struct dsi_clk_calc_ctx *ctx)
4510{
4511        struct dsi_data *dsi = dsi_get_dsidrv_data(ctx->dsidev);
4512        const struct omap_dss_dsi_config *cfg = ctx->config;
4513        int bitspp = dsi_get_pixel_size(cfg->pixel_format);
4514        int ndl = dsi->num_lanes_used - 1;
4515        unsigned long hsclk = ctx->dsi_cinfo.clkdco / 4;
4516        unsigned long byteclk = hsclk / 4;
4517
4518        unsigned long dispc_pck, req_pck_min, req_pck_nom, req_pck_max;
4519        int xres;
4520        int panel_htot, panel_hbl; /* pixels */
4521        int dispc_htot, dispc_hbl; /* pixels */
4522        int dsi_htot, dsi_hact, dsi_hbl, hss, hse; /* byteclks */
4523        int hfp, hsa, hbp;
4524        const struct omap_video_timings *req_vm;
4525        struct omap_video_timings *dispc_vm;
4526        struct omap_dss_dsi_videomode_timings *dsi_vm;
4527        u64 dsi_tput, dispc_tput;
4528
4529        dsi_tput = (u64)byteclk * ndl * 8;
4530
4531        req_vm = cfg->timings;
4532        req_pck_min = ctx->req_pck_min;
4533        req_pck_max = ctx->req_pck_max;
4534        req_pck_nom = ctx->req_pck_nom;
4535
4536        dispc_pck = ctx->dispc_cinfo.pck;
4537        dispc_tput = (u64)dispc_pck * bitspp;
4538
4539        xres = req_vm->x_res;
4540
4541        panel_hbl = req_vm->hfp + req_vm->hbp + req_vm->hsw;
4542        panel_htot = xres + panel_hbl;
4543
4544        dsi_hact = DIV_ROUND_UP(DIV_ROUND_UP(xres * bitspp, 8) + 6, ndl);
4545
4546        /*
4547         * When there are no line buffers, DISPC and DSI must have the
4548         * same tput. Otherwise DISPC tput needs to be higher than DSI's.
4549         */
4550        if (dsi->line_buffer_size < xres * bitspp / 8) {
4551                if (dispc_tput != dsi_tput)
4552                        return false;
4553        } else {
4554                if (dispc_tput < dsi_tput)
4555                        return false;
4556        }
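        /*
         * Throughput example (hypothetical clocks): byteclk = 112.5 MHz and
         * ndl = 2 give dsi_tput = 112.5 MHz * 2 * 8 = 1.8 Gbit/s.  A DISPC
         * pixel clock of 75 MHz at bitspp = 24 gives dispc_tput = 1.8 Gbit/s
         * as well, which satisfies the equal-throughput rule that applies
         * when a full line does not fit in the line buffer.
         */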
4557
4558        /* DSI tput must be over the min requirement */
4559        if (dsi_tput < (u64)bitspp * req_pck_min)
4560                return false;
4561
4562        /* In non-burst mode, DSI tput must be below the max requirement. */
4563        if (cfg->trans_mode != OMAP_DSS_DSI_BURST_MODE) {
4564                if (dsi_tput > (u64)bitspp * req_pck_max)
4565                        return false;
4566        }
4567
4568        hss = DIV_ROUND_UP(4, ndl);
4569
4570        if (cfg->trans_mode == OMAP_DSS_DSI_PULSE_MODE) {
4571                if (ndl == 3 && req_vm->hsw == 0)
4572                        hse = 1;
4573                else
4574                        hse = DIV_ROUND_UP(4, ndl);
4575        } else {
4576                hse = 0;
4577        }
4578
4579        /* DSI htot to match the panel's nominal pck */
4580        dsi_htot = div64_u64((u64)panel_htot * byteclk, req_pck_nom);
4581
4582        /* fail if there would be no time for blanking */
4583        if (dsi_htot < hss + hse + dsi_hact)
4584                return false;
4585
4586        /* total DSI blanking needed to achieve panel's TL */
4587        dsi_hbl = dsi_htot - dsi_hact;
4588
4589        /* DISPC htot to match the DSI TL */
4590        dispc_htot = div64_u64((u64)dsi_htot * dispc_pck, byteclk);
4591
4592        /* verify that the DSI and DISPC TLs are the same */
4593        if ((u64)dsi_htot * dispc_pck != (u64)dispc_htot * byteclk)
4594                return false;
4595
4596        dispc_hbl = dispc_htot - xres;
4597
4598        /* setup DSI videomode */
4599
4600        dsi_vm = &ctx->dsi_vm;
4601        memset(dsi_vm, 0, sizeof(*dsi_vm));
4602
4603        dsi_vm->hsclk = hsclk;
4604
4605        dsi_vm->ndl = ndl;
4606        dsi_vm->bitspp = bitspp;
4607
4608        if (cfg->trans_mode != OMAP_DSS_DSI_PULSE_MODE) {
4609                hsa = 0;
4610        } else if (ndl == 3 && req_vm->hsw == 0) {
4611                hsa = 0;
4612        } else {
4613                hsa = div64_u64((u64)req_vm->hsw * byteclk, req_pck_nom);
4614                hsa = max(hsa - hse, 1);
4615        }
4616
4617        hbp = div64_u64((u64)req_vm->hbp * byteclk, req_pck_nom);
4618        hbp = max(hbp, 1);
4619
4620        hfp = dsi_hbl - (hss + hsa + hse + hbp);
4621        if (hfp < 1) {
4622                int t;
4623                /* we need to take cycles from hbp */
4624
4625                t = 1 - hfp;
4626                hbp = max(hbp - t, 1);
4627                hfp = dsi_hbl - (hss + hsa + hse + hbp);
4628
4629                if (hfp < 1 && hsa > 0) {
4630                        /* we need to take cycles from hsa */
4631                        t = 1 - hfp;
4632                        hsa = max(hsa - t, 1);
4633                        hfp = dsi_hbl - (hss + hsa + hse + hbp);
4634                }
4635        }
4636
4637        if (hfp < 1)
4638                return false;
4639
4640        dsi_vm->hss = hss;
4641        dsi_vm->hsa = hsa;
4642        dsi_vm->hse = hse;
4643        dsi_vm->hbp = hbp;
4644        dsi_vm->hact = xres;
4645        dsi_vm->hfp = hfp;
4646
4647        dsi_vm->vsa = req_vm->vsw;
4648        dsi_vm->vbp = req_vm->vbp;
4649        dsi_vm->vact = req_vm->y_res;
4650        dsi_vm->vfp = req_vm->vfp;
4651
4652        dsi_vm->trans_mode = cfg->trans_mode;
4653
4654        dsi_vm->blanking_mode = 0;
4655        dsi_vm->hsa_blanking_mode = 1;
4656        dsi_vm->hfp_blanking_mode = 1;
4657        dsi_vm->hbp_blanking_mode = 1;
4658
4659        dsi_vm->ddr_clk_always_on = cfg->ddr_clk_always_on;
4660        dsi_vm->window_sync = 4;
4661
4662        /* setup DISPC videomode */
4663
4664        dispc_vm = &ctx->dispc_vm;
4665        *dispc_vm = *req_vm;
4666        dispc_vm->pixelclock = dispc_pck;
4667
4668        if (cfg->trans_mode == OMAP_DSS_DSI_PULSE_MODE) {
4669                hsa = div64_u64((u64)req_vm->hsw * dispc_pck,
4670                                req_pck_nom);
4671                hsa = max(hsa, 1);
4672        } else {
4673                hsa = 1;
4674        }
4675
4676        hbp = div64_u64((u64)req_vm->hbp * dispc_pck, req_pck_nom);
4677        hbp = max(hbp, 1);
4678
4679        hfp = dispc_hbl - hsa - hbp;
4680        if (hfp < 1) {
4681                int t;
4682                /* we need to take cycles from hbp */
4683
4684                t = 1 - hfp;
4685                hbp = max(hbp - t, 1);
4686                hfp = dispc_hbl - hsa - hbp;
4687
4688                if (hfp < 1) {
4689                        /* we need to take cycles from hsa */
4690                        t = 1 - hfp;
4691                        hsa = max(hsa - t, 1);
4692                        hfp = dispc_hbl - hsa - hbp;
4693                }
4694        }
4695
4696        if (hfp < 1)
4697                return false;
4698
4699        dispc_vm->hfp = hfp;
4700        dispc_vm->hsw = hsa;
4701        dispc_vm->hbp = hbp;
4702
4703        return true;
4704}
4705
4706
4707static bool dsi_vm_calc_dispc_cb(int lckd, int pckd, unsigned long lck,
4708                unsigned long pck, void *data)
4709{
4710        struct dsi_clk_calc_ctx *ctx = data;
4711
4712        ctx->dispc_cinfo.lck_div = lckd;
4713        ctx->dispc_cinfo.pck_div = pckd;
4714        ctx->dispc_cinfo.lck = lck;
4715        ctx->dispc_cinfo.pck = pck;
4716
4717        if (!dsi_vm_calc_blanking(ctx))
4718                return false;
4719
4720#ifdef PRINT_VERBOSE_VM_TIMINGS
4721        print_dispc_vm("dispc", &ctx->dispc_vm);
4722        print_dsi_vm("dsi  ", &ctx->dsi_vm);
4723        print_dispc_vm("req  ", ctx->config->timings);
4724        print_dsi_dispc_vm("act  ", &ctx->dsi_vm);
4725#endif
4726
4727        return true;
4728}
4729
4730static bool dsi_vm_calc_hsdiv_cb(int m_dispc, unsigned long dispc,
4731                void *data)
4732{
4733        struct dsi_clk_calc_ctx *ctx = data;
4734        unsigned long pck_max;
4735
4736        ctx->dsi_cinfo.mX[HSDIV_DISPC] = m_dispc;
4737        ctx->dsi_cinfo.clkout[HSDIV_DISPC] = dispc;
4738
4739        /*
4740         * In burst mode we can let the dispc pck be arbitrarily high, but it
4741         * limits our scaling abilities. So for now, don't aim too high.
4742         */
4743
4744        if (ctx->config->trans_mode == OMAP_DSS_DSI_BURST_MODE)
4745                pck_max = ctx->req_pck_max + 10000000;
4746        else
4747                pck_max = ctx->req_pck_max;
4748
4749        return dispc_div_calc(dispc, ctx->req_pck_min, pck_max,
4750                        dsi_vm_calc_dispc_cb, ctx);
4751}
4752
4753static bool dsi_vm_calc_pll_cb(int n, int m, unsigned long fint,
4754                unsigned long clkdco, void *data)
4755{
4756        struct dsi_clk_calc_ctx *ctx = data;
4757
4758        ctx->dsi_cinfo.n = n;
4759        ctx->dsi_cinfo.m = m;
4760        ctx->dsi_cinfo.fint = fint;
4761        ctx->dsi_cinfo.clkdco = clkdco;
4762
4763        return dss_pll_hsdiv_calc(ctx->pll, clkdco, ctx->req_pck_min,
4764                        dss_feat_get_param_max(FEAT_PARAM_DSS_FCK),
4765                        dsi_vm_calc_hsdiv_cb, ctx);
4766}
4767
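/*
 * Video mode clock search: dss_pll_calc() iterates the PLL N/M dividers and
 * calls dsi_vm_calc_pll_cb(), which iterates the HSDIV divider via
 * dsi_vm_calc_hsdiv_cb() and the DISPC dividers via dsi_vm_calc_dispc_cb().
 * The first combination for which dsi_vm_calc_blanking() can build
 * consistent DSI and DISPC timings is accepted.
 */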
4768static bool dsi_vm_calc(struct dsi_data *dsi,
4769                const struct omap_dss_dsi_config *cfg,
4770                struct dsi_clk_calc_ctx *ctx)
4771{
4772        const struct omap_video_timings *t = cfg->timings;
4773        unsigned long clkin;
4774        unsigned long pll_min;
4775        unsigned long pll_max;
4776        int ndl = dsi->num_lanes_used - 1;
4777        int bitspp = dsi_get_pixel_size(cfg->pixel_format);
4778        unsigned long byteclk_min;
4779
4780        clkin = clk_get_rate(dsi->pll.clkin);
4781
4782        memset(ctx, 0, sizeof(*ctx));
4783        ctx->dsidev = dsi->pdev;
4784        ctx->pll = &dsi->pll;
4785        ctx->config = cfg;
4786
4787        /* these limits should come from the panel driver */
4788        ctx->req_pck_min = t->pixelclock - 1000;
4789        ctx->req_pck_nom = t->pixelclock;
4790        ctx->req_pck_max = t->pixelclock + 1000;
4791
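            /*
             * The PLL output (clkdco) appears to be 4x the HS/DDR clock and
             * 16x the TX byte clock, hence the factors of four below when
             * turning the clock limits into PLL limits.
             */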
4792        byteclk_min = div64_u64((u64)ctx->req_pck_min * bitspp, ndl * 8);
4793        pll_min = max(cfg->hs_clk_min * 4, byteclk_min * 4 * 4);
4794
4795        if (cfg->trans_mode == OMAP_DSS_DSI_BURST_MODE) {
4796                pll_max = cfg->hs_clk_max * 4;
4797        } else {
4798                unsigned long byteclk_max;
4799                byteclk_max = div64_u64((u64)ctx->req_pck_max * bitspp,
4800                                ndl * 8);
4801
4802                pll_max = byteclk_max * 4 * 4;
4803        }
4804
4805        return dss_pll_calc(ctx->pll, clkin,
4806                        pll_min, pll_max,
4807                        dsi_vm_calc_pll_cb, ctx);
4808}
4809
4810static int dsi_set_config(struct omap_dss_device *dssdev,
4811                const struct omap_dss_dsi_config *config)
4812{
4813        struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4814        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4815        struct dsi_clk_calc_ctx ctx;
4816        bool ok;
4817        int r;
4818
4819        mutex_lock(&dsi->lock);
4820
4821        dsi->pix_fmt = config->pixel_format;
4822        dsi->mode = config->mode;
4823
4824        if (config->mode == OMAP_DSS_DSI_VIDEO_MODE)
4825                ok = dsi_vm_calc(dsi, config, &ctx);
4826        else
4827                ok = dsi_cm_calc(dsi, config, &ctx);
4828
4829        if (!ok) {
4830                DSSERR("failed to find suitable DSI clock settings\n");
4831                r = -EINVAL;
4832                goto err;
4833        }
4834
4835        dsi_pll_calc_dsi_fck(&ctx.dsi_cinfo);
4836
4837        r = dsi_lp_clock_calc(ctx.dsi_cinfo.clkout[HSDIV_DSI],
4838                config->lp_clk_min, config->lp_clk_max, &dsi->user_lp_cinfo);
4839        if (r) {
4840                DSSERR("failed to find suitable DSI LP clock settings\n");
4841                goto err;
4842        }
4843
4844        dsi->user_dsi_cinfo = ctx.dsi_cinfo;
4845        dsi->user_dispc_cinfo = ctx.dispc_cinfo;
4846
4847        dsi->timings = ctx.dispc_vm;
4848        dsi->vm_timings = ctx.dsi_vm;
4849
4850        mutex_unlock(&dsi->lock);
4851
4852        return 0;
4853err:
4854        mutex_unlock(&dsi->lock);
4855
4856        return r;
4857}
4858
4859/*
4860 * Return a hardcoded channel for the DSI output. This should work for
4861 * current use cases, but this can be later expanded to either resolve
4862 * the channel in some more dynamic manner, or get the channel as a user
4863 * parameter.
4864 */
4865static enum omap_channel dsi_get_channel(int module_id)
4866{
4867        switch (omapdss_get_version()) {
4868        case OMAPDSS_VER_OMAP24xx:
4869        case OMAPDSS_VER_AM43xx:
4870                DSSWARN("DSI not supported\n");
4871                return OMAP_DSS_CHANNEL_LCD;
4872
4873        case OMAPDSS_VER_OMAP34xx_ES1:
4874        case OMAPDSS_VER_OMAP34xx_ES3:
4875        case OMAPDSS_VER_OMAP3630:
4876        case OMAPDSS_VER_AM35xx:
4877                return OMAP_DSS_CHANNEL_LCD;
4878
4879        case OMAPDSS_VER_OMAP4430_ES1:
4880        case OMAPDSS_VER_OMAP4430_ES2:
4881        case OMAPDSS_VER_OMAP4:
4882                switch (module_id) {
4883                case 0:
4884                        return OMAP_DSS_CHANNEL_LCD;
4885                case 1:
4886                        return OMAP_DSS_CHANNEL_LCD2;
4887                default:
4888                        DSSWARN("unsupported module id\n");
4889                        return OMAP_DSS_CHANNEL_LCD;
4890                }
4891
4892        case OMAPDSS_VER_OMAP5:
4893                switch (module_id) {
4894                case 0:
4895                        return OMAP_DSS_CHANNEL_LCD;
4896                case 1:
4897                        return OMAP_DSS_CHANNEL_LCD3;
4898                default:
4899                        DSSWARN("unsupported module id\n");
4900                        return OMAP_DSS_CHANNEL_LCD;
4901                }
4902
4903        default:
4904                DSSWARN("unsupported DSS version\n");
4905                return OMAP_DSS_CHANNEL_LCD;
4906        }
4907}
4908
4909static int dsi_request_vc(struct omap_dss_device *dssdev, int *channel)
4910{
4911        struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4912        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4913        int i;
4914
4915        for (i = 0; i < ARRAY_SIZE(dsi->vc); i++) {
4916                if (!dsi->vc[i].dssdev) {
4917                        dsi->vc[i].dssdev = dssdev;
4918                        *channel = i;
4919                        return 0;
4920                }
4921        }
4922
4923        DSSERR("cannot get VC for display %s\n", dssdev->name);
4924        return -ENOSPC;
4925}
4926
4927static int dsi_set_vc_id(struct omap_dss_device *dssdev, int channel, int vc_id)
4928{
4929        struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4930        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4931
4932        if (vc_id < 0 || vc_id > 3) {
4933                DSSERR("VC ID out of range\n");
4934                return -EINVAL;
4935        }
4936
4937        if (channel < 0 || channel > 3) {
4938                DSSERR("Virtual Channel out of range\n");
4939                return -EINVAL;
4940        }
4941
4942        if (dsi->vc[channel].dssdev != dssdev) {
4943                DSSERR("Virtual Channel not allocated to display %s\n",
4944                        dssdev->name);
4945                return -EINVAL;
4946        }
4947
4948        dsi->vc[channel].vc_id = vc_id;
4949
4950        return 0;
4951}
4952
4953static void dsi_release_vc(struct omap_dss_device *dssdev, int channel)
4954{
4955        struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4956        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4957
4958        if ((channel >= 0 && channel <= 3) &&
4959                dsi->vc[channel].dssdev == dssdev) {
4960                dsi->vc[channel].dssdev = NULL;
4961                dsi->vc[channel].vc_id = 0;
4962        }
4963}
4964
4965
4966static int dsi_get_clocks(struct platform_device *dsidev)
4967{
4968        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4969        struct clk *clk;
4970
4971        clk = devm_clk_get(&dsidev->dev, "fck");
4972        if (IS_ERR(clk)) {
4973                DSSERR("can't get fck\n");
4974                return PTR_ERR(clk);
4975        }
4976
4977        dsi->dss_clk = clk;
4978
4979        return 0;
4980}
4981
4982static int dsi_connect(struct omap_dss_device *dssdev,
4983                struct omap_dss_device *dst)
4984{
4985        struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4986        enum omap_channel dispc_channel = dssdev->dispc_channel;
4987        int r;
4988
4989        r = dsi_regulator_init(dsidev);
4990        if (r)
4991                return r;
4992
4993        r = dss_mgr_connect(dispc_channel, dssdev);
4994        if (r)
4995                return r;
4996
4997        r = omapdss_output_set_device(dssdev, dst);
4998        if (r) {
4999                DSSERR("failed to connect output to new device: %s\n",
5000                                dssdev->name);
5001                dss_mgr_disconnect(dispc_channel, dssdev);
5002                return r;
5003        }
5004
5005        return 0;
5006}
5007
5008static void dsi_disconnect(struct omap_dss_device *dssdev,
5009                struct omap_dss_device *dst)
5010{
5011        enum omap_channel dispc_channel = dssdev->dispc_channel;
5012
5013        WARN_ON(dst != dssdev->dst);
5014
5015        if (dst != dssdev->dst)
5016                return;
5017
5018        omapdss_output_unset_device(dssdev);
5019
5020        dss_mgr_disconnect(dispc_channel, dssdev);
5021}
5022
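/*
 * Entry points exposed to DSI panel drivers; dsi_init_output() hooks this
 * table up as the output's ops.dsi.
 */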
5023static const struct omapdss_dsi_ops dsi_ops = {
5024        .connect = dsi_connect,
5025        .disconnect = dsi_disconnect,
5026
5027        .bus_lock = dsi_bus_lock,
5028        .bus_unlock = dsi_bus_unlock,
5029
5030        .enable = dsi_display_enable,
5031        .disable = dsi_display_disable,
5032
5033        .enable_hs = dsi_vc_enable_hs,
5034
5035        .configure_pins = dsi_configure_pins,
5036        .set_config = dsi_set_config,
5037
5038        .enable_video_output = dsi_enable_video_output,
5039        .disable_video_output = dsi_disable_video_output,
5040
5041        .update = dsi_update,
5042
5043        .enable_te = dsi_enable_te,
5044
5045        .request_vc = dsi_request_vc,
5046        .set_vc_id = dsi_set_vc_id,
5047        .release_vc = dsi_release_vc,
5048
5049        .dcs_write = dsi_vc_dcs_write,
5050        .dcs_write_nosync = dsi_vc_dcs_write_nosync,
5051        .dcs_read = dsi_vc_dcs_read,
5052
5053        .gen_write = dsi_vc_generic_write,
5054        .gen_write_nosync = dsi_vc_generic_write_nosync,
5055        .gen_read = dsi_vc_generic_read,
5056
5057        .bta_sync = dsi_vc_send_bta_sync,
5058
5059        .set_max_rx_packet_size = dsi_vc_set_max_rx_packet_size,
5060};
5061
5062static void dsi_init_output(struct platform_device *dsidev)
5063{
5064        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
5065        struct omap_dss_device *out = &dsi->output;
5066
5067        out->dev = &dsidev->dev;
5068        out->id = dsi->module_id == 0 ?
5069                        OMAP_DSS_OUTPUT_DSI1 : OMAP_DSS_OUTPUT_DSI2;
5070
5071        out->output_type = OMAP_DISPLAY_TYPE_DSI;
5072        out->name = dsi->module_id == 0 ? "dsi.0" : "dsi.1";
5073        out->dispc_channel = dsi_get_channel(dsi->module_id);
5074        out->ops.dsi = &dsi_ops;
5075        out->owner = THIS_MODULE;
5076
5077        omapdss_register_output(out);
5078}
5079
5080static void dsi_uninit_output(struct platform_device *dsidev)
5081{
5082        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
5083        struct omap_dss_device *out = &dsi->output;
5084
5085        omapdss_unregister_output(out);
5086}
5087
5088static int dsi_probe_of(struct platform_device *pdev)
5089{
5090        struct device_node *node = pdev->dev.of_node;
5091        struct dsi_data *dsi = dsi_get_dsidrv_data(pdev);
5092        struct property *prop;
5093        u32 lane_arr[10];
5094        int len, num_pins;
5095        int r, i;
5096        struct device_node *ep;
5097        struct omap_dsi_pin_config pin_cfg;
5098
5099        ep = omapdss_of_get_first_endpoint(node);
5100        if (!ep)
5101                return 0;
5102
5103        prop = of_find_property(ep, "lanes", &len);
5104        if (prop == NULL) {
5105                dev_err(&pdev->dev, "failed to find lane data\n");
5106                r = -EINVAL;
5107                goto err;
5108        }
5109
5110        num_pins = len / sizeof(u32);
5111
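            /*
             * The DT "lanes" property appears to list two pin entries per
             * lane (positive and negative), clock lane included, so a valid
             * list has an even count of at least 4 and at most two entries
             * per supported lane.
             */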
5112        if (num_pins < 4 || num_pins % 2 != 0 ||
5113                num_pins > dsi->num_lanes_supported * 2) {
5114                dev_err(&pdev->dev, "bad number of lanes\n");
5115                r = -EINVAL;
5116                goto err;
5117        }
5118
5119        r = of_property_read_u32_array(ep, "lanes", lane_arr, num_pins);
5120        if (r) {
5121                dev_err(&pdev->dev, "failed to read lane data\n");
5122                goto err;
5123        }
5124
5125        pin_cfg.num_pins = num_pins;
5126        for (i = 0; i < num_pins; ++i)
5127                pin_cfg.pins[i] = (int)lane_arr[i];
5128
5129        r = dsi_configure_pins(&dsi->output, &pin_cfg);
5130        if (r) {
5131                dev_err(&pdev->dev, "failed to configure pins\n");
5132                goto err;
5133        }
5134
5135        of_node_put(ep);
5136
5137        return 0;
5138
5139err:
5140        of_node_put(ep);
5141        return r;
5142}
5143
5144static const struct dss_pll_ops dsi_pll_ops = {
5145        .enable = dsi_pll_enable,
5146        .disable = dsi_pll_disable,
5147        .set_config = dss_pll_write_config_type_a,
5148};
5149
5150static const struct dss_pll_hw dss_omap3_dsi_pll_hw = {
5151        .n_max = (1 << 7) - 1,
5152        .m_max = (1 << 11) - 1,
5153        .mX_max = (1 << 4) - 1,
5154        .fint_min = 750000,
5155        .fint_max = 2100000,
5156        .clkdco_low = 1000000000,
5157        .clkdco_max = 1800000000,
5158
5159        .n_msb = 7,
5160        .n_lsb = 1,
5161        .m_msb = 18,
5162        .m_lsb = 8,
5163
5164        .mX_msb[0] = 22,
5165        .mX_lsb[0] = 19,
5166        .mX_msb[1] = 26,
5167        .mX_lsb[1] = 23,
5168
5169        .has_stopmode = true,
5170        .has_freqsel = true,
5171        .has_selfreqdco = false,
5172        .has_refsel = false,
5173};
5174
5175static const struct dss_pll_hw dss_omap4_dsi_pll_hw = {
5176        .n_max = (1 << 8) - 1,
5177        .m_max = (1 << 12) - 1,
5178        .mX_max = (1 << 5) - 1,
5179        .fint_min = 500000,
5180        .fint_max = 2500000,
5181        .clkdco_low = 1000000000,
5182        .clkdco_max = 1800000000,
5183
5184        .n_msb = 8,
5185        .n_lsb = 1,
5186        .m_msb = 20,
5187        .m_lsb = 9,
5188
5189        .mX_msb[0] = 25,
5190        .mX_lsb[0] = 21,
5191        .mX_msb[1] = 30,
5192        .mX_lsb[1] = 26,
5193
5194        .has_stopmode = true,
5195        .has_freqsel = false,
5196        .has_selfreqdco = false,
5197        .has_refsel = false,
5198};
5199
5200static const struct dss_pll_hw dss_omap5_dsi_pll_hw = {
5201        .n_max = (1 << 8) - 1,
5202        .m_max = (1 << 12) - 1,
5203        .mX_max = (1 << 5) - 1,
5204        .fint_min = 150000,
5205        .fint_max = 52000000,
5206        .clkdco_low = 1000000000,
5207        .clkdco_max = 1800000000,
5208
5209        .n_msb = 8,
5210        .n_lsb = 1,
5211        .m_msb = 20,
5212        .m_lsb = 9,
5213
5214        .mX_msb[0] = 25,
5215        .mX_lsb[0] = 21,
5216        .mX_msb[1] = 30,
5217        .mX_lsb[1] = 26,
5218
5219        .has_stopmode = true,
5220        .has_freqsel = false,
5221        .has_selfreqdco = true,
5222        .has_refsel = true,
5223};
5224
5225static int dsi_init_pll_data(struct platform_device *dsidev)
5226{
5227        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
5228        struct dss_pll *pll = &dsi->pll;
5229        struct clk *clk;
5230        int r;
5231
5232        clk = devm_clk_get(&dsidev->dev, "sys_clk");
5233        if (IS_ERR(clk)) {
5234                DSSERR("can't get sys_clk\n");
5235                return PTR_ERR(clk);
5236        }
5237
5238        pll->name = dsi->module_id == 0 ? "dsi0" : "dsi1";
5239        pll->id = dsi->module_id == 0 ? DSS_PLL_DSI1 : DSS_PLL_DSI2;
5240        pll->clkin = clk;
5241        pll->base = dsi->pll_base;
5242
5243        switch (omapdss_get_version()) {
5244        case OMAPDSS_VER_OMAP34xx_ES1:
5245        case OMAPDSS_VER_OMAP34xx_ES3:
5246        case OMAPDSS_VER_OMAP3630:
5247        case OMAPDSS_VER_AM35xx:
5248                pll->hw = &dss_omap3_dsi_pll_hw;
5249                break;
5250
5251        case OMAPDSS_VER_OMAP4430_ES1:
5252        case OMAPDSS_VER_OMAP4430_ES2:
5253        case OMAPDSS_VER_OMAP4:
5254                pll->hw = &dss_omap4_dsi_pll_hw;
5255                break;
5256
5257        case OMAPDSS_VER_OMAP5:
5258                pll->hw = &dss_omap5_dsi_pll_hw;
5259                break;
5260
5261        default:
5262                return -ENODEV;
5263        }
5264
5265        pll->ops = &dsi_pll_ops;
5266
5267        r = dss_pll_register(pll);
5268        if (r)
5269                return r;
5270
5271        return 0;
5272}
5273
5274/* DSI1 HW IP initialisation */
5275static int dsi_bind(struct device *dev, struct device *master, void *data)
5276{
5277        struct platform_device *dsidev = to_platform_device(dev);
5278        u32 rev;
5279        int r, i;
5280        struct dsi_data *dsi;
5281        struct resource *dsi_mem;
5282        struct resource *res;
5283        struct resource temp_res;
5284
5285        dsi = devm_kzalloc(&dsidev->dev, sizeof(*dsi), GFP_KERNEL);
5286        if (!dsi)
5287                return -ENOMEM;
5288
5289        dsi->pdev = dsidev;
5290        dev_set_drvdata(&dsidev->dev, dsi);
5291
5292        spin_lock_init(&dsi->irq_lock);
5293        spin_lock_init(&dsi->errors_lock);
5294        dsi->errors = 0;
5295
5296#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
5297        spin_lock_init(&dsi->irq_stats_lock);
5298        dsi->irq_stats.last_reset = jiffies;
5299#endif
5300
5301        mutex_init(&dsi->lock);
5302        sema_init(&dsi->bus_lock, 1);
5303
5304        INIT_DEFERRABLE_WORK(&dsi->framedone_timeout_work,
5305                             dsi_framedone_timeout_work_callback);
5306
5307#ifdef DSI_CATCH_MISSING_TE
5308        init_timer(&dsi->te_timer);
5309        dsi->te_timer.function = dsi_te_timeout;
5310        dsi->te_timer.data = 0;
5311#endif
5312
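            /*
             * Prefer named "proto"/"phy"/"pll" memory regions; fall back to
             * a single DSI region and carve out each block at its fixed
             * offset if the platform data only provides one resource.
             */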
5313        res = platform_get_resource_byname(dsidev, IORESOURCE_MEM, "proto");
5314        if (!res) {
5315                res = platform_get_resource(dsidev, IORESOURCE_MEM, 0);
5316                if (!res) {
5317                        DSSERR("can't get IORESOURCE_MEM DSI\n");
5318                        return -EINVAL;
5319                }
5320
5321                temp_res.start = res->start;
5322                temp_res.end = temp_res.start + DSI_PROTO_SZ - 1;
5323                res = &temp_res;
5324        }
5325
5326        dsi_mem = res;
5327
5328        dsi->proto_base = devm_ioremap(&dsidev->dev, res->start,
5329                resource_size(res));
5330        if (!dsi->proto_base) {
5331                DSSERR("can't ioremap DSI protocol engine\n");
5332                return -ENOMEM;
5333        }
5334
5335        res = platform_get_resource_byname(dsidev, IORESOURCE_MEM, "phy");
5336        if (!res) {
5337                res = platform_get_resource(dsidev, IORESOURCE_MEM, 0);
5338                if (!res) {
5339                        DSSERR("can't get IORESOURCE_MEM DSI\n");
5340                        return -EINVAL;
5341                }
5342
5343                temp_res.start = res->start + DSI_PHY_OFFSET;
5344                temp_res.end = temp_res.start + DSI_PHY_SZ - 1;
5345                res = &temp_res;
5346        }
5347
5348        dsi->phy_base = devm_ioremap(&dsidev->dev, res->start,
5349                resource_size(res));
5350        if (!dsi->phy_base) {
5351                DSSERR("can't ioremap DSI PHY\n");
5352                return -ENOMEM;
5353        }
5354
5355        res = platform_get_resource_byname(dsidev, IORESOURCE_MEM, "pll");
5356        if (!res) {
5357                res = platform_get_resource(dsidev, IORESOURCE_MEM, 0);
5358                if (!res) {
5359                        DSSERR("can't get IORESOURCE_MEM DSI\n");
5360                        return -EINVAL;
5361                }
5362
5363                temp_res.start = res->start + DSI_PLL_OFFSET;
5364                temp_res.end = temp_res.start + DSI_PLL_SZ - 1;
5365                res = &temp_res;
5366        }
5367
5368        dsi->pll_base = devm_ioremap(&dsidev->dev, res->start,
5369                resource_size(res));
5370        if (!dsi->pll_base) {
5371                DSSERR("can't ioremap DSI PLL\n");
5372                return -ENOMEM;
5373        }
5374
5375        dsi->irq = platform_get_irq(dsi->pdev, 0);
5376        if (dsi->irq < 0) {
5377                DSSERR("platform_get_irq failed\n");
5378                return -ENODEV;
5379        }
5380
5381        r = devm_request_irq(&dsidev->dev, dsi->irq, omap_dsi_irq_handler,
5382                             IRQF_SHARED, dev_name(&dsidev->dev), dsi->pdev);
5383        if (r < 0) {
5384                DSSERR("request_irq failed\n");
5385                return r;
5386        }
5387
5388        if (dsidev->dev.of_node) {
5389                const struct of_device_id *match;
5390                const struct dsi_module_id_data *d;
5391
5392                match = of_match_node(dsi_of_match, dsidev->dev.of_node);
5393                if (!match) {
5394                        DSSERR("unsupported DSI module\n");
5395                        return -ENODEV;
5396                }
5397
5398                d = match->data;
5399
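                    /*
                     * Derive the module id by matching this instance's base
                     * address against the per-SoC table in the OF match data.
                     */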
5400                while (d->address != 0 && d->address != dsi_mem->start)
5401                        d++;
5402
5403                if (d->address == 0) {
5404                        DSSERR("unsupported DSI module\n");
5405                        return -ENODEV;
5406                }
5407
5408                dsi->module_id = d->id;
5409        } else {
5410                dsi->module_id = dsidev->id;
5411        }
5412
5413        /* DSI VCs initialization */
5414        for (i = 0; i < ARRAY_SIZE(dsi->vc); i++) {
5415                dsi->vc[i].source = DSI_VC_SOURCE_L4;
5416                dsi->vc[i].dssdev = NULL;
5417                dsi->vc[i].vc_id = 0;
5418        }
5419
5420        r = dsi_get_clocks(dsidev);
5421        if (r)
5422                return r;
5423
5424        r = dsi_init_pll_data(dsidev);
            if (r) {
                    DSSERR("PLL init failed\n");
                    return r;
            }
5425
5426        pm_runtime_enable(&dsidev->dev);
5427
5428        r = dsi_runtime_get(dsidev);
5429        if (r)
5430                goto err_runtime_get;
5431
5432        rev = dsi_read_reg(dsidev, DSI_REVISION);
5433        dev_dbg(&dsidev->dev, "OMAP DSI rev %d.%d\n",
5434               FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
5435
5436        /* DSI on OMAP3 doesn't have register DSI_GNQ, set number
5437         * of data lanes to 3 by default */
5438        if (dss_has_feature(FEAT_DSI_GNQ))
5439                /* NB_DATA_LANES */
5440                dsi->num_lanes_supported = 1 + REG_GET(dsidev, DSI_GNQ, 11, 9);
5441        else
5442                dsi->num_lanes_supported = 3;
5443
5444        dsi->line_buffer_size = dsi_get_line_buf_size(dsidev);
5445
5446        dsi_init_output(dsidev);
5447
5448        if (dsidev->dev.of_node) {
5449                r = dsi_probe_of(dsidev);
5450                if (r) {
5451                        DSSERR("Invalid DSI DT data\n");
5452                        goto err_probe_of;
5453                }
5454
5455                r = of_platform_populate(dsidev->dev.of_node, NULL, NULL,
5456                        &dsidev->dev);
5457                if (r)
5458                        DSSERR("Failed to populate DSI child devices: %d\n", r);
5459        }
5460
5461        dsi_runtime_put(dsidev);
5462
5463        if (dsi->module_id == 0)
5464                dss_debugfs_create_file("dsi1_regs", dsi1_dump_regs);
5465        else if (dsi->module_id == 1)
5466                dss_debugfs_create_file("dsi2_regs", dsi2_dump_regs);
5467
5468#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
5469        if (dsi->module_id == 0)
5470                dss_debugfs_create_file("dsi1_irqs", dsi1_dump_irqs);
5471        else if (dsi->module_id == 1)
5472                dss_debugfs_create_file("dsi2_irqs", dsi2_dump_irqs);
5473#endif
5474
5475        return 0;
5476
5477err_probe_of:
5478        dsi_uninit_output(dsidev);
5479        dsi_runtime_put(dsidev);
5480
5481err_runtime_get:
5482        pm_runtime_disable(&dsidev->dev);
5483        return r;
5484}
5485
5486static void dsi_unbind(struct device *dev, struct device *master, void *data)
5487{
5488        struct platform_device *dsidev = to_platform_device(dev);
5489        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
5490
5491        of_platform_depopulate(&dsidev->dev);
5492
5493        WARN_ON(dsi->scp_clk_refcount > 0);
5494
5495        dss_pll_unregister(&dsi->pll);
5496
5497        dsi_uninit_output(dsidev);
5498
5499        pm_runtime_disable(&dsidev->dev);
5500
5501        if (dsi->vdds_dsi_reg != NULL && dsi->vdds_dsi_enabled) {
5502                regulator_disable(dsi->vdds_dsi_reg);
5503                dsi->vdds_dsi_enabled = false;
5504        }
5505}
5506
5507static const struct component_ops dsi_component_ops = {
5508        .bind   = dsi_bind,
5509        .unbind = dsi_unbind,
5510};
5511
5512static int dsi_probe(struct platform_device *pdev)
5513{
5514        return component_add(&pdev->dev, &dsi_component_ops);
5515}
5516
5517static int dsi_remove(struct platform_device *pdev)
5518{
5519        component_del(&pdev->dev, &dsi_component_ops);
5520        return 0;
5521}
5522
5523static int dsi_runtime_suspend(struct device *dev)
5524{
5525        struct platform_device *pdev = to_platform_device(dev);
5526        struct dsi_data *dsi = dsi_get_dsidrv_data(pdev);
5527
5528        dsi->is_enabled = false;
5529        /* ensure the irq handler sees the is_enabled value */
5530        smp_wmb();
5531        /* wait for current handler to finish before turning the DSI off */
5532        synchronize_irq(dsi->irq);
5533
5534        dispc_runtime_put();
5535
5536        return 0;
5537}
5538
5539static int dsi_runtime_resume(struct device *dev)
5540{
5541        struct platform_device *pdev = to_platform_device(dev);
5542        struct dsi_data *dsi = dsi_get_dsidrv_data(pdev);
5543        int r;
5544
5545        r = dispc_runtime_get();
5546        if (r)
5547                return r;
5548
5549        dsi->is_enabled = true;
5550        /* ensure the irq handler sees the is_enabled value */
5551        smp_wmb();
5552
5553        return 0;
5554}
5555
5556static const struct dev_pm_ops dsi_pm_ops = {
5557        .runtime_suspend = dsi_runtime_suspend,
5558        .runtime_resume = dsi_runtime_resume,
5559};
5560
5561static const struct dsi_module_id_data dsi_of_data_omap3[] = {
5562        { .address = 0x4804fc00, .id = 0, },
5563        { },
5564};
5565
5566static const struct dsi_module_id_data dsi_of_data_omap4[] = {
5567        { .address = 0x58004000, .id = 0, },
5568        { .address = 0x58005000, .id = 1, },
5569        { },
5570};
5571
5572static const struct dsi_module_id_data dsi_of_data_omap5[] = {
5573        { .address = 0x58004000, .id = 0, },
5574        { .address = 0x58009000, .id = 1, },
5575        { },
5576};
5577
5578static const struct of_device_id dsi_of_match[] = {
5579        { .compatible = "ti,omap3-dsi", .data = dsi_of_data_omap3, },
5580        { .compatible = "ti,omap4-dsi", .data = dsi_of_data_omap4, },
5581        { .compatible = "ti,omap5-dsi", .data = dsi_of_data_omap5, },
5582        {},
5583};
5584
5585static struct platform_driver omap_dsihw_driver = {
5586        .probe          = dsi_probe,
5587        .remove         = dsi_remove,
5588        .driver         = {
5589                .name   = "omapdss_dsi",
5590                .pm     = &dsi_pm_ops,
5591                .of_match_table = dsi_of_match,
5592                .suppress_bind_attrs = true,
5593        },
5594};
5595
5596int __init dsi_init_platform_driver(void)
5597{
5598        return platform_driver_register(&omap_dsihw_driver);
5599}
5600
5601void dsi_uninit_platform_driver(void)
5602{
5603        platform_driver_unregister(&omap_dsihw_driver);
5604}
5605