   1/*
   2 * linux/drivers/video/omap2/dss/dsi.c
   3 *
   4 * Copyright (C) 2009 Nokia Corporation
   5 * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
   6 *
   7 * This program is free software; you can redistribute it and/or modify it
   8 * under the terms of the GNU General Public License version 2 as published by
   9 * the Free Software Foundation.
  10 *
  11 * This program is distributed in the hope that it will be useful, but WITHOUT
  12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  14 * more details.
  15 *
  16 * You should have received a copy of the GNU General Public License along with
  17 * this program.  If not, see <http://www.gnu.org/licenses/>.
  18 */
  19
  20#define DSS_SUBSYS_NAME "DSI"
  21
  22#include <linux/kernel.h>
  23#include <linux/io.h>
  24#include <linux/clk.h>
  25#include <linux/device.h>
  26#include <linux/err.h>
  27#include <linux/interrupt.h>
  28#include <linux/delay.h>
  29#include <linux/mutex.h>
  30#include <linux/semaphore.h>
  31#include <linux/seq_file.h>
  32#include <linux/platform_device.h>
  33#include <linux/regulator/consumer.h>
  34#include <linux/wait.h>
  35#include <linux/workqueue.h>
  36#include <linux/sched.h>
  37#include <linux/slab.h>
  38#include <linux/debugfs.h>
  39
  40#include <video/omapdss.h>
  41#include <plat/clock.h>
  42
  43#include "dss.h"
  44#include "dss_features.h"
  45
  46/*#define VERBOSE_IRQ*/
  47#define DSI_CATCH_MISSING_TE
  48
  49struct dsi_reg { u16 idx; };
  50
  51#define DSI_REG(idx)            ((const struct dsi_reg) { idx })
  52
  53#define DSI_SZ_REGS             SZ_1K
  54/* DSI Protocol Engine */
  55
  56#define DSI_REVISION                    DSI_REG(0x0000)
  57#define DSI_SYSCONFIG                   DSI_REG(0x0010)
  58#define DSI_SYSSTATUS                   DSI_REG(0x0014)
  59#define DSI_IRQSTATUS                   DSI_REG(0x0018)
  60#define DSI_IRQENABLE                   DSI_REG(0x001C)
  61#define DSI_CTRL                        DSI_REG(0x0040)
  62#define DSI_GNQ                         DSI_REG(0x0044)
  63#define DSI_COMPLEXIO_CFG1              DSI_REG(0x0048)
  64#define DSI_COMPLEXIO_IRQ_STATUS        DSI_REG(0x004C)
  65#define DSI_COMPLEXIO_IRQ_ENABLE        DSI_REG(0x0050)
  66#define DSI_CLK_CTRL                    DSI_REG(0x0054)
  67#define DSI_TIMING1                     DSI_REG(0x0058)
  68#define DSI_TIMING2                     DSI_REG(0x005C)
  69#define DSI_VM_TIMING1                  DSI_REG(0x0060)
  70#define DSI_VM_TIMING2                  DSI_REG(0x0064)
  71#define DSI_VM_TIMING3                  DSI_REG(0x0068)
  72#define DSI_CLK_TIMING                  DSI_REG(0x006C)
  73#define DSI_TX_FIFO_VC_SIZE             DSI_REG(0x0070)
  74#define DSI_RX_FIFO_VC_SIZE             DSI_REG(0x0074)
  75#define DSI_COMPLEXIO_CFG2              DSI_REG(0x0078)
  76#define DSI_RX_FIFO_VC_FULLNESS         DSI_REG(0x007C)
  77#define DSI_VM_TIMING4                  DSI_REG(0x0080)
  78#define DSI_TX_FIFO_VC_EMPTINESS        DSI_REG(0x0084)
  79#define DSI_VM_TIMING5                  DSI_REG(0x0088)
  80#define DSI_VM_TIMING6                  DSI_REG(0x008C)
  81#define DSI_VM_TIMING7                  DSI_REG(0x0090)
  82#define DSI_STOPCLK_TIMING              DSI_REG(0x0094)
  83#define DSI_VC_CTRL(n)                  DSI_REG(0x0100 + (n * 0x20))
  84#define DSI_VC_TE(n)                    DSI_REG(0x0104 + (n * 0x20))
  85#define DSI_VC_LONG_PACKET_HEADER(n)    DSI_REG(0x0108 + (n * 0x20))
  86#define DSI_VC_LONG_PACKET_PAYLOAD(n)   DSI_REG(0x010C + (n * 0x20))
  87#define DSI_VC_SHORT_PACKET_HEADER(n)   DSI_REG(0x0110 + (n * 0x20))
  88#define DSI_VC_IRQSTATUS(n)             DSI_REG(0x0118 + (n * 0x20))
  89#define DSI_VC_IRQENABLE(n)             DSI_REG(0x011C + (n * 0x20))
  90
  91/* DSIPHY_SCP */
  92
  93#define DSI_DSIPHY_CFG0                 DSI_REG(0x200 + 0x0000)
  94#define DSI_DSIPHY_CFG1                 DSI_REG(0x200 + 0x0004)
  95#define DSI_DSIPHY_CFG2                 DSI_REG(0x200 + 0x0008)
  96#define DSI_DSIPHY_CFG5                 DSI_REG(0x200 + 0x0014)
  97#define DSI_DSIPHY_CFG10                DSI_REG(0x200 + 0x0028)
  98
  99/* DSI_PLL_CTRL_SCP */
 100
 101#define DSI_PLL_CONTROL                 DSI_REG(0x300 + 0x0000)
 102#define DSI_PLL_STATUS                  DSI_REG(0x300 + 0x0004)
 103#define DSI_PLL_GO                      DSI_REG(0x300 + 0x0008)
 104#define DSI_PLL_CONFIGURATION1          DSI_REG(0x300 + 0x000C)
 105#define DSI_PLL_CONFIGURATION2          DSI_REG(0x300 + 0x0010)
 106
 107#define REG_GET(dsidev, idx, start, end) \
 108        FLD_GET(dsi_read_reg(dsidev, idx), start, end)
 109
 110#define REG_FLD_MOD(dsidev, idx, val, start, end) \
 111        dsi_write_reg(dsidev, idx, FLD_MOD(dsi_read_reg(dsidev, idx), val, start, end))
 112
 113/* Global interrupts */
 114#define DSI_IRQ_VC0             (1 << 0)
 115#define DSI_IRQ_VC1             (1 << 1)
 116#define DSI_IRQ_VC2             (1 << 2)
 117#define DSI_IRQ_VC3             (1 << 3)
 118#define DSI_IRQ_WAKEUP          (1 << 4)
 119#define DSI_IRQ_RESYNC          (1 << 5)
 120#define DSI_IRQ_PLL_LOCK        (1 << 7)
 121#define DSI_IRQ_PLL_UNLOCK      (1 << 8)
 122#define DSI_IRQ_PLL_RECALL      (1 << 9)
 123#define DSI_IRQ_COMPLEXIO_ERR   (1 << 10)
 124#define DSI_IRQ_HS_TX_TIMEOUT   (1 << 14)
 125#define DSI_IRQ_LP_RX_TIMEOUT   (1 << 15)
 126#define DSI_IRQ_TE_TRIGGER      (1 << 16)
 127#define DSI_IRQ_ACK_TRIGGER     (1 << 17)
 128#define DSI_IRQ_SYNC_LOST       (1 << 18)
 129#define DSI_IRQ_LDO_POWER_GOOD  (1 << 19)
 130#define DSI_IRQ_TA_TIMEOUT      (1 << 20)
 131#define DSI_IRQ_ERROR_MASK \
 132        (DSI_IRQ_HS_TX_TIMEOUT | DSI_IRQ_LP_RX_TIMEOUT | DSI_IRQ_SYNC_LOST | \
 133        DSI_IRQ_TA_TIMEOUT)
 134#define DSI_IRQ_CHANNEL_MASK    0xf
 135
 136/* Virtual channel interrupts */
 137#define DSI_VC_IRQ_CS           (1 << 0)
 138#define DSI_VC_IRQ_ECC_CORR     (1 << 1)
 139#define DSI_VC_IRQ_PACKET_SENT  (1 << 2)
 140#define DSI_VC_IRQ_FIFO_TX_OVF  (1 << 3)
 141#define DSI_VC_IRQ_FIFO_RX_OVF  (1 << 4)
 142#define DSI_VC_IRQ_BTA          (1 << 5)
 143#define DSI_VC_IRQ_ECC_NO_CORR  (1 << 6)
 144#define DSI_VC_IRQ_FIFO_TX_UDF  (1 << 7)
 145#define DSI_VC_IRQ_PP_BUSY_CHANGE (1 << 8)
 146#define DSI_VC_IRQ_ERROR_MASK \
 147        (DSI_VC_IRQ_CS | DSI_VC_IRQ_ECC_CORR | DSI_VC_IRQ_FIFO_TX_OVF | \
 148        DSI_VC_IRQ_FIFO_RX_OVF | DSI_VC_IRQ_ECC_NO_CORR | \
 149        DSI_VC_IRQ_FIFO_TX_UDF)
 150
 151/* ComplexIO interrupts */
 152#define DSI_CIO_IRQ_ERRSYNCESC1         (1 << 0)
 153#define DSI_CIO_IRQ_ERRSYNCESC2         (1 << 1)
 154#define DSI_CIO_IRQ_ERRSYNCESC3         (1 << 2)
 155#define DSI_CIO_IRQ_ERRSYNCESC4         (1 << 3)
 156#define DSI_CIO_IRQ_ERRSYNCESC5         (1 << 4)
 157#define DSI_CIO_IRQ_ERRESC1             (1 << 5)
 158#define DSI_CIO_IRQ_ERRESC2             (1 << 6)
 159#define DSI_CIO_IRQ_ERRESC3             (1 << 7)
 160#define DSI_CIO_IRQ_ERRESC4             (1 << 8)
 161#define DSI_CIO_IRQ_ERRESC5             (1 << 9)
 162#define DSI_CIO_IRQ_ERRCONTROL1         (1 << 10)
 163#define DSI_CIO_IRQ_ERRCONTROL2         (1 << 11)
 164#define DSI_CIO_IRQ_ERRCONTROL3         (1 << 12)
 165#define DSI_CIO_IRQ_ERRCONTROL4         (1 << 13)
 166#define DSI_CIO_IRQ_ERRCONTROL5         (1 << 14)
 167#define DSI_CIO_IRQ_STATEULPS1          (1 << 15)
 168#define DSI_CIO_IRQ_STATEULPS2          (1 << 16)
 169#define DSI_CIO_IRQ_STATEULPS3          (1 << 17)
 170#define DSI_CIO_IRQ_STATEULPS4          (1 << 18)
 171#define DSI_CIO_IRQ_STATEULPS5          (1 << 19)
 172#define DSI_CIO_IRQ_ERRCONTENTIONLP0_1  (1 << 20)
 173#define DSI_CIO_IRQ_ERRCONTENTIONLP1_1  (1 << 21)
 174#define DSI_CIO_IRQ_ERRCONTENTIONLP0_2  (1 << 22)
 175#define DSI_CIO_IRQ_ERRCONTENTIONLP1_2  (1 << 23)
 176#define DSI_CIO_IRQ_ERRCONTENTIONLP0_3  (1 << 24)
 177#define DSI_CIO_IRQ_ERRCONTENTIONLP1_3  (1 << 25)
 178#define DSI_CIO_IRQ_ERRCONTENTIONLP0_4  (1 << 26)
 179#define DSI_CIO_IRQ_ERRCONTENTIONLP1_4  (1 << 27)
 180#define DSI_CIO_IRQ_ERRCONTENTIONLP0_5  (1 << 28)
 181#define DSI_CIO_IRQ_ERRCONTENTIONLP1_5  (1 << 29)
 182#define DSI_CIO_IRQ_ULPSACTIVENOT_ALL0  (1 << 30)
 183#define DSI_CIO_IRQ_ULPSACTIVENOT_ALL1  (1 << 31)
 184#define DSI_CIO_IRQ_ERROR_MASK \
 185        (DSI_CIO_IRQ_ERRSYNCESC1 | DSI_CIO_IRQ_ERRSYNCESC2 | \
 186         DSI_CIO_IRQ_ERRSYNCESC3 | DSI_CIO_IRQ_ERRSYNCESC4 | \
 187         DSI_CIO_IRQ_ERRSYNCESC5 | \
 188         DSI_CIO_IRQ_ERRESC1 | DSI_CIO_IRQ_ERRESC2 | \
 189         DSI_CIO_IRQ_ERRESC3 | DSI_CIO_IRQ_ERRESC4 | \
 190         DSI_CIO_IRQ_ERRESC5 | \
 191         DSI_CIO_IRQ_ERRCONTROL1 | DSI_CIO_IRQ_ERRCONTROL2 | \
 192         DSI_CIO_IRQ_ERRCONTROL3 | DSI_CIO_IRQ_ERRCONTROL4 | \
 193         DSI_CIO_IRQ_ERRCONTROL5 | \
 194         DSI_CIO_IRQ_ERRCONTENTIONLP0_1 | DSI_CIO_IRQ_ERRCONTENTIONLP1_1 | \
 195         DSI_CIO_IRQ_ERRCONTENTIONLP0_2 | DSI_CIO_IRQ_ERRCONTENTIONLP1_2 | \
 196         DSI_CIO_IRQ_ERRCONTENTIONLP0_3 | DSI_CIO_IRQ_ERRCONTENTIONLP1_3 | \
 197         DSI_CIO_IRQ_ERRCONTENTIONLP0_4 | DSI_CIO_IRQ_ERRCONTENTIONLP1_4 | \
 198         DSI_CIO_IRQ_ERRCONTENTIONLP0_5 | DSI_CIO_IRQ_ERRCONTENTIONLP1_5)
 199
 200#define DSI_DT_DCS_SHORT_WRITE_0        0x05
 201#define DSI_DT_DCS_SHORT_WRITE_1        0x15
 202#define DSI_DT_DCS_READ                 0x06
 203#define DSI_DT_SET_MAX_RET_PKG_SIZE     0x37
 204#define DSI_DT_NULL_PACKET              0x09
 205#define DSI_DT_DCS_LONG_WRITE           0x39
 206
 207#define DSI_DT_RX_ACK_WITH_ERR          0x02
 208#define DSI_DT_RX_DCS_LONG_READ         0x1c
 209#define DSI_DT_RX_SHORT_READ_1          0x21
 210#define DSI_DT_RX_SHORT_READ_2          0x22
 211
 212typedef void (*omap_dsi_isr_t) (void *arg, u32 mask);
 213
 214#define DSI_MAX_NR_ISRS                2
 215
 216struct dsi_isr_data {
 217        omap_dsi_isr_t  isr;
 218        void            *arg;
 219        u32             mask;
 220};
 221
 222enum fifo_size {
 223        DSI_FIFO_SIZE_0         = 0,
 224        DSI_FIFO_SIZE_32        = 1,
 225        DSI_FIFO_SIZE_64        = 2,
 226        DSI_FIFO_SIZE_96        = 3,
 227        DSI_FIFO_SIZE_128       = 4,
 228};
 229
 230enum dsi_vc_mode {
 231        DSI_VC_MODE_L4 = 0,
 232        DSI_VC_MODE_VP,
 233};
 234
 235enum dsi_lane {
 236        DSI_CLK_P       = 1 << 0,
 237        DSI_CLK_N       = 1 << 1,
 238        DSI_DATA1_P     = 1 << 2,
 239        DSI_DATA1_N     = 1 << 3,
 240        DSI_DATA2_P     = 1 << 4,
 241        DSI_DATA2_N     = 1 << 5,
 242        DSI_DATA3_P     = 1 << 6,
 243        DSI_DATA3_N     = 1 << 7,
 244        DSI_DATA4_P     = 1 << 8,
 245        DSI_DATA4_N     = 1 << 9,
 246};
 247
 248struct dsi_update_region {
 249        u16 x, y, w, h;
 250        struct omap_dss_device *device;
 251};
 252
 253struct dsi_irq_stats {
 254        unsigned long last_reset;
 255        unsigned irq_count;
 256        unsigned dsi_irqs[32];
 257        unsigned vc_irqs[4][32];
 258        unsigned cio_irqs[32];
 259};
 260
 261struct dsi_isr_tables {
 262        struct dsi_isr_data isr_table[DSI_MAX_NR_ISRS];
 263        struct dsi_isr_data isr_table_vc[4][DSI_MAX_NR_ISRS];
 264        struct dsi_isr_data isr_table_cio[DSI_MAX_NR_ISRS];
 265};
 266
 267struct dsi_data {
 268        struct platform_device *pdev;
 269        void __iomem    *base;
 270        int irq;
 271
 272        void (*dsi_mux_pads)(bool enable);
 273
 274        struct dsi_clock_info current_cinfo;
 275
 276        bool vdds_dsi_enabled;
 277        struct regulator *vdds_dsi_reg;
 278
 279        struct {
 280                enum dsi_vc_mode mode;
 281                struct omap_dss_device *dssdev;
 282                enum fifo_size fifo_size;
 283                int vc_id;
 284        } vc[4];
 285
 286        struct mutex lock;
 287        struct semaphore bus_lock;
 288
 289        unsigned pll_locked;
 290
 291        spinlock_t irq_lock;
 292        struct dsi_isr_tables isr_tables;
 293        /* space for a copy used by the interrupt handler */
 294        struct dsi_isr_tables isr_tables_copy;
 295
 296        int update_channel;
 297        struct dsi_update_region update_region;
 298
 299        bool te_enabled;
 300        bool ulps_enabled;
 301
 302        void (*framedone_callback)(int, void *);
 303        void *framedone_data;
 304
 305        struct delayed_work framedone_timeout_work;
 306
 307#ifdef DSI_CATCH_MISSING_TE
 308        struct timer_list te_timer;
 309#endif
 310
 311        unsigned long cache_req_pck;
 312        unsigned long cache_clk_freq;
 313        struct dsi_clock_info cache_cinfo;
 314
 315        u32             errors;
 316        spinlock_t      errors_lock;
 317#ifdef DEBUG
 318        ktime_t perf_setup_time;
 319        ktime_t perf_start_time;
 320#endif
 321        int debug_read;
 322        int debug_write;
 323
 324#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
 325        spinlock_t irq_stats_lock;
 326        struct dsi_irq_stats irq_stats;
 327#endif
 328        /* DSI PLL Parameter Ranges */
 329        unsigned long regm_max, regn_max;
 330        unsigned long  regm_dispc_max, regm_dsi_max;
 331        unsigned long  fint_min, fint_max;
 332        unsigned long lpdiv_max;
 333
 334        int num_data_lanes;
 335
 336        unsigned scp_clk_refcount;
 337};
 338
 339struct dsi_packet_sent_handler_data {
 340        struct platform_device *dsidev;
 341        struct completion *completion;
 342};
 343
 344static struct platform_device *dsi_pdev_map[MAX_NUM_DSI];
 345
 346#ifdef DEBUG
 347static unsigned int dsi_perf;
 348module_param_named(dsi_perf, dsi_perf, bool, 0644);
 349#endif
 350
/* Return the driver-private dsi_data attached to the DSI platform device. */
static inline struct dsi_data *dsi_get_dsidrv_data(struct platform_device *dsidev)
{
	return dev_get_drvdata(&dsidev->dev);
}
 355
/* Map a display device to the DSI module (platform device) it is wired to. */
static inline struct platform_device *dsi_get_dsidev_from_dssdev(struct omap_dss_device *dssdev)
{
	return dsi_pdev_map[dssdev->phy.dsi.module];
}
 360
/*
 * Look up the DSI platform device for a module index.
 * NOTE(review): no bounds check against MAX_NUM_DSI — callers are trusted
 * to pass a valid index; confirm at the call sites.
 */
struct platform_device *dsi_get_dsidev_from_id(int module)
{
	return dsi_pdev_map[module];
}
 365
/* Return the zero-based index of this DSI module. */
static int dsi_get_dsidev_id(struct platform_device *dsidev)
{
	/* TEMP: Pass 0 as the dsi module index till the time the dsi platform
	 * device names aren't changed to the form "omapdss_dsi.0",
	 * "omapdss_dsi.1" and so on */
	BUG_ON(dsidev->id != -1);

	return 0;
}
 375
/* Write @val to the DSI register at offset @idx from the module base. */
static inline void dsi_write_reg(struct platform_device *dsidev,
		const struct dsi_reg idx, u32 val)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	__raw_writel(val, dsi->base + idx.idx);
}
 383
/* Read the DSI register at offset @idx from the module base. */
static inline u32 dsi_read_reg(struct platform_device *dsidev,
		const struct dsi_reg idx)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	return __raw_readl(dsi->base + idx.idx);
}
 391
 392
/* Context save/restore hooks for off-mode: nothing to do for DSI yet. */
void dsi_save_context(void)
{
}

void dsi_restore_context(void)
{
}
 400
/*
 * Take the DSI bus lock for @dssdev's DSI module, serializing access to
 * the DSI bus between panel drivers and the update machinery. May sleep.
 */
void dsi_bus_lock(struct omap_dss_device *dssdev)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	down(&dsi->bus_lock);
}
EXPORT_SYMBOL(dsi_bus_lock);
 409
/* Release the DSI bus lock taken with dsi_bus_lock(). */
void dsi_bus_unlock(struct omap_dss_device *dssdev)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	up(&dsi->bus_lock);
}
EXPORT_SYMBOL(dsi_bus_unlock);
 418
/*
 * Report whether the DSI bus lock is currently held.
 * NOTE(review): peeks at the semaphore's internal count (count == 0 taken
 * as "held") — racy and dependent on the semaphore implementation; suitable
 * only for debug-style assertions.
 */
static bool dsi_bus_is_locked(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	return dsi->bus_lock.count == 0;
}
 425
/* Generic ISR callback: signal the completion passed as @data. */
static void dsi_completion_handler(void *data, u32 mask)
{
	complete((struct completion *)data);
}
 430
 431static inline int wait_for_bit_change(struct platform_device *dsidev,
 432                const struct dsi_reg idx, int bitnum, int value)
 433{
 434        int t = 100000;
 435
 436        while (REG_GET(dsidev, idx, bitnum, bitnum) != value) {
 437                if (--t == 0)
 438                        return !value;
 439        }
 440
 441        return value;
 442}
 443
 444#ifdef DEBUG
/* Record the timestamp at which update setup began (perf accounting). */
static void dsi_perf_mark_setup(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	dsi->perf_setup_time = ktime_get();
}
 450
/* Record the timestamp at which the actual transfer began (perf accounting). */
static void dsi_perf_mark_start(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	dsi->perf_start_time = ktime_get();
}
 456
/*
 * Print timing statistics for the last update: setup time, transfer time
 * and effective throughput. No-op unless the dsi_perf module param is set.
 */
static void dsi_perf_show(struct platform_device *dsidev, const char *name)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	ktime_t t, setup_time, trans_time;
	u32 total_bytes;
	u32 setup_us, trans_us, total_us;

	if (!dsi_perf)
		return;

	t = ktime_get();

	setup_time = ktime_sub(dsi->perf_start_time, dsi->perf_setup_time);
	setup_us = (u32)ktime_to_us(setup_time);
	if (setup_us == 0)
		setup_us = 1;	/* avoid division by zero below */

	trans_time = ktime_sub(t, dsi->perf_start_time);
	trans_us = (u32)ktime_to_us(trans_time);
	if (trans_us == 0)
		trans_us = 1;	/* avoid division by zero below */

	total_us = setup_us + trans_us;

	/* bytes in the updated region at the device's pixel depth */
	total_bytes = dsi->update_region.w *
		dsi->update_region.h *
		dsi->update_region.device->ctrl.pixel_size / 8;

	/* NOTE(review): total_bytes * 1000 can overflow u32 for regions
	 * larger than ~4 MB — acceptable for a debug printout, but verify
	 * if large update regions become common. */
	printk(KERN_INFO "DSI(%s): %u us + %u us = %u us (%uHz), "
			"%u bytes, %u kbytes/sec\n",
			name,
			setup_us,
			trans_us,
			total_us,
			1000*1000 / total_us,
			total_bytes,
			total_bytes * 1000 / total_us);
}
 495#else
 496#define dsi_perf_mark_setup(x)
 497#define dsi_perf_mark_start(x)
 498#define dsi_perf_show(x, y)
 499#endif
 500
/*
 * Decode and print a DSI_IRQSTATUS word to the kernel log. Without
 * VERBOSE_IRQ, words containing only the per-VC summary bits (bottom
 * four) are suppressed to avoid log spam.
 */
static void print_irq_status(u32 status)
{
	if (status == 0)
		return;

#ifndef VERBOSE_IRQ
	if ((status & ~DSI_IRQ_CHANNEL_MASK) == 0)
		return;
#endif
	printk(KERN_DEBUG "DSI IRQ: 0x%x: ", status);

/* print the name of each set status bit */
#define PIS(x) \
	if (status & DSI_IRQ_##x) \
		printk(#x " ");
#ifdef VERBOSE_IRQ
	PIS(VC0);
	PIS(VC1);
	PIS(VC2);
	PIS(VC3);
#endif
	PIS(WAKEUP);
	PIS(RESYNC);
	PIS(PLL_LOCK);
	PIS(PLL_UNLOCK);
	PIS(PLL_RECALL);
	PIS(COMPLEXIO_ERR);
	PIS(HS_TX_TIMEOUT);
	PIS(LP_RX_TIMEOUT);
	PIS(TE_TRIGGER);
	PIS(ACK_TRIGGER);
	PIS(SYNC_LOST);
	PIS(LDO_POWER_GOOD);
	PIS(TA_TIMEOUT);
#undef PIS

	printk("\n");
}
 538
/*
 * Decode and print a virtual-channel interrupt status word. Without
 * VERBOSE_IRQ, a word containing only PACKET_SENT is suppressed.
 */
static void print_irq_status_vc(int channel, u32 status)
{
	if (status == 0)
		return;

#ifndef VERBOSE_IRQ
	if ((status & ~DSI_VC_IRQ_PACKET_SENT) == 0)
		return;
#endif
	printk(KERN_DEBUG "DSI VC(%d) IRQ 0x%x: ", channel, status);

/* print the name of each set status bit */
#define PIS(x) \
	if (status & DSI_VC_IRQ_##x) \
		printk(#x " ");
	PIS(CS);
	PIS(ECC_CORR);
#ifdef VERBOSE_IRQ
	PIS(PACKET_SENT);
#endif
	PIS(FIFO_TX_OVF);
	PIS(FIFO_RX_OVF);
	PIS(BTA);
	PIS(ECC_NO_CORR);
	PIS(FIFO_TX_UDF);
	PIS(PP_BUSY_CHANGE);
#undef PIS
	printk("\n");
}
 567
 568static void print_irq_status_cio(u32 status)
 569{
 570        if (status == 0)
 571                return;
 572
 573        printk(KERN_DEBUG "DSI CIO IRQ 0x%x: ", status);
 574
 575#define PIS(x) \
 576        if (status & DSI_CIO_IRQ_##x) \
 577                printk(#x " ");
 578        PIS(ERRSYNCESC1);
 579        PIS(ERRSYNCESC2);
 580        PIS(ERRSYNCESC3);
 581        PIS(ERRESC1);
 582        PIS(ERRESC2);
 583        PIS(ERRESC3);
 584        PIS(ERRCONTROL1);
 585        PIS(ERRCONTROL2);
 586        PIS(ERRCONTROL3);
 587        PIS(STATEULPS1);
 588        PIS(STATEULPS2);
 589        PIS(STATEULPS3);
 590        PIS(ERRCONTENTIONLP0_1);
 591        PIS(ERRCONTENTIONLP1_1);
 592        PIS(ERRCONTENTIONLP0_2);
 593        PIS(ERRCONTENTIONLP1_2);
 594        PIS(ERRCONTENTIONLP0_3);
 595        PIS(ERRCONTENTIONLP1_3);
 596        PIS(ULPSACTIVENOT_ALL0);
 597        PIS(ULPSACTIVENOT_ALL1);
 598#undef PIS
 599
 600        printk("\n");
 601}
 602
 603#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
/*
 * Accumulate per-bit interrupt counters for the global, per-VC and
 * ComplexIO status words. Called from the interrupt handler;
 * irq_stats_lock guards against a concurrent stats reset/read.
 */
static void dsi_collect_irq_stats(struct platform_device *dsidev, u32 irqstatus,
		u32 *vcstatus, u32 ciostatus)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	int i;

	spin_lock(&dsi->irq_stats_lock);

	dsi->irq_stats.irq_count++;
	dss_collect_irq_stats(irqstatus, dsi->irq_stats.dsi_irqs);

	for (i = 0; i < 4; ++i)
		dss_collect_irq_stats(vcstatus[i], dsi->irq_stats.vc_irqs[i]);

	dss_collect_irq_stats(ciostatus, dsi->irq_stats.cio_irqs);

	spin_unlock(&dsi->irq_stats_lock);
}
 622#else
 623#define dsi_collect_irq_stats(dsidev, irqstatus, vcstatus, ciostatus)
 624#endif
 625
 626static int debug_irq;
 627
 628static void dsi_handle_irq_errors(struct platform_device *dsidev, u32 irqstatus,
 629                u32 *vcstatus, u32 ciostatus)
 630{
 631        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
 632        int i;
 633
 634        if (irqstatus & DSI_IRQ_ERROR_MASK) {
 635                DSSERR("DSI error, irqstatus %x\n", irqstatus);
 636                print_irq_status(irqstatus);
 637                spin_lock(&dsi->errors_lock);
 638                dsi->errors |= irqstatus & DSI_IRQ_ERROR_MASK;
 639                spin_unlock(&dsi->errors_lock);
 640        } else if (debug_irq) {
 641                print_irq_status(irqstatus);
 642        }
 643
 644        for (i = 0; i < 4; ++i) {
 645                if (vcstatus[i] & DSI_VC_IRQ_ERROR_MASK) {
 646                        DSSERR("DSI VC(%d) error, vc irqstatus %x\n",
 647                                       i, vcstatus[i]);
 648                        print_irq_status_vc(i, vcstatus[i]);
 649                } else if (debug_irq) {
 650                        print_irq_status_vc(i, vcstatus[i]);
 651                }
 652        }
 653
 654        if (ciostatus & DSI_CIO_IRQ_ERROR_MASK) {
 655                DSSERR("DSI CIO error, cio irqstatus %x\n", ciostatus);
 656                print_irq_status_cio(ciostatus);
 657        } else if (debug_irq) {
 658                print_irq_status_cio(ciostatus);
 659        }
 660}
 661
 662static void dsi_call_isrs(struct dsi_isr_data *isr_array,
 663                unsigned isr_array_size, u32 irqstatus)
 664{
 665        struct dsi_isr_data *isr_data;
 666        int i;
 667
 668        for (i = 0; i < isr_array_size; i++) {
 669                isr_data = &isr_array[i];
 670                if (isr_data->isr && isr_data->mask & irqstatus)
 671                        isr_data->isr(isr_data->arg, irqstatus);
 672        }
 673}
 674
 675static void dsi_handle_isrs(struct dsi_isr_tables *isr_tables,
 676                u32 irqstatus, u32 *vcstatus, u32 ciostatus)
 677{
 678        int i;
 679
 680        dsi_call_isrs(isr_tables->isr_table,
 681                        ARRAY_SIZE(isr_tables->isr_table),
 682                        irqstatus);
 683
 684        for (i = 0; i < 4; ++i) {
 685                if (vcstatus[i] == 0)
 686                        continue;
 687                dsi_call_isrs(isr_tables->isr_table_vc[i],
 688                                ARRAY_SIZE(isr_tables->isr_table_vc[i]),
 689                                vcstatus[i]);
 690        }
 691
 692        if (ciostatus != 0)
 693                dsi_call_isrs(isr_tables->isr_table_cio,
 694                                ARRAY_SIZE(isr_tables->isr_table_cio),
 695                                ciostatus);
 696}
 697
/*
 * Top-half DSI interrupt handler. Reads and acks the global, per-VC and
 * ComplexIO status registers (each ack followed by a read-back to flush
 * the posted write), then dispatches to registered ISRs, error logging
 * and statistics. The irq_lock is held only while snapshotting the ISR
 * tables, so callbacks may safely (un)register ISRs.
 */
static irqreturn_t omap_dsi_irq_handler(int irq, void *arg)
{
	struct platform_device *dsidev;
	struct dsi_data *dsi;
	u32 irqstatus, vcstatus[4], ciostatus;
	int i;

	dsidev = (struct platform_device *) arg;
	dsi = dsi_get_dsidrv_data(dsidev);

	spin_lock(&dsi->irq_lock);

	irqstatus = dsi_read_reg(dsidev, DSI_IRQSTATUS);

	/* IRQ is not for us */
	if (!irqstatus) {
		spin_unlock(&dsi->irq_lock);
		return IRQ_NONE;
	}

	/* ack everything except the VC summary bits; those are handled via
	 * the per-VC status registers below */
	dsi_write_reg(dsidev, DSI_IRQSTATUS, irqstatus & ~DSI_IRQ_CHANNEL_MASK);
	/* flush posted write */
	dsi_read_reg(dsidev, DSI_IRQSTATUS);

	for (i = 0; i < 4; ++i) {
		if ((irqstatus & (1 << i)) == 0) {
			vcstatus[i] = 0;
			continue;
		}

		vcstatus[i] = dsi_read_reg(dsidev, DSI_VC_IRQSTATUS(i));

		dsi_write_reg(dsidev, DSI_VC_IRQSTATUS(i), vcstatus[i]);
		/* flush posted write */
		dsi_read_reg(dsidev, DSI_VC_IRQSTATUS(i));
	}

	if (irqstatus & DSI_IRQ_COMPLEXIO_ERR) {
		ciostatus = dsi_read_reg(dsidev, DSI_COMPLEXIO_IRQ_STATUS);

		dsi_write_reg(dsidev, DSI_COMPLEXIO_IRQ_STATUS, ciostatus);
		/* flush posted write */
		dsi_read_reg(dsidev, DSI_COMPLEXIO_IRQ_STATUS);
	} else {
		ciostatus = 0;
	}

#ifdef DSI_CATCH_MISSING_TE
	/* a TE trigger arrived, so stop the missing-TE watchdog timer */
	if (irqstatus & DSI_IRQ_TE_TRIGGER)
		del_timer(&dsi->te_timer);
#endif

	/* make a copy and unlock, so that isrs can unregister
	 * themselves */
	memcpy(&dsi->isr_tables_copy, &dsi->isr_tables,
		sizeof(dsi->isr_tables));

	spin_unlock(&dsi->irq_lock);

	dsi_handle_isrs(&dsi->isr_tables_copy, irqstatus, vcstatus, ciostatus);

	dsi_handle_irq_errors(dsidev, irqstatus, vcstatus, ciostatus);

	dsi_collect_irq_stats(dsidev, irqstatus, vcstatus, ciostatus);

	return IRQ_HANDLED;
}
 765
/* dsi->irq_lock has to be locked by the caller */
/*
 * Program @enable_reg from an ISR table: the resulting enable mask is
 * @default_mask plus the mask of every registered ISR. Status bits for
 * newly enabled interrupts are cleared first so stale events do not
 * fire immediately; reads at the end flush the posted writes.
 */
static void _omap_dsi_configure_irqs(struct platform_device *dsidev,
		struct dsi_isr_data *isr_array,
		unsigned isr_array_size, u32 default_mask,
		const struct dsi_reg enable_reg,
		const struct dsi_reg status_reg)
{
	struct dsi_isr_data *isr_data;
	u32 mask;
	u32 old_mask;
	int i;

	mask = default_mask;

	for (i = 0; i < isr_array_size; i++) {
		isr_data = &isr_array[i];

		if (isr_data->isr == NULL)
			continue;

		mask |= isr_data->mask;
	}

	old_mask = dsi_read_reg(dsidev, enable_reg);
	/* clear the irqstatus for newly enabled irqs */
	dsi_write_reg(dsidev, status_reg, (mask ^ old_mask) & mask);
	dsi_write_reg(dsidev, enable_reg, mask);

	/* flush posted writes */
	dsi_read_reg(dsidev, enable_reg);
	dsi_read_reg(dsidev, status_reg);
}
 798
/* dsi->irq_lock has to be locked by the caller */
/* Reprogram the global DSI interrupt enables from the global ISR table. */
static void _omap_dsi_set_irqs(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	u32 mask = DSI_IRQ_ERROR_MASK;
#ifdef DSI_CATCH_MISSING_TE
	mask |= DSI_IRQ_TE_TRIGGER;
#endif
	_omap_dsi_configure_irqs(dsidev, dsi->isr_tables.isr_table,
			ARRAY_SIZE(dsi->isr_tables.isr_table), mask,
			DSI_IRQENABLE, DSI_IRQSTATUS);
}
 811
/* dsi->irq_lock has to be locked by the caller */
/* Reprogram virtual channel @vc's interrupt enables from its ISR table. */
static void _omap_dsi_set_irqs_vc(struct platform_device *dsidev, int vc)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	_omap_dsi_configure_irqs(dsidev, dsi->isr_tables.isr_table_vc[vc],
			ARRAY_SIZE(dsi->isr_tables.isr_table_vc[vc]),
			DSI_VC_IRQ_ERROR_MASK,
			DSI_VC_IRQENABLE(vc), DSI_VC_IRQSTATUS(vc));
}
 822
/* dsi->irq_lock has to be locked by the caller */
/* Reprogram the ComplexIO interrupt enables from the CIO ISR table. */
static void _omap_dsi_set_irqs_cio(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	_omap_dsi_configure_irqs(dsidev, dsi->isr_tables.isr_table_cio,
			ARRAY_SIZE(dsi->isr_tables.isr_table_cio),
			DSI_CIO_IRQ_ERROR_MASK,
			DSI_COMPLEXIO_IRQ_ENABLE, DSI_COMPLEXIO_IRQ_STATUS);
}
 833
/*
 * Clear all ISR tables and program the default interrupt masks for the
 * global, per-VC and ComplexIO enable registers.
 */
static void _dsi_initialize_irq(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	unsigned long flags;
	int vc;

	spin_lock_irqsave(&dsi->irq_lock, flags);

	memset(&dsi->isr_tables, 0, sizeof(dsi->isr_tables));

	_omap_dsi_set_irqs(dsidev);
	for (vc = 0; vc < 4; ++vc)
		_omap_dsi_set_irqs_vc(dsidev, vc);
	_omap_dsi_set_irqs_cio(dsidev);

	spin_unlock_irqrestore(&dsi->irq_lock, flags);
}
 851
 852static int _dsi_register_isr(omap_dsi_isr_t isr, void *arg, u32 mask,
 853                struct dsi_isr_data *isr_array, unsigned isr_array_size)
 854{
 855        struct dsi_isr_data *isr_data;
 856        int free_idx;
 857        int i;
 858
 859        BUG_ON(isr == NULL);
 860
 861        /* check for duplicate entry and find a free slot */
 862        free_idx = -1;
 863        for (i = 0; i < isr_array_size; i++) {
 864                isr_data = &isr_array[i];
 865
 866                if (isr_data->isr == isr && isr_data->arg == arg &&
 867                                isr_data->mask == mask) {
 868                        return -EINVAL;
 869                }
 870
 871                if (isr_data->isr == NULL && free_idx == -1)
 872                        free_idx = i;
 873        }
 874
 875        if (free_idx == -1)
 876                return -EBUSY;
 877
 878        isr_data = &isr_array[free_idx];
 879        isr_data->isr = isr;
 880        isr_data->arg = arg;
 881        isr_data->mask = mask;
 882
 883        return 0;
 884}
 885
 886static int _dsi_unregister_isr(omap_dsi_isr_t isr, void *arg, u32 mask,
 887                struct dsi_isr_data *isr_array, unsigned isr_array_size)
 888{
 889        struct dsi_isr_data *isr_data;
 890        int i;
 891
 892        for (i = 0; i < isr_array_size; i++) {
 893                isr_data = &isr_array[i];
 894                if (isr_data->isr != isr || isr_data->arg != arg ||
 895                                isr_data->mask != mask)
 896                        continue;
 897
 898                isr_data->isr = NULL;
 899                isr_data->arg = NULL;
 900                isr_data->mask = 0;
 901
 902                return 0;
 903        }
 904
 905        return -EINVAL;
 906}
 907
 908static int dsi_register_isr(struct platform_device *dsidev, omap_dsi_isr_t isr,
 909                void *arg, u32 mask)
 910{
 911        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
 912        unsigned long flags;
 913        int r;
 914
 915        spin_lock_irqsave(&dsi->irq_lock, flags);
 916
 917        r = _dsi_register_isr(isr, arg, mask, dsi->isr_tables.isr_table,
 918                        ARRAY_SIZE(dsi->isr_tables.isr_table));
 919
 920        if (r == 0)
 921                _omap_dsi_set_irqs(dsidev);
 922
 923        spin_unlock_irqrestore(&dsi->irq_lock, flags);
 924
 925        return r;
 926}
 927
 928static int dsi_unregister_isr(struct platform_device *dsidev,
 929                omap_dsi_isr_t isr, void *arg, u32 mask)
 930{
 931        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
 932        unsigned long flags;
 933        int r;
 934
 935        spin_lock_irqsave(&dsi->irq_lock, flags);
 936
 937        r = _dsi_unregister_isr(isr, arg, mask, dsi->isr_tables.isr_table,
 938                        ARRAY_SIZE(dsi->isr_tables.isr_table));
 939
 940        if (r == 0)
 941                _omap_dsi_set_irqs(dsidev);
 942
 943        spin_unlock_irqrestore(&dsi->irq_lock, flags);
 944
 945        return r;
 946}
 947
 948static int dsi_register_isr_vc(struct platform_device *dsidev, int channel,
 949                omap_dsi_isr_t isr, void *arg, u32 mask)
 950{
 951        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
 952        unsigned long flags;
 953        int r;
 954
 955        spin_lock_irqsave(&dsi->irq_lock, flags);
 956
 957        r = _dsi_register_isr(isr, arg, mask,
 958                        dsi->isr_tables.isr_table_vc[channel],
 959                        ARRAY_SIZE(dsi->isr_tables.isr_table_vc[channel]));
 960
 961        if (r == 0)
 962                _omap_dsi_set_irqs_vc(dsidev, channel);
 963
 964        spin_unlock_irqrestore(&dsi->irq_lock, flags);
 965
 966        return r;
 967}
 968
 969static int dsi_unregister_isr_vc(struct platform_device *dsidev, int channel,
 970                omap_dsi_isr_t isr, void *arg, u32 mask)
 971{
 972        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
 973        unsigned long flags;
 974        int r;
 975
 976        spin_lock_irqsave(&dsi->irq_lock, flags);
 977
 978        r = _dsi_unregister_isr(isr, arg, mask,
 979                        dsi->isr_tables.isr_table_vc[channel],
 980                        ARRAY_SIZE(dsi->isr_tables.isr_table_vc[channel]));
 981
 982        if (r == 0)
 983                _omap_dsi_set_irqs_vc(dsidev, channel);
 984
 985        spin_unlock_irqrestore(&dsi->irq_lock, flags);
 986
 987        return r;
 988}
 989
 990static int dsi_register_isr_cio(struct platform_device *dsidev,
 991                omap_dsi_isr_t isr, void *arg, u32 mask)
 992{
 993        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
 994        unsigned long flags;
 995        int r;
 996
 997        spin_lock_irqsave(&dsi->irq_lock, flags);
 998
 999        r = _dsi_register_isr(isr, arg, mask, dsi->isr_tables.isr_table_cio,
1000                        ARRAY_SIZE(dsi->isr_tables.isr_table_cio));
1001
1002        if (r == 0)
1003                _omap_dsi_set_irqs_cio(dsidev);
1004
1005        spin_unlock_irqrestore(&dsi->irq_lock, flags);
1006
1007        return r;
1008}
1009
1010static int dsi_unregister_isr_cio(struct platform_device *dsidev,
1011                omap_dsi_isr_t isr, void *arg, u32 mask)
1012{
1013        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1014        unsigned long flags;
1015        int r;
1016
1017        spin_lock_irqsave(&dsi->irq_lock, flags);
1018
1019        r = _dsi_unregister_isr(isr, arg, mask, dsi->isr_tables.isr_table_cio,
1020                        ARRAY_SIZE(dsi->isr_tables.isr_table_cio));
1021
1022        if (r == 0)
1023                _omap_dsi_set_irqs_cio(dsidev);
1024
1025        spin_unlock_irqrestore(&dsi->irq_lock, flags);
1026
1027        return r;
1028}
1029
1030static u32 dsi_get_errors(struct platform_device *dsidev)
1031{
1032        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1033        unsigned long flags;
1034        u32 e;
1035        spin_lock_irqsave(&dsi->errors_lock, flags);
1036        e = dsi->errors;
1037        dsi->errors = 0;
1038        spin_unlock_irqrestore(&dsi->errors_lock, flags);
1039        return e;
1040}
1041
1042/* DSI func clock. this could also be dsi_pll_hsdiv_dsi_clk */
1043static inline void enable_clocks(bool enable)
1044{
1045        if (enable)
1046                dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
1047        else
1048                dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
1049}
1050
1051/* source clock for DSI PLL. this could also be PCLKFREE */
1052static inline void dsi_enable_pll_clock(struct platform_device *dsidev,
1053                bool enable)
1054{
1055        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1056
1057        if (enable)
1058                dss_clk_enable(DSS_CLK_SYSCK);
1059        else
1060                dss_clk_disable(DSS_CLK_SYSCK);
1061
1062        if (enable && dsi->pll_locked) {
1063                if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 1, 1) != 1)
1064                        DSSERR("cannot lock PLL when enabling clocks\n");
1065        }
1066}
1067
1068#ifdef DEBUG
/* Dump the reset status bits of the DSI PLL, CIO and PHY via printk.
 * Only compiled in DEBUG builds and only active when dss_debug is set. */
static void _dsi_print_reset_status(struct platform_device *dsidev)
{
	u32 l;
	int b0, b1, b2;

	if (!dss_debug)
		return;

	/* A dummy read using the SCP interface to any DSIPHY register is
	 * required after DSIPHY reset to complete the reset of the DSI complex
	 * I/O. */
	l = dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);

	printk(KERN_DEBUG "DSI resets: ");

	/* PLL reset-done flag: DSI_PLL_STATUS bit 0 */
	l = dsi_read_reg(dsidev, DSI_PLL_STATUS);
	printk("PLL (%d) ", FLD_GET(l, 0, 0));

	/* CIO reset-done flag: DSI_COMPLEXIO_CFG1 bit 29 */
	l = dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG1);
	printk("CIO (%d) ", FLD_GET(l, 29, 29));

	/* The bit positions read from DSIPHY_CFG5 below depend on whether
	 * this SoC reverses the TXCLKESC bit order. */
	if (dss_has_feature(FEAT_DSI_REVERSE_TXCLKESC)) {
		b0 = 28;
		b1 = 27;
		b2 = 26;
	} else {
		b0 = 24;
		b1 = 25;
		b2 = 26;
	}

	l = dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);
	printk("PHY (%x%x%x, %d, %d, %d)\n",
			FLD_GET(l, b0, b0),
			FLD_GET(l, b1, b1),
			FLD_GET(l, b2, b2),
			FLD_GET(l, 29, 29),
			FLD_GET(l, 30, 30),
			FLD_GET(l, 31, 31));
}
1109#else
1110#define _dsi_print_reset_status(x)
1111#endif
1112
1113static inline int dsi_if_enable(struct platform_device *dsidev, bool enable)
1114{
1115        DSSDBG("dsi_if_enable(%d)\n", enable);
1116
1117        enable = enable ? 1 : 0;
1118        REG_FLD_MOD(dsidev, DSI_CTRL, enable, 0, 0); /* IF_EN */
1119
1120        if (wait_for_bit_change(dsidev, DSI_CTRL, 0, enable) != enable) {
1121                        DSSERR("Failed to set dsi_if_enable to %d\n", enable);
1122                        return -EIO;
1123        }
1124
1125        return 0;
1126}
1127
1128unsigned long dsi_get_pll_hsdiv_dispc_rate(struct platform_device *dsidev)
1129{
1130        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1131
1132        return dsi->current_cinfo.dsi_pll_hsdiv_dispc_clk;
1133}
1134
1135static unsigned long dsi_get_pll_hsdiv_dsi_rate(struct platform_device *dsidev)
1136{
1137        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1138
1139        return dsi->current_cinfo.dsi_pll_hsdiv_dsi_clk;
1140}
1141
1142static unsigned long dsi_get_txbyteclkhs(struct platform_device *dsidev)
1143{
1144        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1145
1146        return dsi->current_cinfo.clkin4ddr / 16;
1147}
1148
1149static unsigned long dsi_fclk_rate(struct platform_device *dsidev)
1150{
1151        unsigned long r;
1152        int dsi_module = dsi_get_dsidev_id(dsidev);
1153
1154        if (dss_get_dsi_clk_source(dsi_module) == OMAP_DSS_CLK_SRC_FCK) {
1155                /* DSI FCLK source is DSS_CLK_FCK */
1156                r = dss_clk_get_rate(DSS_CLK_FCK);
1157        } else {
1158                /* DSI FCLK source is dsi_pll_hsdiv_dsi_clk */
1159                r = dsi_get_pll_hsdiv_dsi_rate(dsidev);
1160        }
1161
1162        return r;
1163}
1164
1165static int dsi_set_lp_clk_divisor(struct omap_dss_device *dssdev)
1166{
1167        struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
1168        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1169        unsigned long dsi_fclk;
1170        unsigned lp_clk_div;
1171        unsigned long lp_clk;
1172
1173        lp_clk_div = dssdev->clocks.dsi.lp_clk_div;
1174
1175        if (lp_clk_div == 0 || lp_clk_div > dsi->lpdiv_max)
1176                return -EINVAL;
1177
1178        dsi_fclk = dsi_fclk_rate(dsidev);
1179
1180        lp_clk = dsi_fclk / 2 / lp_clk_div;
1181
1182        DSSDBG("LP_CLK_DIV %u, LP_CLK %lu\n", lp_clk_div, lp_clk);
1183        dsi->current_cinfo.lp_clk = lp_clk;
1184        dsi->current_cinfo.lp_clk_div = lp_clk_div;
1185
1186        /* LP_CLK_DIVISOR */
1187        REG_FLD_MOD(dsidev, DSI_CLK_CTRL, lp_clk_div, 12, 0);
1188
1189        /* LP_RX_SYNCHRO_ENABLE */
1190        REG_FLD_MOD(dsidev, DSI_CLK_CTRL, dsi_fclk > 30000000 ? 1 : 0, 21, 21);
1191
1192        return 0;
1193}
1194
1195static void dsi_enable_scp_clk(struct platform_device *dsidev)
1196{
1197        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1198
1199        if (dsi->scp_clk_refcount++ == 0)
1200                REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 1, 14, 14); /* CIO_CLK_ICG */
1201}
1202
1203static void dsi_disable_scp_clk(struct platform_device *dsidev)
1204{
1205        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1206
1207        WARN_ON(dsi->scp_clk_refcount == 0);
1208        if (--dsi->scp_clk_refcount == 0)
1209                REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 0, 14, 14); /* CIO_CLK_ICG */
1210}
1211
/* DSI PLL power states, written to the PLL_PWR_CMD field of
 * DSI_CLK_CTRL (see dsi_pll_power()). */
enum dsi_pll_power_state {
	DSI_PLL_POWER_OFF	= 0x0,
	DSI_PLL_POWER_ON_HSCLK	= 0x1,	/* HS clock output only */
	DSI_PLL_POWER_ON_ALL	= 0x2,	/* HS clock and HSDIV outputs */
	DSI_PLL_POWER_ON_DIV	= 0x3,	/* HSDIV outputs only */
};
1218
1219static int dsi_pll_power(struct platform_device *dsidev,
1220                enum dsi_pll_power_state state)
1221{
1222        int t = 0;
1223
1224        /* DSI-PLL power command 0x3 is not working */
1225        if (dss_has_feature(FEAT_DSI_PLL_PWR_BUG) &&
1226                        state == DSI_PLL_POWER_ON_DIV)
1227                state = DSI_PLL_POWER_ON_ALL;
1228
1229        /* PLL_PWR_CMD */
1230        REG_FLD_MOD(dsidev, DSI_CLK_CTRL, state, 31, 30);
1231
1232        /* PLL_PWR_STATUS */
1233        while (FLD_GET(dsi_read_reg(dsidev, DSI_CLK_CTRL), 29, 28) != state) {
1234                if (++t > 1000) {
1235                        DSSERR("Failed to set DSI PLL power mode to %d\n",
1236                                        state);
1237                        return -ENODEV;
1238                }
1239                udelay(1);
1240        }
1241
1242        return 0;
1243}
1244
/*
 * Calculate the DSI PLL clock rates from the dividers in cinfo.
 *
 * Validates regn/regm/regm_dispc/regm_dsi against the per-SoC limits
 * stored in dsi_data, then fills in clkin, highfreq, fint, clkin4ddr
 * and the two HSDIV output rates in *cinfo.
 * Returns 0 on success, -EINVAL if a divider or a resulting frequency
 * is out of range.
 */
static int dsi_calc_clock_rates(struct omap_dss_device *dssdev,
		struct dsi_clock_info *cinfo)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	/* divider range checks against the SoC-specific maximums */
	if (cinfo->regn == 0 || cinfo->regn > dsi->regn_max)
		return -EINVAL;

	if (cinfo->regm == 0 || cinfo->regm > dsi->regm_max)
		return -EINVAL;

	if (cinfo->regm_dispc > dsi->regm_dispc_max)
		return -EINVAL;

	if (cinfo->regm_dsi > dsi->regm_dsi_max)
		return -EINVAL;

	if (cinfo->use_sys_clk) {
		cinfo->clkin = dss_clk_get_rate(DSS_CLK_SYSCK);
		/* XXX it is unclear if highfreq should be used
		 * with DSS_SYS_CLK source also */
		cinfo->highfreq = 0;
	} else {
		cinfo->clkin = dispc_pclk_rate(dssdev->manager->id);

		/* highfreq halves the reference for clkin >= 32 MHz */
		if (cinfo->clkin < 32000000)
			cinfo->highfreq = 0;
		else
			cinfo->highfreq = 1;
	}

	/* Fint = clkin / regn, or clkin / (2 * regn) when highfreq is set */
	cinfo->fint = cinfo->clkin / (cinfo->regn * (cinfo->highfreq ? 2 : 1));

	if (cinfo->fint > dsi->fint_max || cinfo->fint < dsi->fint_min)
		return -EINVAL;

	cinfo->clkin4ddr = 2 * cinfo->regm * cinfo->fint;

	/* CLKIN4DDR must stay below 1.8 GHz */
	if (cinfo->clkin4ddr > 1800 * 1000 * 1000)
		return -EINVAL;

	/* HSDIV outputs: a zero divider means that output is unused */
	if (cinfo->regm_dispc > 0)
		cinfo->dsi_pll_hsdiv_dispc_clk =
			cinfo->clkin4ddr / cinfo->regm_dispc;
	else
		cinfo->dsi_pll_hsdiv_dispc_clk = 0;

	if (cinfo->regm_dsi > 0)
		cinfo->dsi_pll_hsdiv_dsi_clk =
			cinfo->clkin4ddr / cinfo->regm_dsi;
	else
		cinfo->dsi_pll_hsdiv_dsi_clk = 0;

	return 0;
}
1302
1303int dsi_pll_calc_clock_div_pck(struct platform_device *dsidev, bool is_tft,
1304                unsigned long req_pck, struct dsi_clock_info *dsi_cinfo,
1305                struct dispc_clock_info *dispc_cinfo)
1306{
1307        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1308        struct dsi_clock_info cur, best;
1309        struct dispc_clock_info best_dispc;
1310        int min_fck_per_pck;
1311        int match = 0;
1312        unsigned long dss_sys_clk, max_dss_fck;
1313
1314        dss_sys_clk = dss_clk_get_rate(DSS_CLK_SYSCK);
1315
1316        max_dss_fck = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK);
1317
1318        if (req_pck == dsi->cache_req_pck &&
1319                        dsi->cache_cinfo.clkin == dss_sys_clk) {
1320                DSSDBG("DSI clock info found from cache\n");
1321                *dsi_cinfo = dsi->cache_cinfo;
1322                dispc_find_clk_divs(is_tft, req_pck,
1323                        dsi_cinfo->dsi_pll_hsdiv_dispc_clk, dispc_cinfo);
1324                return 0;
1325        }
1326
1327        min_fck_per_pck = CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK;
1328
1329        if (min_fck_per_pck &&
1330                req_pck * min_fck_per_pck > max_dss_fck) {
1331                DSSERR("Requested pixel clock not possible with the current "
1332                                "OMAP2_DSS_MIN_FCK_PER_PCK setting. Turning "
1333                                "the constraint off.\n");
1334                min_fck_per_pck = 0;
1335        }
1336
1337        DSSDBG("dsi_pll_calc\n");
1338
1339retry:
1340        memset(&best, 0, sizeof(best));
1341        memset(&best_dispc, 0, sizeof(best_dispc));
1342
1343        memset(&cur, 0, sizeof(cur));
1344        cur.clkin = dss_sys_clk;
1345        cur.use_sys_clk = 1;
1346        cur.highfreq = 0;
1347
1348        /* no highfreq: 0.75MHz < Fint = clkin / regn < 2.1MHz */
1349        /* highfreq: 0.75MHz < Fint = clkin / (2*regn) < 2.1MHz */
1350        /* To reduce PLL lock time, keep Fint high (around 2 MHz) */
1351        for (cur.regn = 1; cur.regn < dsi->regn_max; ++cur.regn) {
1352                if (cur.highfreq == 0)
1353                        cur.fint = cur.clkin / cur.regn;
1354                else
1355                        cur.fint = cur.clkin / (2 * cur.regn);
1356
1357                if (cur.fint > dsi->fint_max || cur.fint < dsi->fint_min)
1358                        continue;
1359
1360                /* DSIPHY(MHz) = (2 * regm / regn) * (clkin / (highfreq + 1)) */
1361                for (cur.regm = 1; cur.regm < dsi->regm_max; ++cur.regm) {
1362                        unsigned long a, b;
1363
1364                        a = 2 * cur.regm * (cur.clkin/1000);
1365                        b = cur.regn * (cur.highfreq + 1);
1366                        cur.clkin4ddr = a / b * 1000;
1367
1368                        if (cur.clkin4ddr > 1800 * 1000 * 1000)
1369                                break;
1370
1371                        /* dsi_pll_hsdiv_dispc_clk(MHz) =
1372                         * DSIPHY(MHz) / regm_dispc  < 173MHz/186Mhz */
1373                        for (cur.regm_dispc = 1; cur.regm_dispc <
1374                                        dsi->regm_dispc_max; ++cur.regm_dispc) {
1375                                struct dispc_clock_info cur_dispc;
1376                                cur.dsi_pll_hsdiv_dispc_clk =
1377                                        cur.clkin4ddr / cur.regm_dispc;
1378
1379                                /* this will narrow down the search a bit,
1380                                 * but still give pixclocks below what was
1381                                 * requested */
1382                                if (cur.dsi_pll_hsdiv_dispc_clk  < req_pck)
1383                                        break;
1384
1385                                if (cur.dsi_pll_hsdiv_dispc_clk > max_dss_fck)
1386                                        continue;
1387
1388                                if (min_fck_per_pck &&
1389                                        cur.dsi_pll_hsdiv_dispc_clk <
1390                                                req_pck * min_fck_per_pck)
1391                                        continue;
1392
1393                                match = 1;
1394
1395                                dispc_find_clk_divs(is_tft, req_pck,
1396                                                cur.dsi_pll_hsdiv_dispc_clk,
1397                                                &cur_dispc);
1398
1399                                if (abs(cur_dispc.pck - req_pck) <
1400                                                abs(best_dispc.pck - req_pck)) {
1401                                        best = cur;
1402                                        best_dispc = cur_dispc;
1403
1404                                        if (cur_dispc.pck == req_pck)
1405                                                goto found;
1406                                }
1407                        }
1408                }
1409        }
1410found:
1411        if (!match) {
1412                if (min_fck_per_pck) {
1413                        DSSERR("Could not find suitable clock settings.\n"
1414                                        "Turning FCK/PCK constraint off and"
1415                                        "trying again.\n");
1416                        min_fck_per_pck = 0;
1417                        goto retry;
1418                }
1419
1420                DSSERR("Could not find suitable clock settings.\n");
1421
1422                return -EINVAL;
1423        }
1424
1425        /* dsi_pll_hsdiv_dsi_clk (regm_dsi) is not used */
1426        best.regm_dsi = 0;
1427        best.dsi_pll_hsdiv_dsi_clk = 0;
1428
1429        if (dsi_cinfo)
1430                *dsi_cinfo = best;
1431        if (dispc_cinfo)
1432                *dispc_cinfo = best_dispc;
1433
1434        dsi->cache_req_pck = req_pck;
1435        dsi->cache_clk_freq = 0;
1436        dsi->cache_cinfo = best;
1437
1438        return 0;
1439}
1440
/*
 * Program the DSI PLL with the dividers in cinfo and wait for lock.
 *
 * Records the settings in dsi->current_cinfo, writes the dividers to
 * DSI_PLL_CONFIGURATION1, configures DSI_PLL_CONFIGURATION2, kicks
 * DSI_PLL_GO and waits for the GO bit to clear and the lock bit to set.
 * Returns 0 on success, -EIO if the GO bit never clears or the PLL
 * fails to lock.
 */
int dsi_pll_set_clock_div(struct platform_device *dsidev,
		struct dsi_clock_info *cinfo)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	int r = 0;
	u32 l;
	int f = 0;
	u8 regn_start, regn_end, regm_start, regm_end;
	u8 regm_dispc_start, regm_dispc_end, regm_dsi_start, regm_dsi_end;

	DSSDBGF();

	/* remember the active configuration for the rate-query helpers */
	dsi->current_cinfo.use_sys_clk = cinfo->use_sys_clk;
	dsi->current_cinfo.highfreq = cinfo->highfreq;

	dsi->current_cinfo.fint = cinfo->fint;
	dsi->current_cinfo.clkin4ddr = cinfo->clkin4ddr;
	dsi->current_cinfo.dsi_pll_hsdiv_dispc_clk =
			cinfo->dsi_pll_hsdiv_dispc_clk;
	dsi->current_cinfo.dsi_pll_hsdiv_dsi_clk =
			cinfo->dsi_pll_hsdiv_dsi_clk;

	dsi->current_cinfo.regn = cinfo->regn;
	dsi->current_cinfo.regm = cinfo->regm;
	dsi->current_cinfo.regm_dispc = cinfo->regm_dispc;
	dsi->current_cinfo.regm_dsi = cinfo->regm_dsi;

	DSSDBG("DSI Fint %ld\n", cinfo->fint);

	DSSDBG("clkin (%s) rate %ld, highfreq %d\n",
			cinfo->use_sys_clk ? "dss_sys_clk" : "pclkfree",
			cinfo->clkin,
			cinfo->highfreq);

	/* DSIPHY == CLKIN4DDR */
	DSSDBG("CLKIN4DDR = 2 * %d / %d * %lu / %d = %lu\n",
			cinfo->regm,
			cinfo->regn,
			cinfo->clkin,
			cinfo->highfreq + 1,
			cinfo->clkin4ddr);

	DSSDBG("Data rate on 1 DSI lane %ld Mbps\n",
			cinfo->clkin4ddr / 1000 / 1000 / 2);

	DSSDBG("Clock lane freq %ld Hz\n", cinfo->clkin4ddr / 4);

	DSSDBG("regm_dispc = %d, %s (%s) = %lu\n", cinfo->regm_dispc,
		dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC),
		dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC),
		cinfo->dsi_pll_hsdiv_dispc_clk);
	DSSDBG("regm_dsi = %d, %s (%s) = %lu\n", cinfo->regm_dsi,
		dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI),
		dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI),
		cinfo->dsi_pll_hsdiv_dsi_clk);

	/* register field positions of the dividers differ per SoC */
	dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGN, &regn_start, &regn_end);
	dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGM, &regm_start, &regm_end);
	dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGM_DISPC, &regm_dispc_start,
			&regm_dispc_end);
	dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGM_DSI, &regm_dsi_start,
			&regm_dsi_end);

	/* DSI_PLL_AUTOMODE = manual */
	REG_FLD_MOD(dsidev, DSI_PLL_CONTROL, 0, 0, 0);

	l = dsi_read_reg(dsidev, DSI_PLL_CONFIGURATION1);
	l = FLD_MOD(l, 1, 0, 0);		/* DSI_PLL_STOPMODE */
	/* DSI_PLL_REGN */
	l = FLD_MOD(l, cinfo->regn - 1, regn_start, regn_end);
	/* DSI_PLL_REGM */
	l = FLD_MOD(l, cinfo->regm, regm_start, regm_end);
	/* DSI_CLOCK_DIV */
	l = FLD_MOD(l, cinfo->regm_dispc > 0 ? cinfo->regm_dispc - 1 : 0,
			regm_dispc_start, regm_dispc_end);
	/* DSIPROTO_CLOCK_DIV */
	l = FLD_MOD(l, cinfo->regm_dsi > 0 ? cinfo->regm_dsi - 1 : 0,
			regm_dsi_start, regm_dsi_end);
	dsi_write_reg(dsidev, DSI_PLL_CONFIGURATION1, l);

	BUG_ON(cinfo->fint < dsi->fint_min || cinfo->fint > dsi->fint_max);

	/* pick the FREQSEL code for the Fint band on SoCs that need it */
	if (dss_has_feature(FEAT_DSI_PLL_FREQSEL)) {
		f = cinfo->fint < 1000000 ? 0x3 :
			cinfo->fint < 1250000 ? 0x4 :
			cinfo->fint < 1500000 ? 0x5 :
			cinfo->fint < 1750000 ? 0x6 :
			0x7;
	}

	l = dsi_read_reg(dsidev, DSI_PLL_CONFIGURATION2);

	if (dss_has_feature(FEAT_DSI_PLL_FREQSEL))
		l = FLD_MOD(l, f, 4, 1);	/* DSI_PLL_FREQSEL */
	l = FLD_MOD(l, cinfo->use_sys_clk ? 0 : 1,
			11, 11);		/* DSI_PLL_CLKSEL */
	l = FLD_MOD(l, cinfo->highfreq,
			12, 12);		/* DSI_PLL_HIGHFREQ */
	l = FLD_MOD(l, 1, 13, 13);		/* DSI_PLL_REFEN */
	l = FLD_MOD(l, 0, 14, 14);		/* DSIPHY_CLKINEN */
	l = FLD_MOD(l, 1, 20, 20);		/* DSI_HSDIVBYPASS */
	dsi_write_reg(dsidev, DSI_PLL_CONFIGURATION2, l);

	REG_FLD_MOD(dsidev, DSI_PLL_GO, 1, 0, 0);	/* DSI_PLL_GO */

	if (wait_for_bit_change(dsidev, DSI_PLL_GO, 0, 0) != 0) {
		DSSERR("dsi pll go bit not going down.\n");
		r = -EIO;
		goto err;
	}

	if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 1, 1) != 1) {
		DSSERR("cannot lock PLL\n");
		r = -EIO;
		goto err;
	}

	dsi->pll_locked = 1;

	/* final configuration now that the PLL is locked: enable the
	 * clock outputs and drop the HSDIV bypass */
	l = dsi_read_reg(dsidev, DSI_PLL_CONFIGURATION2);
	l = FLD_MOD(l, 0, 0, 0);	/* DSI_PLL_IDLE */
	l = FLD_MOD(l, 0, 5, 5);	/* DSI_PLL_PLLLPMODE */
	l = FLD_MOD(l, 0, 6, 6);	/* DSI_PLL_LOWCURRSTBY */
	l = FLD_MOD(l, 0, 7, 7);	/* DSI_PLL_TIGHTPHASELOCK */
	l = FLD_MOD(l, 0, 8, 8);	/* DSI_PLL_DRIFTGUARDEN */
	l = FLD_MOD(l, 0, 10, 9);	/* DSI_PLL_LOCKSEL */
	l = FLD_MOD(l, 1, 13, 13);	/* DSI_PLL_REFEN */
	l = FLD_MOD(l, 1, 14, 14);	/* DSIPHY_CLKINEN */
	l = FLD_MOD(l, 0, 15, 15);	/* DSI_BYPASSEN */
	l = FLD_MOD(l, 1, 16, 16);	/* DSS_CLOCK_EN */
	l = FLD_MOD(l, 0, 17, 17);	/* DSS_CLOCK_PWDN */
	l = FLD_MOD(l, 1, 18, 18);	/* DSI_PROTO_CLOCK_EN */
	l = FLD_MOD(l, 0, 19, 19);	/* DSI_PROTO_CLOCK_PWDN */
	l = FLD_MOD(l, 0, 20, 20);	/* DSI_HSDIVBYPASS */
	dsi_write_reg(dsidev, DSI_PLL_CONFIGURATION2, l);

	DSSDBG("PLL config done\n");
err:
	return r;
}
1581
/*
 * Power up the DSI PLL.
 *
 * Acquires the VDDS_DSI regulator on first use, enables the DSS and
 * PLL clocks plus the SCP clock, waits for the PLL to come out of
 * reset, and applies the power state selected by enable_hsclk and
 * enable_hsdiv. Returns 0 on success or a negative error code; on
 * failure every clock and the regulator taken here are released again.
 */
int dsi_pll_init(struct platform_device *dsidev, bool enable_hsclk,
		bool enable_hsdiv)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	int r = 0;
	enum dsi_pll_power_state pwstate;

	DSSDBG("PLL init\n");

	/* lazily acquire the VDDS_DSI regulator on first init */
	if (dsi->vdds_dsi_reg == NULL) {
		struct regulator *vdds_dsi;

		vdds_dsi = regulator_get(&dsi->pdev->dev, "vdds_dsi");

		if (IS_ERR(vdds_dsi)) {
			DSSERR("can't get VDDS_DSI regulator\n");
			return PTR_ERR(vdds_dsi);
		}

		dsi->vdds_dsi_reg = vdds_dsi;
	}

	enable_clocks(1);
	dsi_enable_pll_clock(dsidev, 1);
	/*
	 * Note: SCP CLK is not required on OMAP3, but it is required on OMAP4.
	 */
	dsi_enable_scp_clk(dsidev);

	if (!dsi->vdds_dsi_enabled) {
		r = regulator_enable(dsi->vdds_dsi_reg);
		if (r)
			goto err0;
		dsi->vdds_dsi_enabled = true;
	}

	/* XXX PLL does not come out of reset without this... */
	dispc_pck_free_enable(1);

	if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 0, 1) != 1) {
		DSSERR("PLL not coming out of reset.\n");
		r = -ENODEV;
		dispc_pck_free_enable(0);
		goto err1;
	}

	/* XXX ... but if left on, we get problems when planes do not
	 * fill the whole display. No idea about this */
	dispc_pck_free_enable(0);

	/* map the requested outputs to a PLL power state */
	if (enable_hsclk && enable_hsdiv)
		pwstate = DSI_PLL_POWER_ON_ALL;
	else if (enable_hsclk)
		pwstate = DSI_PLL_POWER_ON_HSCLK;
	else if (enable_hsdiv)
		pwstate = DSI_PLL_POWER_ON_DIV;
	else
		pwstate = DSI_PLL_POWER_OFF;

	r = dsi_pll_power(dsidev, pwstate);

	if (r)
		goto err1;

	DSSDBG("PLL init done\n");

	return 0;
err1:
	/* undo the regulator enable done above */
	if (dsi->vdds_dsi_enabled) {
		regulator_disable(dsi->vdds_dsi_reg);
		dsi->vdds_dsi_enabled = false;
	}
err0:
	dsi_disable_scp_clk(dsidev);
	enable_clocks(0);
	dsi_enable_pll_clock(dsidev, 0);
	return r;
}
1660
/*
 * Power down the DSI PLL and release the clocks taken in dsi_pll_init().
 * When disconnect_lanes is set, the VDDS_DSI regulator is disabled too.
 */
void dsi_pll_uninit(struct platform_device *dsidev, bool disconnect_lanes)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	dsi->pll_locked = 0;
	dsi_pll_power(dsidev, DSI_PLL_POWER_OFF);
	if (disconnect_lanes) {
		WARN_ON(!dsi->vdds_dsi_enabled);
		regulator_disable(dsi->vdds_dsi_reg);
		dsi->vdds_dsi_enabled = false;
	}

	dsi_disable_scp_clk(dsidev);
	enable_clocks(0);
	dsi_enable_pll_clock(dsidev, 0);

	DSSDBG("PLL uninit done\n");
}
1679
/* Print the clock configuration of one DSI module to a seq_file,
 * using the rates recorded in dsi->current_cinfo. */
static void dsi_dump_dsidev_clocks(struct platform_device *dsidev,
		struct seq_file *s)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	struct dsi_clock_info *cinfo = &dsi->current_cinfo;
	enum omap_dss_clk_source dispc_clk_src, dsi_clk_src;
	int dsi_module = dsi_get_dsidev_id(dsidev);

	dispc_clk_src = dss_get_dispc_clk_source();
	dsi_clk_src = dss_get_dsi_clk_source(dsi_module);

	/* clocks must be running while the registers are touched */
	enable_clocks(1);

	seq_printf(s,	"- DSI%d PLL -\n", dsi_module + 1);

	seq_printf(s,	"dsi pll source = %s\n",
			cinfo->use_sys_clk ? "dss_sys_clk" : "pclkfree");

	seq_printf(s,	"Fint\t\t%-16luregn %u\n", cinfo->fint, cinfo->regn);

	seq_printf(s,	"CLKIN4DDR\t%-16luregm %u\n",
			cinfo->clkin4ddr, cinfo->regm);

	seq_printf(s,	"%s (%s)\t%-16luregm_dispc %u\t(%s)\n",
			dss_get_generic_clk_source_name(dispc_clk_src),
			dss_feat_get_clk_source_name(dispc_clk_src),
			cinfo->dsi_pll_hsdiv_dispc_clk,
			cinfo->regm_dispc,
			dispc_clk_src == OMAP_DSS_CLK_SRC_FCK ?
			"off" : "on");

	seq_printf(s,	"%s (%s)\t%-16luregm_dsi %u\t(%s)\n",
			dss_get_generic_clk_source_name(dsi_clk_src),
			dss_feat_get_clk_source_name(dsi_clk_src),
			cinfo->dsi_pll_hsdiv_dsi_clk,
			cinfo->regm_dsi,
			dsi_clk_src == OMAP_DSS_CLK_SRC_FCK ?
			"off" : "on");

	seq_printf(s,	"- DSI%d -\n", dsi_module + 1);

	seq_printf(s,	"dsi fclk source = %s (%s)\n",
			dss_get_generic_clk_source_name(dsi_clk_src),
			dss_feat_get_clk_source_name(dsi_clk_src));

	seq_printf(s,	"DSI_FCLK\t%lu\n", dsi_fclk_rate(dsidev));

	seq_printf(s,	"DDR_CLK\t\t%lu\n",
			cinfo->clkin4ddr / 4);

	seq_printf(s,	"TxByteClkHS\t%lu\n", dsi_get_txbyteclkhs(dsidev));

	seq_printf(s,	"LP_CLK\t\t%lu\n", cinfo->lp_clk);

	enable_clocks(0);
}
1736
1737void dsi_dump_clocks(struct seq_file *s)
1738{
1739        struct platform_device *dsidev;
1740        int i;
1741
1742        for  (i = 0; i < MAX_NUM_DSI; i++) {
1743                dsidev = dsi_get_dsidev_from_id(i);
1744                if (dsidev)
1745                        dsi_dump_dsidev_clocks(dsidev, s);
1746        }
1747}
1748
1749#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
/*
 * Dump the accumulated interrupt statistics of one DSI module to the
 * debugfs seq_file, and reset the counters so the next dump reports
 * the period elapsed since this one.
 */
static void dsi_dump_dsidev_irqs(struct platform_device *dsidev,
		struct seq_file *s)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	unsigned long flags;
	struct dsi_irq_stats stats;
	int dsi_module = dsi_get_dsidev_id(dsidev);

	spin_lock_irqsave(&dsi->irq_stats_lock, flags);

	/* snapshot under the lock, then restart accounting from "now" */
	stats = dsi->irq_stats;
	memset(&dsi->irq_stats, 0, sizeof(dsi->irq_stats));
	dsi->irq_stats.last_reset = jiffies;

	spin_unlock_irqrestore(&dsi->irq_stats_lock, flags);

	seq_printf(s, "period %u ms\n",
			jiffies_to_msecs(jiffies - stats.last_reset));

	seq_printf(s, "irqs %d\n", stats.irq_count);
	/* print one counter, indexed by the bit position of the IRQ flag */
#define PIS(x) \
	seq_printf(s, "%-20s %10d\n", #x, stats.dsi_irqs[ffs(DSI_IRQ_##x)-1]);

	seq_printf(s, "-- DSI%d interrupts --\n", dsi_module + 1);
	PIS(VC0);
	PIS(VC1);
	PIS(VC2);
	PIS(VC3);
	PIS(WAKEUP);
	PIS(RESYNC);
	PIS(PLL_LOCK);
	PIS(PLL_UNLOCK);
	PIS(PLL_RECALL);
	PIS(COMPLEXIO_ERR);
	PIS(HS_TX_TIMEOUT);
	PIS(LP_RX_TIMEOUT);
	PIS(TE_TRIGGER);
	PIS(ACK_TRIGGER);
	PIS(SYNC_LOST);
	PIS(LDO_POWER_GOOD);
	PIS(TA_TIMEOUT);
#undef PIS

	/* same idea, but one column per virtual channel */
#define PIS(x) \
	seq_printf(s, "%-20s %10d %10d %10d %10d\n", #x, \
			stats.vc_irqs[0][ffs(DSI_VC_IRQ_##x)-1], \
			stats.vc_irqs[1][ffs(DSI_VC_IRQ_##x)-1], \
			stats.vc_irqs[2][ffs(DSI_VC_IRQ_##x)-1], \
			stats.vc_irqs[3][ffs(DSI_VC_IRQ_##x)-1]);

	seq_printf(s, "-- VC interrupts --\n");
	PIS(CS);
	PIS(ECC_CORR);
	PIS(PACKET_SENT);
	PIS(FIFO_TX_OVF);
	PIS(FIFO_RX_OVF);
	PIS(BTA);
	PIS(ECC_NO_CORR);
	PIS(FIFO_TX_UDF);
	PIS(PP_BUSY_CHANGE);
#undef PIS

	/* complex I/O (PHY) interrupt counters */
#define PIS(x) \
	seq_printf(s, "%-20s %10d\n", #x, \
			stats.cio_irqs[ffs(DSI_CIO_IRQ_##x)-1]);

	seq_printf(s, "-- CIO interrupts --\n");
	PIS(ERRSYNCESC1);
	PIS(ERRSYNCESC2);
	PIS(ERRSYNCESC3);
	PIS(ERRESC1);
	PIS(ERRESC2);
	PIS(ERRESC3);
	PIS(ERRCONTROL1);
	PIS(ERRCONTROL2);
	PIS(ERRCONTROL3);
	PIS(STATEULPS1);
	PIS(STATEULPS2);
	PIS(STATEULPS3);
	PIS(ERRCONTENTIONLP0_1);
	PIS(ERRCONTENTIONLP1_1);
	PIS(ERRCONTENTIONLP0_2);
	PIS(ERRCONTENTIONLP1_2);
	PIS(ERRCONTENTIONLP0_3);
	PIS(ERRCONTENTIONLP1_3);
	PIS(ULPSACTIVENOT_ALL0);
	PIS(ULPSACTIVENOT_ALL1);
#undef PIS
}
1839
/* debugfs callback: IRQ statistics of the first DSI module */
static void dsi1_dump_irqs(struct seq_file *s)
{
	dsi_dump_dsidev_irqs(dsi_get_dsidev_from_id(0), s);
}
1846
/* debugfs callback: IRQ statistics of the second DSI module */
static void dsi2_dump_irqs(struct seq_file *s)
{
	dsi_dump_dsidev_irqs(dsi_get_dsidev_from_id(1), s);
}
1853
1854void dsi_create_debugfs_files_irq(struct dentry *debugfs_dir,
1855                const struct file_operations *debug_fops)
1856{
1857        struct platform_device *dsidev;
1858
1859        dsidev = dsi_get_dsidev_from_id(0);
1860        if (dsidev)
1861                debugfs_create_file("dsi1_irqs", S_IRUGO, debugfs_dir,
1862                        &dsi1_dump_irqs, debug_fops);
1863
1864        dsidev = dsi_get_dsidev_from_id(1);
1865        if (dsidev)
1866                debugfs_create_file("dsi2_irqs", S_IRUGO, debugfs_dir,
1867                        &dsi2_dump_irqs, debug_fops);
1868}
1869#endif
1870
/*
 * Dump every DSI protocol-engine, PHY and PLL register of one module to
 * the debugfs seq_file. The DSS interface/functional clocks and the SCP
 * clock are enabled around the reads so the register file is accessible.
 */
static void dsi_dump_dsidev_regs(struct platform_device *dsidev,
		struct seq_file *s)
{
	/* stringizes the register macro, so DUMPREG(DSI_VC_CTRL(0)) prints
	 * "DSI_VC_CTRL(0)" as the row label */
#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dsi_read_reg(dsidev, r))

	dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
	dsi_enable_scp_clk(dsidev);

	DUMPREG(DSI_REVISION);
	DUMPREG(DSI_SYSCONFIG);
	DUMPREG(DSI_SYSSTATUS);
	DUMPREG(DSI_IRQSTATUS);
	DUMPREG(DSI_IRQENABLE);
	DUMPREG(DSI_CTRL);
	DUMPREG(DSI_COMPLEXIO_CFG1);
	DUMPREG(DSI_COMPLEXIO_IRQ_STATUS);
	DUMPREG(DSI_COMPLEXIO_IRQ_ENABLE);
	DUMPREG(DSI_CLK_CTRL);
	DUMPREG(DSI_TIMING1);
	DUMPREG(DSI_TIMING2);
	DUMPREG(DSI_VM_TIMING1);
	DUMPREG(DSI_VM_TIMING2);
	DUMPREG(DSI_VM_TIMING3);
	DUMPREG(DSI_CLK_TIMING);
	DUMPREG(DSI_TX_FIFO_VC_SIZE);
	DUMPREG(DSI_RX_FIFO_VC_SIZE);
	DUMPREG(DSI_COMPLEXIO_CFG2);
	DUMPREG(DSI_RX_FIFO_VC_FULLNESS);
	DUMPREG(DSI_VM_TIMING4);
	DUMPREG(DSI_TX_FIFO_VC_EMPTINESS);
	DUMPREG(DSI_VM_TIMING5);
	DUMPREG(DSI_VM_TIMING6);
	DUMPREG(DSI_VM_TIMING7);
	DUMPREG(DSI_STOPCLK_TIMING);

	/* per-virtual-channel registers, VC 0..3 */
	DUMPREG(DSI_VC_CTRL(0));
	DUMPREG(DSI_VC_TE(0));
	DUMPREG(DSI_VC_LONG_PACKET_HEADER(0));
	DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(0));
	DUMPREG(DSI_VC_SHORT_PACKET_HEADER(0));
	DUMPREG(DSI_VC_IRQSTATUS(0));
	DUMPREG(DSI_VC_IRQENABLE(0));

	DUMPREG(DSI_VC_CTRL(1));
	DUMPREG(DSI_VC_TE(1));
	DUMPREG(DSI_VC_LONG_PACKET_HEADER(1));
	DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(1));
	DUMPREG(DSI_VC_SHORT_PACKET_HEADER(1));
	DUMPREG(DSI_VC_IRQSTATUS(1));
	DUMPREG(DSI_VC_IRQENABLE(1));

	DUMPREG(DSI_VC_CTRL(2));
	DUMPREG(DSI_VC_TE(2));
	DUMPREG(DSI_VC_LONG_PACKET_HEADER(2));
	DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(2));
	DUMPREG(DSI_VC_SHORT_PACKET_HEADER(2));
	DUMPREG(DSI_VC_IRQSTATUS(2));
	DUMPREG(DSI_VC_IRQENABLE(2));

	DUMPREG(DSI_VC_CTRL(3));
	DUMPREG(DSI_VC_TE(3));
	DUMPREG(DSI_VC_LONG_PACKET_HEADER(3));
	DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(3));
	DUMPREG(DSI_VC_SHORT_PACKET_HEADER(3));
	DUMPREG(DSI_VC_IRQSTATUS(3));
	DUMPREG(DSI_VC_IRQENABLE(3));

	/* PHY configuration registers (read over the SCP interface) */
	DUMPREG(DSI_DSIPHY_CFG0);
	DUMPREG(DSI_DSIPHY_CFG1);
	DUMPREG(DSI_DSIPHY_CFG2);
	DUMPREG(DSI_DSIPHY_CFG5);

	DUMPREG(DSI_PLL_CONTROL);
	DUMPREG(DSI_PLL_STATUS);
	DUMPREG(DSI_PLL_GO);
	DUMPREG(DSI_PLL_CONFIGURATION1);
	DUMPREG(DSI_PLL_CONFIGURATION2);

	dsi_disable_scp_clk(dsidev);
	dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
#undef DUMPREG
}
1953
/* debugfs callback: register dump of the first DSI module */
static void dsi1_dump_regs(struct seq_file *s)
{
	dsi_dump_dsidev_regs(dsi_get_dsidev_from_id(0), s);
}
1960
/* debugfs callback: register dump of the second DSI module */
static void dsi2_dump_regs(struct seq_file *s)
{
	dsi_dump_dsidev_regs(dsi_get_dsidev_from_id(1), s);
}
1967
1968void dsi_create_debugfs_files_reg(struct dentry *debugfs_dir,
1969                const struct file_operations *debug_fops)
1970{
1971        struct platform_device *dsidev;
1972
1973        dsidev = dsi_get_dsidev_from_id(0);
1974        if (dsidev)
1975                debugfs_create_file("dsi1_regs", S_IRUGO, debugfs_dir,
1976                        &dsi1_dump_regs, debug_fops);
1977
1978        dsidev = dsi_get_dsidev_from_id(1);
1979        if (dsidev)
1980                debugfs_create_file("dsi2_regs", S_IRUGO, debugfs_dir,
1981                        &dsi2_dump_regs, debug_fops);
1982}
/* Complex I/O power states, written to/read from DSI_COMPLEXIO_CFG1
 * PWR_CMD (bits 28:27) and PWR_STATUS (bits 26:25) in dsi_cio_power(). */
enum dsi_cio_power_state {
	DSI_COMPLEXIO_POWER_OFF		= 0x0,
	DSI_COMPLEXIO_POWER_ON		= 0x1,
	DSI_COMPLEXIO_POWER_ULPS	= 0x2,
};
1988
1989static int dsi_cio_power(struct platform_device *dsidev,
1990                enum dsi_cio_power_state state)
1991{
1992        int t = 0;
1993
1994        /* PWR_CMD */
1995        REG_FLD_MOD(dsidev, DSI_COMPLEXIO_CFG1, state, 28, 27);
1996
1997        /* PWR_STATUS */
1998        while (FLD_GET(dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG1),
1999                        26, 25) != state) {
2000                if (++t > 1000) {
2001                        DSSERR("failed to set complexio power state to "
2002                                        "%d\n", state);
2003                        return -ENODEV;
2004                }
2005                udelay(1);
2006        }
2007
2008        return 0;
2009}
2010
2011/* Number of data lanes present on DSI interface */
2012static inline int dsi_get_num_data_lanes(struct platform_device *dsidev)
2013{
2014        /* DSI on OMAP3 doesn't have register DSI_GNQ, set number
2015         * of data lanes as 2 by default */
2016        if (dss_has_feature(FEAT_DSI_GNQ))
2017                return REG_GET(dsidev, DSI_GNQ, 11, 9); /* NB_DATA_LANES */
2018        else
2019                return 2;
2020}
2021
2022/* Number of data lanes used by the dss device */
2023static inline int dsi_get_num_data_lanes_dssdev(struct omap_dss_device *dssdev)
2024{
2025        int num_data_lanes = 0;
2026
2027        if (dssdev->phy.dsi.data1_lane != 0)
2028                num_data_lanes++;
2029        if (dssdev->phy.dsi.data2_lane != 0)
2030                num_data_lanes++;
2031        if (dssdev->phy.dsi.data3_lane != 0)
2032                num_data_lanes++;
2033        if (dssdev->phy.dsi.data4_lane != 0)
2034                num_data_lanes++;
2035
2036        return num_data_lanes;
2037}
2038
2039static unsigned dsi_get_line_buf_size(struct platform_device *dsidev)
2040{
2041        int val;
2042
2043        /* line buffer on OMAP3 is 1024 x 24bits */
2044        /* XXX: for some reason using full buffer size causes
2045         * considerable TX slowdown with update sizes that fill the
2046         * whole buffer */
2047        if (!dss_has_feature(FEAT_DSI_GNQ))
2048                return 1023 * 3;
2049
2050        val = REG_GET(dsidev, DSI_GNQ, 14, 12); /* VP1_LINE_BUFFER_SIZE */
2051
2052        switch (val) {
2053        case 1:
2054                return 512 * 3;         /* 512x24 bits */
2055        case 2:
2056                return 682 * 3;         /* 682x24 bits */
2057        case 3:
2058                return 853 * 3;         /* 853x24 bits */
2059        case 4:
2060                return 1024 * 3;        /* 1024x24 bits */
2061        case 5:
2062                return 1194 * 3;        /* 1194x24 bits */
2063        case 6:
2064                return 1365 * 3;        /* 1365x24 bits */
2065        default:
2066                BUG();
2067        }
2068}
2069
2070static void dsi_set_lane_config(struct omap_dss_device *dssdev)
2071{
2072        struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2073        u32 r;
2074        int num_data_lanes_dssdev = dsi_get_num_data_lanes_dssdev(dssdev);
2075
2076        int clk_lane   = dssdev->phy.dsi.clk_lane;
2077        int data1_lane = dssdev->phy.dsi.data1_lane;
2078        int data2_lane = dssdev->phy.dsi.data2_lane;
2079        int clk_pol    = dssdev->phy.dsi.clk_pol;
2080        int data1_pol  = dssdev->phy.dsi.data1_pol;
2081        int data2_pol  = dssdev->phy.dsi.data2_pol;
2082
2083        r = dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG1);
2084        r = FLD_MOD(r, clk_lane, 2, 0);
2085        r = FLD_MOD(r, clk_pol, 3, 3);
2086        r = FLD_MOD(r, data1_lane, 6, 4);
2087        r = FLD_MOD(r, data1_pol, 7, 7);
2088        r = FLD_MOD(r, data2_lane, 10, 8);
2089        r = FLD_MOD(r, data2_pol, 11, 11);
2090        if (num_data_lanes_dssdev > 2) {
2091                int data3_lane  = dssdev->phy.dsi.data3_lane;
2092                int data3_pol  = dssdev->phy.dsi.data3_pol;
2093
2094                r = FLD_MOD(r, data3_lane, 14, 12);
2095                r = FLD_MOD(r, data3_pol, 15, 15);
2096        }
2097        if (num_data_lanes_dssdev > 3) {
2098                int data4_lane  = dssdev->phy.dsi.data4_lane;
2099                int data4_pol  = dssdev->phy.dsi.data4_pol;
2100
2101                r = FLD_MOD(r, data4_lane, 18, 16);
2102                r = FLD_MOD(r, data4_pol, 19, 19);
2103        }
2104        dsi_write_reg(dsidev, DSI_COMPLEXIO_CFG1, r);
2105
2106        /* The configuration of the DSI complex I/O (number of data lanes,
2107           position, differential order) should not be changed while
2108           DSS.DSI_CLK_CRTRL[20] LP_CLK_ENABLE bit is set to 1. In order for
2109           the hardware to take into account a new configuration of the complex
2110           I/O (done in DSS.DSI_COMPLEXIO_CFG1 register), it is recommended to
2111           follow this sequence: First set the DSS.DSI_CTRL[0] IF_EN bit to 1,
2112           then reset the DSS.DSI_CTRL[0] IF_EN to 0, then set
2113           DSS.DSI_CLK_CTRL[20] LP_CLK_ENABLE to 1 and finally set again the
2114           DSS.DSI_CTRL[0] IF_EN bit to 1. If the sequence is not followed, the
2115           DSI complex I/O configuration is unknown. */
2116
2117        /*
2118        REG_FLD_MOD(dsidev, DSI_CTRL, 1, 0, 0);
2119        REG_FLD_MOD(dsidev, DSI_CTRL, 0, 0, 0);
2120        REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 1, 20, 20);
2121        REG_FLD_MOD(dsidev, DSI_CTRL, 1, 0, 0);
2122        */
2123}
2124
2125static inline unsigned ns2ddr(struct platform_device *dsidev, unsigned ns)
2126{
2127        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2128
2129        /* convert time in ns to ddr ticks, rounding up */
2130        unsigned long ddr_clk = dsi->current_cinfo.clkin4ddr / 4;
2131        return (ns * (ddr_clk / 1000 / 1000) + 999) / 1000;
2132}
2133
2134static inline unsigned ddr2ns(struct platform_device *dsidev, unsigned ddr)
2135{
2136        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2137
2138        unsigned long ddr_clk = dsi->current_cinfo.clkin4ddr / 4;
2139        return ddr * 1000 * 1000 / (ddr_clk / 1000);
2140}
2141
/*
 * Compute and program the complex I/O lane timings (DSIPHY_CFG0..2) in
 * DDR clock ticks, derived from the current clkin4ddr rate. The ns
 * values used below sit inside the min/max windows noted in the
 * comments (presumably the MIPI D-PHY limits — the hard-coded margins
 * are deliberate; confirm against the PHY spec before changing them).
 */
static void dsi_cio_timings(struct platform_device *dsidev)
{
	u32 r;
	u32 ths_prepare, ths_prepare_ths_zero, ths_trail, ths_exit;
	u32 tlpx_half, tclk_trail, tclk_zero;
	u32 tclk_prepare;

	/* calculate timings */

	/* 1 * DDR_CLK = 2 * UI */

	/* min 40ns + 4*UI	max 85ns + 6*UI */
	ths_prepare = ns2ddr(dsidev, 70) + 2;

	/* min 145ns + 10*UI */
	ths_prepare_ths_zero = ns2ddr(dsidev, 175) + 2;

	/* min max(8*UI, 60ns+4*UI) */
	ths_trail = ns2ddr(dsidev, 60) + 5;

	/* min 100ns */
	ths_exit = ns2ddr(dsidev, 145);

	/* tlpx min 50n */
	tlpx_half = ns2ddr(dsidev, 25);

	/* min 60ns */
	tclk_trail = ns2ddr(dsidev, 60) + 2;

	/* min 38ns, max 95ns */
	tclk_prepare = ns2ddr(dsidev, 65);

	/* min tclk-prepare + tclk-zero = 300ns */
	tclk_zero = ns2ddr(dsidev, 260);

	DSSDBG("ths_prepare %u (%uns), ths_prepare_ths_zero %u (%uns)\n",
		ths_prepare, ddr2ns(dsidev, ths_prepare),
		ths_prepare_ths_zero, ddr2ns(dsidev, ths_prepare_ths_zero));
	DSSDBG("ths_trail %u (%uns), ths_exit %u (%uns)\n",
			ths_trail, ddr2ns(dsidev, ths_trail),
			ths_exit, ddr2ns(dsidev, ths_exit));

	DSSDBG("tlpx_half %u (%uns), tclk_trail %u (%uns), "
			"tclk_zero %u (%uns)\n",
			tlpx_half, ddr2ns(dsidev, tlpx_half),
			tclk_trail, ddr2ns(dsidev, tclk_trail),
			tclk_zero, ddr2ns(dsidev, tclk_zero));
	DSSDBG("tclk_prepare %u (%uns)\n",
			tclk_prepare, ddr2ns(dsidev, tclk_prepare));

	/* program timings */

	r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG0);
	r = FLD_MOD(r, ths_prepare, 31, 24);
	r = FLD_MOD(r, ths_prepare_ths_zero, 23, 16);
	r = FLD_MOD(r, ths_trail, 15, 8);
	r = FLD_MOD(r, ths_exit, 7, 0);
	dsi_write_reg(dsidev, DSI_DSIPHY_CFG0, r);

	r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG1);
	r = FLD_MOD(r, tlpx_half, 22, 16);
	r = FLD_MOD(r, tclk_trail, 15, 8);
	r = FLD_MOD(r, tclk_zero, 7, 0);
	dsi_write_reg(dsidev, DSI_DSIPHY_CFG1, r);

	r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG2);
	r = FLD_MOD(r, tclk_prepare, 7, 0);
	dsi_write_reg(dsidev, DSI_DSIPHY_CFG2, r);
}
2211
/*
 * Force the selected lane lines to a fixed level through the DSIPHY_CFG10
 * LPTXSCP override. Each lane has two override bits (DY/DX); the lane's
 * polarity setting decides which of the pair corresponds to the P line.
 * Used by dsi_cio_init() to drive the manual ULPS exit sequence.
 */
static void dsi_cio_enable_lane_override(struct omap_dss_device *dssdev,
		enum dsi_lane lanes)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	int clk_lane   = dssdev->phy.dsi.clk_lane;
	int data1_lane = dssdev->phy.dsi.data1_lane;
	int data2_lane = dssdev->phy.dsi.data2_lane;
	int data3_lane = dssdev->phy.dsi.data3_lane;
	int data4_lane = dssdev->phy.dsi.data4_lane;
	int clk_pol    = dssdev->phy.dsi.clk_pol;
	int data1_pol  = dssdev->phy.dsi.data1_pol;
	int data2_pol  = dssdev->phy.dsi.data2_pol;
	int data3_pol  = dssdev->phy.dsi.data3_pol;
	int data4_pol  = dssdev->phy.dsi.data4_pol;

	u32 l = 0;
	/* override field is 2 bits per lane: 3 lanes -> bits up to 22,
	 * 5 lanes -> bits up to 26 */
	u8 lptxscp_start = dsi->num_data_lanes == 2 ? 22 : 26;

	if (lanes & DSI_CLK_P)
		l |= 1 << ((clk_lane - 1) * 2 + (clk_pol ? 0 : 1));
	if (lanes & DSI_CLK_N)
		l |= 1 << ((clk_lane - 1) * 2 + (clk_pol ? 1 : 0));

	if (lanes & DSI_DATA1_P)
		l |= 1 << ((data1_lane - 1) * 2 + (data1_pol ? 0 : 1));
	if (lanes & DSI_DATA1_N)
		l |= 1 << ((data1_lane - 1) * 2 + (data1_pol ? 1 : 0));

	if (lanes & DSI_DATA2_P)
		l |= 1 << ((data2_lane - 1) * 2 + (data2_pol ? 0 : 1));
	if (lanes & DSI_DATA2_N)
		l |= 1 << ((data2_lane - 1) * 2 + (data2_pol ? 1 : 0));

	if (lanes & DSI_DATA3_P)
		l |= 1 << ((data3_lane - 1) * 2 + (data3_pol ? 0 : 1));
	if (lanes & DSI_DATA3_N)
		l |= 1 << ((data3_lane - 1) * 2 + (data3_pol ? 1 : 0));

	if (lanes & DSI_DATA4_P)
		l |= 1 << ((data4_lane - 1) * 2 + (data4_pol ? 0 : 1));
	if (lanes & DSI_DATA4_N)
		l |= 1 << ((data4_lane - 1) * 2 + (data4_pol ? 1 : 0));
	/*
	 * Bits in REGLPTXSCPDAT4TO0DXDY:
	 * 17: DY0 18: DX0
	 * 19: DY1 20: DX1
	 * 21: DY2 22: DX2
	 * 23: DY3 24: DX3
	 * 25: DY4 26: DX4
	 */

	/* Set the lane override configuration */

	/* REGLPTXSCPDAT4TO0DXDY */
	REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, l, lptxscp_start, 17);

	/* Enable lane override */

	/* ENLPTXSCPDAT */
	REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, 1, 27, 27);
}
2274
/* Turn off the LPTXSCP lane override and clear its configuration. */
static void dsi_cio_disable_lane_override(struct platform_device *dsidev)
{
	/* Disable lane override */
	REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, 0, 27, 27); /* ENLPTXSCPDAT */
	/* Reset the lane override configuration */
	/* NOTE(review): only bits 22..17 (lanes 0-2) are cleared here, but
	 * dsi_cio_enable_lane_override() can write up to bit 26 on modules
	 * with more than two data lanes — confirm whether 26..17 is meant. */
	/* REGLPTXSCPDAT4TO0DXDY */
	REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, 0, 22, 17);
}
2283
2284static int dsi_cio_wait_tx_clk_esc_reset(struct omap_dss_device *dssdev)
2285{
2286        struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2287        int t;
2288        int bits[3];
2289        bool in_use[3];
2290
2291        if (dss_has_feature(FEAT_DSI_REVERSE_TXCLKESC)) {
2292                bits[0] = 28;
2293                bits[1] = 27;
2294                bits[2] = 26;
2295        } else {
2296                bits[0] = 24;
2297                bits[1] = 25;
2298                bits[2] = 26;
2299        }
2300
2301        in_use[0] = false;
2302        in_use[1] = false;
2303        in_use[2] = false;
2304
2305        if (dssdev->phy.dsi.clk_lane != 0)
2306                in_use[dssdev->phy.dsi.clk_lane - 1] = true;
2307        if (dssdev->phy.dsi.data1_lane != 0)
2308                in_use[dssdev->phy.dsi.data1_lane - 1] = true;
2309        if (dssdev->phy.dsi.data2_lane != 0)
2310                in_use[dssdev->phy.dsi.data2_lane - 1] = true;
2311
2312        t = 100000;
2313        while (true) {
2314                u32 l;
2315                int i;
2316                int ok;
2317
2318                l = dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);
2319
2320                ok = 0;
2321                for (i = 0; i < 3; ++i) {
2322                        if (!in_use[i] || (l & (1 << bits[i])))
2323                                ok++;
2324                }
2325
2326                if (ok == 3)
2327                        break;
2328
2329                if (--t == 0) {
2330                        for (i = 0; i < 3; ++i) {
2331                                if (!in_use[i] || (l & (1 << bits[i])))
2332                                        continue;
2333
2334                                DSSERR("CIO TXCLKESC%d domain not coming " \
2335                                                "out of reset\n", i);
2336                        }
2337                        return -EIO;
2338                }
2339        }
2340
2341        return 0;
2342}
2343
/*
 * Bring up the DSI complex I/O: mux the pads, enable the SCP clock,
 * program the lane configuration, power the CIO on, enable the LP clock
 * and wait for the TXCLKESC domains. If the lanes were left in ULPS, a
 * manual ULPS exit (Mark-1 via lane override for 1ms) is performed.
 * On failure every step taken so far is undone through the goto chain.
 * Returns 0 on success or a negative error code.
 */
static int dsi_cio_init(struct omap_dss_device *dssdev)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	int r;
	int num_data_lanes_dssdev = dsi_get_num_data_lanes_dssdev(dssdev);
	u32 l;

	DSSDBGF();

	if (dsi->dsi_mux_pads)
		dsi->dsi_mux_pads(true);

	dsi_enable_scp_clk(dsidev);

	/* A dummy read using the SCP interface to any DSIPHY register is
	 * required after DSIPHY reset to complete the reset of the DSI complex
	 * I/O. */
	dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);

	/* RESET_DONE_STATUS */
	if (wait_for_bit_change(dsidev, DSI_DSIPHY_CFG5, 30, 1) != 1) {
		DSSERR("CIO SCP Clock domain not coming out of reset.\n");
		r = -EIO;
		goto err_scp_clk_dom;
	}

	dsi_set_lane_config(dssdev);

	/* set TX STOP MODE timer to maximum for this operation */
	l = dsi_read_reg(dsidev, DSI_TIMING1);
	l = FLD_MOD(l, 1, 15, 15);	/* FORCE_TX_STOP_MODE_IO */
	l = FLD_MOD(l, 1, 14, 14);	/* STOP_STATE_X16_IO */
	l = FLD_MOD(l, 1, 13, 13);	/* STOP_STATE_X4_IO */
	l = FLD_MOD(l, 0x1fff, 12, 0);	/* STOP_STATE_COUNTER_IO */
	dsi_write_reg(dsidev, DSI_TIMING1, l);

	if (dsi->ulps_enabled) {
		/* override the P line of every used lane to drive Mark-1 */
		u32 lane_mask = DSI_CLK_P | DSI_DATA1_P | DSI_DATA2_P;

		DSSDBG("manual ulps exit\n");

		/* ULPS is exited by Mark-1 state for 1ms, followed by
		 * stop state. DSS HW cannot do this via the normal
		 * ULPS exit sequence, as after reset the DSS HW thinks
		 * that we are not in ULPS mode, and refuses to send the
		 * sequence. So we need to send the ULPS exit sequence
		 * manually.
		 */

		if (num_data_lanes_dssdev > 2)
			lane_mask |= DSI_DATA3_P;

		if (num_data_lanes_dssdev > 3)
			lane_mask |= DSI_DATA4_P;

		dsi_cio_enable_lane_override(dssdev, lane_mask);
	}

	r = dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_ON);
	if (r)
		goto err_cio_pwr;

	if (wait_for_bit_change(dsidev, DSI_COMPLEXIO_CFG1, 29, 1) != 1) {
		DSSERR("CIO PWR clock domain not coming out of reset.\n");
		r = -ENODEV;
		goto err_cio_pwr_dom;
	}

	/* toggle IF_EN so the HW latches the new complex I/O config, then
	 * enable the LP clock (see the sequence note in
	 * dsi_set_lane_config()) */
	dsi_if_enable(dsidev, true);
	dsi_if_enable(dsidev, false);
	REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 1, 20, 20); /* LP_CLK_ENABLE */

	r = dsi_cio_wait_tx_clk_esc_reset(dssdev);
	if (r)
		goto err_tx_clk_esc_rst;

	if (dsi->ulps_enabled) {
		/* Keep Mark-1 state for 1ms (as per DSI spec) */
		ktime_t wait = ns_to_ktime(1000 * 1000);
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_hrtimeout(&wait, HRTIMER_MODE_REL);

		/* Disable the override. The lanes should be set to Mark-11
		 * state by the HW */
		dsi_cio_disable_lane_override(dsidev);
	}

	/* FORCE_TX_STOP_MODE_IO */
	REG_FLD_MOD(dsidev, DSI_TIMING1, 0, 15, 15);

	dsi_cio_timings(dsidev);

	dsi->ulps_enabled = false;

	DSSDBG("CIO init done\n");

	return 0;

err_tx_clk_esc_rst:
	REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 0, 20, 20); /* LP_CLK_ENABLE */
err_cio_pwr_dom:
	dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_OFF);
err_cio_pwr:
	if (dsi->ulps_enabled)
		dsi_cio_disable_lane_override(dsidev);
err_scp_clk_dom:
	dsi_disable_scp_clk(dsidev);
	if (dsi->dsi_mux_pads)
		dsi->dsi_mux_pads(false);
	return r;
}
2455
2456static void dsi_cio_uninit(struct platform_device *dsidev)
2457{
2458        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2459
2460        dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_OFF);
2461        dsi_disable_scp_clk(dsidev);
2462        if (dsi->dsi_mux_pads)
2463                dsi->dsi_mux_pads(false);
2464}
2465
2466static int _dsi_wait_reset(struct platform_device *dsidev)
2467{
2468        int t = 0;
2469
2470        while (REG_GET(dsidev, DSI_SYSSTATUS, 0, 0) == 0) {
2471                if (++t > 5) {
2472                        DSSERR("soft reset failed\n");
2473                        return -ENODEV;
2474                }
2475                udelay(1);
2476        }
2477
2478        return 0;
2479}
2480
2481static int _dsi_reset(struct platform_device *dsidev)
2482{
2483        /* Soft reset */
2484        REG_FLD_MOD(dsidev, DSI_SYSCONFIG, 1, 1, 1);
2485        return _dsi_wait_reset(dsidev);
2486}
2487
2488static void dsi_config_tx_fifo(struct platform_device *dsidev,
2489                enum fifo_size size1, enum fifo_size size2,
2490                enum fifo_size size3, enum fifo_size size4)
2491{
2492        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2493        u32 r = 0;
2494        int add = 0;
2495        int i;
2496
2497        dsi->vc[0].fifo_size = size1;
2498        dsi->vc[1].fifo_size = size2;
2499        dsi->vc[2].fifo_size = size3;
2500        dsi->vc[3].fifo_size = size4;
2501
2502        for (i = 0; i < 4; i++) {
2503                u8 v;
2504                int size = dsi->vc[i].fifo_size;
2505
2506                if (add + size > 4) {
2507                        DSSERR("Illegal FIFO configuration\n");
2508                        BUG();
2509                }
2510
2511                v = FLD_VAL(add, 2, 0) | FLD_VAL(size, 7, 4);
2512                r |= v << (8 * i);
2513                /*DSSDBG("TX FIFO vc %d: size %d, add %d\n", i, size, add); */
2514                add += size;
2515        }
2516
2517        dsi_write_reg(dsidev, DSI_TX_FIFO_VC_SIZE, r);
2518}
2519
2520static void dsi_config_rx_fifo(struct platform_device *dsidev,
2521                enum fifo_size size1, enum fifo_size size2,
2522                enum fifo_size size3, enum fifo_size size4)
2523{
2524        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2525        u32 r = 0;
2526        int add = 0;
2527        int i;
2528
2529        dsi->vc[0].fifo_size = size1;
2530        dsi->vc[1].fifo_size = size2;
2531        dsi->vc[2].fifo_size = size3;
2532        dsi->vc[3].fifo_size = size4;
2533
2534        for (i = 0; i < 4; i++) {
2535                u8 v;
2536                int size = dsi->vc[i].fifo_size;
2537
2538                if (add + size > 4) {
2539                        DSSERR("Illegal FIFO configuration\n");
2540                        BUG();
2541                }
2542
2543                v = FLD_VAL(add, 2, 0) | FLD_VAL(size, 7, 4);
2544                r |= v << (8 * i);
2545                /*DSSDBG("RX FIFO vc %d: size %d, add %d\n", i, size, add); */
2546                add += size;
2547        }
2548
2549        dsi_write_reg(dsidev, DSI_RX_FIFO_VC_SIZE, r);
2550}
2551
2552static int dsi_force_tx_stop_mode_io(struct platform_device *dsidev)
2553{
2554        u32 r;
2555
2556        r = dsi_read_reg(dsidev, DSI_TIMING1);
2557        r = FLD_MOD(r, 1, 15, 15);      /* FORCE_TX_STOP_MODE_IO */
2558        dsi_write_reg(dsidev, DSI_TIMING1, r);
2559
2560        if (wait_for_bit_change(dsidev, DSI_TIMING1, 15, 0) != 0) {
2561                DSSERR("TX_STOP bit not going down\n");
2562                return -EIO;
2563        }
2564
2565        return 0;
2566}
2567
/* VC_EN (DSI_VC_CTRL bit 0): is virtual channel @channel enabled? */
static bool dsi_vc_is_enabled(struct platform_device *dsidev, int channel)
{
	return REG_GET(dsidev, DSI_VC_CTRL(channel), 0, 0);
}
2572
2573static void dsi_packet_sent_handler_vp(void *data, u32 mask)
2574{
2575        struct dsi_packet_sent_handler_data *vp_data =
2576                (struct dsi_packet_sent_handler_data *) data;
2577        struct dsi_data *dsi = dsi_get_dsidrv_data(vp_data->dsidev);
2578        const int channel = dsi->update_channel;
2579        u8 bit = dsi->te_enabled ? 30 : 31;
2580
2581        if (REG_GET(vp_data->dsidev, DSI_VC_TE(channel), bit, bit) == 0)
2582                complete(vp_data->completion);
2583}
2584
2585static int dsi_sync_vc_vp(struct platform_device *dsidev, int channel)
2586{
2587        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2588        DECLARE_COMPLETION_ONSTACK(completion);
2589        struct dsi_packet_sent_handler_data vp_data = { dsidev, &completion };
2590        int r = 0;
2591        u8 bit;
2592
2593        bit = dsi->te_enabled ? 30 : 31;
2594
2595        r = dsi_register_isr_vc(dsidev, channel, dsi_packet_sent_handler_vp,
2596                &vp_data, DSI_VC_IRQ_PACKET_SENT);
2597        if (r)
2598                goto err0;
2599
2600        /* Wait for completion only if TE_EN/TE_START is still set */
2601        if (REG_GET(dsidev, DSI_VC_TE(channel), bit, bit)) {
2602                if (wait_for_completion_timeout(&completion,
2603                                msecs_to_jiffies(10)) == 0) {
2604                        DSSERR("Failed to complete previous frame transfer\n");
2605                        r = -EIO;
2606                        goto err1;
2607                }
2608        }
2609
2610        dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_vp,
2611                &vp_data, DSI_VC_IRQ_PACKET_SENT);
2612
2613        return 0;
2614err1:
2615        dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_vp,
2616                &vp_data, DSI_VC_IRQ_PACKET_SENT);
2617err0:
2618        return r;
2619}
2620
/*
 * PACKET_SENT ISR used while the update channel is sourced from L4:
 * completes the waiter in dsi_sync_vc_l4() once TX_FIFO_NOT_EMPTY
 * (DSI_VC_CTRL bit 5) has gone down.
 */
static void dsi_packet_sent_handler_l4(void *data, u32 mask)
{
	struct dsi_packet_sent_handler_data *l4_data =
		(struct dsi_packet_sent_handler_data *) data;
	struct dsi_data *dsi = dsi_get_dsidrv_data(l4_data->dsidev);
	const int channel = dsi->update_channel;

	if (REG_GET(l4_data->dsidev, DSI_VC_CTRL(channel), 5, 5) == 0)
		complete(l4_data->completion);
}
2631
2632static int dsi_sync_vc_l4(struct platform_device *dsidev, int channel)
2633{
2634        DECLARE_COMPLETION_ONSTACK(completion);
2635        struct dsi_packet_sent_handler_data l4_data = { dsidev, &completion };
2636        int r = 0;
2637
2638        r = dsi_register_isr_vc(dsidev, channel, dsi_packet_sent_handler_l4,
2639                &l4_data, DSI_VC_IRQ_PACKET_SENT);
2640        if (r)
2641                goto err0;
2642
2643        /* Wait for completion only if TX_FIFO_NOT_EMPTY is still set */
2644        if (REG_GET(dsidev, DSI_VC_CTRL(channel), 5, 5)) {
2645                if (wait_for_completion_timeout(&completion,
2646                                msecs_to_jiffies(10)) == 0) {
2647                        DSSERR("Failed to complete previous l4 transfer\n");
2648                        r = -EIO;
2649                        goto err1;
2650                }
2651        }
2652
2653        dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_l4,
2654                &l4_data, DSI_VC_IRQ_PACKET_SENT);
2655
2656        return 0;
2657err1:
2658        dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_l4,
2659                &l4_data, DSI_VC_IRQ_PACKET_SENT);
2660err0:
2661        return r;
2662}
2663
/*
 * Wait for any transfer in progress on 'channel' to finish, dispatching
 * on the VC's current source (video port vs. L4).  Must be called with
 * the DSI bus lock held and never from interrupt context (it sleeps).
 * Returns 0 if the channel is idle or disabled.
 */
static int dsi_sync_vc(struct platform_device *dsidev, int channel)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	WARN_ON(!dsi_bus_is_locked(dsidev));

	WARN_ON(in_interrupt());

	/* a disabled VC cannot have a transfer in flight */
	if (!dsi_vc_is_enabled(dsidev, channel))
		return 0;

	switch (dsi->vc[channel].mode) {
	case DSI_VC_MODE_VP:
		return dsi_sync_vc_vp(dsidev, channel);
	case DSI_VC_MODE_L4:
		return dsi_sync_vc_l4(dsidev, channel);
	default:
		BUG();
	}
}
2684
/*
 * Enable or disable a virtual channel (DSI_VC_CTRL bit 0) and wait for
 * the hardware to reflect the requested state.  Returns 0 on success,
 * -EIO if the bit never changes.
 */
static int dsi_vc_enable(struct platform_device *dsidev, int channel,
		bool enable)
{
	DSSDBG("dsi_vc_enable channel %d, enable %d\n",
			channel, enable);

	/* normalize to 0/1 so it compares equal to the register field */
	enable = enable ? 1 : 0;

	REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), enable, 0, 0);

	if (wait_for_bit_change(dsidev, DSI_VC_CTRL(channel),
		0, enable) != enable) {
			DSSERR("Failed to set dsi_vc_enable to %d\n", enable);
			return -EIO;
	}

	return 0;
}
2703
/*
 * One-time default configuration of a virtual channel: L4 source,
 * command mode, low-power speed, checksum/ECC generation on TX, no
 * DMA.  Warns (but proceeds) if the VC is busy while reconfigured.
 */
static void dsi_vc_initial_config(struct platform_device *dsidev, int channel)
{
	u32 r;

	DSSDBGF("%d", channel);

	r = dsi_read_reg(dsidev, DSI_VC_CTRL(channel));

	if (FLD_GET(r, 15, 15)) /* VC_BUSY */
		DSSERR("VC(%d) busy when trying to configure it!\n",
				channel);

	r = FLD_MOD(r, 0, 1, 1); /* SOURCE, 0 = L4 */
	r = FLD_MOD(r, 0, 2, 2); /* BTA_SHORT_EN  */
	r = FLD_MOD(r, 0, 3, 3); /* BTA_LONG_EN */
	r = FLD_MOD(r, 0, 4, 4); /* MODE, 0 = command */
	r = FLD_MOD(r, 1, 7, 7); /* CS_TX_EN */
	r = FLD_MOD(r, 1, 8, 8); /* ECC_TX_EN */
	r = FLD_MOD(r, 0, 9, 9); /* MODE_SPEED, high speed on/off */
	/* OCP_WIDTH only exists on some DSS revisions */
	if (dss_has_feature(FEAT_DSI_VC_OCP_WIDTH))
		r = FLD_MOD(r, 3, 11, 10);	/* OCP_WIDTH = 32 bit */

	r = FLD_MOD(r, 4, 29, 27); /* DMA_RX_REQ_NB = no dma */
	r = FLD_MOD(r, 4, 23, 21); /* DMA_TX_REQ_NB = no dma */

	dsi_write_reg(dsidev, DSI_VC_CTRL(channel), r);
}
2731
/*
 * Switch a virtual channel to the L4 slave port (command-mode source).
 * No-op if already in L4 mode.  The VC is synced and disabled around
 * the source change.  Returns 0 on success, -EIO if the VC stays busy.
 */
static int dsi_vc_config_l4(struct platform_device *dsidev, int channel)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	if (dsi->vc[channel].mode == DSI_VC_MODE_L4)
		return 0;

	DSSDBGF("%d", channel);

	/* let any ongoing transfer finish before touching the source */
	dsi_sync_vc(dsidev, channel);

	dsi_vc_enable(dsidev, channel, 0);

	/* VC_BUSY */
	if (wait_for_bit_change(dsidev, DSI_VC_CTRL(channel), 15, 0) != 0) {
		DSSERR("vc(%d) busy when trying to config for L4\n", channel);
		return -EIO;
	}

	REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 0, 1, 1); /* SOURCE, 0 = L4 */

	/* DCS_CMD_ENABLE */
	if (dss_has_feature(FEAT_DSI_DCS_CMD_CONFIG_VC))
		REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 0, 30, 30);

	dsi_vc_enable(dsidev, channel, 1);

	/* cache the mode so repeated calls are cheap */
	dsi->vc[channel].mode = DSI_VC_MODE_L4;

	return 0;
}
2763
/*
 * Switch a virtual channel to the video port source.  Mirror image of
 * dsi_vc_config_l4(): no-op if already in VP mode, VC synced and
 * disabled around the change.  Returns 0 on success, -EIO if the VC
 * stays busy.
 */
static int dsi_vc_config_vp(struct platform_device *dsidev, int channel)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	if (dsi->vc[channel].mode == DSI_VC_MODE_VP)
		return 0;

	DSSDBGF("%d", channel);

	/* let any ongoing transfer finish before touching the source */
	dsi_sync_vc(dsidev, channel);

	dsi_vc_enable(dsidev, channel, 0);

	/* VC_BUSY */
	if (wait_for_bit_change(dsidev, DSI_VC_CTRL(channel), 15, 0) != 0) {
		DSSERR("vc(%d) busy when trying to config for VP\n", channel);
		return -EIO;
	}

	/* SOURCE, 1 = video port */
	REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 1, 1, 1);

	/* DCS_CMD_ENABLE */
	if (dss_has_feature(FEAT_DSI_DCS_CMD_CONFIG_VC))
		REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 1, 30, 30);

	dsi_vc_enable(dsidev, channel, 1);

	/* cache the mode so repeated calls are cheap */
	dsi->vc[channel].mode = DSI_VC_MODE_VP;

	return 0;
}
2796
2797
/*
 * Select high-speed (enable) or low-power (!enable) transmission mode
 * for 'channel' (MODE_SPEED, DSI_VC_CTRL bit 9).  The VC and the DSI
 * interface are briefly disabled around the change, and TX stop mode
 * is forced afterwards.  Caller must hold the DSI bus lock.
 */
void omapdss_dsi_vc_enable_hs(struct omap_dss_device *dssdev, int channel,
		bool enable)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);

	DSSDBG("dsi_vc_enable_hs(%d, %d)\n", channel, enable);

	WARN_ON(!dsi_bus_is_locked(dsidev));

	dsi_vc_enable(dsidev, channel, 0);
	dsi_if_enable(dsidev, 0);

	REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), enable, 9, 9);

	dsi_vc_enable(dsidev, channel, 1);
	dsi_if_enable(dsidev, 1);

	dsi_force_tx_stop_mode_io(dsidev);
}
EXPORT_SYMBOL(omapdss_dsi_vc_enable_hs);
2818
/*
 * Drain the remainder of a long packet from the VC RX FIFO, logging
 * each 32-bit word, until RX_FIFO_NOT_EMPTY (DSI_VC_CTRL bit 20)
 * clears.  Used by the flush/diagnostic paths only.
 */
static void dsi_vc_flush_long_data(struct platform_device *dsidev, int channel)
{
	while (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) {
		u32 val;
		val = dsi_read_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel));
		DSSDBG("\t\tb1 %#02x b2 %#02x b3 %#02x b4 %#02x\n",
				(val >> 0) & 0xff,
				(val >> 8) & 0xff,
				(val >> 16) & 0xff,
				(val >> 24) & 0xff);
	}
}
2831
2832static void dsi_show_rx_ack_with_err(u16 err)
2833{
2834        DSSERR("\tACK with ERROR (%#x):\n", err);
2835        if (err & (1 << 0))
2836                DSSERR("\t\tSoT Error\n");
2837        if (err & (1 << 1))
2838                DSSERR("\t\tSoT Sync Error\n");
2839        if (err & (1 << 2))
2840                DSSERR("\t\tEoT Sync Error\n");
2841        if (err & (1 << 3))
2842                DSSERR("\t\tEscape Mode Entry Command Error\n");
2843        if (err & (1 << 4))
2844                DSSERR("\t\tLP Transmit Sync Error\n");
2845        if (err & (1 << 5))
2846                DSSERR("\t\tHS Receive Timeout Error\n");
2847        if (err & (1 << 6))
2848                DSSERR("\t\tFalse Control Error\n");
2849        if (err & (1 << 7))
2850                DSSERR("\t\t(reserved7)\n");
2851        if (err & (1 << 8))
2852                DSSERR("\t\tECC Error, single-bit (corrected)\n");
2853        if (err & (1 << 9))
2854                DSSERR("\t\tECC Error, multi-bit (not corrected)\n");
2855        if (err & (1 << 10))
2856                DSSERR("\t\tChecksum Error\n");
2857        if (err & (1 << 11))
2858                DSSERR("\t\tData type not recognized\n");
2859        if (err & (1 << 12))
2860                DSSERR("\t\tInvalid VC ID\n");
2861        if (err & (1 << 13))
2862                DSSERR("\t\tInvalid Transmission Length\n");
2863        if (err & (1 << 14))
2864                DSSERR("\t\t(reserved14)\n");
2865        if (err & (1 << 15))
2866                DSSERR("\t\tDSI Protocol Violation\n");
2867}
2868
/*
 * Empty the VC RX FIFO, decoding and logging every pending packet
 * header (ACK-with-error, short read responses, long read responses).
 * Used to recover and diagnose when unexpected RX data is found.
 * Always returns 0.
 */
static u16 dsi_vc_flush_receive_data(struct platform_device *dsidev,
		int channel)
{
	/* RX_FIFO_NOT_EMPTY */
	while (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) {
		u32 val;
		u8 dt;
		val = dsi_read_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel));
		DSSERR("\trawval %#08x\n", val);
		/* data type lives in the low 6 bits of the header */
		dt = FLD_GET(val, 5, 0);
		if (dt == DSI_DT_RX_ACK_WITH_ERR) {
			u16 err = FLD_GET(val, 23, 8);
			dsi_show_rx_ack_with_err(err);
		} else if (dt == DSI_DT_RX_SHORT_READ_1) {
			DSSERR("\tDCS short response, 1 byte: %#x\n",
					FLD_GET(val, 23, 8));
		} else if (dt == DSI_DT_RX_SHORT_READ_2) {
			DSSERR("\tDCS short response, 2 byte: %#x\n",
					FLD_GET(val, 23, 8));
		} else if (dt == DSI_DT_RX_DCS_LONG_READ) {
			DSSERR("\tDCS long response, len %d\n",
					FLD_GET(val, 23, 8));
			/* payload words follow the header in the FIFO */
			dsi_vc_flush_long_data(dsidev, channel);
		} else {
			DSSERR("\tunknown datatype 0x%02x\n", dt);
		}
	}
	return 0;
}
2898
/*
 * Trigger a bus turnaround (BTA_EN, DSI_VC_CTRL bit 6) on 'channel',
 * flushing (and dumping) any stale RX data first.  Completion is
 * signalled asynchronously via the BTA IRQ; see dsi_vc_send_bta_sync().
 * Caller must hold the DSI bus lock.
 */
static int dsi_vc_send_bta(struct platform_device *dsidev, int channel)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	if (dsi->debug_write || dsi->debug_read)
		DSSDBG("dsi_vc_send_bta %d\n", channel);

	WARN_ON(!dsi_bus_is_locked(dsidev));

	/* RX_FIFO_NOT_EMPTY */
	if (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) {
		DSSERR("rx fifo not empty when sending BTA, dumping data:\n");
		dsi_vc_flush_receive_data(dsidev, channel);
	}

	REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 1, 6, 6); /* BTA_EN */

	return 0;
}
2918
/*
 * Send a BTA and wait (max 500 ms) for the peripheral to hand the bus
 * back.  Fails with -EIO on timeout or if any DSI error IRQ fired
 * while waiting.  The goto chain unregisters the two temporary ISRs
 * in reverse order of registration on every exit path.
 */
int dsi_vc_send_bta_sync(struct omap_dss_device *dssdev, int channel)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	DECLARE_COMPLETION_ONSTACK(completion);
	int r = 0;
	u32 err;

	r = dsi_register_isr_vc(dsidev, channel, dsi_completion_handler,
			&completion, DSI_VC_IRQ_BTA);
	if (r)
		goto err0;

	/* also wake up on any DSI error so we do not stall for 500 ms */
	r = dsi_register_isr(dsidev, dsi_completion_handler, &completion,
			DSI_IRQ_ERROR_MASK);
	if (r)
		goto err1;

	r = dsi_vc_send_bta(dsidev, channel);
	if (r)
		goto err2;

	if (wait_for_completion_timeout(&completion,
				msecs_to_jiffies(500)) == 0) {
		DSSERR("Failed to receive BTA\n");
		r = -EIO;
		goto err2;
	}

	/* the completion may have been triggered by an error IRQ */
	err = dsi_get_errors(dsidev);
	if (err) {
		DSSERR("Error while sending BTA: %x\n", err);
		r = -EIO;
		goto err2;
	}
err2:
	dsi_unregister_isr(dsidev, dsi_completion_handler, &completion,
			DSI_IRQ_ERROR_MASK);
err1:
	dsi_unregister_isr_vc(dsidev, channel, dsi_completion_handler,
			&completion, DSI_VC_IRQ_BTA);
err0:
	return r;
}
EXPORT_SYMBOL(dsi_vc_send_bta_sync);
2963
/*
 * Write the 4-byte long packet header to the TX FIFO: data id (VC id
 * in bits 7..6 ORed with the data type), 16-bit word count and ECC
 * byte.  Caller must hold the DSI bus lock.
 */
static inline void dsi_vc_write_long_header(struct platform_device *dsidev,
		int channel, u8 data_type, u16 len, u8 ecc)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	u32 val;
	u8 data_id;

	WARN_ON(!dsi_bus_is_locked(dsidev));

	data_id = data_type | dsi->vc[channel].vc_id << 6;

	val = FLD_VAL(data_id, 7, 0) | FLD_VAL(len, 23, 8) |
		FLD_VAL(ecc, 31, 24);

	dsi_write_reg(dsidev, DSI_VC_LONG_PACKET_HEADER(channel), val);
}
2980
/*
 * Push one 32-bit payload word into the TX FIFO, byte b1 in the least
 * significant position (little-endian packing per the wire order).
 */
static inline void dsi_vc_write_long_payload(struct platform_device *dsidev,
		int channel, u8 b1, u8 b2, u8 b3, u8 b4)
{
	u32 val;

	val = b4 << 24 | b3 << 16 | b2 << 8  | b1 << 0;

/*	DSSDBG("\twriting %02x, %02x, %02x, %02x (%#010x)\n",
			b1, b2, b3, b4, val); */

	dsi_write_reg(dsidev, DSI_VC_LONG_PACKET_PAYLOAD(channel), val);
}
2993
2994static int dsi_vc_send_long(struct platform_device *dsidev, int channel,
2995                u8 data_type, u8 *data, u16 len, u8 ecc)
2996{
2997        /*u32 val; */
2998        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2999        int i;
3000        u8 *p;
3001        int r = 0;
3002        u8 b1, b2, b3, b4;
3003
3004        if (dsi->debug_write)
3005                DSSDBG("dsi_vc_send_long, %d bytes\n", len);
3006
3007        /* len + header */
3008        if (dsi->vc[channel].fifo_size * 32 * 4 < len + 4) {
3009                DSSERR("unable to send long packet: packet too long.\n");
3010                return -EINVAL;
3011        }
3012
3013        dsi_vc_config_l4(dsidev, channel);
3014
3015        dsi_vc_write_long_header(dsidev, channel, data_type, len, ecc);
3016
3017        p = data;
3018        for (i = 0; i < len >> 2; i++) {
3019                if (dsi->debug_write)
3020                        DSSDBG("\tsending full packet %d\n", i);
3021
3022                b1 = *p++;
3023                b2 = *p++;
3024                b3 = *p++;
3025                b4 = *p++;
3026
3027                dsi_vc_write_long_payload(dsidev, channel, b1, b2, b3, b4);
3028        }
3029
3030        i = len % 4;
3031        if (i) {
3032                b1 = 0; b2 = 0; b3 = 0;
3033
3034                if (dsi->debug_write)
3035                        DSSDBG("\tsending remainder bytes %d\n", i);
3036
3037                switch (i) {
3038                case 3:
3039                        b1 = *p++;
3040                        b2 = *p++;
3041                        b3 = *p++;
3042                        break;
3043                case 2:
3044                        b1 = *p++;
3045                        b2 = *p++;
3046                        break;
3047                case 1:
3048                        b1 = *p++;
3049                        break;
3050                }
3051
3052                dsi_vc_write_long_payload(dsidev, channel, b1, b2, b3, 0);
3053        }
3054
3055        return r;
3056}
3057
3058static int dsi_vc_send_short(struct platform_device *dsidev, int channel,
3059                u8 data_type, u16 data, u8 ecc)
3060{
3061        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3062        u32 r;
3063        u8 data_id;
3064
3065        WARN_ON(!dsi_bus_is_locked(dsidev));
3066
3067        if (dsi->debug_write)
3068                DSSDBG("dsi_vc_send_short(ch%d, dt %#x, b1 %#x, b2 %#x)\n",
3069                                channel,
3070                                data_type, data & 0xff, (data >> 8) & 0xff);
3071
3072        dsi_vc_config_l4(dsidev, channel);
3073
3074        if (FLD_GET(dsi_read_reg(dsidev, DSI_VC_CTRL(channel)), 16, 16)) {
3075                DSSERR("ERROR FIFO FULL, aborting transfer\n");
3076                return -EINVAL;
3077        }
3078
3079        data_id = data_type | dsi->vc[channel].vc_id << 6;
3080
3081        r = (data_id << 0) | (data << 8) | (ecc << 24);
3082
3083        dsi_write_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel), r);
3084
3085        return 0;
3086}
3087
/* Send a 4-byte null packet, e.g. to keep the link exercised. */
int dsi_vc_send_null(struct omap_dss_device *dssdev, int channel)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	u8 nullpkg[] = {0, 0, 0, 0};

	return dsi_vc_send_long(dsidev, channel, DSI_DT_NULL_PACKET, nullpkg,
		4, 0);
}
EXPORT_SYMBOL(dsi_vc_send_null);
3097
3098int dsi_vc_dcs_write_nosync(struct omap_dss_device *dssdev, int channel,
3099                u8 *data, int len)
3100{
3101        struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3102        int r;
3103
3104        BUG_ON(len == 0);
3105
3106        if (len == 1) {
3107                r = dsi_vc_send_short(dsidev, channel, DSI_DT_DCS_SHORT_WRITE_0,
3108                                data[0], 0);
3109        } else if (len == 2) {
3110                r = dsi_vc_send_short(dsidev, channel, DSI_DT_DCS_SHORT_WRITE_1,
3111                                data[0] | (data[1] << 8), 0);
3112        } else {
3113                /* 0x39 = DCS Long Write */
3114                r = dsi_vc_send_long(dsidev, channel, DSI_DT_DCS_LONG_WRITE,
3115                                data, len, 0);
3116        }
3117
3118        return r;
3119}
3120EXPORT_SYMBOL(dsi_vc_dcs_write_nosync);
3121
/*
 * Send a DCS write and BTA-sync it, verifying that the peripheral did
 * not return unexpected data.  Returns 0 on success; on any failure
 * the command and length are logged and the error is propagated.
 */
int dsi_vc_dcs_write(struct omap_dss_device *dssdev, int channel, u8 *data,
		int len)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	int r;

	r = dsi_vc_dcs_write_nosync(dssdev, channel, data, len);
	if (r)
		goto err;

	r = dsi_vc_send_bta_sync(dssdev, channel);
	if (r)
		goto err;

	/* RX_FIFO_NOT_EMPTY */
	if (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) {
		DSSERR("rx fifo not empty after write, dumping data:\n");
		dsi_vc_flush_receive_data(dsidev, channel);
		r = -EIO;
		goto err;
	}

	return 0;
err:
	DSSERR("dsi_vc_dcs_write(ch %d, cmd 0x%02x, len %d) failed\n",
			channel, data[0], len);
	return r;
}
EXPORT_SYMBOL(dsi_vc_dcs_write);
3151
/* Send a parameterless DCS command and BTA-sync it. */
int dsi_vc_dcs_write_0(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd)
{
	return dsi_vc_dcs_write(dssdev, channel, &dcs_cmd, 1);
}
EXPORT_SYMBOL(dsi_vc_dcs_write_0);
3157
3158int dsi_vc_dcs_write_1(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
3159                u8 param)
3160{
3161        u8 buf[2];
3162        buf[0] = dcs_cmd;
3163        buf[1] = param;
3164        return dsi_vc_dcs_write(dssdev, channel, buf, 2);
3165}
3166EXPORT_SYMBOL(dsi_vc_dcs_write_1);
3167
/*
 * Perform a DCS read: send the read request, BTA, then decode the
 * response from the RX FIFO into 'buf'.  Returns the number of bytes
 * read (1 or 2 for short responses, the reported length for long
 * responses) or a negative error code if the response is malformed,
 * an error report, or larger than 'buflen'.
 */
int dsi_vc_dcs_read(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
		u8 *buf, int buflen)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	u32 val;
	u8 dt;
	int r;

	if (dsi->debug_read)
		DSSDBG("dsi_vc_dcs_read(ch%d, dcs_cmd %x)\n", channel, dcs_cmd);

	r = dsi_vc_send_short(dsidev, channel, DSI_DT_DCS_READ, dcs_cmd, 0);
	if (r)
		goto err;

	/* turn the bus around so the peripheral can answer */
	r = dsi_vc_send_bta_sync(dssdev, channel);
	if (r)
		goto err;

	/* RX_FIFO_NOT_EMPTY */
	if (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20) == 0) {
		DSSERR("RX fifo empty when trying to read.\n");
		r = -EIO;
		goto err;
	}

	/* first FIFO word is the response packet header */
	val = dsi_read_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel));
	if (dsi->debug_read)
		DSSDBG("\theader: %08x\n", val);
	dt = FLD_GET(val, 5, 0);
	if (dt == DSI_DT_RX_ACK_WITH_ERR) {
		u16 err = FLD_GET(val, 23, 8);
		dsi_show_rx_ack_with_err(err);
		r = -EIO;
		goto err;

	} else if (dt == DSI_DT_RX_SHORT_READ_1) {
		/* single data byte carried in header bits 15..8 */
		u8 data = FLD_GET(val, 15, 8);
		if (dsi->debug_read)
			DSSDBG("\tDCS short response, 1 byte: %02x\n", data);

		if (buflen < 1) {
			r = -EIO;
			goto err;
		}

		buf[0] = data;

		return 1;
	} else if (dt == DSI_DT_RX_SHORT_READ_2) {
		/* two data bytes carried in header bits 23..8 */
		u16 data = FLD_GET(val, 23, 8);
		if (dsi->debug_read)
			DSSDBG("\tDCS short response, 2 byte: %04x\n", data);

		if (buflen < 2) {
			r = -EIO;
			goto err;
		}

		buf[0] = data & 0xff;
		buf[1] = (data >> 8) & 0xff;

		return 2;
	} else if (dt == DSI_DT_RX_DCS_LONG_READ) {
		int w;
		int len = FLD_GET(val, 23, 8);
		if (dsi->debug_read)
			DSSDBG("\tDCS long response, len %d\n", len);

		if (len > buflen) {
			r = -EIO;
			goto err;
		}

		/* two byte checksum ends the packet, not included in len */
		for (w = 0; w < len + 2;) {
			int b;
			val = dsi_read_reg(dsidev,
				DSI_VC_SHORT_PACKET_HEADER(channel));
			if (dsi->debug_read)
				DSSDBG("\t\t%02x %02x %02x %02x\n",
						(val >> 0) & 0xff,
						(val >> 8) & 0xff,
						(val >> 16) & 0xff,
						(val >> 24) & 0xff);

			for (b = 0; b < 4; ++b) {
				if (w < len)
					buf[w] = (val >> (b * 8)) & 0xff;
				/* we discard the 2 byte checksum */
				++w;
			}
		}

		return len;
	} else {
		DSSERR("\tunknown datatype 0x%02x\n", dt);
		r = -EIO;
		goto err;
	}

	/* every branch above returns or jumps to err */
	BUG();
err:
	DSSERR("dsi_vc_dcs_read(ch %d, cmd 0x%02x) failed\n",
			channel, dcs_cmd);
	return r;

}
EXPORT_SYMBOL(dsi_vc_dcs_read);
3278
3279int dsi_vc_dcs_read_1(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
3280                u8 *data)
3281{
3282        int r;
3283
3284        r = dsi_vc_dcs_read(dssdev, channel, dcs_cmd, data, 1);
3285
3286        if (r < 0)
3287                return r;
3288
3289        if (r != 1)
3290                return -EIO;
3291
3292        return 0;
3293}
3294EXPORT_SYMBOL(dsi_vc_dcs_read_1);
3295
3296int dsi_vc_dcs_read_2(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
3297                u8 *data1, u8 *data2)
3298{
3299        u8 buf[2];
3300        int r;
3301
3302        r = dsi_vc_dcs_read(dssdev, channel, dcs_cmd, buf, 2);
3303
3304        if (r < 0)
3305                return r;
3306
3307        if (r != 2)
3308                return -EIO;
3309
3310        *data1 = buf[0];
3311        *data2 = buf[1];
3312
3313        return 0;
3314}
3315EXPORT_SYMBOL(dsi_vc_dcs_read_2);
3316
/*
 * Tell the peripheral the maximum return packet size for subsequent
 * reads (SET_MAX_RETURN_PACKET_SIZE short command).
 */
int dsi_vc_set_max_rx_packet_size(struct omap_dss_device *dssdev, int channel,
		u16 len)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);

	return dsi_vc_send_short(dsidev, channel, DSI_DT_SET_MAX_RET_PKG_SIZE,
			len, 0);
}
EXPORT_SYMBOL(dsi_vc_set_max_rx_packet_size);
3326
/*
 * Put the DSI lanes into ultra-low-power state: sync and disable all
 * VCs, verify the complex I/O is idle, assert the ULPS request bits
 * and wait (max 1 s) for the ULPSACTIVENOT transition, then drop the
 * CIO to ULPS power state and disable the interface.  Caller must
 * hold the DSI bus lock; no-op (with a WARN) if ULPS is already on.
 */
static int dsi_enter_ulps(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	DECLARE_COMPLETION_ONSTACK(completion);
	int r;

	DSSDBGF();

	WARN_ON(!dsi_bus_is_locked(dsidev));

	WARN_ON(dsi->ulps_enabled);

	if (dsi->ulps_enabled)
		return 0;

	/* DDR_CLK_ALWAYS_ON would keep the clock lane out of ULPS */
	if (REG_GET(dsidev, DSI_CLK_CTRL, 13, 13)) {
		DSSERR("DDR_CLK_ALWAYS_ON enabled when entering ULPS\n");
		return -EIO;
	}

	/* let all four VCs finish whatever they are transmitting */
	dsi_sync_vc(dsidev, 0);
	dsi_sync_vc(dsidev, 1);
	dsi_sync_vc(dsidev, 2);
	dsi_sync_vc(dsidev, 3);

	dsi_force_tx_stop_mode_io(dsidev);

	dsi_vc_enable(dsidev, 0, false);
	dsi_vc_enable(dsidev, 1, false);
	dsi_vc_enable(dsidev, 2, false);
	dsi_vc_enable(dsidev, 3, false);

	if (REG_GET(dsidev, DSI_COMPLEXIO_CFG2, 16, 16)) {	/* HS_BUSY */
		DSSERR("HS busy when enabling ULPS\n");
		return -EIO;
	}

	if (REG_GET(dsidev, DSI_COMPLEXIO_CFG2, 17, 17)) {	/* LP_BUSY */
		DSSERR("LP busy when enabling ULPS\n");
		return -EIO;
	}

	r = dsi_register_isr_cio(dsidev, dsi_completion_handler, &completion,
			DSI_CIO_IRQ_ULPSACTIVENOT_ALL0);
	if (r)
		return r;

	/* Assert TxRequestEsc for data lanes and TxUlpsClk for clk lane */
	/* LANEx_ULPS_SIG2 */
	REG_FLD_MOD(dsidev, DSI_COMPLEXIO_CFG2, (1 << 0) | (1 << 1) | (1 << 2),
		7, 5);

	if (wait_for_completion_timeout(&completion,
				msecs_to_jiffies(1000)) == 0) {
		DSSERR("ULPS enable timeout\n");
		r = -EIO;
		goto err;
	}

	dsi_unregister_isr_cio(dsidev, dsi_completion_handler, &completion,
			DSI_CIO_IRQ_ULPSACTIVENOT_ALL0);

	/* lanes are in ULPS now; drop the complex I/O power state too */
	dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_ULPS);

	dsi_if_enable(dsidev, false);

	dsi->ulps_enabled = true;

	return 0;

err:
	dsi_unregister_isr_cio(dsidev, dsi_completion_handler, &completion,
			DSI_CIO_IRQ_ULPSACTIVENOT_ALL0);
	return r;
}
3402
/*
 * Program and enable the LP receive timeout (DSI_TIMING2 bits 15..0):
 * 'ticks' of DSI_FCK, optionally multiplied by 4 and/or 16 via the
 * X4/X16 prescaler bits.
 */
static void dsi_set_lp_rx_timeout(struct platform_device *dsidev,
		unsigned ticks, bool x4, bool x16)
{
	unsigned long fck;
	unsigned long total_ticks;
	u32 r;

	/* LP_RX_COUNTER is a 13-bit field */
	BUG_ON(ticks > 0x1fff);

	/* ticks in DSI_FCK */
	fck = dsi_fclk_rate(dsidev);

	r = dsi_read_reg(dsidev, DSI_TIMING2);
	r = FLD_MOD(r, 1, 15, 15);	/* LP_RX_TO */
	r = FLD_MOD(r, x16 ? 1 : 0, 14, 14);	/* LP_RX_TO_X16 */
	r = FLD_MOD(r, x4 ? 1 : 0, 13, 13);	/* LP_RX_TO_X4 */
	r = FLD_MOD(r, ticks, 12, 0);	/* LP_RX_COUNTER */
	dsi_write_reg(dsidev, DSI_TIMING2, r);

	total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1);

	DSSDBG("LP_RX_TO %lu ticks (%#x%s%s) = %lu ns\n",
			total_ticks,
			ticks, x4 ? " x4" : "", x16 ? " x16" : "",
			(total_ticks * 1000) / (fck / 1000 / 1000));
}
3429
/*
 * Program and enable the turn-around timeout (DSI_TIMING1 bits 31..16):
 * 'ticks' of DSI_FCK, optionally multiplied by 8 and/or 16 via the
 * X8/X16 prescaler bits.
 */
static void dsi_set_ta_timeout(struct platform_device *dsidev, unsigned ticks,
		bool x8, bool x16)
{
	unsigned long fck;
	unsigned long total_ticks;
	u32 r;

	/* TA_TO_COUNTER is a 13-bit field */
	BUG_ON(ticks > 0x1fff);

	/* ticks in DSI_FCK */
	fck = dsi_fclk_rate(dsidev);

	r = dsi_read_reg(dsidev, DSI_TIMING1);
	r = FLD_MOD(r, 1, 31, 31);	/* TA_TO */
	r = FLD_MOD(r, x16 ? 1 : 0, 30, 30);	/* TA_TO_X16 */
	r = FLD_MOD(r, x8 ? 1 : 0, 29, 29);	/* TA_TO_X8 */
	r = FLD_MOD(r, ticks, 28, 16);	/* TA_TO_COUNTER */
	dsi_write_reg(dsidev, DSI_TIMING1, r);

	total_ticks = ticks * (x16 ? 16 : 1) * (x8 ? 8 : 1);

	DSSDBG("TA_TO %lu ticks (%#x%s%s) = %lu ns\n",
			total_ticks,
			ticks, x8 ? " x8" : "", x16 ? " x16" : "",
			(total_ticks * 1000) / (fck / 1000 / 1000));
}
3456
/*
 * Program the stop state counter (DSI_TIMING1 bits 15..0): 'ticks' of
 * DSI_FCK, optionally multiplied by 4 and/or 16, and force TX stop
 * mode.
 */
static void dsi_set_stop_state_counter(struct platform_device *dsidev,
		unsigned ticks, bool x4, bool x16)
{
	unsigned long fck;
	unsigned long total_ticks;
	u32 r;

	/* STOP_STATE_COUNTER_IO is a 13-bit field */
	BUG_ON(ticks > 0x1fff);

	/* ticks in DSI_FCK */
	fck = dsi_fclk_rate(dsidev);

	r = dsi_read_reg(dsidev, DSI_TIMING1);
	r = FLD_MOD(r, 1, 15, 15);	/* FORCE_TX_STOP_MODE_IO */
	r = FLD_MOD(r, x16 ? 1 : 0, 14, 14);	/* STOP_STATE_X16_IO */
	r = FLD_MOD(r, x4 ? 1 : 0, 13, 13);	/* STOP_STATE_X4_IO */
	r = FLD_MOD(r, ticks, 12, 0);	/* STOP_STATE_COUNTER_IO */
	dsi_write_reg(dsidev, DSI_TIMING1, r);

	total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1);

	DSSDBG("STOP_STATE_COUNTER %lu ticks (%#x%s%s) = %lu ns\n",
			total_ticks,
			ticks, x4 ? " x4" : "", x16 ? " x16" : "",
			(total_ticks * 1000) / (fck / 1000 / 1000));
}
3483
/*
 * Program and enable the HS transmit timeout (DSI_TIMING2 bits 31..16):
 * 'ticks' of TxByteClkHS, optionally multiplied by 4 and/or 16 via the
 * prescaler bits.
 */
static void dsi_set_hs_tx_timeout(struct platform_device *dsidev,
		unsigned ticks, bool x4, bool x16)
{
	unsigned long fck;
	unsigned long total_ticks;
	u32 r;

	/* HS_TX_TO_COUNTER is a 13-bit field */
	BUG_ON(ticks > 0x1fff);

	/* ticks in TxByteClkHS */
	fck = dsi_get_txbyteclkhs(dsidev);

	r = dsi_read_reg(dsidev, DSI_TIMING2);
	r = FLD_MOD(r, 1, 31, 31);	/* HS_TX_TO */
	r = FLD_MOD(r, x16 ? 1 : 0, 30, 30);	/* HS_TX_TO_X16 */
	r = FLD_MOD(r, x4 ? 1 : 0, 29, 29);	/* HS_TX_TO_X8 (4 really) */
	r = FLD_MOD(r, ticks, 28, 16);	/* HS_TX_TO_COUNTER */
	dsi_write_reg(dsidev, DSI_TIMING2, r);

	total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1);

	DSSDBG("HS_TX_TO %lu ticks (%#x%s%s) = %lu ns\n",
			total_ticks,
			ticks, x4 ? " x4" : "", x16 ? " x16" : "",
			(total_ticks * 1000) / (fck / 1000 / 1000));
}
/*
 * One-time configuration of the DSI protocol engine: FIFO partitioning,
 * timeout counters, the DSI_CTRL mode bits, and the initial setup of all
 * four virtual channels. Returns 0 (cannot currently fail).
 */
static int dsi_proto_config(struct omap_dss_device *dssdev)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	u32 r;
	int buswidth = 0;

	/* Split the TX and RX FIFOs evenly: 32 units for each of the 4 VCs */
	dsi_config_tx_fifo(dsidev, DSI_FIFO_SIZE_32,
			DSI_FIFO_SIZE_32,
			DSI_FIFO_SIZE_32,
			DSI_FIFO_SIZE_32);

	dsi_config_rx_fifo(dsidev, DSI_FIFO_SIZE_32,
			DSI_FIFO_SIZE_32,
			DSI_FIFO_SIZE_32,
			DSI_FIFO_SIZE_32);

	/* XXX what values for the timeouts? */
	dsi_set_stop_state_counter(dsidev, 0x1000, false, false);
	dsi_set_ta_timeout(dsidev, 0x1fff, true, true);
	dsi_set_lp_rx_timeout(dsidev, 0x1fff, true, true);
	dsi_set_hs_tx_timeout(dsidev, 0x1fff, true, true);

	/* Map the panel pixel size to the VP_DATA_BUS_WIDTH field value */
	switch (dssdev->ctrl.pixel_size) {
	case 16:
		buswidth = 0;
		break;
	case 18:
		buswidth = 1;
		break;
	case 24:
		buswidth = 2;
		break;
	default:
		BUG();
	}

	r = dsi_read_reg(dsidev, DSI_CTRL);
	r = FLD_MOD(r, 1, 1, 1);	/* CS_RX_EN */
	r = FLD_MOD(r, 1, 2, 2);	/* ECC_RX_EN */
	r = FLD_MOD(r, 1, 3, 3);	/* TX_FIFO_ARBITRATION */
	r = FLD_MOD(r, 1, 4, 4);	/* VP_CLK_RATIO, always 1, see errata*/
	r = FLD_MOD(r, buswidth, 7, 6); /* VP_DATA_BUS_WIDTH */
	r = FLD_MOD(r, 0, 8, 8);	/* VP_CLK_POL */
	r = FLD_MOD(r, 2, 13, 12);	/* LINE_BUFFER, 2 lines */
	r = FLD_MOD(r, 1, 14, 14);	/* TRIGGER_RESET_MODE */
	r = FLD_MOD(r, 1, 19, 19);	/* EOT_ENABLE */
	/* Older DSI blocks configure DCS command handling globally here;
	 * newer ones do it per-VC (FEAT_DSI_DCS_CMD_CONFIG_VC) elsewhere */
	if (!dss_has_feature(FEAT_DSI_DCS_CMD_CONFIG_VC)) {
		r = FLD_MOD(r, 1, 24, 24);	/* DCS_CMD_ENABLE */
		/* DCS_CMD_CODE, 1=start, 0=continue */
		r = FLD_MOD(r, 0, 25, 25);
	}

	dsi_write_reg(dsidev, DSI_CTRL, r);

	/* Put all four virtual channels into their default configuration */
	dsi_vc_initial_config(dsidev, 0);
	dsi_vc_initial_config(dsidev, 1);
	dsi_vc_initial_config(dsidev, 2);
	dsi_vc_initial_config(dsidev, 3);

	return 0;
}
3571
/*
 * Derive the DDR clock pre/post counts and the HS mode enter/exit
 * latencies from the DSI PHY timing registers (DSIPHY_CFG0..2), and
 * program them into DSI_CLK_TIMING and DSI_VM_TIMING7.
 *
 * The values read back here are the ones programmed earlier during CIO
 * init; all the divide-by-4 conversions translate PHY tick counts into
 * DDR clock cycles.
 */
static void dsi_proto_timings(struct omap_dss_device *dssdev)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	unsigned tlpx, tclk_zero, tclk_prepare, tclk_trail;
	unsigned tclk_pre, tclk_post;
	unsigned ths_prepare, ths_prepare_ths_zero, ths_zero;
	unsigned ths_trail, ths_exit;
	unsigned ddr_clk_pre, ddr_clk_post;
	unsigned enter_hs_mode_lat, exit_hs_mode_lat;
	unsigned ths_eot;
	u32 r;

	r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG0);
	ths_prepare = FLD_GET(r, 31, 24);
	ths_prepare_ths_zero = FLD_GET(r, 23, 16);
	/* the register holds prepare+zero combined; recover zero alone */
	ths_zero = ths_prepare_ths_zero - ths_prepare;
	ths_trail = FLD_GET(r, 15, 8);
	ths_exit = FLD_GET(r, 7, 0);

	r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG1);
	tlpx = FLD_GET(r, 22, 16) * 2;
	tclk_trail = FLD_GET(r, 15, 8);
	tclk_zero = FLD_GET(r, 7, 0);

	r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG2);
	tclk_prepare = FLD_GET(r, 7, 0);

	/* min 8*UI */
	tclk_pre = 20;
	/* min 60ns + 52*UI */
	tclk_post = ns2ddr(dsidev, 60) + 26;

	/* EOT overhead shrinks with more data lanes */
	ths_eot = DIV_ROUND_UP(4, dsi_get_num_data_lanes_dssdev(dssdev));

	ddr_clk_pre = DIV_ROUND_UP(tclk_pre + tlpx + tclk_zero + tclk_prepare,
			4);
	ddr_clk_post = DIV_ROUND_UP(tclk_post + ths_trail, 4) + ths_eot;

	/* both fields are 8 bits wide and zero is not a valid count */
	BUG_ON(ddr_clk_pre == 0 || ddr_clk_pre > 255);
	BUG_ON(ddr_clk_post == 0 || ddr_clk_post > 255);

	r = dsi_read_reg(dsidev, DSI_CLK_TIMING);
	r = FLD_MOD(r, ddr_clk_pre, 15, 8);
	r = FLD_MOD(r, ddr_clk_post, 7, 0);
	dsi_write_reg(dsidev, DSI_CLK_TIMING, r);

	DSSDBG("ddr_clk_pre %u, ddr_clk_post %u\n",
			ddr_clk_pre,
			ddr_clk_post);

	enter_hs_mode_lat = 1 + DIV_ROUND_UP(tlpx, 4) +
		DIV_ROUND_UP(ths_prepare, 4) +
		DIV_ROUND_UP(ths_zero + 3, 4);

	exit_hs_mode_lat = DIV_ROUND_UP(ths_trail + ths_exit, 4) + 1 + ths_eot;

	r = FLD_VAL(enter_hs_mode_lat, 31, 16) |
		FLD_VAL(exit_hs_mode_lat, 15, 0);
	dsi_write_reg(dsidev, DSI_VM_TIMING7, r);

	DSSDBG("enter_hs_mode_lat %u, exit_hs_mode_lat %u\n",
			enter_hs_mode_lat, exit_hs_mode_lat);
}
3635
3636
/*
 * Byte-accumulator helpers used by dsi_update_screen_l4() to pack pixel
 * bytes into 32-bit words for the VC long-packet payload register.
 *
 * DSI_DECL_VARS declares the accumulator state: __dsi_cb counts bytes
 * collected so far (0..3), __dsi_cv is the word being assembled.
 */
#define DSI_DECL_VARS \
	int __dsi_cb = 0; u32 __dsi_cv = 0;

/* Write out a partially or fully assembled word, if any, and reset. */
#define DSI_FLUSH(dsidev, ch) \
	if (__dsi_cb > 0) { \
		/*DSSDBG("sending long packet %#010x\n", __dsi_cv);*/ \
		dsi_write_reg(dsidev, DSI_VC_LONG_PACKET_PAYLOAD(ch), __dsi_cv); \
		__dsi_cb = __dsi_cv = 0; \
	}

/* Append one byte (little-endian within the word); flush when full. */
#define DSI_PUSH(dsidev, ch, data) \
	do { \
		__dsi_cv |= (data) << (__dsi_cb * 8); \
		/*DSSDBG("cv = %#010x, cb = %d\n", __dsi_cv, __dsi_cb);*/ \
		if (++__dsi_cb > 3) \
			DSI_FLUSH(dsidev, ch); \
	} while (0)
3654
/*
 * CPU-driven (L4) update path: read pixels from the overlay framebuffer
 * and push them to the panel as DCS long writes through VC0's TX FIFO.
 *
 * Only RGB24-in-32bit framebuffers with a 24bpp panel interface are
 * supported. Returns 0 on success, -EINVAL for unsupported formats, or
 * -EIO if the TX FIFO never drains (interface is disabled in that case).
 */
static int dsi_update_screen_l4(struct omap_dss_device *dssdev,
			int x, int y, int w, int h)
{
	/* Note: supports only 24bit colors in 32bit container */
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	int first = 1;
	int fifo_stalls = 0;
	int max_dsi_packet_size;
	int max_data_per_packet;
	int max_pixels_per_packet;
	int pixels_left;
	int bytespp = dssdev->ctrl.pixel_size / 8;
	int scr_width;
	u32 __iomem *data;
	int start_offset;
	int horiz_inc;
	int current_x;
	struct omap_overlay *ovl;

	debug_irq = 0;

	DSSDBG("dsi_update_screen_l4 (%d,%d %dx%d)\n",
			x, y, w, h);

	ovl = dssdev->manager->overlays[0];

	if (ovl->info.color_mode != OMAP_DSS_COLOR_RGB24U)
		return -EINVAL;

	if (dssdev->ctrl.pixel_size != 24)
		return -EINVAL;

	scr_width = ovl->info.screen_width;
	data = ovl->info.vaddr;

	/* word offset of the update region's top-left pixel, and the
	 * per-row skip needed when the region is narrower than the screen */
	start_offset = scr_width * y + x;
	horiz_inc = scr_width - w;
	current_x = x;

	/* We need header(4) + DCSCMD(1) + pixels(numpix*bytespp) bytes
	 * in fifo */

	/* When using CPU, max long packet size is TX buffer size */
	max_dsi_packet_size = dsi->vc[0].fifo_size * 32 * 4;

	/* we seem to get better perf if we divide the tx fifo to half,
	   and while the other half is being sent, we fill the other half
	   max_dsi_packet_size /= 2; */

	max_data_per_packet = max_dsi_packet_size - 4 - 1;

	max_pixels_per_packet = max_data_per_packet / bytespp;

	DSSDBG("max_pixels_per_packet %d\n", max_pixels_per_packet);

	pixels_left = w * h;

	DSSDBG("total pixels %d\n", pixels_left);

	data += start_offset;

	while (pixels_left > 0) {
		/* 0x2c = write_memory_start */
		/* 0x3c = write_memory_continue */
		u8 dcs_cmd = first ? 0x2c : 0x3c;
		int pixels;
		DSI_DECL_VARS;
		first = 0;

/* Three alternative strategies for waiting for FIFO room; only the
 * first is compiled in. All bail out with -EIO after ~1M polls. */
#if 1
		/* using fifo not empty */
		/* TX_FIFO_NOT_EMPTY */
		while (FLD_GET(dsi_read_reg(dsidev, DSI_VC_CTRL(0)), 5, 5)) {
			fifo_stalls++;
			if (fifo_stalls > 0xfffff) {
				DSSERR("fifo stalls overflow, pixels left %d\n",
						pixels_left);
				dsi_if_enable(dsidev, 0);
				return -EIO;
			}
			udelay(1);
		}
#elif 1
		/* using fifo emptiness */
		while ((REG_GET(dsidev, DSI_TX_FIFO_VC_EMPTINESS, 7, 0)+1)*4 <
				max_dsi_packet_size) {
			fifo_stalls++;
			if (fifo_stalls > 0xfffff) {
				DSSERR("fifo stalls overflow, pixels left %d\n",
					       pixels_left);
				dsi_if_enable(dsidev, 0);
				return -EIO;
			}
		}
#else
		while ((REG_GET(dsidev, DSI_TX_FIFO_VC_EMPTINESS,
				7, 0) + 1) * 4 == 0) {
			fifo_stalls++;
			if (fifo_stalls > 0xfffff) {
				DSSERR("fifo stalls overflow, pixels left %d\n",
					       pixels_left);
				dsi_if_enable(dsidev, 0);
				return -EIO;
			}
		}
#endif
		pixels = min(max_pixels_per_packet, pixels_left);

		pixels_left -= pixels;

		dsi_vc_write_long_header(dsidev, 0, DSI_DT_DCS_LONG_WRITE,
				1 + pixels * bytespp, 0);

		DSI_PUSH(dsidev, 0, dcs_cmd);

		/* emit RGB bytes, high byte first, wrapping to the next
		 * framebuffer row at the right edge of the region */
		while (pixels-- > 0) {
			u32 pix = __raw_readl(data++);

			DSI_PUSH(dsidev, 0, (pix >> 16) & 0xff);
			DSI_PUSH(dsidev, 0, (pix >> 8) & 0xff);
			DSI_PUSH(dsidev, 0, (pix >> 0) & 0xff);

			current_x++;
			if (current_x == x+w) {
				current_x = x;
				data += horiz_inc;
			}
		}

		DSI_FLUSH(dsidev, 0);
	}

	return 0;
}
3790
/*
 * DISPC-driven update path: configure the update channel's video port,
 * program the TE/transfer size, arm the framedone timeout, and kick
 * DISPC to stream the frame to the panel as DCS long writes.
 *
 * Completion is asynchronous: dsi_framedone_irq_callback() (or the
 * timeout work on failure) finishes the transfer.
 */
static void dsi_update_screen_dispc(struct omap_dss_device *dssdev,
		u16 x, u16 y, u16 w, u16 h)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	unsigned bytespp;
	unsigned bytespl;
	unsigned bytespf;
	unsigned total_len;
	unsigned packet_payload;
	unsigned packet_len;
	u32 l;
	int r;
	const unsigned channel = dsi->update_channel;
	const unsigned line_buf_size = dsi_get_line_buf_size(dsidev);

	DSSDBG("dsi_update_screen_dispc(%d,%d %dx%d)\n",
			x, y, w, h);

	dsi_vc_config_vp(dsidev, channel);

	bytespp	= dssdev->ctrl.pixel_size / 8;
	bytespl = w * bytespp;
	bytespf = bytespl * h;

	/* NOTE: packet_payload has to be equal to N * bytespl, where N is
	 * number of lines in a packet.  See errata about VP_CLK_RATIO */

	if (bytespf < line_buf_size)
		packet_payload = bytespf;
	else
		packet_payload = (line_buf_size) / bytespl * bytespl;

	packet_len = packet_payload + 1;	/* 1 byte for DCS cmd */
	total_len = (bytespf / packet_payload) * packet_len;

	/* a final short packet carries the remainder, plus its DCS byte */
	if (bytespf % packet_payload)
		total_len += (bytespf % packet_payload) + 1;

	l = FLD_VAL(total_len, 23, 0); /* TE_SIZE */
	dsi_write_reg(dsidev, DSI_VC_TE(channel), l);

	dsi_vc_write_long_header(dsidev, channel, DSI_DT_DCS_LONG_WRITE,
		packet_len, 0);

	/* with TE: wait for the panel's tearing-effect signal;
	 * without TE: start the transfer immediately */
	if (dsi->te_enabled)
		l = FLD_MOD(l, 1, 30, 30); /* TE_EN */
	else
		l = FLD_MOD(l, 1, 31, 31); /* TE_START */
	dsi_write_reg(dsidev, DSI_VC_TE(channel), l);

	/* We put SIDLEMODE to no-idle for the duration of the transfer,
	 * because DSS interrupts are not capable of waking up the CPU and the
	 * framedone interrupt could be delayed for quite a long time. I think
	 * the same goes for any DSS interrupts, but for some reason I have not
	 * seen the problem anywhere else than here.
	 */
	dispc_disable_sidle();

	dsi_perf_mark_start(dsidev);

	/* safety net: dsi_framedone_timeout_work_callback() fires if the
	 * FRAMEDONE interrupt never arrives */
	r = schedule_delayed_work(&dsi->framedone_timeout_work,
		msecs_to_jiffies(250));
	BUG_ON(r == 0);

	dss_start_update(dssdev);

	if (dsi->te_enabled) {
		/* disable LP_RX_TO, so that we can receive TE.  Time to wait
		 * for TE is longer than the timer allows */
		REG_FLD_MOD(dsidev, DSI_TIMING2, 0, 15, 15); /* LP_RX_TO */

		/* BTA triggers the panel to send the TE trigger */
		dsi_vc_send_bta(dsidev, channel);

#ifdef DSI_CATCH_MISSING_TE
		mod_timer(&dsi->te_timer, jiffies + msecs_to_jiffies(250));
#endif
	}
}
3870
#ifdef DSI_CATCH_MISSING_TE
/* Debug timer callback: warn if the panel never sent the TE trigger
 * armed in dsi_update_screen_dispc(). Diagnostic only; no recovery. */
static void dsi_te_timeout(unsigned long arg)
{
	DSSERR("TE not received for 250ms!\n");
}
#endif
3877
/*
 * Common end-of-frame handling for both the FRAMEDONE interrupt path and
 * the timeout path: undo the per-transfer state set up in
 * dsi_update_screen_dispc() and notify the panel driver.
 *
 * @error: 0 on normal completion, -ETIMEDOUT from the timeout work.
 */
static void dsi_handle_framedone(struct platform_device *dsidev, int error)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	/* SIDLEMODE back to smart-idle */
	dispc_enable_sidle();

	if (dsi->te_enabled) {
		/* enable LP_RX_TO again after the TE */
		REG_FLD_MOD(dsidev, DSI_TIMING2, 1, 15, 15); /* LP_RX_TO */
	}

	/* callback and data were stored by omap_dsi_update() */
	dsi->framedone_callback(error, dsi->framedone_data);

	if (!error)
		dsi_perf_show(dsidev, "DISPC");
}
3895
/*
 * Delayed-work handler armed by dsi_update_screen_dispc(): runs when the
 * FRAMEDONE interrupt has not arrived within 250ms and completes the
 * transfer with -ETIMEDOUT.
 */
static void dsi_framedone_timeout_work_callback(struct work_struct *work)
{
	struct dsi_data *dsi = container_of(work, struct dsi_data,
			framedone_timeout_work.work);
	/* XXX While extremely unlikely, we could get FRAMEDONE interrupt after
	 * 250ms which would conflict with this timeout work. What should be
	 * done is first cancel the transfer on the HW, and then cancel the
	 * possibly scheduled framedone work. However, cancelling the transfer
	 * on the HW is buggy, and would probably require resetting the whole
	 * DSI */

	DSSERR("Framedone not received for 250ms!\n");

	dsi_handle_framedone(dsi->pdev, -ETIMEDOUT);
}
3911
/*
 * DISPC FRAMEDONE ISR (registered in dsi_display_init_dispc()): cancels
 * the safety timeout and completes the transfer with success.
 */
static void dsi_framedone_irq_callback(void *data, u32 mask)
{
	struct omap_dss_device *dssdev = (struct omap_dss_device *) data;
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	/* Note: We get FRAMEDONE when DISPC has finished sending pixels and
	 * turns itself off. However, DSI still has the pixels in its buffers,
	 * and is sending the data.
	 */

	/* IRQ-context-safe cancel of the 250ms timeout work */
	__cancel_delayed_work(&dsi->framedone_timeout_work);

	dsi_handle_framedone(dsidev, 0);

#ifdef CONFIG_OMAP2_DSS_FAKE_VSYNC
	dispc_fake_vsync_irq();
#endif
}
3931
3932int omap_dsi_prepare_update(struct omap_dss_device *dssdev,
3933                                    u16 *x, u16 *y, u16 *w, u16 *h,
3934                                    bool enlarge_update_area)
3935{
3936        struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3937        u16 dw, dh;
3938
3939        dssdev->driver->get_resolution(dssdev, &dw, &dh);
3940
3941        if  (*x > dw || *y > dh)
3942                return -EINVAL;
3943
3944        if (*x + *w > dw)
3945                return -EINVAL;
3946
3947        if (*y + *h > dh)
3948                return -EINVAL;
3949
3950        if (*w == 1)
3951                return -EINVAL;
3952
3953        if (*w == 0 || *h == 0)
3954                return -EINVAL;
3955
3956        dsi_perf_mark_setup(dsidev);
3957
3958        if (dssdev->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) {
3959                dss_setup_partial_planes(dssdev, x, y, w, h,
3960                                enlarge_update_area);
3961                dispc_set_lcd_size(dssdev->manager->id, *w, *h);
3962        }
3963
3964        return 0;
3965}
3966EXPORT_SYMBOL(omap_dsi_prepare_update);
3967
3968int omap_dsi_update(struct omap_dss_device *dssdev,
3969                int channel,
3970                u16 x, u16 y, u16 w, u16 h,
3971                void (*callback)(int, void *), void *data)
3972{
3973        struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3974        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3975
3976        dsi->update_channel = channel;
3977
3978        /* OMAP DSS cannot send updates of odd widths.
3979         * omap_dsi_prepare_update() makes the widths even, but add a BUG_ON
3980         * here to make sure we catch erroneous updates. Otherwise we'll only
3981         * see rather obscure HW error happening, as DSS halts. */
3982        BUG_ON(x % 2 == 1);
3983
3984        if (dssdev->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) {
3985                dsi->framedone_callback = callback;
3986                dsi->framedone_data = data;
3987
3988                dsi->update_region.x = x;
3989                dsi->update_region.y = y;
3990                dsi->update_region.w = w;
3991                dsi->update_region.h = h;
3992                dsi->update_region.device = dssdev;
3993
3994                dsi_update_screen_dispc(dssdev, x, y, w, h);
3995        } else {
3996                int r;
3997
3998                r = dsi_update_screen_l4(dssdev, x, y, w, h);
3999                if (r)
4000                        return r;
4001
4002                dsi_perf_show(dsidev, "L4");
4003                callback(0, data);
4004        }
4005
4006        return 0;
4007}
4008EXPORT_SYMBOL(omap_dsi_update);
4009
4010/* Display funcs */
4011
/*
 * Configure the DISPC side for DSI command-mode operation: register the
 * FRAMEDONE ISR for this manager's channel and program the LCD output
 * for the DSI parallel interface.
 *
 * Returns 0 on success or the error from omap_dispc_register_isr().
 */
static int dsi_display_init_dispc(struct omap_dss_device *dssdev)
{
	int r;
	u32 irq;

	/* each LCD channel has its own FRAMEDONE interrupt */
	irq = dssdev->manager->id == OMAP_DSS_CHANNEL_LCD ?
		DISPC_IRQ_FRAMEDONE : DISPC_IRQ_FRAMEDONE2;

	r = omap_dispc_register_isr(dsi_framedone_irq_callback, (void *) dssdev,
			irq);
	if (r) {
		DSSERR("can't get FRAMEDONE irq\n");
		return r;
	}

	dispc_set_lcd_display_type(dssdev->manager->id,
			OMAP_DSS_LCD_DISPLAY_TFT);

	dispc_set_parallel_interface_mode(dssdev->manager->id,
			OMAP_DSS_PARALLELMODE_DSI);
	dispc_enable_fifohandcheck(dssdev->manager->id, 1);

	dispc_set_tft_data_lines(dssdev->manager->id, dssdev->ctrl.pixel_size);

	{
		/* minimal blanking; the DSI video port paces the pixels,
		 * so conventional LCD timings are not meaningful here */
		struct omap_video_timings timings = {
			.hsw		= 1,
			.hfp		= 1,
			.hbp		= 1,
			.vsw		= 1,
			.vfp		= 0,
			.vbp		= 0,
		};

		dispc_set_lcd_timings(dssdev->manager->id, &timings);
	}

	return 0;
}
4051
4052static void dsi_display_uninit_dispc(struct omap_dss_device *dssdev)
4053{
4054        u32 irq;
4055
4056        irq = dssdev->manager->id == OMAP_DSS_CHANNEL_LCD ?
4057                DISPC_IRQ_FRAMEDONE : DISPC_IRQ_FRAMEDONE2;
4058
4059        omap_dispc_unregister_isr(dsi_framedone_irq_callback, (void *) dssdev,
4060                        irq);
4061}
4062
4063static int dsi_configure_dsi_clocks(struct omap_dss_device *dssdev)
4064{
4065        struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4066        struct dsi_clock_info cinfo;
4067        int r;
4068
4069        /* we always use DSS_CLK_SYSCK as input clock */
4070        cinfo.use_sys_clk = true;
4071        cinfo.regn  = dssdev->clocks.dsi.regn;
4072        cinfo.regm  = dssdev->clocks.dsi.regm;
4073        cinfo.regm_dispc = dssdev->clocks.dsi.regm_dispc;
4074        cinfo.regm_dsi = dssdev->clocks.dsi.regm_dsi;
4075        r = dsi_calc_clock_rates(dssdev, &cinfo);
4076        if (r) {
4077                DSSERR("Failed to calc dsi clocks\n");
4078                return r;
4079        }
4080
4081        r = dsi_pll_set_clock_div(dsidev, &cinfo);
4082        if (r) {
4083                DSSERR("Failed to set dsi clocks\n");
4084                return r;
4085        }
4086
4087        return 0;
4088}
4089
4090static int dsi_configure_dispc_clocks(struct omap_dss_device *dssdev)
4091{
4092        struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4093        struct dispc_clock_info dispc_cinfo;
4094        int r;
4095        unsigned long long fck;
4096
4097        fck = dsi_get_pll_hsdiv_dispc_rate(dsidev);
4098
4099        dispc_cinfo.lck_div = dssdev->clocks.dispc.channel.lck_div;
4100        dispc_cinfo.pck_div = dssdev->clocks.dispc.channel.pck_div;
4101
4102        r = dispc_calc_clock_rates(fck, &dispc_cinfo);
4103        if (r) {
4104                DSSERR("Failed to calc dispc clocks\n");
4105                return r;
4106        }
4107
4108        r = dispc_set_clock_div(dssdev->manager->id, &dispc_cinfo);
4109        if (r) {
4110                DSSERR("Failed to set dispc clocks\n");
4111                return r;
4112        }
4113
4114        return 0;
4115}
4116
/*
 * Bring up the DSI side of the display: PLL, clock sources, complex IO,
 * protocol timings and configuration, then enable the interface.
 *
 * Uses goto-based unwinding so each failure undoes exactly what was set
 * up before it. Returns 0 on success or a negative error.
 */
static int dsi_display_init_dsi(struct omap_dss_device *dssdev)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	int dsi_module = dsi_get_dsidev_id(dsidev);
	int r;

	/* enable both HSDIV outputs (DISPC and DSI functional clocks) */
	r = dsi_pll_init(dsidev, true, true);
	if (r)
		goto err0;

	r = dsi_configure_dsi_clocks(dssdev);
	if (r)
		goto err1;

	/* switch DISPC, the DSI module and the LCD channel over to the
	 * PLL-derived clock sources configured for this device */
	dss_select_dispc_clk_source(dssdev->clocks.dispc.dispc_fclk_src);
	dss_select_dsi_clk_source(dsi_module, dssdev->clocks.dsi.dsi_fclk_src);
	dss_select_lcd_clk_source(dssdev->manager->id,
			dssdev->clocks.dispc.channel.lcd_clk_src);

	DSSDBG("PLL OK\n");

	r = dsi_configure_dispc_clocks(dssdev);
	if (r)
		goto err2;

	r = dsi_cio_init(dssdev);
	if (r)
		goto err2;

	_dsi_print_reset_status(dsidev);

	dsi_proto_timings(dssdev);
	dsi_set_lp_clk_divisor(dssdev);

	if (1)
		_dsi_print_reset_status(dsidev);

	r = dsi_proto_config(dssdev);
	if (r)
		goto err3;

	/* enable interface */
	dsi_vc_enable(dsidev, 0, 1);
	dsi_vc_enable(dsidev, 1, 1);
	dsi_vc_enable(dsidev, 2, 1);
	dsi_vc_enable(dsidev, 3, 1);
	dsi_if_enable(dsidev, 1);
	dsi_force_tx_stop_mode_io(dsidev);

	return 0;
err3:
	dsi_cio_uninit(dsidev);
err2:
	/* fall back to the free-running functional clock sources */
	dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK);
	dss_select_dsi_clk_source(dsi_module, OMAP_DSS_CLK_SRC_FCK);
err1:
	dsi_pll_uninit(dsidev, true);
err0:
	return r;
}
4177
/*
 * Tear down the DSI side, mirroring dsi_display_init_dsi() in reverse:
 * optionally enter ULPS, disable the interface and VCs, restore FCK
 * clock sources, then shut down CIO and the PLL.
 *
 * @disconnect_lanes: passed through to dsi_pll_uninit()
 * @enter_ulps: put the lanes into ultra-low-power state first (skipped
 *              if ULPS is already active)
 */
static void dsi_display_uninit_dsi(struct omap_dss_device *dssdev,
		bool disconnect_lanes, bool enter_ulps)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	int dsi_module = dsi_get_dsidev_id(dsidev);

	if (enter_ulps && !dsi->ulps_enabled)
		dsi_enter_ulps(dsidev);

	/* disable interface */
	dsi_if_enable(dsidev, 0);
	dsi_vc_enable(dsidev, 0, 0);
	dsi_vc_enable(dsidev, 1, 0);
	dsi_vc_enable(dsidev, 2, 0);
	dsi_vc_enable(dsidev, 3, 0);

	/* clocks must be moved off the DSI PLL before it is shut down */
	dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK);
	dss_select_dsi_clk_source(dsi_module, OMAP_DSS_CLK_SRC_FCK);
	dsi_cio_uninit(dsidev);
	dsi_pll_uninit(dsidev, disconnect_lanes);
}
4200
/*
 * Basic DSI module setup after reset: power-management bits in
 * DSI_SYSCONFIG and the interrupt bookkeeping. Returns 0.
 */
static int dsi_core_init(struct platform_device *dsidev)
{
	/* Autoidle */
	REG_FLD_MOD(dsidev, DSI_SYSCONFIG, 1, 0, 0);

	/* ENWAKEUP */
	REG_FLD_MOD(dsidev, DSI_SYSCONFIG, 1, 2, 2);

	/* SIDLEMODE smart-idle */
	REG_FLD_MOD(dsidev, DSI_SYSCONFIG, 2, 4, 3);

	_dsi_initialize_irq(dsidev);

	return 0;
}
4216
/*
 * Full display enable path: clocks on, module reset and core init, then
 * the DISPC and DSI halves. Caller must hold the DSI bus lock.
 *
 * On failure everything brought up so far is unwound and the error is
 * returned.
 */
int omapdss_dsi_display_enable(struct omap_dss_device *dssdev)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	int r = 0;

	DSSDBG("dsi_display_enable\n");

	WARN_ON(!dsi_bus_is_locked(dsidev));

	mutex_lock(&dsi->lock);

	r = omap_dss_start_device(dssdev);
	if (r) {
		DSSERR("failed to start device\n");
		goto err0;
	}

	enable_clocks(1);
	dsi_enable_pll_clock(dsidev, 1);

	r = _dsi_reset(dsidev);
	if (r)
		goto err1;

	dsi_core_init(dsidev);

	r = dsi_display_init_dispc(dssdev);
	if (r)
		goto err1;

	r = dsi_display_init_dsi(dssdev);
	if (r)
		goto err2;

	mutex_unlock(&dsi->lock);

	return 0;

err2:
	dsi_display_uninit_dispc(dssdev);
err1:
	enable_clocks(0);
	dsi_enable_pll_clock(dsidev, 0);
	omap_dss_stop_device(dssdev);
err0:
	mutex_unlock(&dsi->lock);
	DSSDBG("dsi_display_enable FAILED\n");
	return r;
}
EXPORT_SYMBOL(omapdss_dsi_display_enable);
4268
/*
 * Full display disable path, the reverse of omapdss_dsi_display_enable().
 * Caller must hold the DSI bus lock.
 *
 * @disconnect_lanes/@enter_ulps are forwarded to dsi_display_uninit_dsi().
 */
void omapdss_dsi_display_disable(struct omap_dss_device *dssdev,
		bool disconnect_lanes, bool enter_ulps)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	DSSDBG("dsi_display_disable\n");

	WARN_ON(!dsi_bus_is_locked(dsidev));

	mutex_lock(&dsi->lock);

	dsi_display_uninit_dispc(dssdev);

	dsi_display_uninit_dsi(dssdev, disconnect_lanes, enter_ulps);

	/* clocks off only after the hardware has been quiesced */
	enable_clocks(0);
	dsi_enable_pll_clock(dsidev, 0);

	omap_dss_stop_device(dssdev);

	mutex_unlock(&dsi->lock);
}
EXPORT_SYMBOL(omapdss_dsi_display_disable);
4293
4294int omapdss_dsi_enable_te(struct omap_dss_device *dssdev, bool enable)
4295{
4296        struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4297        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4298
4299        dsi->te_enabled = enable;
4300        return 0;
4301}
4302EXPORT_SYMBOL(omapdss_dsi_enable_te);
4303
4304void dsi_get_overlay_fifo_thresholds(enum omap_plane plane,
4305                u32 fifo_size, enum omap_burst_size *burst_size,
4306                u32 *fifo_low, u32 *fifo_high)
4307{
4308        unsigned burst_size_bytes;
4309
4310        *burst_size = OMAP_DSS_BURST_16x32;
4311        burst_size_bytes = 16 * 32 / 8;
4312
4313        *fifo_high = fifo_size - burst_size_bytes;
4314        *fifo_low = fifo_size - burst_size_bytes * 2;
4315}
4316
4317int dsi_init_display(struct omap_dss_device *dssdev)
4318{
4319        struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4320        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4321        int dsi_module = dsi_get_dsidev_id(dsidev);
4322
4323        DSSDBG("DSI init\n");
4324
4325        /* XXX these should be figured out dynamically */
4326        dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE |
4327                OMAP_DSS_DISPLAY_CAP_TEAR_ELIM;
4328
4329        if (dsi->vdds_dsi_reg == NULL) {
4330                struct regulator *vdds_dsi;
4331
4332                vdds_dsi = regulator_get(&dsi->pdev->dev, "vdds_dsi");
4333
4334                if (IS_ERR(vdds_dsi)) {
4335                        DSSERR("can't get VDDS_DSI regulator\n");
4336                        return PTR_ERR(vdds_dsi);
4337                }
4338
4339                dsi->vdds_dsi_reg = vdds_dsi;
4340        }
4341
4342        if (dsi_get_num_data_lanes_dssdev(dssdev) > dsi->num_data_lanes) {
4343                DSSERR("DSI%d can't support more than %d data lanes\n",
4344                        dsi_module + 1, dsi->num_data_lanes);
4345                return -EINVAL;
4346        }
4347
4348        return 0;
4349}
4350
4351int omap_dsi_request_vc(struct omap_dss_device *dssdev, int *channel)
4352{
4353        struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4354        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4355        int i;
4356
4357        for (i = 0; i < ARRAY_SIZE(dsi->vc); i++) {
4358                if (!dsi->vc[i].dssdev) {
4359                        dsi->vc[i].dssdev = dssdev;
4360                        *channel = i;
4361                        return 0;
4362                }
4363        }
4364
4365        DSSERR("cannot get VC for display %s", dssdev->name);
4366        return -ENOSPC;
4367}
4368EXPORT_SYMBOL(omap_dsi_request_vc);
4369
/*
 * Set the DSI virtual channel ID (the ID carried in packet headers) for
 * a previously allocated VC. Returns 0, or -EINVAL if either index is
 * out of range or the VC is not owned by @dssdev.
 */
int omap_dsi_set_vc_id(struct omap_dss_device *dssdev, int channel, int vc_id)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	if (vc_id < 0 || vc_id > 3) {
		DSSERR("VC ID out of range\n");
		return -EINVAL;
	}

	if (channel < 0 || channel > 3) {
		DSSERR("Virtual Channel out of range\n");
		return -EINVAL;
	}

	/* only the display that requested the VC may reconfigure it */
	if (dsi->vc[channel].dssdev != dssdev) {
		DSSERR("Virtual Channel not allocated to display %s\n",
			dssdev->name);
		return -EINVAL;
	}

	dsi->vc[channel].vc_id = vc_id;

	return 0;
}
EXPORT_SYMBOL(omap_dsi_set_vc_id);
4396
4397void omap_dsi_release_vc(struct omap_dss_device *dssdev, int channel)
4398{
4399        struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4400        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4401
4402        if ((channel >= 0 && channel <= 3) &&
4403                dsi->vc[channel].dssdev == dssdev) {
4404                dsi->vc[channel].dssdev = NULL;
4405                dsi->vc[channel].vc_id = 0;
4406        }
4407}
4408EXPORT_SYMBOL(omap_dsi_release_vc);
4409
4410void dsi_wait_pll_hsdiv_dispc_active(struct platform_device *dsidev)
4411{
4412        if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 7, 1) != 1)
4413                DSSERR("%s (%s) not active\n",
4414                        dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC),
4415                        dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC));
4416}
4417
4418void dsi_wait_pll_hsdiv_dsi_active(struct platform_device *dsidev)
4419{
4420        if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 8, 1) != 1)
4421                DSSERR("%s (%s) not active\n",
4422                        dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI),
4423                        dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI));
4424}
4425
4426static void dsi_calc_clock_param_ranges(struct platform_device *dsidev)
4427{
4428        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4429
4430        dsi->regn_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGN);
4431        dsi->regm_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGM);
4432        dsi->regm_dispc_max =
4433                dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGM_DISPC);
4434        dsi->regm_dsi_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGM_DSI);
4435        dsi->fint_min = dss_feat_get_param_min(FEAT_PARAM_DSIPLL_FINT);
4436        dsi->fint_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_FINT);
4437        dsi->lpdiv_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_LPDIV);
4438}
4439
/*
 * dsi_init() - one-time, probe-time initialization of one DSI module.
 *
 * Allocates and registers the per-module driver data, initializes locks,
 * work and timers, maps the register space, installs the IRQ handler,
 * resets the virtual-channel table and caches the PLL parameter ranges.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * acquired so far is released through the goto cleanup chain.
 */
static int dsi_init(struct platform_device *dsidev)
{
	struct omap_display_platform_data *dss_plat_data;
	struct omap_dss_board_info *board_info;
	u32 rev;
	int r, i, dsi_module = dsi_get_dsidev_id(dsidev);
	struct resource *dsi_mem;
	struct dsi_data *dsi;

	dsi = kzalloc(sizeof(*dsi), GFP_KERNEL);
	if (!dsi) {
		r = -ENOMEM;
		goto err0;
	}

	/* Make the module reachable both by index and via drvdata. */
	dsi->pdev = dsidev;
	dsi_pdev_map[dsi_module] = dsidev;
	dev_set_drvdata(&dsidev->dev, dsi);

	/* Board data supplies the DSI pad mux hook.  NOTE(review): neither
	 * platform_data nor board_data is NULL-checked here — presumably
	 * guaranteed by the DSS core; confirm before reuse elsewhere. */
	dss_plat_data = dsidev->dev.platform_data;
	board_info = dss_plat_data->board_data;
	dsi->dsi_mux_pads = board_info->dsi_mux_pads;

	spin_lock_init(&dsi->irq_lock);
	spin_lock_init(&dsi->errors_lock);
	dsi->errors = 0;

#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
	spin_lock_init(&dsi->irq_stats_lock);
	dsi->irq_stats.last_reset = jiffies;
#endif

	mutex_init(&dsi->lock);
	/* Binary semaphore serializing access to the DSI bus. */
	sema_init(&dsi->bus_lock, 1);

	/* Deferrable work: the FRAMEDONE timeout is only a diagnostic, so
	 * it need not wake an idle CPU at the exact deadline. */
	INIT_DELAYED_WORK_DEFERRABLE(&dsi->framedone_timeout_work,
			dsi_framedone_timeout_work_callback);

#ifdef DSI_CATCH_MISSING_TE
	/* Watchdog used to report a tearing-effect interrupt that never
	 * arrives (see DSI_CATCH_MISSING_TE at the top of the file). */
	init_timer(&dsi->te_timer);
	dsi->te_timer.function = dsi_te_timeout;
	dsi->te_timer.data = 0;
#endif
	dsi_mem = platform_get_resource(dsi->pdev, IORESOURCE_MEM, 0);
	if (!dsi_mem) {
		DSSERR("can't get IORESOURCE_MEM DSI\n");
		r = -EINVAL;
		goto err1;
	}
	dsi->base = ioremap(dsi_mem->start, resource_size(dsi_mem));
	if (!dsi->base) {
		DSSERR("can't ioremap DSI\n");
		r = -ENOMEM;
		goto err1;
	}
	dsi->irq = platform_get_irq(dsi->pdev, 0);
	if (dsi->irq < 0) {
		DSSERR("platform_get_irq failed\n");
		r = -ENODEV;
		goto err2;
	}

	/* Shared IRQ: the handler filters on the dev_id (dsi->pdev). */
	r = request_irq(dsi->irq, omap_dsi_irq_handler, IRQF_SHARED,
		dev_name(&dsidev->dev), dsi->pdev);
	if (r < 0) {
		DSSERR("request_irq failed\n");
		goto err2;
	}

	/* DSI VCs initialization: all channels start unowned, in L4 mode. */
	for (i = 0; i < ARRAY_SIZE(dsi->vc); i++) {
		dsi->vc[i].mode = DSI_VC_MODE_L4;
		dsi->vc[i].dssdev = NULL;
		dsi->vc[i].vc_id = 0;
	}

	dsi_calc_clock_param_ranges(dsidev);

	/* Clocks must be running to touch the DSI registers below. */
	enable_clocks(1);

	rev = dsi_read_reg(dsidev, DSI_REVISION);
	dev_dbg(&dsidev->dev, "OMAP DSI rev %d.%d\n",
	       FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));

	dsi->num_data_lanes = dsi_get_num_data_lanes(dsidev);

	enable_clocks(0);

	return 0;
err2:
	iounmap(dsi->base);
err1:
	kfree(dsi);
err0:
	return r;
}
4536
4537static void dsi_exit(struct platform_device *dsidev)
4538{
4539        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4540
4541        if (dsi->vdds_dsi_reg != NULL) {
4542                if (dsi->vdds_dsi_enabled) {
4543                        regulator_disable(dsi->vdds_dsi_reg);
4544                        dsi->vdds_dsi_enabled = false;
4545                }
4546
4547                regulator_put(dsi->vdds_dsi_reg);
4548                dsi->vdds_dsi_reg = NULL;
4549        }
4550
4551        free_irq(dsi->irq, dsi->pdev);
4552        iounmap(dsi->base);
4553
4554        kfree(dsi);
4555
4556        DSSDBG("omap_dsi_exit\n");
4557}
4558
4559/* DSI1 HW IP initialisation */
4560static int omap_dsi1hw_probe(struct platform_device *dsidev)
4561{
4562        int r;
4563
4564        r = dsi_init(dsidev);
4565        if (r) {
4566                DSSERR("Failed to initialize DSI\n");
4567                goto err_dsi;
4568        }
4569err_dsi:
4570        return r;
4571}
4572
4573static int omap_dsi1hw_remove(struct platform_device *dsidev)
4574{
4575        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4576
4577        dsi_exit(dsidev);
4578        WARN_ON(dsi->scp_clk_refcount > 0);
4579        return 0;
4580}
4581
/* Platform driver glue for the DSI1 HW IP block; bound by device name. */
static struct platform_driver omap_dsi1hw_driver = {
	.probe          = omap_dsi1hw_probe,
	.remove         = omap_dsi1hw_remove,
	.driver         = {
		.name   = "omapdss_dsi1",
		.owner  = THIS_MODULE,
	},
};
4590
/* Register the DSI platform driver; returns platform_driver_register()'s
 * result (0 on success, negative errno on failure). */
int dsi_init_platform_driver(void)
{
	return platform_driver_register(&omap_dsi1hw_driver);
}
4595
4596void dsi_uninit_platform_driver(void)
4597{
4598        return platform_driver_unregister(&omap_dsi1hw_driver);
4599}
4600