/*
 * davinci_mmc.c - TI DaVinci MMC/SD/SDIO driver
 *
 * Copyright (C) 2006 Texas Instruments.
 *       Original author: Purushotam Kumar
 * Copyright (C) 2009 David Brownell
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/cpufreq.h>
#include <linux/mmc/host.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/mmc/mmc.h>

#include <mach/mmc.h>
#include <mach/edma.h>

/*
 * Register Definitions
 */
#define DAVINCI_MMCCTL       0x00 /* Control Register                  */
#define DAVINCI_MMCCLK       0x04 /* Memory Clock Control Register     */
#define DAVINCI_MMCST0       0x08 /* Status Register 0                 */
#define DAVINCI_MMCST1       0x0C /* Status Register 1                 */
#define DAVINCI_MMCIM        0x10 /* Interrupt Mask Register           */
#define DAVINCI_MMCTOR       0x14 /* Response Time-Out Register        */
#define DAVINCI_MMCTOD       0x18 /* Data Read Time-Out Register       */
#define DAVINCI_MMCBLEN      0x1C /* Block Length Register             */
#define DAVINCI_MMCNBLK      0x20 /* Number of Blocks Register         */
#define DAVINCI_MMCNBLC      0x24 /* Number of Blocks Counter Register */
#define DAVINCI_MMCDRR       0x28 /* Data Receive Register             */
#define DAVINCI_MMCDXR       0x2C /* Data Transmit Register            */
#define DAVINCI_MMCCMD       0x30 /* Command Register                  */
#define DAVINCI_MMCARGHL     0x34 /* Argument Register                 */
#define DAVINCI_MMCRSP01     0x38 /* Response Register 0 and 1         */
#define DAVINCI_MMCRSP23     0x3C /* Response Register 2 and 3         */
#define DAVINCI_MMCRSP45     0x40 /* Response Register 4 and 5         */
#define DAVINCI_MMCRSP67     0x44 /* Response Register 6 and 7         */
#define DAVINCI_MMCDRSP      0x48 /* Data Response Register            */
#define DAVINCI_MMCETOK      0x4C
#define DAVINCI_MMCCIDX      0x50 /* Command Index Register            */
#define DAVINCI_MMCCKC       0x54
#define DAVINCI_MMCTORC      0x58
#define DAVINCI_MMCTODC      0x5C
#define DAVINCI_MMCBLNC      0x60
#define DAVINCI_SDIOCTL      0x64
#define DAVINCI_SDIOST0      0x68
#define DAVINCI_SDIOIEN      0x6C
#define DAVINCI_SDIOIST      0x70
#define DAVINCI_MMCFIFOCTL   0x74 /* FIFO Control Register             */

/* DAVINCI_MMCCTL definitions */
#define MMCCTL_DATRST         (1 << 0)
#define MMCCTL_CMDRST         (1 << 1)
#define MMCCTL_WIDTH_8_BIT    (1 << 8)
#define MMCCTL_WIDTH_4_BIT    (1 << 2)
#define MMCCTL_DATEG_DISABLED (0 << 6)
#define MMCCTL_DATEG_RISING   (1 << 6)
#define MMCCTL_DATEG_FALLING  (2 << 6)
#define MMCCTL_DATEG_BOTH     (3 << 6)
#define MMCCTL_PERMDR_LE      (0 << 9)
#define MMCCTL_PERMDR_BE      (1 << 9)
#define MMCCTL_PERMDX_LE      (0 << 10)
#define MMCCTL_PERMDX_BE      (1 << 10)

/* DAVINCI_MMCCLK definitions */
#define MMCCLK_CLKEN          (1 << 8)
#define MMCCLK_CLKRT_MASK     (0xFF << 0)

/* IRQ bit definitions, for DAVINCI_MMCST0 and DAVINCI_MMCIM */
#define MMCST0_DATDNE         BIT(0)    /* data done */
#define MMCST0_BSYDNE         BIT(1)    /* busy done */
#define MMCST0_RSPDNE         BIT(2)    /* command done */
#define MMCST0_TOUTRD         BIT(3)    /* data read timeout */
#define MMCST0_TOUTRS         BIT(4)    /* command response timeout */
#define MMCST0_CRCWR          BIT(5)    /* data write CRC error */
#define MMCST0_CRCRD          BIT(6)    /* data read CRC error */
#define MMCST0_CRCRS          BIT(7)    /* command response CRC error */
#define MMCST0_DXRDY          BIT(9)    /* data transmit ready (fifo empty) */
#define MMCST0_DRRDY          BIT(10)   /* data receive ready (data in fifo) */
#define MMCST0_DATED          BIT(11)   /* DAT3 edge detect */
#define MMCST0_TRNDNE         BIT(12)   /* transfer done */

/* DAVINCI_MMCST1 definitions */
#define MMCST1_BUSY           (1 << 0)

/* DAVINCI_MMCCMD definitions */
#define MMCCMD_CMD_MASK       (0x3F << 0)
#define MMCCMD_PPLEN          (1 << 7)
#define MMCCMD_BSYEXP         (1 << 8)
#define MMCCMD_RSPFMT_MASK    (3 << 9)
#define MMCCMD_RSPFMT_NONE    (0 << 9)
#define MMCCMD_RSPFMT_R1456   (1 << 9)
#define MMCCMD_RSPFMT_R2      (2 << 9)
#define MMCCMD_RSPFMT_R3      (3 << 9)
#define MMCCMD_DTRW           (1 << 11)
#define MMCCMD_STRMTP         (1 << 12)
#define MMCCMD_WDATX          (1 << 13)
#define MMCCMD_INITCK         (1 << 14)
#define MMCCMD_DCLR           (1 << 15)
#define MMCCMD_DMATRIG        (1 << 16)

/* DAVINCI_MMCFIFOCTL definitions */
#define MMCFIFOCTL_FIFORST    (1 << 0)
#define MMCFIFOCTL_FIFODIR_WR (1 << 1)
#define MMCFIFOCTL_FIFODIR_RD (0 << 1)
#define MMCFIFOCTL_FIFOLEV    (1 << 2) /* 0 = 128 bits, 1 = 256 bits */
#define MMCFIFOCTL_ACCWD_4    (0 << 3) /* access width of 4 bytes    */
#define MMCFIFOCTL_ACCWD_3    (1 << 3) /* access width of 3 bytes    */
#define MMCFIFOCTL_ACCWD_2    (2 << 3) /* access width of 2 bytes    */
#define MMCFIFOCTL_ACCWD_1    (3 << 3) /* access width of 1 byte     */

/* DAVINCI_SDIOST0 definitions */
#define SDIOST0_DAT1_HI       BIT(0)

/* DAVINCI_SDIOIEN definitions */
#define SDIOIEN_IOINTEN       BIT(0)

/* DAVINCI_SDIOIST definitions */
#define SDIOIST_IOINT         BIT(0)

/* MMCSD Init clock in Hz in opendrain mode */
#define MMCSD_INIT_CLOCK                200000

/*
 * One scatterlist dma "segment" is at most MAX_CCNT rw_threshold units,
 * and we handle up to MAX_NR_SG segments.  MMC_BLOCK_BOUNCE kicks in only
 * for drivers with max_segs == 1, making the segments bigger (64KB)
 * than the page or two that's otherwise typical. nr_sg (passed from
 * platform data) == 16 gives at least the same throughput boost, using
 * EDMA transfer linkage instead of spending CPU time copying pages.
 */
#define MAX_CCNT        ((1 << 16) - 1)

#define MAX_NR_SG       16
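
/* Worked example, with the default rw_threshold of 32: one EDMA
 * "segment" can cover at most MAX_CCNT * 32 = 65535 * 32 bytes, just
 * under 2 MiB; that is the max_seg_size reported in probe below.
 */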

static unsigned rw_threshold = 32;
module_param(rw_threshold, uint, S_IRUGO);
MODULE_PARM_DESC(rw_threshold,
                "Read/Write threshold. Default = 32");

static unsigned __initdata use_dma = 1;
module_param(use_dma, uint, 0);
MODULE_PARM_DESC(use_dma, "Whether to use DMA or not. Default = 1");
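
/* Example invocation (hypothetical): to force PIO with a 64-byte FIFO
 * threshold, load the module with "modprobe davinci_mmc use_dma=0
 * rw_threshold=64".  Both parameters are load-time only; rw_threshold
 * is merely readable (S_IRUGO) afterwards.
 */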

struct mmc_davinci_host {
        struct mmc_command *cmd;
        struct mmc_data *data;
        struct mmc_host *mmc;
        struct clk *clk;
        unsigned int mmc_input_clk;
        void __iomem *base;
        struct resource *mem_res;
        int mmc_irq, sdio_irq;
        unsigned char bus_mode;

#define DAVINCI_MMC_DATADIR_NONE        0
#define DAVINCI_MMC_DATADIR_READ        1
#define DAVINCI_MMC_DATADIR_WRITE       2
        unsigned char data_dir;
        unsigned char suspended;

        /* buffer is used during PIO of one scatterlist segment, and
         * is updated along with buffer_bytes_left.  bytes_left applies
         * to all N blocks of the PIO transfer.
         */
        u8 *buffer;
        u32 buffer_bytes_left;
        u32 bytes_left;

        u32 rxdma, txdma;
        bool use_dma;
        bool do_dma;
        bool sdio_int;

        /* Scatterlist DMA uses one or more parameter RAM entries:
         * the main one (associated with rxdma or txdma) plus zero or
         * more links.  The entries for a given transfer differ only
         * by memory buffer (address, length) and link field.
         */
        struct edmacc_param     tx_template;
        struct edmacc_param     rx_template;
        unsigned                n_link;
        u32                     links[MAX_NR_SG - 1];

        /* For PIO we walk scatterlists one segment at a time. */
        unsigned int            sg_len;
        struct scatterlist *sg;

        /* Version of the MMC/SD controller */
        u8 version;
        /* for ns in one cycle calculation */
        unsigned ns_in_one_cycle;
        /* Number of sg segments */
        u8 nr_sg;
#ifdef CONFIG_CPU_FREQ
        struct notifier_block   freq_transition;
#endif
};


/* PIO only */
static void mmc_davinci_sg_to_buf(struct mmc_davinci_host *host)
{
        host->buffer_bytes_left = sg_dma_len(host->sg);
        host->buffer = sg_virt(host->sg);
        if (host->buffer_bytes_left > host->bytes_left)
                host->buffer_bytes_left = host->bytes_left;
}

static void davinci_fifo_data_trans(struct mmc_davinci_host *host,
                                        unsigned int n)
{
        u8 *p;
        unsigned int i;

        if (host->buffer_bytes_left == 0) {
                host->sg = sg_next(host->sg);
                mmc_davinci_sg_to_buf(host);
        }

        p = host->buffer;
        if (n > host->buffer_bytes_left)
                n = host->buffer_bytes_left;
        host->buffer_bytes_left -= n;
        host->bytes_left -= n;

        /* NOTE:  we never transfer more than rw_threshold bytes
         * to/from the fifo here; there's no I/O overlap.
         * This also assumes that access width (i.e. ACCWD) is 4 bytes.
         */
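        /* Example, hypothetical sizes: for n = 13, the word loops below
         * make three 32-bit FIFO accesses and the (n & 3) tail moves the
         * last byte with iowrite8_rep()/ioread8_rep().
         */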
        if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) {
                for (i = 0; i < (n >> 2); i++) {
                        writel(*((u32 *)p), host->base + DAVINCI_MMCDXR);
                        p = p + 4;
                }
                if (n & 3) {
                        iowrite8_rep(host->base + DAVINCI_MMCDXR, p, (n & 3));
                        p = p + (n & 3);
                }
        } else {
                for (i = 0; i < (n >> 2); i++) {
                        *((u32 *)p) = readl(host->base + DAVINCI_MMCDRR);
                        p  = p + 4;
                }
                if (n & 3) {
                        ioread8_rep(host->base + DAVINCI_MMCDRR, p, (n & 3));
                        p = p + (n & 3);
                }
        }
        host->buffer = p;
}

static void mmc_davinci_start_command(struct mmc_davinci_host *host,
                struct mmc_command *cmd)
{
        u32 cmd_reg = 0;
        u32 im_val;

        dev_dbg(mmc_dev(host->mmc), "CMD%d, arg 0x%08x%s\n",
                cmd->opcode, cmd->arg,
                ({ char *s;
                switch (mmc_resp_type(cmd)) {
                case MMC_RSP_R1:
                        s = ", R1/R5/R6/R7 response";
                        break;
                case MMC_RSP_R1B:
                        s = ", R1b response";
                        break;
                case MMC_RSP_R2:
                        s = ", R2 response";
                        break;
                case MMC_RSP_R3:
                        s = ", R3/R4 response";
                        break;
                default:
                        s = ", (R? response)";
                        break;
                }; s; }));
        host->cmd = cmd;

        switch (mmc_resp_type(cmd)) {
        case MMC_RSP_R1B:
                /* There's some spec confusion about when R1B is
                 * allowed, but if the card doesn't issue a BUSY
                 * then it's harmless for us to allow it.
                 */
                cmd_reg |= MMCCMD_BSYEXP;
                /* FALLTHROUGH */
        case MMC_RSP_R1:                /* 48 bits, CRC */
                cmd_reg |= MMCCMD_RSPFMT_R1456;
                break;
        case MMC_RSP_R2:                /* 136 bits, CRC */
                cmd_reg |= MMCCMD_RSPFMT_R2;
                break;
        case MMC_RSP_R3:                /* 48 bits, no CRC */
                cmd_reg |= MMCCMD_RSPFMT_R3;
                break;
        default:
                cmd_reg |= MMCCMD_RSPFMT_NONE;
                dev_dbg(mmc_dev(host->mmc), "unknown resp_type %04x\n",
                        mmc_resp_type(cmd));
                break;
        }

        /* Set command index */
        cmd_reg |= cmd->opcode;

        /* Enable EDMA transfer triggers */
        if (host->do_dma)
                cmd_reg |= MMCCMD_DMATRIG;

        if (host->version == MMC_CTLR_VERSION_2 && host->data != NULL &&
                        host->data_dir == DAVINCI_MMC_DATADIR_READ)
                cmd_reg |= MMCCMD_DMATRIG;

        /* Setting whether command involves data transfer or not */
        if (cmd->data)
                cmd_reg |= MMCCMD_WDATX;

        /* Setting whether stream or block transfer */
        if (cmd->data && (cmd->data->flags & MMC_DATA_STREAM))
                cmd_reg |= MMCCMD_STRMTP;

        /* Setting whether data read or write */
        if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE)
                cmd_reg |= MMCCMD_DTRW;

        if (host->bus_mode == MMC_BUSMODE_PUSHPULL)
                cmd_reg |= MMCCMD_PPLEN;

        /* set Command timeout */
        writel(0x1FFF, host->base + DAVINCI_MMCTOR);

        /* Enable interrupt (calculate here, defer until FIFO is stuffed). */
        im_val =  MMCST0_RSPDNE | MMCST0_CRCRS | MMCST0_TOUTRS;
        if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) {
                im_val |= MMCST0_DATDNE | MMCST0_CRCWR;

                if (!host->do_dma)
                        im_val |= MMCST0_DXRDY;
        } else if (host->data_dir == DAVINCI_MMC_DATADIR_READ) {
                im_val |= MMCST0_DATDNE | MMCST0_CRCRD | MMCST0_TOUTRD;

                if (!host->do_dma)
                        im_val |= MMCST0_DRRDY;
        }

        /*
         * Before non-DMA WRITE commands the controller needs priming:
         * FIFO should be populated with 32 bytes i.e. whatever is the FIFO size
         */
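        /* With the default rw_threshold of 32 this preloads one full
         * FIFO, i.e. the 256-bit level selected via MMCFIFOCTL_FIFOLEV
         * in mmc_davinci_prepare_data().
         */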
        if (!host->do_dma && (host->data_dir == DAVINCI_MMC_DATADIR_WRITE))
                davinci_fifo_data_trans(host, rw_threshold);

        writel(cmd->arg, host->base + DAVINCI_MMCARGHL);
        writel(cmd_reg,  host->base + DAVINCI_MMCCMD);
        writel(im_val, host->base + DAVINCI_MMCIM);
}

/*----------------------------------------------------------------------*/

/* DMA infrastructure */

static void davinci_abort_dma(struct mmc_davinci_host *host)
{
        int sync_dev;

        if (host->data_dir == DAVINCI_MMC_DATADIR_READ)
                sync_dev = host->rxdma;
        else
                sync_dev = host->txdma;

        edma_stop(sync_dev);
        edma_clean_channel(sync_dev);
}

static void
mmc_davinci_xfer_done(struct mmc_davinci_host *host, struct mmc_data *data);

static void mmc_davinci_dma_cb(unsigned channel, u16 ch_status, void *data)
{
        if (DMA_COMPLETE != ch_status) {
                struct mmc_davinci_host *host = data;

                /* Currently means:  DMA Event Missed, or "null" transfer
                 * request was seen.  In the future, TC errors (like bad
                 * addresses) might be presented too.
                 */
                dev_warn(mmc_dev(host->mmc), "DMA %s error\n",
                        (host->data->flags & MMC_DATA_WRITE)
                                ? "write" : "read");
                host->data->error = -EIO;
                mmc_davinci_xfer_done(host, host->data);
        }
}

/* Set up tx or rx template, to be modified and updated later */
static void __init mmc_davinci_dma_setup(struct mmc_davinci_host *host,
                bool tx, struct edmacc_param *template)
{
        unsigned        sync_dev;
        const u16       acnt = 4;
        const u16       bcnt = rw_threshold >> 2;
        const u16       ccnt = 0;
        u32             src_port = 0;
        u32             dst_port = 0;
        s16             src_bidx, dst_bidx;
        s16             src_cidx, dst_cidx;

        /*
         * A-B Sync transfer:  each DMA request is for one "frame" of
         * rw_threshold bytes, broken into "acnt"-size chunks repeated
         * "bcnt" times.  Each segment needs "ccnt" such frames; since
         * we tell the block layer our mmc->max_seg_size limit, we can
         * trust (later) that it's within bounds.
         *
         * The FIFOs are read/written in 4-byte chunks (acnt == 4) and
         * EDMA will optimize memory operations to use larger bursts.
         */
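        /* Worked example, assuming the default rw_threshold of 32:
         * acnt = 4 and bcnt = 8, so each DMA event moves one 32-byte
         * frame; a hypothetical 4 KiB segment then needs ccnt = 128
         * frames (4096 >> 5), well within MAX_CCNT.
         */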
        if (tx) {
                sync_dev = host->txdma;

                /* src_prt, ccnt, and link to be set up later */
                src_bidx = acnt;
                src_cidx = acnt * bcnt;

                dst_port = host->mem_res->start + DAVINCI_MMCDXR;
                dst_bidx = 0;
                dst_cidx = 0;
        } else {
                sync_dev = host->rxdma;

                src_port = host->mem_res->start + DAVINCI_MMCDRR;
                src_bidx = 0;
                src_cidx = 0;

                /* dst_prt, ccnt, and link to be set up later */
                dst_bidx = acnt;
                dst_cidx = acnt * bcnt;
        }

        /*
         * We can't use FIFO mode for the FIFOs because MMC FIFO addresses
         * are not 256-bit (32-byte) aligned.  So we use INCR, and the W8BIT
         * parameter is ignored.
         */
        edma_set_src(sync_dev, src_port, INCR, W8BIT);
        edma_set_dest(sync_dev, dst_port, INCR, W8BIT);

        edma_set_src_index(sync_dev, src_bidx, src_cidx);
        edma_set_dest_index(sync_dev, dst_bidx, dst_cidx);

        edma_set_transfer_params(sync_dev, acnt, bcnt, ccnt, 8, ABSYNC);

        edma_read_slot(sync_dev, template);

        /* don't bother with irqs or chaining */
        template->opt |= EDMA_CHAN_SLOT(sync_dev) << 12;
}

static void mmc_davinci_send_dma_request(struct mmc_davinci_host *host,
                struct mmc_data *data)
{
        struct edmacc_param     *template;
        int                     channel, slot;
        unsigned                link;
        struct scatterlist      *sg;
        unsigned                sg_len;
        unsigned                bytes_left = host->bytes_left;
        const unsigned          shift = ffs(rw_threshold) - 1;

        if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) {
                template = &host->tx_template;
                channel = host->txdma;
        } else {
                template = &host->rx_template;
                channel = host->rxdma;
        }

        /* We know sg_len and ccnt will never be out of range because
         * we told the mmc layer which in turn tells the block layer
         * to ensure that it only hands us one scatterlist segment
         * per EDMA PARAM entry.  Update the PARAM
         * entries needed for each segment of this scatterlist.
         */
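        /* Example chain for a hypothetical 3-segment scatterlist:
         * channel PaRAM -> links[0] -> links[1], with the final slot's
         * link_bcntrld set to 0xffff to terminate the transfer.
         */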
        for (slot = channel, link = 0, sg = data->sg, sg_len = host->sg_len;
                        sg_len-- != 0 && bytes_left;
                        sg = sg_next(sg), slot = host->links[link++]) {
                u32             buf = sg_dma_address(sg);
                unsigned        count = sg_dma_len(sg);

                template->link_bcntrld = sg_len
                                ? (EDMA_CHAN_SLOT(host->links[link]) << 5)
                                : 0xffff;

                if (count > bytes_left)
                        count = bytes_left;
                bytes_left -= count;

                if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE)
                        template->src = buf;
                else
                        template->dst = buf;
                template->ccnt = count >> shift;

                edma_write_slot(slot, template);
        }

        if (host->version == MMC_CTLR_VERSION_2)
                edma_clear_event(channel);

        edma_start(channel);
}

static int mmc_davinci_start_dma_transfer(struct mmc_davinci_host *host,
                struct mmc_data *data)
{
        int i;
        int mask = rw_threshold - 1;

        host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
                                ((data->flags & MMC_DATA_WRITE)
                                ? DMA_TO_DEVICE
                                : DMA_FROM_DEVICE));

        /* no individual DMA segment should need a partial FIFO */
        for (i = 0; i < host->sg_len; i++) {
                if (sg_dma_len(data->sg + i) & mask) {
                        dma_unmap_sg(mmc_dev(host->mmc),
                                        data->sg, data->sg_len,
                                        (data->flags & MMC_DATA_WRITE)
                                        ? DMA_TO_DEVICE
                                        : DMA_FROM_DEVICE);
                        return -1;
                }
        }

        host->do_dma = 1;
        mmc_davinci_send_dma_request(host, data);

        return 0;
}

static void __init_or_module
davinci_release_dma_channels(struct mmc_davinci_host *host)
{
        unsigned        i;

        if (!host->use_dma)
                return;

        for (i = 0; i < host->n_link; i++)
                edma_free_slot(host->links[i]);

        edma_free_channel(host->txdma);
        edma_free_channel(host->rxdma);
}

static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host)
{
        u32 link_size;
        int r, i;

        /* Acquire master DMA write channel */
        r = edma_alloc_channel(host->txdma, mmc_davinci_dma_cb, host,
                        EVENTQ_DEFAULT);
        if (r < 0) {
                dev_warn(mmc_dev(host->mmc), "alloc %s channel err %d\n",
                                "tx", r);
                return r;
        }
        mmc_davinci_dma_setup(host, true, &host->tx_template);

        /* Acquire master DMA read channel */
        r = edma_alloc_channel(host->rxdma, mmc_davinci_dma_cb, host,
                        EVENTQ_DEFAULT);
        if (r < 0) {
                dev_warn(mmc_dev(host->mmc), "alloc %s channel err %d\n",
                                "rx", r);
                goto free_master_write;
        }
        mmc_davinci_dma_setup(host, false, &host->rx_template);

        /* Allocate parameter RAM slots, which will later be bound to a
         * channel as needed to handle a scatterlist.
         */
        link_size = min_t(unsigned, host->nr_sg, ARRAY_SIZE(host->links));
        for (i = 0; i < link_size; i++) {
                r = edma_alloc_slot(EDMA_CTLR(host->txdma), EDMA_SLOT_ANY);
                if (r < 0) {
                        dev_dbg(mmc_dev(host->mmc), "dma PaRAM alloc --> %d\n",
                                r);
                        break;
                }
                host->links[i] = r;
        }
        host->n_link = i;

        return 0;

free_master_write:
        edma_free_channel(host->txdma);

        return r;
}

/*----------------------------------------------------------------------*/

static void
mmc_davinci_prepare_data(struct mmc_davinci_host *host, struct mmc_request *req)
{
        int fifo_lev = (rw_threshold == 32) ? MMCFIFOCTL_FIFOLEV : 0;
        int timeout;
        struct mmc_data *data = req->data;

        if (host->version == MMC_CTLR_VERSION_2)
                fifo_lev = (rw_threshold == 64) ? MMCFIFOCTL_FIFOLEV : 0;

        host->data = data;
        if (data == NULL) {
                host->data_dir = DAVINCI_MMC_DATADIR_NONE;
                writel(0, host->base + DAVINCI_MMCBLEN);
                writel(0, host->base + DAVINCI_MMCNBLK);
                return;
        }

        dev_dbg(mmc_dev(host->mmc), "%s %s, %d blocks of %d bytes\n",
                (data->flags & MMC_DATA_STREAM) ? "stream" : "block",
                (data->flags & MMC_DATA_WRITE) ? "write" : "read",
                data->blocks, data->blksz);
        dev_dbg(mmc_dev(host->mmc), "  DTO %d cycles + %d ns\n",
                data->timeout_clks, data->timeout_ns);
        timeout = data->timeout_clks +
                (data->timeout_ns / host->ns_in_one_cycle);
        if (timeout > 0xffff)
                timeout = 0xffff;

        writel(timeout, host->base + DAVINCI_MMCTOD);
        writel(data->blocks, host->base + DAVINCI_MMCNBLK);
        writel(data->blksz, host->base + DAVINCI_MMCBLEN);

        /* Configure the FIFO */
        switch (data->flags & MMC_DATA_WRITE) {
        case MMC_DATA_WRITE:
                host->data_dir = DAVINCI_MMC_DATADIR_WRITE;
                writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR | MMCFIFOCTL_FIFORST,
                        host->base + DAVINCI_MMCFIFOCTL);
                writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR,
                        host->base + DAVINCI_MMCFIFOCTL);
                break;

        default:
                host->data_dir = DAVINCI_MMC_DATADIR_READ;
                writel(fifo_lev | MMCFIFOCTL_FIFODIR_RD | MMCFIFOCTL_FIFORST,
                        host->base + DAVINCI_MMCFIFOCTL);
                writel(fifo_lev | MMCFIFOCTL_FIFODIR_RD,
                        host->base + DAVINCI_MMCFIFOCTL);
                break;
        }

        host->buffer = NULL;
        host->bytes_left = data->blocks * data->blksz;

        /* For now we try to use DMA whenever we won't need partial FIFO
         * reads or writes, either for the whole transfer (as tested here)
         * or for any individual scatterlist segment (tested when we call
         * start_dma_transfer).
         *
         * While we *could* change that, unusual block sizes are rarely
         * used.  The occasional fallback to PIO shouldn't hurt.
         */
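        /* Example: ordinary 512-byte blocks are a multiple of the 32 or
         * 64 byte rw_threshold, so they take the DMA path; a hypothetical
         * 30-byte SDIO byte-mode transfer would fall back to PIO here.
         */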
        if (host->use_dma && (host->bytes_left & (rw_threshold - 1)) == 0
                        && mmc_davinci_start_dma_transfer(host, data) == 0) {
                /* zero this to ensure we take no PIO paths */
                host->bytes_left = 0;
        } else {
                /* Revert to CPU Copy */
                host->sg_len = data->sg_len;
                host->sg = host->data->sg;
                mmc_davinci_sg_to_buf(host);
        }
}

static void mmc_davinci_request(struct mmc_host *mmc, struct mmc_request *req)
{
        struct mmc_davinci_host *host = mmc_priv(mmc);
        unsigned long timeout = jiffies + msecs_to_jiffies(900);
        u32 mmcst1 = 0;

        /* Card may still be sending BUSY after a previous operation,
         * typically some kind of write.  If so, we can't proceed yet.
         */
        while (time_before(jiffies, timeout)) {
                mmcst1  = readl(host->base + DAVINCI_MMCST1);
                if (!(mmcst1 & MMCST1_BUSY))
                        break;
                cpu_relax();
        }
        if (mmcst1 & MMCST1_BUSY) {
                dev_err(mmc_dev(host->mmc), "still BUSY? bad ...\n");
                req->cmd->error = -ETIMEDOUT;
                mmc_request_done(mmc, req);
                return;
        }

        host->do_dma = 0;
        mmc_davinci_prepare_data(host, req);
        mmc_davinci_start_command(host, req->cmd);
}

static unsigned int calculate_freq_for_card(struct mmc_davinci_host *host,
        unsigned int mmc_req_freq)
{
        unsigned int mmc_freq = 0, mmc_pclk = 0, mmc_push_pull_divisor = 0;

        mmc_pclk = host->mmc_input_clk;
        if (mmc_req_freq && mmc_pclk > (2 * mmc_req_freq))
                mmc_push_pull_divisor = ((unsigned int)mmc_pclk
                                / (2 * mmc_req_freq)) - 1;
        else
                mmc_push_pull_divisor = 0;

        mmc_freq = (unsigned int)mmc_pclk
                / (2 * (mmc_push_pull_divisor + 1));

        if (mmc_freq > mmc_req_freq)
                mmc_push_pull_divisor = mmc_push_pull_divisor + 1;
        /* Convert ns to clock cycles */
        if (mmc_req_freq <= 400000)
                host->ns_in_one_cycle = (1000000) / (((mmc_pclk
                                / (2 * (mmc_push_pull_divisor + 1)))/1000));
        else
                host->ns_in_one_cycle = (1000000) / (((mmc_pclk
                                / (2 * (mmc_push_pull_divisor + 1)))/1000000));

        return mmc_push_pull_divisor;
}
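
/* Worked example with hypothetical numbers: for mmc_pclk = 75 MHz and
 * a 25 MHz request, the first pass yields divisor 0 (75 / 50 - 1), but
 * the resulting 37.5 MHz exceeds the request, so the divisor is bumped
 * to 1 and the card clock becomes 75 / (2 * (1 + 1)) = 18.75 MHz.
 */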

static void calculate_clk_divider(struct mmc_host *mmc, struct mmc_ios *ios)
{
        unsigned int open_drain_freq = 0, mmc_pclk = 0;
        unsigned int mmc_push_pull_freq = 0;
        struct mmc_davinci_host *host = mmc_priv(mmc);

        mmc_pclk = host->mmc_input_clk;

        if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
                u32 temp;

                /* Ignoring the init clock value passed for fixing the
                 * interoperability with different cards.
                 */
                open_drain_freq = ((unsigned int)mmc_pclk
                                / (2 * MMCSD_INIT_CLOCK)) - 1;

                if (open_drain_freq > 0xFF)
                        open_drain_freq = 0xFF;

                temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKRT_MASK;
                temp |= open_drain_freq;
                writel(temp, host->base + DAVINCI_MMCCLK);

                /* Convert ns to clock cycles */
                host->ns_in_one_cycle = (1000000) / (MMCSD_INIT_CLOCK/1000);
        } else {
                u32 temp;
                mmc_push_pull_freq = calculate_freq_for_card(host, ios->clock);

                if (mmc_push_pull_freq > 0xFF)
                        mmc_push_pull_freq = 0xFF;

                temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKEN;
                writel(temp, host->base + DAVINCI_MMCCLK);

                udelay(10);

                temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKRT_MASK;
                temp |= mmc_push_pull_freq;
                writel(temp, host->base + DAVINCI_MMCCLK);

                writel(temp | MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK);

                udelay(10);
        }
}

static void mmc_davinci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
        struct mmc_davinci_host *host = mmc_priv(mmc);

        dev_dbg(mmc_dev(host->mmc),
                "clock %dHz busmode %d powermode %d Vdd %04x\n",
                ios->clock, ios->bus_mode, ios->power_mode,
                ios->vdd);

        switch (ios->bus_width) {
        case MMC_BUS_WIDTH_8:
                dev_dbg(mmc_dev(host->mmc), "Enabling 8 bit mode\n");
                writel((readl(host->base + DAVINCI_MMCCTL) &
                        ~MMCCTL_WIDTH_4_BIT) | MMCCTL_WIDTH_8_BIT,
                        host->base + DAVINCI_MMCCTL);
                break;
        case MMC_BUS_WIDTH_4:
                dev_dbg(mmc_dev(host->mmc), "Enabling 4 bit mode\n");
                if (host->version == MMC_CTLR_VERSION_2)
                        writel((readl(host->base + DAVINCI_MMCCTL) &
                                ~MMCCTL_WIDTH_8_BIT) | MMCCTL_WIDTH_4_BIT,
                                host->base + DAVINCI_MMCCTL);
                else
                        writel(readl(host->base + DAVINCI_MMCCTL) |
                                MMCCTL_WIDTH_4_BIT,
                                host->base + DAVINCI_MMCCTL);
                break;
        case MMC_BUS_WIDTH_1:
                dev_dbg(mmc_dev(host->mmc), "Enabling 1 bit mode\n");
                if (host->version == MMC_CTLR_VERSION_2)
                        writel(readl(host->base + DAVINCI_MMCCTL) &
                                ~(MMCCTL_WIDTH_8_BIT | MMCCTL_WIDTH_4_BIT),
                                host->base + DAVINCI_MMCCTL);
                else
                        writel(readl(host->base + DAVINCI_MMCCTL) &
                                ~MMCCTL_WIDTH_4_BIT,
                                host->base + DAVINCI_MMCCTL);
                break;
        }

        calculate_clk_divider(mmc, ios);

        host->bus_mode = ios->bus_mode;
        if (ios->power_mode == MMC_POWER_UP) {
                unsigned long timeout = jiffies + msecs_to_jiffies(50);
                bool lose = true;

                /* Send clock cycles, poll completion */
                writel(0, host->base + DAVINCI_MMCARGHL);
                writel(MMCCMD_INITCK, host->base + DAVINCI_MMCCMD);
                while (time_before(jiffies, timeout)) {
                        u32 tmp = readl(host->base + DAVINCI_MMCST0);

                        if (tmp & MMCST0_RSPDNE) {
                                lose = false;
                                break;
                        }
                        cpu_relax();
                }
                if (lose)
                        dev_warn(mmc_dev(host->mmc), "powerup timeout\n");
        }

        /* FIXME on power OFF, reset things ... */
}

static void
mmc_davinci_xfer_done(struct mmc_davinci_host *host, struct mmc_data *data)
{
        host->data = NULL;

        if (host->mmc->caps & MMC_CAP_SDIO_IRQ) {
                /*
                 * SDIO Interrupt Detection work-around as suggested by
                 * Davinci Errata (TMS320DM355 Silicon Revision 1.1 Errata
                 * 2.1.6): Signal SDIO interrupt only if it is enabled by core
                 */
                if (host->sdio_int && !(readl(host->base + DAVINCI_SDIOST0) &
                                        SDIOST0_DAT1_HI)) {
                        writel(SDIOIST_IOINT, host->base + DAVINCI_SDIOIST);
                        mmc_signal_sdio_irq(host->mmc);
                }
        }

        if (host->do_dma) {
                davinci_abort_dma(host);

                dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
                             (data->flags & MMC_DATA_WRITE)
                             ? DMA_TO_DEVICE
                             : DMA_FROM_DEVICE);
                host->do_dma = false;
        }
        host->data_dir = DAVINCI_MMC_DATADIR_NONE;

        if (!data->stop || (host->cmd && host->cmd->error)) {
                mmc_request_done(host->mmc, data->mrq);
                writel(0, host->base + DAVINCI_MMCIM);
        } else
                mmc_davinci_start_command(host, data->stop);
}

static void mmc_davinci_cmd_done(struct mmc_davinci_host *host,
                                 struct mmc_command *cmd)
{
        host->cmd = NULL;

        if (cmd->flags & MMC_RSP_PRESENT) {
                if (cmd->flags & MMC_RSP_136) {
                        /* response type 2 */
                        cmd->resp[3] = readl(host->base + DAVINCI_MMCRSP01);
                        cmd->resp[2] = readl(host->base + DAVINCI_MMCRSP23);
                        cmd->resp[1] = readl(host->base + DAVINCI_MMCRSP45);
                        cmd->resp[0] = readl(host->base + DAVINCI_MMCRSP67);
                } else {
                        /* response types 1, 1b, 3, 4, 5, 6 */
                        cmd->resp[0] = readl(host->base + DAVINCI_MMCRSP67);
                }
        }

        if (host->data == NULL || cmd->error) {
                if (cmd->error == -ETIMEDOUT)
                        cmd->mrq->cmd->retries = 0;
                mmc_request_done(host->mmc, cmd->mrq);
                writel(0, host->base + DAVINCI_MMCIM);
        }
}

static inline void mmc_davinci_reset_ctrl(struct mmc_davinci_host *host,
                                                                int val)
{
        u32 temp;

        temp = readl(host->base + DAVINCI_MMCCTL);
        if (val)        /* reset */
                temp |= MMCCTL_CMDRST | MMCCTL_DATRST;
        else            /* enable */
                temp &= ~(MMCCTL_CMDRST | MMCCTL_DATRST);

        writel(temp, host->base + DAVINCI_MMCCTL);
        udelay(10);
}

static void
davinci_abort_data(struct mmc_davinci_host *host, struct mmc_data *data)
{
        mmc_davinci_reset_ctrl(host, 1);
        mmc_davinci_reset_ctrl(host, 0);
}

static irqreturn_t mmc_davinci_sdio_irq(int irq, void *dev_id)
{
        struct mmc_davinci_host *host = dev_id;
        unsigned int status;

        status = readl(host->base + DAVINCI_SDIOIST);
        if (status & SDIOIST_IOINT) {
                dev_dbg(mmc_dev(host->mmc),
                        "SDIO interrupt status %x\n", status);
                writel(status | SDIOIST_IOINT, host->base + DAVINCI_SDIOIST);
                mmc_signal_sdio_irq(host->mmc);
        }
        return IRQ_HANDLED;
}

static irqreturn_t mmc_davinci_irq(int irq, void *dev_id)
{
        struct mmc_davinci_host *host = (struct mmc_davinci_host *)dev_id;
        unsigned int status, qstatus;
        int end_command = 0;
        int end_transfer = 0;
        struct mmc_data *data = host->data;

        if (host->cmd == NULL && host->data == NULL) {
                status = readl(host->base + DAVINCI_MMCST0);
                dev_dbg(mmc_dev(host->mmc),
                        "Spurious interrupt 0x%04x\n", status);
                /* Disable the interrupt from mmcsd */
                writel(0, host->base + DAVINCI_MMCIM);
                return IRQ_NONE;
        }

        status = readl(host->base + DAVINCI_MMCST0);
        qstatus = status;

        /* Handle the FIFO first when using PIO for data.
         * bytes_left will decrease to zero as I/O progresses, and status
         * will read as zero across iterations because this controller's
         * status register (MMCST0) reports each event only once and is
         * cleared by the read.  So this loop is bounded even in the
         * non-DMA case.
         */
        while (host->bytes_left && (status & (MMCST0_DXRDY | MMCST0_DRRDY))) {
                davinci_fifo_data_trans(host, rw_threshold);
                status = readl(host->base + DAVINCI_MMCST0);
                if (!status)
                        break;
                qstatus |= status;
        }

        if (qstatus & MMCST0_DATDNE) {
                /* All blocks sent/received, and CRC checks passed */
                if (data != NULL) {
                        if ((host->do_dma == 0) && (host->bytes_left > 0)) {
                                /* if datasize < rw_threshold
                                 * no RX ints are generated
                                 */
                                davinci_fifo_data_trans(host, host->bytes_left);
                        }
                        end_transfer = 1;
                        data->bytes_xfered = data->blocks * data->blksz;
                } else {
                        dev_err(mmc_dev(host->mmc),
                                        "DATDNE with no host->data\n");
                }
        }

        if (qstatus & MMCST0_TOUTRD) {
                /* Read data timeout */
                data->error = -ETIMEDOUT;
                end_transfer = 1;

                dev_dbg(mmc_dev(host->mmc),
                        "read data timeout, status %x\n",
                        qstatus);

                davinci_abort_data(host, data);
        }

        if (qstatus & (MMCST0_CRCWR | MMCST0_CRCRD)) {
                /* Data CRC error */
                data->error = -EILSEQ;
                end_transfer = 1;

                /* NOTE:  this controller uses CRCWR to report both CRC
                 * errors and timeouts (on writes).  MMCDRSP values are
                 * only weakly documented, but 0x9f was clearly a timeout
                 * case and the two three-bit patterns in various SD specs
                 * (101, 010) aren't part of it ...
                 */
                if (qstatus & MMCST0_CRCWR) {
                        u32 temp = readb(host->base + DAVINCI_MMCDRSP);

                        if (temp == 0x9f)
                                data->error = -ETIMEDOUT;
                }
                dev_dbg(mmc_dev(host->mmc), "data %s %s error\n",
                        (qstatus & MMCST0_CRCWR) ? "write" : "read",
                        (data->error == -ETIMEDOUT) ? "timeout" : "CRC");

                davinci_abort_data(host, data);
        }

        if (qstatus & MMCST0_TOUTRS) {
                /* Command timeout */
                if (host->cmd) {
                        dev_dbg(mmc_dev(host->mmc),
                                "CMD%d timeout, status %x\n",
                                host->cmd->opcode, qstatus);
                        host->cmd->error = -ETIMEDOUT;
                        if (data) {
                                end_transfer = 1;
                                davinci_abort_data(host, data);
                        } else
                                end_command = 1;
                }
        }

        if (qstatus & MMCST0_CRCRS) {
                /* Command CRC error */
                dev_dbg(mmc_dev(host->mmc), "Command CRC error\n");
                if (host->cmd) {
                        host->cmd->error = -EILSEQ;
                        end_command = 1;
                }
        }

        if (qstatus & MMCST0_RSPDNE) {
                /* End of command phase */
                end_command = (int) host->cmd;
        }

        if (end_command)
                mmc_davinci_cmd_done(host, host->cmd);
        if (end_transfer)
                mmc_davinci_xfer_done(host, data);
        return IRQ_HANDLED;
}

static int mmc_davinci_get_cd(struct mmc_host *mmc)
{
        struct platform_device *pdev = to_platform_device(mmc->parent);
        struct davinci_mmc_config *config = pdev->dev.platform_data;

        if (!config || !config->get_cd)
                return -ENOSYS;
        return config->get_cd(pdev->id);
}

static int mmc_davinci_get_ro(struct mmc_host *mmc)
{
        struct platform_device *pdev = to_platform_device(mmc->parent);
        struct davinci_mmc_config *config = pdev->dev.platform_data;

        if (!config || !config->get_ro)
                return -ENOSYS;
        return config->get_ro(pdev->id);
}

static void mmc_davinci_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
        struct mmc_davinci_host *host = mmc_priv(mmc);

        if (enable) {
                if (!(readl(host->base + DAVINCI_SDIOST0) & SDIOST0_DAT1_HI)) {
                        writel(SDIOIST_IOINT, host->base + DAVINCI_SDIOIST);
                        mmc_signal_sdio_irq(host->mmc);
                } else {
                        host->sdio_int = true;
                        writel(readl(host->base + DAVINCI_SDIOIEN) |
                               SDIOIEN_IOINTEN, host->base + DAVINCI_SDIOIEN);
                }
        } else {
                host->sdio_int = false;
                writel(readl(host->base + DAVINCI_SDIOIEN) & ~SDIOIEN_IOINTEN,
                       host->base + DAVINCI_SDIOIEN);
        }
}

static struct mmc_host_ops mmc_davinci_ops = {
        .request        = mmc_davinci_request,
        .set_ios        = mmc_davinci_set_ios,
        .get_cd         = mmc_davinci_get_cd,
        .get_ro         = mmc_davinci_get_ro,
        .enable_sdio_irq = mmc_davinci_enable_sdio_irq,
};

/*----------------------------------------------------------------------*/

#ifdef CONFIG_CPU_FREQ
static int mmc_davinci_cpufreq_transition(struct notifier_block *nb,
                                     unsigned long val, void *data)
{
        struct mmc_davinci_host *host;
        unsigned int mmc_pclk;
        struct mmc_host *mmc;
        unsigned long flags;

        host = container_of(nb, struct mmc_davinci_host, freq_transition);
        mmc = host->mmc;
        mmc_pclk = clk_get_rate(host->clk);

        if (val == CPUFREQ_POSTCHANGE) {
                spin_lock_irqsave(&mmc->lock, flags);
                host->mmc_input_clk = mmc_pclk;
                calculate_clk_divider(mmc, &mmc->ios);
                spin_unlock_irqrestore(&mmc->lock, flags);
        }

        return 0;
}

static inline int mmc_davinci_cpufreq_register(struct mmc_davinci_host *host)
{
        host->freq_transition.notifier_call = mmc_davinci_cpufreq_transition;

        return cpufreq_register_notifier(&host->freq_transition,
                                         CPUFREQ_TRANSITION_NOTIFIER);
}

static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host)
{
        cpufreq_unregister_notifier(&host->freq_transition,
                                    CPUFREQ_TRANSITION_NOTIFIER);
}
#else
static inline int mmc_davinci_cpufreq_register(struct mmc_davinci_host *host)
{
        return 0;
}

static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host)
{
}
#endif

static void __init init_mmcsd_host(struct mmc_davinci_host *host)
{
        mmc_davinci_reset_ctrl(host, 1);

        writel(0, host->base + DAVINCI_MMCCLK);
        writel(MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK);

        writel(0x1FFF, host->base + DAVINCI_MMCTOR);
        writel(0xFFFF, host->base + DAVINCI_MMCTOD);

        mmc_davinci_reset_ctrl(host, 0);
}

static int __init davinci_mmcsd_probe(struct platform_device *pdev)
{
        struct davinci_mmc_config *pdata = pdev->dev.platform_data;
        struct mmc_davinci_host *host = NULL;
        struct mmc_host *mmc = NULL;
        struct resource *r, *mem = NULL;
        int ret = 0, irq = 0;
        size_t mem_size;

        /* REVISIT:  when we're fully converted, fail if pdata is NULL */

        ret = -ENODEV;
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        irq = platform_get_irq(pdev, 0);
        if (!r || irq == NO_IRQ)
                goto out;

        ret = -EBUSY;
        mem_size = resource_size(r);
        mem = request_mem_region(r->start, mem_size, pdev->name);
        if (!mem)
                goto out;

        ret = -ENOMEM;
        mmc = mmc_alloc_host(sizeof(struct mmc_davinci_host), &pdev->dev);
        if (!mmc)
                goto out;

        host = mmc_priv(mmc);
        host->mmc = mmc;        /* Important */

        r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
        if (!r)
                goto out;
        host->rxdma = r->start;

        r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
        if (!r)
                goto out;
        host->txdma = r->start;

        host->mem_res = mem;
        host->base = ioremap(mem->start, mem_size);
        if (!host->base)
                goto out;

        ret = -ENXIO;
        host->clk = clk_get(&pdev->dev, "MMCSDCLK");
        if (IS_ERR(host->clk)) {
                ret = PTR_ERR(host->clk);
                goto out;
        }
        clk_enable(host->clk);
        host->mmc_input_clk = clk_get_rate(host->clk);

        init_mmcsd_host(host);

        if (pdata->nr_sg)
                host->nr_sg = pdata->nr_sg - 1;

        if (host->nr_sg > MAX_NR_SG || !host->nr_sg)
                host->nr_sg = MAX_NR_SG;

        host->use_dma = use_dma;
        host->mmc_irq = irq;
        host->sdio_irq = platform_get_irq(pdev, 1);

        if (host->use_dma && davinci_acquire_dma_channels(host) != 0)
                host->use_dma = 0;

        /* REVISIT:  someday, support IRQ-driven card detection.  */
        mmc->caps |= MMC_CAP_NEEDS_POLL;
        mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;

        if (pdata && (pdata->wires == 4 || pdata->wires == 0))
                mmc->caps |= MMC_CAP_4_BIT_DATA;

        if (pdata && (pdata->wires == 8))
                mmc->caps |= (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA);

        host->version = pdata->version;

        mmc->ops = &mmc_davinci_ops;
        mmc->f_min = 312500;
        mmc->f_max = 25000000;
        if (pdata && pdata->max_freq)
                mmc->f_max = pdata->max_freq;
        if (pdata && pdata->caps)
                mmc->caps |= pdata->caps;
        mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

        /* With no iommu coalescing pages, each phys_seg is a hw_seg.
         * Each hw_seg uses one EDMA parameter RAM slot, always one
         * channel and then usually some linked slots.
         */
        mmc->max_segs           = 1 + host->n_link;

        /* EDMA limit per hw segment (one or two MBytes) */
        mmc->max_seg_size       = MAX_CCNT * rw_threshold;

        /* MMC/SD controller limits for multiblock requests */
        mmc->max_blk_size       = 4095;  /* BLEN is 12 bits */
        mmc->max_blk_count      = 65535; /* NBLK is 16 bits */
        mmc->max_req_size       = mmc->max_blk_size * mmc->max_blk_count;
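
        /* Example, assuming the default rw_threshold of 32 and a full
         * set of link slots: max_segs = 16, max_seg_size just under
         * 2 MiB, and max_req_size = 4095 * 65535 bytes.
         */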

        dev_dbg(mmc_dev(host->mmc), "max_segs=%d\n", mmc->max_segs);
        dev_dbg(mmc_dev(host->mmc), "max_blk_size=%d\n", mmc->max_blk_size);
        dev_dbg(mmc_dev(host->mmc), "max_req_size=%d\n", mmc->max_req_size);
        dev_dbg(mmc_dev(host->mmc), "max_seg_size=%d\n", mmc->max_seg_size);

        platform_set_drvdata(pdev, host);

        ret = mmc_davinci_cpufreq_register(host);
        if (ret) {
                dev_err(&pdev->dev, "failed to register cpufreq\n");
                goto cpu_freq_fail;
        }

        ret = mmc_add_host(mmc);
        if (ret < 0)
                goto out;

        ret = request_irq(irq, mmc_davinci_irq, 0, mmc_hostname(mmc), host);
        if (ret)
                goto out;

        if (host->sdio_irq >= 0) {
                ret = request_irq(host->sdio_irq, mmc_davinci_sdio_irq, 0,
                                  mmc_hostname(mmc), host);
                if (!ret)
                        mmc->caps |= MMC_CAP_SDIO_IRQ;
        }

        rename_region(mem, mmc_hostname(mmc));

        dev_info(mmc_dev(host->mmc), "Using %s, %d-bit mode\n",
                host->use_dma ? "DMA" : "PIO",
                (mmc->caps & MMC_CAP_4_BIT_DATA) ? 4 : 1);

        return 0;

out:
        mmc_davinci_cpufreq_deregister(host);
cpu_freq_fail:
        if (host) {
                davinci_release_dma_channels(host);

                if (host->clk) {
                        clk_disable(host->clk);
                        clk_put(host->clk);
                }

                if (host->base)
                        iounmap(host->base);
        }

        if (mmc)
                mmc_free_host(mmc);

        if (mem)
                release_resource(mem);

        dev_dbg(&pdev->dev, "probe err %d\n", ret);

        return ret;
}

static int __exit davinci_mmcsd_remove(struct platform_device *pdev)
{
        struct mmc_davinci_host *host = platform_get_drvdata(pdev);

        platform_set_drvdata(pdev, NULL);
        if (host) {
                mmc_davinci_cpufreq_deregister(host);

                mmc_remove_host(host->mmc);
                free_irq(host->mmc_irq, host);
                if (host->mmc->caps & MMC_CAP_SDIO_IRQ)
                        free_irq(host->sdio_irq, host);

                davinci_release_dma_channels(host);

                clk_disable(host->clk);
                clk_put(host->clk);

                iounmap(host->base);

                release_resource(host->mem_res);

                mmc_free_host(host->mmc);
        }

        return 0;
}

#ifdef CONFIG_PM
static int davinci_mmcsd_suspend(struct device *dev)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct mmc_davinci_host *host = platform_get_drvdata(pdev);
        int ret;

        mmc_host_enable(host->mmc);
        ret = mmc_suspend_host(host->mmc);
        if (!ret) {
                writel(0, host->base + DAVINCI_MMCIM);
                mmc_davinci_reset_ctrl(host, 1);
                mmc_host_disable(host->mmc);
                clk_disable(host->clk);
                host->suspended = 1;
        } else {
                host->suspended = 0;
                mmc_host_disable(host->mmc);
        }

        return ret;
}

static int davinci_mmcsd_resume(struct device *dev)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct mmc_davinci_host *host = platform_get_drvdata(pdev);
        int ret;

        if (!host->suspended)
                return 0;

        clk_enable(host->clk);
        mmc_host_enable(host->mmc);

        mmc_davinci_reset_ctrl(host, 0);
        ret = mmc_resume_host(host->mmc);
        if (!ret)
                host->suspended = 0;

        return ret;
}

static const struct dev_pm_ops davinci_mmcsd_pm = {
        .suspend        = davinci_mmcsd_suspend,
        .resume         = davinci_mmcsd_resume,
};

#define davinci_mmcsd_pm_ops (&davinci_mmcsd_pm)
#else
#define davinci_mmcsd_pm_ops NULL
#endif

static struct platform_driver davinci_mmcsd_driver = {
        .driver         = {
                .name   = "davinci_mmc",
                .owner  = THIS_MODULE,
                .pm     = davinci_mmcsd_pm_ops,
        },
        .remove         = __exit_p(davinci_mmcsd_remove),
};

static int __init davinci_mmcsd_init(void)
{
        return platform_driver_probe(&davinci_mmcsd_driver,
                                     davinci_mmcsd_probe);
}
module_init(davinci_mmcsd_init);

static void __exit davinci_mmcsd_exit(void)
{
        platform_driver_unregister(&davinci_mmcsd_driver);
}
module_exit(davinci_mmcsd_exit);

MODULE_AUTHOR("Texas Instruments India");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MMC/SD driver for Davinci MMC controller");