linux/drivers/mmc/host/sh_mmcif.c
/*
 * MMCIF eMMC driver.
 *
 * Copyright (C) 2010 Renesas Solutions Corp.
 * Yusuke Goda <yusuke.goda.sx@renesas.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License.
 *
 *
 * TODO
 *  1. DMA
 *  2. Power management
 *  3. Handle MMC errors better
 *
 */

/*
 * The MMCIF driver now processes MMC requests asynchronously, as required
 * by the Linux MMC API.
 *
 * The MMCIF driver processes MMC requests in up to 3 stages: command, optional
 * data, and optional stop. To achieve asynchronous processing each of these
 * stages is split into two halves: a top and a bottom half. The top half
 * initialises the hardware, installs a timeout handler to handle completion
 * timeouts, and returns. In case of the command stage this immediately returns
 * control to the caller, leaving all further processing to run asynchronously.
 * All further request processing is performed by the bottom halves.
 *
 * The bottom half further consists of a "hard" IRQ handler, an IRQ handler
 * thread, a DMA completion callback (if DMA is used), a timeout work item, and
 * request- and stage-specific handler methods.
 *
 * Each bottom half run begins with either a hardware interrupt, a DMA callback
 * invocation, or a timeout work run. In case of an error or successful
 * completion, the MMC core is informed and request processing is finished. If
 * processing has to continue, i.e. if data has to be read from or written to
 * the card, or if a stop command has to be sent, the next top half is called,
 * which performs the necessary hardware handling and reschedules the timeout
 * work. This returns the driver state machine into the bottom half waiting
 * state.
 */
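
/*
 * A rough sketch of the resulting flow for a multi-block request with a stop
 * command (top halves on the left, bottom halves on the right):
 *
 *   mmc core -> sh_mmcif_request() -> sh_mmcif_start_cmd()
 *                                       ... IRQ -> sh_mmcif_irqt(): command
 *                                           done, data stage set up (PIO/DMA)
 *                                       ... IRQ/DMA callback -> sh_mmcif_irqt():
 *                                           data done, sh_mmcif_stop_cmd()
 *                                       ... IRQ -> sh_mmcif_irqt(): stop done,
 *                                           mmc_request_done()
 */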

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mmc/card.h>
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sh_mmcif.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>
#include <linux/spinlock.h>
#include <linux/module.h>

#define DRIVER_NAME     "sh_mmcif"
#define DRIVER_VERSION  "2010-04-28"

/* CE_CMD_SET */
#define CMD_MASK                0x3f000000
#define CMD_SET_RTYP_NO         ((0 << 23) | (0 << 22))
#define CMD_SET_RTYP_6B         ((0 << 23) | (1 << 22)) /* R1/R1b/R3/R4/R5 */
#define CMD_SET_RTYP_17B        ((1 << 23) | (0 << 22)) /* R2 */
#define CMD_SET_RBSY            (1 << 21) /* R1b */
#define CMD_SET_CCSEN           (1 << 20)
#define CMD_SET_WDAT            (1 << 19) /* 1: on data, 0: no data */
#define CMD_SET_DWEN            (1 << 18) /* 1: write, 0: read */
#define CMD_SET_CMLTE           (1 << 17) /* 1: multi block trans, 0: single */
#define CMD_SET_CMD12EN         (1 << 16) /* 1: CMD12 auto issue */
#define CMD_SET_RIDXC_INDEX     ((0 << 15) | (0 << 14)) /* index check */
#define CMD_SET_RIDXC_BITS      ((0 << 15) | (1 << 14)) /* check bits check */
#define CMD_SET_RIDXC_NO        ((1 << 15) | (0 << 14)) /* no check */
#define CMD_SET_CRC7C           ((0 << 13) | (0 << 12)) /* CRC7 check */
#define CMD_SET_CRC7C_BITS      ((0 << 13) | (1 << 12)) /* check bits check */
#define CMD_SET_CRC7C_INTERNAL  ((1 << 13) | (0 << 12)) /* internal CRC7 check */
#define CMD_SET_CRC16C          (1 << 10) /* 0: CRC16 check */
#define CMD_SET_CRCSTE          (1 << 8) /* 1: not receive CRC status */
#define CMD_SET_TBIT            (1 << 7) /* 1: transmission bit "Low" */
#define CMD_SET_OPDM            (1 << 6) /* 1: open/drain */
#define CMD_SET_CCSH            (1 << 5)
#define CMD_SET_DARS            (1 << 2) /* Dual Data Rate */
#define CMD_SET_DATW_1          ((0 << 1) | (0 << 0)) /* 1bit */
#define CMD_SET_DATW_4          ((0 << 1) | (1 << 0)) /* 4bit */
#define CMD_SET_DATW_8          ((1 << 1) | (0 << 0)) /* 8bit */

/* CE_CMD_CTRL */
#define CMD_CTRL_BREAK          (1 << 0)

/* CE_BLOCK_SET */
#define BLOCK_SIZE_MASK         0x0000ffff

/* CE_INT */
#define INT_CCSDE               (1 << 29)
#define INT_CMD12DRE            (1 << 26)
#define INT_CMD12RBE            (1 << 25)
#define INT_CMD12CRE            (1 << 24)
#define INT_DTRANE              (1 << 23)
#define INT_BUFRE               (1 << 22)
#define INT_BUFWEN              (1 << 21)
#define INT_BUFREN              (1 << 20)
#define INT_CCSRCV              (1 << 19)
#define INT_RBSYE               (1 << 17)
#define INT_CRSPE               (1 << 16)
#define INT_CMDVIO              (1 << 15)
#define INT_BUFVIO              (1 << 14)
#define INT_WDATERR             (1 << 11)
#define INT_RDATERR             (1 << 10)
#define INT_RIDXERR             (1 << 9)
#define INT_RSPERR              (1 << 8)
#define INT_CCSTO               (1 << 5)
#define INT_CRCSTO              (1 << 4)
#define INT_WDATTO              (1 << 3)
#define INT_RDATTO              (1 << 2)
#define INT_RBSYTO              (1 << 1)
#define INT_RSPTO               (1 << 0)
#define INT_ERR_STS             (INT_CMDVIO | INT_BUFVIO | INT_WDATERR |  \
                                 INT_RDATERR | INT_RIDXERR | INT_RSPERR | \
                                 INT_CCSTO | INT_CRCSTO | INT_WDATTO |    \
                                 INT_RDATTO | INT_RBSYTO | INT_RSPTO)

#define INT_ALL                 (INT_RBSYE | INT_CRSPE | INT_BUFREN |    \
                                 INT_BUFWEN | INT_CMD12DRE | INT_BUFRE | \
                                 INT_DTRANE | INT_CMD12RBE | INT_CMD12CRE)

#define INT_CCS                 (INT_CCSTO | INT_CCSRCV | INT_CCSDE)

/* CE_INT_MASK */
#define MASK_ALL                0x00000000
#define MASK_MCCSDE             (1 << 29)
#define MASK_MCMD12DRE          (1 << 26)
#define MASK_MCMD12RBE          (1 << 25)
#define MASK_MCMD12CRE          (1 << 24)
#define MASK_MDTRANE            (1 << 23)
#define MASK_MBUFRE             (1 << 22)
#define MASK_MBUFWEN            (1 << 21)
#define MASK_MBUFREN            (1 << 20)
#define MASK_MCCSRCV            (1 << 19)
#define MASK_MRBSYE             (1 << 17)
#define MASK_MCRSPE             (1 << 16)
#define MASK_MCMDVIO            (1 << 15)
#define MASK_MBUFVIO            (1 << 14)
#define MASK_MWDATERR           (1 << 11)
#define MASK_MRDATERR           (1 << 10)
#define MASK_MRIDXERR           (1 << 9)
#define MASK_MRSPERR            (1 << 8)
#define MASK_MCCSTO             (1 << 5)
#define MASK_MCRCSTO            (1 << 4)
#define MASK_MWDATTO            (1 << 3)
#define MASK_MRDATTO            (1 << 2)
#define MASK_MRBSYTO            (1 << 1)
#define MASK_MRSPTO             (1 << 0)

#define MASK_START_CMD          (MASK_MCMDVIO | MASK_MBUFVIO | MASK_MWDATERR | \
                                 MASK_MRDATERR | MASK_MRIDXERR | MASK_MRSPERR | \
                                 MASK_MCRCSTO | MASK_MWDATTO | \
                                 MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO)

#define MASK_CLEAN              (INT_ERR_STS | MASK_MRBSYE | MASK_MCRSPE |      \
                                 MASK_MBUFREN | MASK_MBUFWEN |                  \
                                 MASK_MCMD12DRE | MASK_MBUFRE | MASK_MDTRANE |  \
                                 MASK_MCMD12RBE | MASK_MCMD12CRE)

/* CE_HOST_STS1 */
#define STS1_CMDSEQ             (1 << 31)

/* CE_HOST_STS2 */
#define STS2_CRCSTE             (1 << 31)
#define STS2_CRC16E             (1 << 30)
#define STS2_AC12CRCE           (1 << 29)
#define STS2_RSPCRC7E           (1 << 28)
#define STS2_CRCSTEBE           (1 << 27)
#define STS2_RDATEBE            (1 << 26)
#define STS2_AC12REBE           (1 << 25)
#define STS2_RSPEBE             (1 << 24)
#define STS2_AC12IDXE           (1 << 23)
#define STS2_RSPIDXE            (1 << 22)
#define STS2_CCSTO              (1 << 15)
#define STS2_RDATTO             (1 << 14)
#define STS2_DATBSYTO           (1 << 13)
#define STS2_CRCSTTO            (1 << 12)
#define STS2_AC12BSYTO          (1 << 11)
#define STS2_RSPBSYTO           (1 << 10)
#define STS2_AC12RSPTO          (1 << 9)
#define STS2_RSPTO              (1 << 8)
#define STS2_CRC_ERR            (STS2_CRCSTE | STS2_CRC16E |            \
                                 STS2_AC12CRCE | STS2_RSPCRC7E | STS2_CRCSTEBE)
#define STS2_TIMEOUT_ERR        (STS2_CCSTO | STS2_RDATTO |             \
                                 STS2_DATBSYTO | STS2_CRCSTTO |         \
                                 STS2_AC12BSYTO | STS2_RSPBSYTO |       \
                                 STS2_AC12RSPTO | STS2_RSPTO)

#define CLKDEV_EMMC_DATA        52000000 /* 52 MHz */
#define CLKDEV_MMC_DATA         20000000 /* 20 MHz */
#define CLKDEV_INIT             400000   /* 400 kHz */
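
/*
 * Note: the three CLKDEV_* rate constants above are not referenced anywhere
 * below in this version of the driver; the actual bus clock limits are
 * derived from clk_get_rate() in sh_mmcif_clk_update().
 */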

enum mmcif_state {
        STATE_IDLE,
        STATE_REQUEST,
        STATE_IOS,
        STATE_TIMEOUT,
};

enum mmcif_wait_for {
        MMCIF_WAIT_FOR_REQUEST,
        MMCIF_WAIT_FOR_CMD,
        MMCIF_WAIT_FOR_MREAD,
        MMCIF_WAIT_FOR_MWRITE,
        MMCIF_WAIT_FOR_READ,
        MMCIF_WAIT_FOR_WRITE,
        MMCIF_WAIT_FOR_READ_END,
        MMCIF_WAIT_FOR_WRITE_END,
        MMCIF_WAIT_FOR_STOP,
};

struct sh_mmcif_host {
        struct mmc_host *mmc;
        struct mmc_request *mrq;
        struct platform_device *pd;
        struct clk *hclk;
        unsigned int clk;
        int bus_width;
        unsigned char timing;
        bool sd_error;
        bool dying;
        long timeout;
        void __iomem *addr;
        u32 *pio_ptr;
        spinlock_t lock;                /* protect sh_mmcif_host::state */
        enum mmcif_state state;
        enum mmcif_wait_for wait_for;
        struct delayed_work timeout_work;
        size_t blocksize;
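
        /* PIO state: sg_idx indexes data->sg, sg_blkidx is the byte offset
         * within the current scatterlist entry (see sh_mmcif_next_block()) */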
        int sg_idx;
        int sg_blkidx;
        bool power;
        bool card_present;
        bool ccs_enable;                /* Command Completion Signal support */
        bool clk_ctrl2_enable;
        struct mutex thread_lock;

        /* DMA support */
        struct dma_chan         *chan_rx;
        struct dma_chan         *chan_tx;
        struct completion       dma_complete;
        bool                    dma_active;
};

static inline void sh_mmcif_bitset(struct sh_mmcif_host *host,
                                        unsigned int reg, u32 val)
{
        writel(val | readl(host->addr + reg), host->addr + reg);
}

static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host,
                                        unsigned int reg, u32 val)
{
        writel(~val & readl(host->addr + reg), host->addr + reg);
}

static void mmcif_dma_complete(void *arg)
{
        struct sh_mmcif_host *host = arg;
        struct mmc_request *mrq = host->mrq;

        dev_dbg(&host->pd->dev, "Command completed\n");

        if (WARN(!mrq || !mrq->data, "%s: NULL data in DMA completion!\n",
                 dev_name(&host->pd->dev)))
                return;

        complete(&host->dma_complete);
}
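
/*
 * The completion signalled above is consumed in sh_mmcif_end_cmd(), which
 * waits on host->dma_complete from the threaded IRQ handler. It is also
 * invoked directly from the hard IRQ handler on an error, see
 * sh_mmcif_intr().
 */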

static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
{
        struct mmc_data *data = host->mrq->data;
        struct scatterlist *sg = data->sg;
        struct dma_async_tx_descriptor *desc = NULL;
        struct dma_chan *chan = host->chan_rx;
        dma_cookie_t cookie = -EINVAL;
        int ret;

        ret = dma_map_sg(chan->device->dev, sg, data->sg_len,
                         DMA_FROM_DEVICE);
        if (ret > 0) {
                host->dma_active = true;
                desc = dmaengine_prep_slave_sg(chan, sg, ret,
                        DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        }

        if (desc) {
                desc->callback = mmcif_dma_complete;
                desc->callback_param = host;
                cookie = dmaengine_submit(desc);
                sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN);
                dma_async_issue_pending(chan);
        }
        dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
                __func__, data->sg_len, ret, cookie);

        if (!desc) {
                /* DMA failed, fall back to PIO */
                if (ret >= 0)
                        ret = -EIO;
                host->chan_rx = NULL;
                host->dma_active = false;
                dma_release_channel(chan);
                /* Free the Tx channel too */
                chan = host->chan_tx;
                if (chan) {
                        host->chan_tx = NULL;
                        dma_release_channel(chan);
                }
                dev_warn(&host->pd->dev,
                         "DMA failed: %d, falling back to PIO\n", ret);
                sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
        }

        dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
                desc, cookie, data->sg_len);
}

static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
{
        struct mmc_data *data = host->mrq->data;
        struct scatterlist *sg = data->sg;
        struct dma_async_tx_descriptor *desc = NULL;
        struct dma_chan *chan = host->chan_tx;
        dma_cookie_t cookie = -EINVAL;
        int ret;

        ret = dma_map_sg(chan->device->dev, sg, data->sg_len,
                         DMA_TO_DEVICE);
        if (ret > 0) {
                host->dma_active = true;
                desc = dmaengine_prep_slave_sg(chan, sg, ret,
                        DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        }

        if (desc) {
                desc->callback = mmcif_dma_complete;
                desc->callback_param = host;
                cookie = dmaengine_submit(desc);
                sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAWEN);
                dma_async_issue_pending(chan);
        }
        dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
                __func__, data->sg_len, ret, cookie);

        if (!desc) {
                /* DMA failed, fall back to PIO */
                if (ret >= 0)
                        ret = -EIO;
                host->chan_tx = NULL;
                host->dma_active = false;
                dma_release_channel(chan);
                /* Free the Rx channel too */
                chan = host->chan_rx;
                if (chan) {
                        host->chan_rx = NULL;
                        dma_release_channel(chan);
                }
                dev_warn(&host->pd->dev,
                         "DMA failed: %d, falling back to PIO\n", ret);
                sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
        }

        dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d\n", __func__,
                desc, cookie);
}
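
/*
 * Note that a failure to set up DMA in either direction releases both
 * channels: the driver only ever uses DMA for both Tx and Rx or not at all,
 * so subsequent requests fall back to PIO until DMA is re-requested on the
 * next MMC_POWER_UP (see sh_mmcif_set_ios()).
 */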

static void sh_mmcif_request_dma(struct sh_mmcif_host *host,
                                 struct sh_mmcif_plat_data *pdata)
{
        struct resource *res = platform_get_resource(host->pd, IORESOURCE_MEM, 0);
        struct dma_slave_config cfg;
        dma_cap_mask_t mask;
        int ret;

        host->dma_active = false;

        if (pdata) {
                if (pdata->slave_id_tx <= 0 || pdata->slave_id_rx <= 0)
                        return;
        } else if (!host->pd->dev.of_node) {
                return;
        }

        /* We can only either use DMA for both Tx and Rx or not use it at all */
        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);

        host->chan_tx = dma_request_slave_channel_compat(mask, shdma_chan_filter,
                                pdata ? (void *)pdata->slave_id_tx : NULL,
                                &host->pd->dev, "tx");
        dev_dbg(&host->pd->dev, "%s: TX: got channel %p\n", __func__,
                host->chan_tx);

        if (!host->chan_tx)
                return;

        /* In the OF case the driver will get the slave ID from the DT */
        if (pdata)
                cfg.slave_id = pdata->slave_id_tx;
        cfg.direction = DMA_MEM_TO_DEV;
        cfg.dst_addr = res->start + MMCIF_CE_DATA;
        cfg.src_addr = 0;
        ret = dmaengine_slave_config(host->chan_tx, &cfg);
        if (ret < 0)
                goto ecfgtx;

        host->chan_rx = dma_request_slave_channel_compat(mask, shdma_chan_filter,
                                pdata ? (void *)pdata->slave_id_rx : NULL,
                                &host->pd->dev, "rx");
        dev_dbg(&host->pd->dev, "%s: RX: got channel %p\n", __func__,
                host->chan_rx);

        if (!host->chan_rx)
                goto erqrx;

        if (pdata)
                cfg.slave_id = pdata->slave_id_rx;
        cfg.direction = DMA_DEV_TO_MEM;
        cfg.dst_addr = 0;
        cfg.src_addr = res->start + MMCIF_CE_DATA;
        ret = dmaengine_slave_config(host->chan_rx, &cfg);
        if (ret < 0)
                goto ecfgrx;

        return;

ecfgrx:
        dma_release_channel(host->chan_rx);
        host->chan_rx = NULL;
erqrx:
ecfgtx:
        dma_release_channel(host->chan_tx);
        host->chan_tx = NULL;
}

static void sh_mmcif_release_dma(struct sh_mmcif_host *host)
{
        sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
        /* Descriptors are freed automatically */
        if (host->chan_tx) {
                struct dma_chan *chan = host->chan_tx;
                host->chan_tx = NULL;
                dma_release_channel(chan);
        }
        if (host->chan_rx) {
                struct dma_chan *chan = host->chan_rx;
                host->chan_rx = NULL;
                dma_release_channel(chan);
        }

        host->dma_active = false;
}

static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk)
{
        struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
        bool sup_pclk = p ? p->sup_pclk : false;

        sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
        sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR);

        if (!clk)
                return;
        if (sup_pclk && clk == host->clk)
                sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_SUP_PCLK);
        else
                sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR &
                                ((fls(DIV_ROUND_UP(host->clk,
                                                   clk) - 1) - 1) << 16));
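        /*
         * A note on the divider math above (an observation, not from the
         * datasheet): the field masked by CLK_CLEAR appears to encode a
         * divisor of 2^(n+1), so fls(DIV_ROUND_UP(host->clk, clk) - 1) - 1
         * picks the smallest n with host->clk / 2^(n+1) <= clk. This is
         * consistent with the f_max = clk / 2 and f_min = clk / 512 limits
         * set in sh_mmcif_clk_update().
         */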

        sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
}

static void sh_mmcif_sync_reset(struct sh_mmcif_host *host)
{
        u32 tmp;

        tmp = 0x010f0000 & sh_mmcif_readl(host->addr, MMCIF_CE_CLK_CTRL);

        sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_ON);
        sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_OFF);
        if (host->ccs_enable)
                tmp |= SCCSTO_29;
        if (host->clk_ctrl2_enable)
                sh_mmcif_writel(host->addr, MMCIF_CE_CLK_CTRL2, 0x0F0F0000);
        sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, tmp |
                SRSPTO_256 | SRBSYTO_29 | SRWDTO_29);
        /* byte swap on */
        sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_ATYP);
}

static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
{
        u32 state1, state2;
        int ret, timeout;

        host->sd_error = false;

        state1 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1);
        state2 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS2);
        dev_dbg(&host->pd->dev, "ERR HOST_STS1 = %08x\n", state1);
        dev_dbg(&host->pd->dev, "ERR HOST_STS2 = %08x\n", state2);

        if (state1 & STS1_CMDSEQ) {
                sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK);
                sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, ~CMD_CTRL_BREAK);
                for (timeout = 10000000; timeout; timeout--) {
                        if (!(sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1)
                              & STS1_CMDSEQ))
                                break;
                        mdelay(1);
                }
                if (!timeout) {
                        dev_err(&host->pd->dev,
                                "Forced end of command sequence timeout err\n");
                        return -EIO;
                }
                sh_mmcif_sync_reset(host);
                dev_dbg(&host->pd->dev, "Forced end of command sequence\n");
                return -EIO;
        }

        if (state2 & STS2_CRC_ERR) {
                dev_err(&host->pd->dev, " CRC error: state %u, wait %u\n",
                        host->state, host->wait_for);
                ret = -EIO;
        } else if (state2 & STS2_TIMEOUT_ERR) {
                dev_err(&host->pd->dev, " Timeout: state %u, wait %u\n",
                        host->state, host->wait_for);
                ret = -ETIMEDOUT;
        } else {
                dev_dbg(&host->pd->dev, " End/Index error: state %u, wait %u\n",
                        host->state, host->wait_for);
                ret = -EIO;
        }
        return ret;
}

static bool sh_mmcif_next_block(struct sh_mmcif_host *host, u32 *p)
{
        struct mmc_data *data = host->mrq->data;

        host->sg_blkidx += host->blocksize;

        /* data->sg->length must be a multiple of host->blocksize? */
        BUG_ON(host->sg_blkidx > data->sg->length);

        if (host->sg_blkidx == data->sg->length) {
                host->sg_blkidx = 0;
                if (++host->sg_idx < data->sg_len)
                        host->pio_ptr = sg_virt(++data->sg);
        } else {
                host->pio_ptr = p;
        }

        return host->sg_idx != data->sg_len;
}

static void sh_mmcif_single_read(struct sh_mmcif_host *host,
                                 struct mmc_request *mrq)
{
        host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
                           BLOCK_SIZE_MASK) + 3;

        host->wait_for = MMCIF_WAIT_FOR_READ;

        /* buf read enable */
        sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
}

static bool sh_mmcif_read_block(struct sh_mmcif_host *host)
{
        struct mmc_data *data = host->mrq->data;
        u32 *p = sg_virt(data->sg);
        int i;

        if (host->sd_error) {
                data->error = sh_mmcif_error_manage(host);
                dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error);
                return false;
        }

        for (i = 0; i < host->blocksize / 4; i++)
                *p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);

        /* buffer read end */
        sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
        host->wait_for = MMCIF_WAIT_FOR_READ_END;

        return true;
}

static void sh_mmcif_multi_read(struct sh_mmcif_host *host,
                                struct mmc_request *mrq)
{
        struct mmc_data *data = mrq->data;

        if (!data->sg_len || !data->sg->length)
                return;

        host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
                BLOCK_SIZE_MASK;

        host->wait_for = MMCIF_WAIT_FOR_MREAD;
        host->sg_idx = 0;
        host->sg_blkidx = 0;
        host->pio_ptr = sg_virt(data->sg);

        sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
}

static bool sh_mmcif_mread_block(struct sh_mmcif_host *host)
{
        struct mmc_data *data = host->mrq->data;
        u32 *p = host->pio_ptr;
        int i;

        if (host->sd_error) {
                data->error = sh_mmcif_error_manage(host);
                dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error);
                return false;
        }

        BUG_ON(!data->sg->length);

        for (i = 0; i < host->blocksize / 4; i++)
                *p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);

        if (!sh_mmcif_next_block(host, p))
                return false;

        sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);

        return true;
}

static void sh_mmcif_single_write(struct sh_mmcif_host *host,
                                        struct mmc_request *mrq)
{
        host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
                           BLOCK_SIZE_MASK) + 3;

        host->wait_for = MMCIF_WAIT_FOR_WRITE;

        /* buf write enable */
        sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
}

static bool sh_mmcif_write_block(struct sh_mmcif_host *host)
{
        struct mmc_data *data = host->mrq->data;
        u32 *p = sg_virt(data->sg);
        int i;

        if (host->sd_error) {
                data->error = sh_mmcif_error_manage(host);
                dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error);
                return false;
        }

        for (i = 0; i < host->blocksize / 4; i++)
                sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);

        /* buffer write end */
        sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
        host->wait_for = MMCIF_WAIT_FOR_WRITE_END;

        return true;
}

static void sh_mmcif_multi_write(struct sh_mmcif_host *host,
                                struct mmc_request *mrq)
{
        struct mmc_data *data = mrq->data;

        if (!data->sg_len || !data->sg->length)
                return;

        host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
                BLOCK_SIZE_MASK;

        host->wait_for = MMCIF_WAIT_FOR_MWRITE;
        host->sg_idx = 0;
        host->sg_blkidx = 0;
        host->pio_ptr = sg_virt(data->sg);

        sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
}

static bool sh_mmcif_mwrite_block(struct sh_mmcif_host *host)
{
        struct mmc_data *data = host->mrq->data;
        u32 *p = host->pio_ptr;
        int i;

        if (host->sd_error) {
                data->error = sh_mmcif_error_manage(host);
                dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error);
                return false;
        }

        BUG_ON(!data->sg->length);

        for (i = 0; i < host->blocksize / 4; i++)
                sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);

        if (!sh_mmcif_next_block(host, p))
                return false;

        sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);

        return true;
}

static void sh_mmcif_get_response(struct sh_mmcif_host *host,
                                                struct mmc_command *cmd)
{
        if (cmd->flags & MMC_RSP_136) {
                cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP3);
                cmd->resp[1] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP2);
                cmd->resp[2] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP1);
                cmd->resp[3] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0);
        } else
                cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0);
}

static void sh_mmcif_get_cmd12response(struct sh_mmcif_host *host,
                                                struct mmc_command *cmd)
{
        cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP_CMD12);
}

static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
                            struct mmc_request *mrq)
{
        struct mmc_data *data = mrq->data;
        struct mmc_command *cmd = mrq->cmd;
        u32 opc = cmd->opcode;
        u32 tmp = 0;

        /* Response Type check */
        switch (mmc_resp_type(cmd)) {
        case MMC_RSP_NONE:
                tmp |= CMD_SET_RTYP_NO;
                break;
        case MMC_RSP_R1:
        case MMC_RSP_R1B:
        case MMC_RSP_R3:
                tmp |= CMD_SET_RTYP_6B;
                break;
        case MMC_RSP_R2:
                tmp |= CMD_SET_RTYP_17B;
                break;
        default:
                dev_err(&host->pd->dev, "Unsupported response type.\n");
                break;
        }
        switch (opc) {
        /* RBSY */
        case MMC_SLEEP_AWAKE:
        case MMC_SWITCH:
        case MMC_STOP_TRANSMISSION:
        case MMC_SET_WRITE_PROT:
        case MMC_CLR_WRITE_PROT:
        case MMC_ERASE:
                tmp |= CMD_SET_RBSY;
                break;
        }
        /* WDAT / DATW */
        if (data) {
                tmp |= CMD_SET_WDAT;
                switch (host->bus_width) {
                case MMC_BUS_WIDTH_1:
                        tmp |= CMD_SET_DATW_1;
                        break;
                case MMC_BUS_WIDTH_4:
                        tmp |= CMD_SET_DATW_4;
                        break;
                case MMC_BUS_WIDTH_8:
                        tmp |= CMD_SET_DATW_8;
                        break;
                default:
                        dev_err(&host->pd->dev, "Unsupported bus width.\n");
                        break;
                }
                switch (host->timing) {
                case MMC_TIMING_UHS_DDR50:
                        /*
                         * The MMC core will only set this timing if the host
                         * advertises the MMC_CAP_UHS_DDR50 capability. MMCIF
                         * implementations with this capability, e.g. sh73a0,
                         * will have to set it in their platform data.
                         */
                        tmp |= CMD_SET_DARS;
                        break;
                }
        }
        /* DWEN */
        if (opc == MMC_WRITE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK)
                tmp |= CMD_SET_DWEN;
        /* CMLTE/CMD12EN */
        if (opc == MMC_READ_MULTIPLE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK) {
                tmp |= CMD_SET_CMLTE | CMD_SET_CMD12EN;
                sh_mmcif_bitset(host, MMCIF_CE_BLOCK_SET,
                                data->blocks << 16);
        }
        /* RIDXC[1:0] check bits */
        if (opc == MMC_SEND_OP_COND || opc == MMC_ALL_SEND_CID ||
            opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
                tmp |= CMD_SET_RIDXC_BITS;
        /* RCRC7C[1:0] check bits */
        if (opc == MMC_SEND_OP_COND)
                tmp |= CMD_SET_CRC7C_BITS;
        /* RCRC7C[1:0] internal CRC7 */
        if (opc == MMC_ALL_SEND_CID ||
                opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
                tmp |= CMD_SET_CRC7C_INTERNAL;

        return (opc << 24) | tmp;
}

static int sh_mmcif_data_trans(struct sh_mmcif_host *host,
                               struct mmc_request *mrq, u32 opc)
{
        switch (opc) {
        case MMC_READ_MULTIPLE_BLOCK:
                sh_mmcif_multi_read(host, mrq);
                return 0;
        case MMC_WRITE_MULTIPLE_BLOCK:
                sh_mmcif_multi_write(host, mrq);
                return 0;
        case MMC_WRITE_BLOCK:
                sh_mmcif_single_write(host, mrq);
                return 0;
        case MMC_READ_SINGLE_BLOCK:
        case MMC_SEND_EXT_CSD:
                sh_mmcif_single_read(host, mrq);
                return 0;
        default:
                dev_err(&host->pd->dev, "Unsupported CMD%d\n", opc);
                return -EINVAL;
        }
}

static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
                               struct mmc_request *mrq)
{
        struct mmc_command *cmd = mrq->cmd;
        u32 opc = cmd->opcode;
        u32 mask;

        switch (opc) {
        /* response busy check */
        case MMC_SLEEP_AWAKE:
        case MMC_SWITCH:
        case MMC_STOP_TRANSMISSION:
        case MMC_SET_WRITE_PROT:
        case MMC_CLR_WRITE_PROT:
        case MMC_ERASE:
                mask = MASK_START_CMD | MASK_MRBSYE;
                break;
        default:
                mask = MASK_START_CMD | MASK_MCRSPE;
                break;
        }

        if (host->ccs_enable)
                mask |= MASK_MCCSTO;

        if (mrq->data) {
                sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET, 0);
                sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET,
                                mrq->data->blksz);
        }
        opc = sh_mmcif_set_cmd(host, mrq);

        if (host->ccs_enable)
                sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0);
        else
                sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0 | INT_CCS);
        sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, mask);
        /* set arg */
        sh_mmcif_writel(host->addr, MMCIF_CE_ARG, cmd->arg);
        /* set cmd */
        sh_mmcif_writel(host->addr, MMCIF_CE_CMD_SET, opc);

        host->wait_for = MMCIF_WAIT_FOR_CMD;
        schedule_delayed_work(&host->timeout_work, host->timeout);
}

static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
                              struct mmc_request *mrq)
{
        switch (mrq->cmd->opcode) {
        case MMC_READ_MULTIPLE_BLOCK:
                sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
                break;
        case MMC_WRITE_MULTIPLE_BLOCK:
                sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
                break;
        default:
                dev_err(&host->pd->dev, "unsupported stop cmd\n");
                mrq->stop->error = sh_mmcif_error_manage(host);
                return;
        }

        host->wait_for = MMCIF_WAIT_FOR_STOP;
}
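
/*
 * Note: for multi-block transfers CMD12 is issued by the controller itself
 * (CMD_SET_CMD12EN in sh_mmcif_set_cmd()), so the "stop" stage above only
 * enables the matching completion interrupt; the CMD12 response is then
 * fetched from CE_RESP_CMD12 in sh_mmcif_get_cmd12response().
 */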

static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
        struct sh_mmcif_host *host = mmc_priv(mmc);
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);
        if (host->state != STATE_IDLE) {
                dev_dbg(&host->pd->dev, "%s() rejected, state %u\n", __func__, host->state);
                spin_unlock_irqrestore(&host->lock, flags);
                mrq->cmd->error = -EAGAIN;
                mmc_request_done(mmc, mrq);
                return;
        }

        host->state = STATE_REQUEST;
        spin_unlock_irqrestore(&host->lock, flags);

        switch (mrq->cmd->opcode) {
        /* MMCIF does not support SD/SDIO commands */
        case MMC_SLEEP_AWAKE: /* = SD_IO_SEND_OP_COND (5) */
        case MMC_SEND_EXT_CSD: /* = SD_SEND_IF_COND (8) */
                if ((mrq->cmd->flags & MMC_CMD_MASK) != MMC_CMD_BCR)
                        break;
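                /* fall through - a BCR-type CMD5/CMD8 is the SD/SDIO
                 * variant sharing the opcode, reject it like the rest */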
        case MMC_APP_CMD:
        case SD_IO_RW_DIRECT:
                host->state = STATE_IDLE;
                mrq->cmd->error = -ETIMEDOUT;
                mmc_request_done(mmc, mrq);
                return;
        default:
                break;
        }

        host->mrq = mrq;

        sh_mmcif_start_cmd(host, mrq);
}

static int sh_mmcif_clk_update(struct sh_mmcif_host *host)
{
        int ret = clk_enable(host->hclk);

        if (!ret) {
                host->clk = clk_get_rate(host->hclk);
                host->mmc->f_max = host->clk / 2;
                host->mmc->f_min = host->clk / 512;
        }

        return ret;
}

static void sh_mmcif_set_power(struct sh_mmcif_host *host, struct mmc_ios *ios)
{
        struct mmc_host *mmc = host->mmc;

        if (!IS_ERR(mmc->supply.vmmc))
                /* Errors ignored... */
                mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
                                      ios->power_mode ? ios->vdd : 0);
}

static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
        struct sh_mmcif_host *host = mmc_priv(mmc);
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);
        if (host->state != STATE_IDLE) {
                dev_dbg(&host->pd->dev, "%s() rejected, state %u\n", __func__, host->state);
                spin_unlock_irqrestore(&host->lock, flags);
                return;
        }

        host->state = STATE_IOS;
        spin_unlock_irqrestore(&host->lock, flags);

        if (ios->power_mode == MMC_POWER_UP) {
                if (!host->card_present) {
                        /* See if we also get DMA */
                        sh_mmcif_request_dma(host, host->pd->dev.platform_data);
                        host->card_present = true;
                }
                sh_mmcif_set_power(host, ios);
        } else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) {
                /* clock stop */
                sh_mmcif_clock_control(host, 0);
                if (ios->power_mode == MMC_POWER_OFF) {
                        if (host->card_present) {
                                sh_mmcif_release_dma(host);
                                host->card_present = false;
                        }
                }
                if (host->power) {
                        pm_runtime_put_sync(&host->pd->dev);
                        clk_disable(host->hclk);
                        host->power = false;
                        if (ios->power_mode == MMC_POWER_OFF)
                                sh_mmcif_set_power(host, ios);
                }
                host->state = STATE_IDLE;
                return;
        }

        if (ios->clock) {
                if (!host->power) {
                        sh_mmcif_clk_update(host);
                        pm_runtime_get_sync(&host->pd->dev);
                        host->power = true;
                        sh_mmcif_sync_reset(host);
                }
                sh_mmcif_clock_control(host, ios->clock);
        }

        host->timing = ios->timing;
        host->bus_width = ios->bus_width;
        host->state = STATE_IDLE;
}

static int sh_mmcif_get_cd(struct mmc_host *mmc)
{
        struct sh_mmcif_host *host = mmc_priv(mmc);
        struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
        int ret = mmc_gpio_get_cd(mmc);

        if (ret >= 0)
                return ret;

        if (!p || !p->get_cd)
                return -ENOSYS;
        else
                return p->get_cd(host->pd);
}

static struct mmc_host_ops sh_mmcif_ops = {
        .request        = sh_mmcif_request,
        .set_ios        = sh_mmcif_set_ios,
        .get_cd         = sh_mmcif_get_cd,
};

static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
{
        struct mmc_command *cmd = host->mrq->cmd;
        struct mmc_data *data = host->mrq->data;
        long time;

        if (host->sd_error) {
                switch (cmd->opcode) {
                case MMC_ALL_SEND_CID:
                case MMC_SELECT_CARD:
                case MMC_APP_CMD:
                        cmd->error = -ETIMEDOUT;
                        break;
                default:
                        cmd->error = sh_mmcif_error_manage(host);
                        break;
                }
                dev_dbg(&host->pd->dev, "CMD%d error %d\n",
                        cmd->opcode, cmd->error);
                host->sd_error = false;
                return false;
        }
        if (!(cmd->flags & MMC_RSP_PRESENT)) {
                cmd->error = 0;
                return false;
        }

        sh_mmcif_get_response(host, cmd);

        if (!data)
                return false;

        /*
         * Completion can be signalled from the DMA callback and on error, so
         * it has to be reset here, before setting .dma_active.
         */
        init_completion(&host->dma_complete);

        if (data->flags & MMC_DATA_READ) {
                if (host->chan_rx)
                        sh_mmcif_start_dma_rx(host);
        } else {
                if (host->chan_tx)
                        sh_mmcif_start_dma_tx(host);
        }

        if (!host->dma_active) {
                data->error = sh_mmcif_data_trans(host, host->mrq, cmd->opcode);
                return !data->error;
        }

        /* Running in the IRQ thread, can sleep */
        time = wait_for_completion_interruptible_timeout(&host->dma_complete,
                                                         host->timeout);

        if (data->flags & MMC_DATA_READ)
                dma_unmap_sg(host->chan_rx->device->dev,
                             data->sg, data->sg_len,
                             DMA_FROM_DEVICE);
        else
                dma_unmap_sg(host->chan_tx->device->dev,
                             data->sg, data->sg_len,
                             DMA_TO_DEVICE);

        if (host->sd_error) {
                dev_err(host->mmc->parent,
                        "Error IRQ while waiting for DMA completion!\n");
                /* Woken up by an error IRQ: abort DMA */
                data->error = sh_mmcif_error_manage(host);
        } else if (!time) {
                dev_err(host->mmc->parent, "DMA timeout!\n");
                data->error = -ETIMEDOUT;
        } else if (time < 0) {
                dev_err(host->mmc->parent,
                        "wait_for_completion_...() error %ld!\n", time);
                data->error = time;
        }
        sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC,
                        BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
        host->dma_active = false;

        if (data->error) {
                data->bytes_xfered = 0;
                /* Abort DMA */
                if (data->flags & MMC_DATA_READ)
                        dmaengine_terminate_all(host->chan_rx);
                else
                        dmaengine_terminate_all(host->chan_tx);
        }

        return false;
}

static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
{
        struct sh_mmcif_host *host = dev_id;
        struct mmc_request *mrq;
        bool wait = false;

        cancel_delayed_work_sync(&host->timeout_work);

        mutex_lock(&host->thread_lock);

        mrq = host->mrq;
        if (!mrq) {
                dev_dbg(&host->pd->dev, "IRQ thread state %u, wait %u: NULL mrq!\n",
                        host->state, host->wait_for);
                mutex_unlock(&host->thread_lock);
                return IRQ_HANDLED;
        }

        /*
         * All handlers return true if processing continues, and false if the
         * request has to be completed - successfully or not.
         */
        switch (host->wait_for) {
        case MMCIF_WAIT_FOR_REQUEST:
                /* We're too late, the timeout has already kicked in */
                mutex_unlock(&host->thread_lock);
                return IRQ_HANDLED;
        case MMCIF_WAIT_FOR_CMD:
                /* Wait for data? */
                wait = sh_mmcif_end_cmd(host);
                break;
        case MMCIF_WAIT_FOR_MREAD:
                /* Wait for more data? */
                wait = sh_mmcif_mread_block(host);
                break;
        case MMCIF_WAIT_FOR_READ:
                /* Wait for data end? */
                wait = sh_mmcif_read_block(host);
                break;
        case MMCIF_WAIT_FOR_MWRITE:
                /* Wait for more data to write? */
                wait = sh_mmcif_mwrite_block(host);
                break;
        case MMCIF_WAIT_FOR_WRITE:
                /* Wait for data end? */
                wait = sh_mmcif_write_block(host);
                break;
        case MMCIF_WAIT_FOR_STOP:
                if (host->sd_error) {
                        mrq->stop->error = sh_mmcif_error_manage(host);
                        dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, mrq->stop->error);
                        break;
                }
                sh_mmcif_get_cmd12response(host, mrq->stop);
                mrq->stop->error = 0;
                break;
        case MMCIF_WAIT_FOR_READ_END:
        case MMCIF_WAIT_FOR_WRITE_END:
                if (host->sd_error) {
                        mrq->data->error = sh_mmcif_error_manage(host);
                        dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, mrq->data->error);
                }
                break;
        default:
                BUG();
        }

        if (wait) {
                schedule_delayed_work(&host->timeout_work, host->timeout);
                /* Wait for more data */
                mutex_unlock(&host->thread_lock);
                return IRQ_HANDLED;
        }

        if (host->wait_for != MMCIF_WAIT_FOR_STOP) {
                struct mmc_data *data = mrq->data;
                if (!mrq->cmd->error && data && !data->error)
                        data->bytes_xfered =
                                data->blocks * data->blksz;

                if (mrq->stop && !mrq->cmd->error && (!data || !data->error)) {
                        sh_mmcif_stop_cmd(host, mrq);
                        if (!mrq->stop->error) {
                                schedule_delayed_work(&host->timeout_work, host->timeout);
                                mutex_unlock(&host->thread_lock);
                                return IRQ_HANDLED;
                        }
                }
        }

        host->wait_for = MMCIF_WAIT_FOR_REQUEST;
        host->state = STATE_IDLE;
        host->mrq = NULL;
        mmc_request_done(host->mmc, mrq);

        mutex_unlock(&host->thread_lock);

        return IRQ_HANDLED;
}

static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
{
        struct sh_mmcif_host *host = dev_id;
        u32 state, mask;

        state = sh_mmcif_readl(host->addr, MMCIF_CE_INT);
        mask = sh_mmcif_readl(host->addr, MMCIF_CE_INT_MASK);
        if (host->ccs_enable)
                sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~(state & mask));
        else
                sh_mmcif_writel(host->addr, MMCIF_CE_INT, INT_CCS | ~(state & mask));
        sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state & MASK_CLEAN);

        if (state & ~MASK_CLEAN)
                dev_dbg(&host->pd->dev, "IRQ state = 0x%08x incompletely cleared\n",
                        state);

        if (state & INT_ERR_STS || state & ~INT_ALL) {
                host->sd_error = true;
                dev_dbg(&host->pd->dev, "int err state = 0x%08x\n", state);
        }
        if (state & ~(INT_CMD12RBE | INT_CMD12CRE)) {
                if (!host->mrq)
                        dev_dbg(&host->pd->dev, "NULL IRQ state = 0x%08x\n", state);
                if (!host->dma_active)
                        return IRQ_WAKE_THREAD;
                else if (host->sd_error)
                        mmcif_dma_complete(host);
        } else {
                dev_dbg(&host->pd->dev, "Unexpected IRQ 0x%x\n", state);
        }

        return IRQ_HANDLED;
}
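
/*
 * The hard handler above only acknowledges and classifies the event (setting
 * host->sd_error where needed); the actual request processing happens in
 * sh_mmcif_irqt(), the threaded half, woken via IRQ_WAKE_THREAD.
 */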

static void mmcif_timeout_work(struct work_struct *work)
{
        struct delayed_work *d = container_of(work, struct delayed_work, work);
        struct sh_mmcif_host *host = container_of(d, struct sh_mmcif_host, timeout_work);
        struct mmc_request *mrq = host->mrq;
        unsigned long flags;

        if (host->dying)
                /* Don't run after mmc_remove_host() */
                return;

        dev_err(&host->pd->dev, "Timeout waiting for %u on CMD%u\n",
                host->wait_for, mrq->cmd->opcode);

        spin_lock_irqsave(&host->lock, flags);
        if (host->state == STATE_IDLE) {
                spin_unlock_irqrestore(&host->lock, flags);
                return;
        }

        host->state = STATE_TIMEOUT;
        spin_unlock_irqrestore(&host->lock, flags);

        /*
         * Handle races with cancel_delayed_work(), unless
         * cancel_delayed_work_sync() is used
         */
        switch (host->wait_for) {
        case MMCIF_WAIT_FOR_CMD:
                mrq->cmd->error = sh_mmcif_error_manage(host);
                break;
        case MMCIF_WAIT_FOR_STOP:
                mrq->stop->error = sh_mmcif_error_manage(host);
                break;
        case MMCIF_WAIT_FOR_MREAD:
        case MMCIF_WAIT_FOR_MWRITE:
        case MMCIF_WAIT_FOR_READ:
        case MMCIF_WAIT_FOR_WRITE:
        case MMCIF_WAIT_FOR_READ_END:
        case MMCIF_WAIT_FOR_WRITE_END:
                mrq->data->error = sh_mmcif_error_manage(host);
                break;
        default:
                BUG();
        }

        host->state = STATE_IDLE;
        host->wait_for = MMCIF_WAIT_FOR_REQUEST;
        host->mrq = NULL;
        mmc_request_done(host->mmc, mrq);
}

static void sh_mmcif_init_ocr(struct sh_mmcif_host *host)
{
        struct sh_mmcif_plat_data *pd = host->pd->dev.platform_data;
        struct mmc_host *mmc = host->mmc;

        mmc_regulator_get_supply(mmc);

        if (!pd)
                return;

        if (!mmc->ocr_avail)
                mmc->ocr_avail = pd->ocr;
        else if (pd->ocr)
                dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");
}

static int sh_mmcif_probe(struct platform_device *pdev)
{
        int ret = 0, irq[2];
        struct mmc_host *mmc;
        struct sh_mmcif_host *host;
        struct sh_mmcif_plat_data *pd = pdev->dev.platform_data;
        struct resource *res;
        void __iomem *reg;
        const char *name;

        irq[0] = platform_get_irq(pdev, 0);
        irq[1] = platform_get_irq(pdev, 1);
        if (irq[0] < 0) {
                dev_err(&pdev->dev, "Get irq error\n");
                return -ENXIO;
        }
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
                dev_err(&pdev->dev, "platform_get_resource error.\n");
                return -ENXIO;
        }
        reg = ioremap(res->start, resource_size(res));
        if (!reg) {
                dev_err(&pdev->dev, "ioremap error.\n");
                return -ENOMEM;
        }

        mmc = mmc_alloc_host(sizeof(struct sh_mmcif_host), &pdev->dev);
        if (!mmc) {
                ret = -ENOMEM;
                goto ealloch;
        }

        ret = mmc_of_parse(mmc);
        if (ret < 0)
                goto eofparse;

        host            = mmc_priv(mmc);
        host->mmc       = mmc;
        host->addr      = reg;
        host->timeout   = msecs_to_jiffies(1000);
        host->ccs_enable = !pd || !pd->ccs_unsupported;
        host->clk_ctrl2_enable = pd && pd->clk_ctrl2_present;

        host->pd = pdev;

        spin_lock_init(&host->lock);

        mmc->ops = &sh_mmcif_ops;
        sh_mmcif_init_ocr(host);

        mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_WAIT_WHILE_BUSY;
        if (pd && pd->caps)
                mmc->caps |= pd->caps;
        mmc->max_segs = 32;
        mmc->max_blk_size = 512;
        mmc->max_req_size = PAGE_CACHE_SIZE * mmc->max_segs;
        mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
        mmc->max_seg_size = mmc->max_req_size;

        platform_set_drvdata(pdev, host);

        pm_runtime_enable(&pdev->dev);
        host->power = false;

        host->hclk = clk_get(&pdev->dev, NULL);
        if (IS_ERR(host->hclk)) {
                ret = PTR_ERR(host->hclk);
                dev_err(&pdev->dev, "cannot get clock: %d\n", ret);
                goto eclkget;
        }
        ret = sh_mmcif_clk_update(host);
        if (ret < 0)
                goto eclkupdate;

        ret = pm_runtime_resume(&pdev->dev);
        if (ret < 0)
                goto eresume;

        INIT_DELAYED_WORK(&host->timeout_work, mmcif_timeout_work);

        sh_mmcif_sync_reset(host);
        sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);

        name = irq[1] < 0 ? dev_name(&pdev->dev) : "sh_mmc:error";
        ret = request_threaded_irq(irq[0], sh_mmcif_intr, sh_mmcif_irqt, 0, name, host);
        if (ret) {
                dev_err(&pdev->dev, "request_irq error (%s)\n", name);
                goto ereqirq0;
        }
        if (irq[1] >= 0) {
                ret = request_threaded_irq(irq[1], sh_mmcif_intr, sh_mmcif_irqt,
                                           0, "sh_mmc:int", host);
                if (ret) {
                        dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n");
                        goto ereqirq1;
                }
        }

        if (pd && pd->use_cd_gpio) {
                ret = mmc_gpio_request_cd(mmc, pd->cd_gpio, 0);
                if (ret < 0)
                        goto erqcd;
        }

        mutex_init(&host->thread_lock);

        clk_disable(host->hclk);
        ret = mmc_add_host(mmc);
        if (ret < 0)
                goto emmcaddh;

        dev_pm_qos_expose_latency_limit(&pdev->dev, 100);

        dev_info(&pdev->dev, "driver version %s\n", DRIVER_VERSION);
        dev_dbg(&pdev->dev, "chip ver H'%04x\n",
                sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0x0000ffff);
        return ret;

emmcaddh:
erqcd:
        if (irq[1] >= 0)
                free_irq(irq[1], host);
ereqirq1:
        free_irq(irq[0], host);
ereqirq0:
        pm_runtime_suspend(&pdev->dev);
eresume:
        clk_disable(host->hclk);
eclkupdate:
        clk_put(host->hclk);
eclkget:
        pm_runtime_disable(&pdev->dev);
eofparse:
        mmc_free_host(mmc);
ealloch:
        iounmap(reg);
        return ret;
}

static int sh_mmcif_remove(struct platform_device *pdev)
{
        struct sh_mmcif_host *host = platform_get_drvdata(pdev);
        int irq[2];

        host->dying = true;
        clk_enable(host->hclk);
        pm_runtime_get_sync(&pdev->dev);

        dev_pm_qos_hide_latency_limit(&pdev->dev);

        mmc_remove_host(host->mmc);
        sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);

        /*
         * FIXME: cancel_delayed_work(_sync)() and free_irq() race with the
         * mmc_remove_host() call above. But swapping order doesn't help either
         * (a query on the linux-mmc mailing list didn't bring any replies).
         */
        cancel_delayed_work_sync(&host->timeout_work);

        if (host->addr)
                iounmap(host->addr);

        irq[0] = platform_get_irq(pdev, 0);
        irq[1] = platform_get_irq(pdev, 1);

        free_irq(irq[0], host);
        if (irq[1] >= 0)
                free_irq(irq[1], host);

        clk_disable(host->hclk);
        mmc_free_host(host->mmc);
        pm_runtime_put_sync(&pdev->dev);
        pm_runtime_disable(&pdev->dev);

        return 0;
}

#ifdef CONFIG_PM
static int sh_mmcif_suspend(struct device *dev)
{
        struct sh_mmcif_host *host = dev_get_drvdata(dev);
        int ret = mmc_suspend_host(host->mmc);

        if (!ret)
                sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);

        return ret;
}

static int sh_mmcif_resume(struct device *dev)
{
        struct sh_mmcif_host *host = dev_get_drvdata(dev);

        return mmc_resume_host(host->mmc);
}
#else
#define sh_mmcif_suspend        NULL
#define sh_mmcif_resume         NULL
#endif  /* CONFIG_PM */

static const struct of_device_id mmcif_of_match[] = {
        { .compatible = "renesas,sh-mmcif" },
        { }
};
MODULE_DEVICE_TABLE(of, mmcif_of_match);

static const struct dev_pm_ops sh_mmcif_dev_pm_ops = {
        .suspend = sh_mmcif_suspend,
        .resume = sh_mmcif_resume,
};

static struct platform_driver sh_mmcif_driver = {
        .probe          = sh_mmcif_probe,
        .remove         = sh_mmcif_remove,
        .driver         = {
                .name   = DRIVER_NAME,
                .pm     = &sh_mmcif_dev_pm_ops,
                .owner  = THIS_MODULE,
                .of_match_table = mmcif_of_match,
        },
};

module_platform_driver(sh_mmcif_driver);

MODULE_DESCRIPTION("SuperH on-chip MMC/eMMC interface driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_AUTHOR("Yusuke Goda <yusuke.goda.sx@renesas.com>");