linux/drivers/mmc/host/sh_mmcif.c
/*
 * MMCIF eMMC driver.
 *
 * Copyright (C) 2010 Renesas Solutions Corp.
 * Yusuke Goda <yusuke.goda.sx@renesas.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License.
 */

/*
 * The MMCIF driver processes MMC requests asynchronously, as required by
 * the Linux MMC API.
 *
 * The MMCIF driver processes MMC requests in up to 3 stages: command, optional
 * data, and optional stop. To achieve asynchronous processing each of these
 * stages is split into two halves: a top and a bottom half. The top half
 * initialises the hardware, installs a timeout handler to handle completion
 * timeouts, and returns. In case of the command stage this immediately returns
 * control to the caller, leaving all further processing to run asynchronously.
 * All further request processing is performed by the bottom halves.
 *
 * The bottom half further consists of a "hard" IRQ handler, an IRQ handler
 * thread, a DMA completion callback, if DMA is used, a timeout work, and
 * request- and stage-specific handler methods.
 *
 * Each bottom half run begins with either a hardware interrupt, a DMA callback
 * invocation, or a timeout work run. In case of an error or a successful
 * processing completion, the MMC core is informed and the request processing is
 * finished. In case processing has to continue, i.e., if data has to be read
 * from or written to the card, or if a stop command has to be sent, the next
 * top half is called, which performs the necessary hardware handling and
 * reschedules the timeout work. This returns the driver state machine into the
 * bottom half waiting state.
 */

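/*
 * A rough sketch of a single-block PIO read, assuming no errors (an
 * illustration of the state machine described above, using only functions
 * defined in this file):
 *
 *   sh_mmcif_request()              command top half, called by the MMC core
 *     -> sh_mmcif_start_cmd()       program CE_CMD_SET, arm the timeout work
 *   sh_mmcif_intr()/sh_mmcif_irqt() bottom half: command response ready
 *     -> sh_mmcif_end_cmd()         fetch the response, set up the data stage
 *        -> sh_mmcif_single_read()  data top half: unmask BUFREN
 *   sh_mmcif_intr()/sh_mmcif_irqt() bottom half: read buffer filled
 *     -> sh_mmcif_read_block()      drain CE_DATA, wait for BUFRE
 *   sh_mmcif_intr()/sh_mmcif_irqt() bottom half: transfer complete
 *     -> mmc_request_done()         request finished, state back to IDLE
 */
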
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mmc/card.h>
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sh_mmcif.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>
#include <linux/spinlock.h>
#include <linux/module.h>

#define DRIVER_NAME     "sh_mmcif"

/* CE_CMD_SET */
#define CMD_MASK                0x3f000000
#define CMD_SET_RTYP_NO         ((0 << 23) | (0 << 22))
#define CMD_SET_RTYP_6B         ((0 << 23) | (1 << 22)) /* R1/R1b/R3/R4/R5 */
#define CMD_SET_RTYP_17B        ((1 << 23) | (0 << 22)) /* R2 */
#define CMD_SET_RBSY            (1 << 21) /* R1b */
#define CMD_SET_CCSEN           (1 << 20)
#define CMD_SET_WDAT            (1 << 19) /* 1: on data, 0: no data */
#define CMD_SET_DWEN            (1 << 18) /* 1: write, 0: read */
#define CMD_SET_CMLTE           (1 << 17) /* 1: multi block trans, 0: single */
#define CMD_SET_CMD12EN         (1 << 16) /* 1: CMD12 auto issue */
#define CMD_SET_RIDXC_INDEX     ((0 << 15) | (0 << 14)) /* index check */
#define CMD_SET_RIDXC_BITS      ((0 << 15) | (1 << 14)) /* check bits check */
#define CMD_SET_RIDXC_NO        ((1 << 15) | (0 << 14)) /* no check */
#define CMD_SET_CRC7C           ((0 << 13) | (0 << 12)) /* CRC7 check */
#define CMD_SET_CRC7C_BITS      ((0 << 13) | (1 << 12)) /* check bits check */
#define CMD_SET_CRC7C_INTERNAL  ((1 << 13) | (0 << 12)) /* internal CRC7 check */
#define CMD_SET_CRC16C          (1 << 10) /* 0: CRC16 check */
#define CMD_SET_CRCSTE          (1 << 8) /* 1: not receive CRC status */
#define CMD_SET_TBIT            (1 << 7) /* 1: transmission bit "Low" */
#define CMD_SET_OPDM            (1 << 6) /* 1: open/drain */
#define CMD_SET_CCSH            (1 << 5)
#define CMD_SET_DARS            (1 << 2) /* Dual Data Rate */
#define CMD_SET_DATW_1          ((0 << 1) | (0 << 0)) /* 1bit */
#define CMD_SET_DATW_4          ((0 << 1) | (1 << 0)) /* 4bit */
#define CMD_SET_DATW_8          ((1 << 1) | (0 << 0)) /* 8bit */

/* CE_CMD_CTRL */
#define CMD_CTRL_BREAK          (1 << 0)

/* CE_BLOCK_SET */
#define BLOCK_SIZE_MASK         0x0000ffff

/* CE_INT */
#define INT_CCSDE               (1 << 29)
#define INT_CMD12DRE            (1 << 26)
#define INT_CMD12RBE            (1 << 25)
#define INT_CMD12CRE            (1 << 24)
#define INT_DTRANE              (1 << 23)
#define INT_BUFRE               (1 << 22)
#define INT_BUFWEN              (1 << 21)
#define INT_BUFREN              (1 << 20)
#define INT_CCSRCV              (1 << 19)
#define INT_RBSYE               (1 << 17)
#define INT_CRSPE               (1 << 16)
#define INT_CMDVIO              (1 << 15)
#define INT_BUFVIO              (1 << 14)
#define INT_WDATERR             (1 << 11)
#define INT_RDATERR             (1 << 10)
#define INT_RIDXERR             (1 << 9)
#define INT_RSPERR              (1 << 8)
#define INT_CCSTO               (1 << 5)
#define INT_CRCSTO              (1 << 4)
#define INT_WDATTO              (1 << 3)
#define INT_RDATTO              (1 << 2)
#define INT_RBSYTO              (1 << 1)
#define INT_RSPTO               (1 << 0)
#define INT_ERR_STS             (INT_CMDVIO | INT_BUFVIO | INT_WDATERR |  \
                                 INT_RDATERR | INT_RIDXERR | INT_RSPERR | \
                                 INT_CCSTO | INT_CRCSTO | INT_WDATTO |    \
                                 INT_RDATTO | INT_RBSYTO | INT_RSPTO)

#define INT_ALL                 (INT_RBSYE | INT_CRSPE | INT_BUFREN |    \
                                 INT_BUFWEN | INT_CMD12DRE | INT_BUFRE | \
                                 INT_DTRANE | INT_CMD12RBE | INT_CMD12CRE)

#define INT_CCS                 (INT_CCSTO | INT_CCSRCV | INT_CCSDE)

/* CE_INT_MASK */
#define MASK_ALL                0x00000000
#define MASK_MCCSDE             (1 << 29)
#define MASK_MCMD12DRE          (1 << 26)
#define MASK_MCMD12RBE          (1 << 25)
#define MASK_MCMD12CRE          (1 << 24)
#define MASK_MDTRANE            (1 << 23)
#define MASK_MBUFRE             (1 << 22)
#define MASK_MBUFWEN            (1 << 21)
#define MASK_MBUFREN            (1 << 20)
#define MASK_MCCSRCV            (1 << 19)
#define MASK_MRBSYE             (1 << 17)
#define MASK_MCRSPE             (1 << 16)
#define MASK_MCMDVIO            (1 << 15)
#define MASK_MBUFVIO            (1 << 14)
#define MASK_MWDATERR           (1 << 11)
#define MASK_MRDATERR           (1 << 10)
#define MASK_MRIDXERR           (1 << 9)
#define MASK_MRSPERR            (1 << 8)
#define MASK_MCCSTO             (1 << 5)
#define MASK_MCRCSTO            (1 << 4)
#define MASK_MWDATTO            (1 << 3)
#define MASK_MRDATTO            (1 << 2)
#define MASK_MRBSYTO            (1 << 1)
#define MASK_MRSPTO             (1 << 0)

#define MASK_START_CMD          (MASK_MCMDVIO | MASK_MBUFVIO | MASK_MWDATERR | \
                                 MASK_MRDATERR | MASK_MRIDXERR | MASK_MRSPERR | \
                                 MASK_MCRCSTO | MASK_MWDATTO | \
                                 MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO)

#define MASK_CLEAN              (INT_ERR_STS | MASK_MRBSYE | MASK_MCRSPE |      \
                                 MASK_MBUFREN | MASK_MBUFWEN |                  \
                                 MASK_MCMD12DRE | MASK_MBUFRE | MASK_MDTRANE |  \
                                 MASK_MCMD12RBE | MASK_MCMD12CRE)

/* CE_HOST_STS1 */
#define STS1_CMDSEQ             (1 << 31)

/* CE_HOST_STS2 */
#define STS2_CRCSTE             (1 << 31)
#define STS2_CRC16E             (1 << 30)
#define STS2_AC12CRCE           (1 << 29)
#define STS2_RSPCRC7E           (1 << 28)
#define STS2_CRCSTEBE           (1 << 27)
#define STS2_RDATEBE            (1 << 26)
#define STS2_AC12REBE           (1 << 25)
#define STS2_RSPEBE             (1 << 24)
#define STS2_AC12IDXE           (1 << 23)
#define STS2_RSPIDXE            (1 << 22)
#define STS2_CCSTO              (1 << 15)
#define STS2_RDATTO             (1 << 14)
#define STS2_DATBSYTO           (1 << 13)
#define STS2_CRCSTTO            (1 << 12)
#define STS2_AC12BSYTO          (1 << 11)
#define STS2_RSPBSYTO           (1 << 10)
#define STS2_AC12RSPTO          (1 << 9)
#define STS2_RSPTO              (1 << 8)
#define STS2_CRC_ERR            (STS2_CRCSTE | STS2_CRC16E |            \
                                 STS2_AC12CRCE | STS2_RSPCRC7E | STS2_CRCSTEBE)
#define STS2_TIMEOUT_ERR        (STS2_CCSTO | STS2_RDATTO |             \
                                 STS2_DATBSYTO | STS2_CRCSTTO |         \
                                 STS2_AC12BSYTO | STS2_RSPBSYTO |       \
                                 STS2_AC12RSPTO | STS2_RSPTO)

#define CLKDEV_EMMC_DATA        52000000 /* 52 MHz */
#define CLKDEV_MMC_DATA         20000000 /* 20 MHz */
#define CLKDEV_INIT             400000   /* 400 kHz */

enum sh_mmcif_state {
        STATE_IDLE,
        STATE_REQUEST,
        STATE_IOS,
        STATE_TIMEOUT,
};

enum sh_mmcif_wait_for {
        MMCIF_WAIT_FOR_REQUEST,
        MMCIF_WAIT_FOR_CMD,
        MMCIF_WAIT_FOR_MREAD,
        MMCIF_WAIT_FOR_MWRITE,
        MMCIF_WAIT_FOR_READ,
        MMCIF_WAIT_FOR_WRITE,
        MMCIF_WAIT_FOR_READ_END,
        MMCIF_WAIT_FOR_WRITE_END,
        MMCIF_WAIT_FOR_STOP,
};

/*
 * Per-SoC differences (e.g. CCS support, CLK_CTRL2, the valid clock
 * dividers) are captured in the fields of this structure.
 */
struct sh_mmcif_host {
        struct mmc_host *mmc;
        struct mmc_request *mrq;
        struct platform_device *pd;
        struct clk *clk;
        int bus_width;
        unsigned char timing;
        bool sd_error;
        bool dying;
        long timeout;
        void __iomem *addr;
        u32 *pio_ptr;
        spinlock_t lock;                /* protect sh_mmcif_host::state */
        enum sh_mmcif_state state;
        enum sh_mmcif_wait_for wait_for;
        struct delayed_work timeout_work;
        size_t blocksize;
        int sg_idx;
        int sg_blkidx;
        bool power;
        bool ccs_enable;                /* Command Completion Signal support */
        bool clk_ctrl2_enable;
        struct mutex thread_lock;
        u32 clkdiv_map;         /* see CE_CLK_CTRL::CLKDIV */

        /* DMA support */
        struct dma_chan         *chan_rx;
        struct dma_chan         *chan_tx;
        struct completion       dma_complete;
        bool                    dma_active;
};

static const struct of_device_id sh_mmcif_of_match[] = {
        { .compatible = "renesas,sh-mmcif" },
        { }
};
MODULE_DEVICE_TABLE(of, sh_mmcif_of_match);
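
/*
 * A minimal, hypothetical device-tree node matched by the table above.
 * The unit address, reg window and bus width are purely illustrative;
 * standard MMC properties are parsed by mmc_of_parse():
 *
 *	mmcif: mmc@e6bd0000 {
 *		compatible = "renesas,sh-mmcif";
 *		reg = <0xe6bd0000 0x100>;
 *		interrupts = <...>;
 *		bus-width = <8>;
 *		non-removable;
 *	};
 */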

#define sh_mmcif_host_to_dev(host) (&host->pd->dev)

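/*
 * Read-modify-write helpers: set or clear bits in a 32-bit MMCIF register.
 * All other register accesses go through sh_mmcif_readl() and
 * sh_mmcif_writel() from <linux/mmc/sh_mmcif.h>.
 */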
static inline void sh_mmcif_bitset(struct sh_mmcif_host *host,
                                        unsigned int reg, u32 val)
{
        writel(val | readl(host->addr + reg), host->addr + reg);
}

static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host,
                                        unsigned int reg, u32 val)
{
        writel(~val & readl(host->addr + reg), host->addr + reg);
}

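/*
 * DMA completion callback: wakes up sh_mmcif_end_cmd(), which waits on
 * host->dma_complete while a DMA transfer is in flight.
 */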
static void sh_mmcif_dma_complete(void *arg)
{
        struct sh_mmcif_host *host = arg;
        struct mmc_request *mrq = host->mrq;
        struct device *dev = sh_mmcif_host_to_dev(host);

        dev_dbg(dev, "Command completed\n");

        if (WARN(!mrq || !mrq->data, "%s: NULL data in DMA completion!\n",
                 dev_name(dev)))
                return;

        complete(&host->dma_complete);
}

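/*
 * Map the scatterlist and submit a device-to-memory slave transfer for a
 * read. On any failure both DMA channels are released and the driver falls
 * back to PIO. sh_mmcif_start_dma_tx() below is the mirror image for writes.
 */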
static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
{
        struct mmc_data *data = host->mrq->data;
        struct scatterlist *sg = data->sg;
        struct dma_async_tx_descriptor *desc = NULL;
        struct dma_chan *chan = host->chan_rx;
        struct device *dev = sh_mmcif_host_to_dev(host);
        dma_cookie_t cookie = -EINVAL;
        int ret;

        ret = dma_map_sg(chan->device->dev, sg, data->sg_len,
                         DMA_FROM_DEVICE);
        if (ret > 0) {
                host->dma_active = true;
                desc = dmaengine_prep_slave_sg(chan, sg, ret,
                        DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        }

        if (desc) {
                desc->callback = sh_mmcif_dma_complete;
                desc->callback_param = host;
                cookie = dmaengine_submit(desc);
                sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN);
                dma_async_issue_pending(chan);
        }
        dev_dbg(dev, "%s(): mapped %d -> %d, cookie %d\n",
                __func__, data->sg_len, ret, cookie);

        if (!desc) {
                /* DMA failed, fall back to PIO */
                if (ret >= 0)
                        ret = -EIO;
                host->chan_rx = NULL;
                host->dma_active = false;
                dma_release_channel(chan);
                /* Free the Tx channel too */
                chan = host->chan_tx;
                if (chan) {
                        host->chan_tx = NULL;
                        dma_release_channel(chan);
                }
                dev_warn(dev,
                         "DMA failed: %d, falling back to PIO\n", ret);
                sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
        }

        dev_dbg(dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
                desc, cookie, data->sg_len);
}

static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
{
        struct mmc_data *data = host->mrq->data;
        struct scatterlist *sg = data->sg;
        struct dma_async_tx_descriptor *desc = NULL;
        struct dma_chan *chan = host->chan_tx;
        struct device *dev = sh_mmcif_host_to_dev(host);
        dma_cookie_t cookie = -EINVAL;
        int ret;

        ret = dma_map_sg(chan->device->dev, sg, data->sg_len,
                         DMA_TO_DEVICE);
        if (ret > 0) {
                host->dma_active = true;
                desc = dmaengine_prep_slave_sg(chan, sg, ret,
                        DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        }

        if (desc) {
                desc->callback = sh_mmcif_dma_complete;
                desc->callback_param = host;
                cookie = dmaengine_submit(desc);
                sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAWEN);
                dma_async_issue_pending(chan);
        }
        dev_dbg(dev, "%s(): mapped %d -> %d, cookie %d\n",
                __func__, data->sg_len, ret, cookie);

        if (!desc) {
                /* DMA failed, fall back to PIO */
                if (ret >= 0)
                        ret = -EIO;
                host->chan_tx = NULL;
                host->dma_active = false;
                dma_release_channel(chan);
                /* Free the Rx channel too */
                chan = host->chan_rx;
                if (chan) {
                        host->chan_rx = NULL;
                        dma_release_channel(chan);
                }
                dev_warn(dev,
                         "DMA failed: %d, falling back to PIO\n", ret);
                sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
        }

        dev_dbg(dev, "%s(): desc %p, cookie %d\n", __func__,
                desc, cookie);
}

static struct dma_chan *
sh_mmcif_request_dma_pdata(struct sh_mmcif_host *host, uintptr_t slave_id)
{
        dma_cap_mask_t mask;

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);
        if (slave_id <= 0)
                return NULL;

        return dma_request_channel(mask, shdma_chan_filter, (void *)slave_id);
}

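/*
 * Point the slave channel at the CE_DATA FIFO; the data register is always
 * accessed in 32-bit units.
 */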
static int sh_mmcif_dma_slave_config(struct sh_mmcif_host *host,
                                     struct dma_chan *chan,
                                     enum dma_transfer_direction direction)
{
        struct resource *res;
        struct dma_slave_config cfg = { 0, };

        res = platform_get_resource(host->pd, IORESOURCE_MEM, 0);
        cfg.direction = direction;

        if (direction == DMA_DEV_TO_MEM) {
                cfg.src_addr = res->start + MMCIF_CE_DATA;
                cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        } else {
                cfg.dst_addr = res->start + MMCIF_CE_DATA;
                cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        }

        return dmaengine_slave_config(chan, &cfg);
}

static void sh_mmcif_request_dma(struct sh_mmcif_host *host)
{
        struct device *dev = sh_mmcif_host_to_dev(host);
        host->dma_active = false;

        /* Either use DMA for both Tx and Rx, or don't use it at all */
        if (IS_ENABLED(CONFIG_SUPERH) && dev->platform_data) {
                struct sh_mmcif_plat_data *pdata = dev->platform_data;

                host->chan_tx = sh_mmcif_request_dma_pdata(host,
                                                        pdata->slave_id_tx);
                host->chan_rx = sh_mmcif_request_dma_pdata(host,
                                                        pdata->slave_id_rx);
        } else {
                host->chan_tx = dma_request_slave_channel(dev, "tx");
                host->chan_rx = dma_request_slave_channel(dev, "rx");
        }
        dev_dbg(dev, "%s: got channel TX %p RX %p\n", __func__, host->chan_tx,
                host->chan_rx);

        if (!host->chan_tx || !host->chan_rx ||
            sh_mmcif_dma_slave_config(host, host->chan_tx, DMA_MEM_TO_DEV) ||
            sh_mmcif_dma_slave_config(host, host->chan_rx, DMA_DEV_TO_MEM))
                goto error;

        return;

error:
        if (host->chan_tx)
                dma_release_channel(host->chan_tx);
        if (host->chan_rx)
                dma_release_channel(host->chan_rx);
        host->chan_tx = host->chan_rx = NULL;
}

static void sh_mmcif_release_dma(struct sh_mmcif_host *host)
{
        sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
        /* Descriptors are freed automatically */
        if (host->chan_tx) {
                struct dma_chan *chan = host->chan_tx;
                host->chan_tx = NULL;
                dma_release_channel(chan);
        }
        if (host->chan_rx) {
                struct dma_chan *chan = host->chan_rx;
                host->chan_rx = NULL;
                dma_release_channel(chan);
        }

        host->dma_active = false;
}

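/*
 * Program the card clock: either pick the best parent rate / divider
 * combination from clkdiv_map, pass the parent clock through unchanged
 * (SUP_PCLK), or fall back to a simple power-of-two divider.
 */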
static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk)
{
        struct device *dev = sh_mmcif_host_to_dev(host);
        struct sh_mmcif_plat_data *p = dev->platform_data;
        bool sup_pclk = p ? p->sup_pclk : false;
        unsigned int current_clk = clk_get_rate(host->clk);
        unsigned int clkdiv;

        sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
        sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR);

        if (!clk)
                return;

        if (host->clkdiv_map) {
                unsigned int freq, best_freq, myclk, div, diff_min, diff;
                int i;

                clkdiv = 0;
                diff_min = ~0;
                best_freq = 0;
                for (i = 31; i >= 0; i--) {
                        if (!((1 << i) & host->clkdiv_map))
                                continue;

                        /*
                         * clk = parent_freq / div
                         * -> parent_freq = clk x div
                         */

                        div = 1 << (i + 1);
                        freq = clk_round_rate(host->clk, clk * div);
                        myclk = freq / div;
                        diff = (myclk > clk) ? myclk - clk : clk - myclk;

                        if (diff <= diff_min) {
                                best_freq = freq;
                                clkdiv = i;
                                diff_min = diff;
                        }
                }

                dev_dbg(dev, "clk %u/%u (%u, 0x%x)\n",
                        (best_freq / (1 << (clkdiv + 1))), clk,
                        best_freq, clkdiv);

                clk_set_rate(host->clk, best_freq);
                clkdiv = clkdiv << 16;
        } else if (sup_pclk && clk == current_clk) {
                clkdiv = CLK_SUP_PCLK;
        } else {
                clkdiv = (fls(DIV_ROUND_UP(current_clk, clk) - 1) - 1) << 16;
        }

        sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR & clkdiv);
        sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
}

static void sh_mmcif_sync_reset(struct sh_mmcif_host *host)
{
        u32 tmp;

        tmp = 0x010f0000 & sh_mmcif_readl(host->addr, MMCIF_CE_CLK_CTRL);

        sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_ON);
        sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_OFF);
        if (host->ccs_enable)
                tmp |= SCCSTO_29;
        if (host->clk_ctrl2_enable)
                sh_mmcif_writel(host->addr, MMCIF_CE_CLK_CTRL2, 0x0F0F0000);
        sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, tmp |
                SRSPTO_256 | SRBSYTO_29 | SRWDTO_29);
        /* byte swap on */
        sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_ATYP);
}

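/*
 * Decode HOST_STS1/HOST_STS2 after an error interrupt. A still-running
 * command sequence is aborted with CMD_CTRL_BREAK followed by a soft reset;
 * CRC and timeout conditions are mapped to -EIO and -ETIMEDOUT.
 */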
static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
{
        struct device *dev = sh_mmcif_host_to_dev(host);
        u32 state1, state2;
        int ret, timeout;

        host->sd_error = false;

        state1 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1);
        state2 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS2);
        dev_dbg(dev, "ERR HOST_STS1 = %08x\n", state1);
        dev_dbg(dev, "ERR HOST_STS2 = %08x\n", state2);

        if (state1 & STS1_CMDSEQ) {
                sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK);
                sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, ~CMD_CTRL_BREAK);
                for (timeout = 10000; timeout; timeout--) {
                        if (!(sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1)
                              & STS1_CMDSEQ))
                                break;
                        mdelay(1);
                }
                if (!timeout) {
                        dev_err(dev,
                                "Forced end of command sequence timeout err\n");
                        return -EIO;
                }
                sh_mmcif_sync_reset(host);
                dev_dbg(dev, "Forced end of command sequence\n");
                return -EIO;
        }

        if (state2 & STS2_CRC_ERR) {
                dev_err(dev, " CRC error: state %u, wait %u\n",
                        host->state, host->wait_for);
                ret = -EIO;
        } else if (state2 & STS2_TIMEOUT_ERR) {
                dev_err(dev, " Timeout: state %u, wait %u\n",
                        host->state, host->wait_for);
                ret = -ETIMEDOUT;
        } else {
                dev_dbg(dev, " End/Index error: state %u, wait %u\n",
                        host->state, host->wait_for);
                ret = -EIO;
        }
        return ret;
}

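/*
 * Advance the PIO pointer by one block, crossing into the next scatterlist
 * segment when the current one is exhausted. Returns true while more blocks
 * remain to be transferred.
 */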
static bool sh_mmcif_next_block(struct sh_mmcif_host *host, u32 *p)
{
        struct mmc_data *data = host->mrq->data;

        host->sg_blkidx += host->blocksize;

        /* data->sg->length must be a multiple of host->blocksize? */
        BUG_ON(host->sg_blkidx > data->sg->length);

        if (host->sg_blkidx == data->sg->length) {
                host->sg_blkidx = 0;
                if (++host->sg_idx < data->sg_len)
                        host->pio_ptr = sg_virt(++data->sg);
        } else {
                host->pio_ptr = p;
        }

        return host->sg_idx != data->sg_len;
}

static void sh_mmcif_single_read(struct sh_mmcif_host *host,
                                 struct mmc_request *mrq)
{
        host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
                           BLOCK_SIZE_MASK) + 3;

        host->wait_for = MMCIF_WAIT_FOR_READ;

        /* buf read enable */
        sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
}

static bool sh_mmcif_read_block(struct sh_mmcif_host *host)
{
        struct device *dev = sh_mmcif_host_to_dev(host);
        struct mmc_data *data = host->mrq->data;
        u32 *p = sg_virt(data->sg);
        int i;

        if (host->sd_error) {
                data->error = sh_mmcif_error_manage(host);
                dev_dbg(dev, "%s(): %d\n", __func__, data->error);
                return false;
        }

        for (i = 0; i < host->blocksize / 4; i++)
                *p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);

        /* buffer read end */
        sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
        host->wait_for = MMCIF_WAIT_FOR_READ_END;

        return true;
}

static void sh_mmcif_multi_read(struct sh_mmcif_host *host,
                                struct mmc_request *mrq)
{
        struct mmc_data *data = mrq->data;

        if (!data->sg_len || !data->sg->length)
                return;

        host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
                BLOCK_SIZE_MASK;

        host->wait_for = MMCIF_WAIT_FOR_MREAD;
        host->sg_idx = 0;
        host->sg_blkidx = 0;
        host->pio_ptr = sg_virt(data->sg);

        sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
}

static bool sh_mmcif_mread_block(struct sh_mmcif_host *host)
{
        struct device *dev = sh_mmcif_host_to_dev(host);
        struct mmc_data *data = host->mrq->data;
        u32 *p = host->pio_ptr;
        int i;

        if (host->sd_error) {
                data->error = sh_mmcif_error_manage(host);
                dev_dbg(dev, "%s(): %d\n", __func__, data->error);
                return false;
        }

        BUG_ON(!data->sg->length);

        for (i = 0; i < host->blocksize / 4; i++)
                *p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);

        if (!sh_mmcif_next_block(host, p))
                return false;

        sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);

        return true;
}

static void sh_mmcif_single_write(struct sh_mmcif_host *host,
                                        struct mmc_request *mrq)
{
        host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
                           BLOCK_SIZE_MASK) + 3;

        host->wait_for = MMCIF_WAIT_FOR_WRITE;

        /* buf write enable */
        sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
}

static bool sh_mmcif_write_block(struct sh_mmcif_host *host)
{
        struct device *dev = sh_mmcif_host_to_dev(host);
        struct mmc_data *data = host->mrq->data;
        u32 *p = sg_virt(data->sg);
        int i;

        if (host->sd_error) {
                data->error = sh_mmcif_error_manage(host);
                dev_dbg(dev, "%s(): %d\n", __func__, data->error);
                return false;
        }

        for (i = 0; i < host->blocksize / 4; i++)
                sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);

        /* buffer write end */
        sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
        host->wait_for = MMCIF_WAIT_FOR_WRITE_END;

        return true;
}

static void sh_mmcif_multi_write(struct sh_mmcif_host *host,
                                struct mmc_request *mrq)
{
        struct mmc_data *data = mrq->data;

        if (!data->sg_len || !data->sg->length)
                return;

        host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
                BLOCK_SIZE_MASK;

        host->wait_for = MMCIF_WAIT_FOR_MWRITE;
        host->sg_idx = 0;
        host->sg_blkidx = 0;
        host->pio_ptr = sg_virt(data->sg);

        sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
}

static bool sh_mmcif_mwrite_block(struct sh_mmcif_host *host)
{
        struct device *dev = sh_mmcif_host_to_dev(host);
        struct mmc_data *data = host->mrq->data;
        u32 *p = host->pio_ptr;
        int i;

        if (host->sd_error) {
                data->error = sh_mmcif_error_manage(host);
                dev_dbg(dev, "%s(): %d\n", __func__, data->error);
                return false;
        }

        BUG_ON(!data->sg->length);

        for (i = 0; i < host->blocksize / 4; i++)
                sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);

        if (!sh_mmcif_next_block(host, p))
                return false;

        sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);

        return true;
}

static void sh_mmcif_get_response(struct sh_mmcif_host *host,
                                                struct mmc_command *cmd)
{
        if (cmd->flags & MMC_RSP_136) {
                cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP3);
                cmd->resp[1] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP2);
                cmd->resp[2] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP1);
                cmd->resp[3] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0);
        } else
                cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0);
}

static void sh_mmcif_get_cmd12response(struct sh_mmcif_host *host,
                                                struct mmc_command *cmd)
{
        cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP_CMD12);
}

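/*
 * Translate an mmc_command into a CE_CMD_SET register value: response type,
 * data direction, bus width, DDR mode, single/multi-block and index/CRC
 * check bits.
 */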
static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
                            struct mmc_request *mrq)
{
        struct device *dev = sh_mmcif_host_to_dev(host);
        struct mmc_data *data = mrq->data;
        struct mmc_command *cmd = mrq->cmd;
        u32 opc = cmd->opcode;
        u32 tmp = 0;

        /* Response Type check */
        switch (mmc_resp_type(cmd)) {
        case MMC_RSP_NONE:
                tmp |= CMD_SET_RTYP_NO;
                break;
        case MMC_RSP_R1:
        case MMC_RSP_R3:
                tmp |= CMD_SET_RTYP_6B;
                break;
        case MMC_RSP_R1B:
                tmp |= CMD_SET_RBSY | CMD_SET_RTYP_6B;
                break;
        case MMC_RSP_R2:
                tmp |= CMD_SET_RTYP_17B;
                break;
        default:
                dev_err(dev, "Unsupported response type.\n");
                break;
        }

        /* WDAT / DATW */
        if (data) {
                tmp |= CMD_SET_WDAT;
                switch (host->bus_width) {
                case MMC_BUS_WIDTH_1:
                        tmp |= CMD_SET_DATW_1;
                        break;
                case MMC_BUS_WIDTH_4:
                        tmp |= CMD_SET_DATW_4;
                        break;
                case MMC_BUS_WIDTH_8:
                        tmp |= CMD_SET_DATW_8;
                        break;
                default:
                        dev_err(dev, "Unsupported bus width.\n");
                        break;
                }
                switch (host->timing) {
                case MMC_TIMING_MMC_DDR52:
                        /*
                         * MMC core will only set this timing, if the host
                         * advertises the MMC_CAP_1_8V_DDR/MMC_CAP_1_2V_DDR
                         * capability. MMCIF implementations with this
                         * capability, e.g. sh73a0, will have to set it
                         * in their platform data.
                         */
                        tmp |= CMD_SET_DARS;
                        break;
                }
        }
        /* DWEN */
        if (opc == MMC_WRITE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK)
                tmp |= CMD_SET_DWEN;
        /* CMLTE/CMD12EN */
        if (opc == MMC_READ_MULTIPLE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK) {
                tmp |= CMD_SET_CMLTE | CMD_SET_CMD12EN;
                sh_mmcif_bitset(host, MMCIF_CE_BLOCK_SET,
                                data->blocks << 16);
        }
        /* RIDXC[1:0] check bits */
        if (opc == MMC_SEND_OP_COND || opc == MMC_ALL_SEND_CID ||
            opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
                tmp |= CMD_SET_RIDXC_BITS;
        /* RCRC7C[1:0] check bits */
        if (opc == MMC_SEND_OP_COND)
                tmp |= CMD_SET_CRC7C_BITS;
        /* RCRC7C[1:0] internal CRC7 */
        if (opc == MMC_ALL_SEND_CID ||
                opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
                tmp |= CMD_SET_CRC7C_INTERNAL;

        return (opc << 24) | tmp;
}

static int sh_mmcif_data_trans(struct sh_mmcif_host *host,
                               struct mmc_request *mrq, u32 opc)
{
        struct device *dev = sh_mmcif_host_to_dev(host);

        switch (opc) {
        case MMC_READ_MULTIPLE_BLOCK:
                sh_mmcif_multi_read(host, mrq);
                return 0;
        case MMC_WRITE_MULTIPLE_BLOCK:
                sh_mmcif_multi_write(host, mrq);
                return 0;
        case MMC_WRITE_BLOCK:
                sh_mmcif_single_write(host, mrq);
                return 0;
        case MMC_READ_SINGLE_BLOCK:
        case MMC_SEND_EXT_CSD:
                sh_mmcif_single_read(host, mrq);
                return 0;
        default:
                dev_err(dev, "Unsupported CMD%d\n", opc);
                return -EINVAL;
        }
}

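/*
 * Top half of the command stage: program block size, interrupt mask,
 * argument and command register, then arm the timeout work. Everything else
 * happens in the interrupt bottom halves.
 */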
static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
                               struct mmc_request *mrq)
{
        struct mmc_command *cmd = mrq->cmd;
        u32 opc;
        u32 mask = 0;
        unsigned long flags;

        if (cmd->flags & MMC_RSP_BUSY)
                mask = MASK_START_CMD | MASK_MRBSYE;
        else
                mask = MASK_START_CMD | MASK_MCRSPE;

        if (host->ccs_enable)
                mask |= MASK_MCCSTO;

        if (mrq->data) {
                sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET, 0);
                sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET,
                                mrq->data->blksz);
        }
        opc = sh_mmcif_set_cmd(host, mrq);

        if (host->ccs_enable)
                sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0);
        else
                sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0 | INT_CCS);
        sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, mask);
        /* set arg */
        sh_mmcif_writel(host->addr, MMCIF_CE_ARG, cmd->arg);
        /* set cmd */
        spin_lock_irqsave(&host->lock, flags);
        sh_mmcif_writel(host->addr, MMCIF_CE_CMD_SET, opc);

        host->wait_for = MMCIF_WAIT_FOR_CMD;
        schedule_delayed_work(&host->timeout_work, host->timeout);
        spin_unlock_irqrestore(&host->lock, flags);
}

static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
                              struct mmc_request *mrq)
{
        struct device *dev = sh_mmcif_host_to_dev(host);

        switch (mrq->cmd->opcode) {
        case MMC_READ_MULTIPLE_BLOCK:
                sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
                break;
        case MMC_WRITE_MULTIPLE_BLOCK:
                sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
                break;
        default:
                dev_err(dev, "unsupported stop cmd\n");
                mrq->stop->error = sh_mmcif_error_manage(host);
                return;
        }

        host->wait_for = MMCIF_WAIT_FOR_STOP;
}

static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
        struct sh_mmcif_host *host = mmc_priv(mmc);
        struct device *dev = sh_mmcif_host_to_dev(host);
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);
        if (host->state != STATE_IDLE) {
                dev_dbg(dev, "%s() rejected, state %u\n",
                        __func__, host->state);
                spin_unlock_irqrestore(&host->lock, flags);
                mrq->cmd->error = -EAGAIN;
                mmc_request_done(mmc, mrq);
                return;
        }

        host->state = STATE_REQUEST;
        spin_unlock_irqrestore(&host->lock, flags);

        host->mrq = mrq;

        sh_mmcif_start_cmd(host, mrq);
}

static void sh_mmcif_clk_setup(struct sh_mmcif_host *host)
{
        struct device *dev = sh_mmcif_host_to_dev(host);

        if (host->mmc->f_max) {
                unsigned int f_max, f_min = 0, f_min_old;

                f_max = host->mmc->f_max;
                for (f_min_old = f_max; f_min_old > 2;) {
                        f_min = clk_round_rate(host->clk, f_min_old / 2);
                        if (f_min == f_min_old)
                                break;
                        f_min_old = f_min;
                }

                /*
                 * This driver assumes the SoC is R-Car Gen2 or later,
                 * for which all dividers 1/2 .. 1/1024 are valid
                 */
                host->clkdiv_map = 0x3ff;

                host->mmc->f_max = f_max / (1 << ffs(host->clkdiv_map));
                host->mmc->f_min = f_min / (1 << fls(host->clkdiv_map));
        } else {
                unsigned int clk = clk_get_rate(host->clk);

                host->mmc->f_max = clk / 2;
                host->mmc->f_min = clk / 512;
        }

        dev_dbg(dev, "clk max/min = %d/%d\n",
                host->mmc->f_max, host->mmc->f_min);
}

static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
        struct sh_mmcif_host *host = mmc_priv(mmc);
        struct device *dev = sh_mmcif_host_to_dev(host);
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);
        if (host->state != STATE_IDLE) {
                dev_dbg(dev, "%s() rejected, state %u\n",
                        __func__, host->state);
                spin_unlock_irqrestore(&host->lock, flags);
                return;
        }

        host->state = STATE_IOS;
        spin_unlock_irqrestore(&host->lock, flags);

        switch (ios->power_mode) {
        case MMC_POWER_UP:
                if (!IS_ERR(mmc->supply.vmmc))
                        mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
                if (!host->power) {
                        clk_prepare_enable(host->clk);
                        pm_runtime_get_sync(dev);
                        sh_mmcif_sync_reset(host);
                        sh_mmcif_request_dma(host);
                        host->power = true;
                }
                break;
        case MMC_POWER_OFF:
                if (!IS_ERR(mmc->supply.vmmc))
                        mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
                if (host->power) {
                        sh_mmcif_clock_control(host, 0);
                        sh_mmcif_release_dma(host);
                        pm_runtime_put(dev);
                        clk_disable_unprepare(host->clk);
                        host->power = false;
                }
                break;
        case MMC_POWER_ON:
                sh_mmcif_clock_control(host, ios->clock);
                break;
        }

        host->timing = ios->timing;
        host->bus_width = ios->bus_width;
        host->state = STATE_IDLE;
}

static const struct mmc_host_ops sh_mmcif_ops = {
        .request        = sh_mmcif_request,
        .set_ios        = sh_mmcif_set_ios,
        .get_cd         = mmc_gpio_get_cd,
};

static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
{
        struct mmc_command *cmd = host->mrq->cmd;
        struct mmc_data *data = host->mrq->data;
        struct device *dev = sh_mmcif_host_to_dev(host);
        long time;

        if (host->sd_error) {
                switch (cmd->opcode) {
                case MMC_ALL_SEND_CID:
                case MMC_SELECT_CARD:
                case MMC_APP_CMD:
                        cmd->error = -ETIMEDOUT;
                        break;
                default:
                        cmd->error = sh_mmcif_error_manage(host);
                        break;
                }
                dev_dbg(dev, "CMD%d error %d\n",
                        cmd->opcode, cmd->error);
                host->sd_error = false;
                return false;
        }
        if (!(cmd->flags & MMC_RSP_PRESENT)) {
                cmd->error = 0;
                return false;
        }

        sh_mmcif_get_response(host, cmd);

        if (!data)
                return false;

        /*
         * Completion can be signalled from the DMA callback or from an error,
         * so it has to be reset here, before setting .dma_active
         */
        init_completion(&host->dma_complete);

        if (data->flags & MMC_DATA_READ) {
                if (host->chan_rx)
                        sh_mmcif_start_dma_rx(host);
        } else {
                if (host->chan_tx)
                        sh_mmcif_start_dma_tx(host);
        }

        if (!host->dma_active) {
                data->error = sh_mmcif_data_trans(host, host->mrq, cmd->opcode);
                return !data->error;
        }

        /* Running in the IRQ thread, can sleep */
        time = wait_for_completion_interruptible_timeout(&host->dma_complete,
                                                         host->timeout);

        if (data->flags & MMC_DATA_READ)
                dma_unmap_sg(host->chan_rx->device->dev,
                             data->sg, data->sg_len,
                             DMA_FROM_DEVICE);
        else
                dma_unmap_sg(host->chan_tx->device->dev,
                             data->sg, data->sg_len,
                             DMA_TO_DEVICE);

        if (host->sd_error) {
                dev_err(host->mmc->parent,
                        "Error IRQ while waiting for DMA completion!\n");
                /* Woken up by an error IRQ: abort DMA */
                data->error = sh_mmcif_error_manage(host);
        } else if (!time) {
                dev_err(host->mmc->parent, "DMA timeout!\n");
                data->error = -ETIMEDOUT;
        } else if (time < 0) {
                dev_err(host->mmc->parent,
                        "wait_for_completion_...() error %ld!\n", time);
                data->error = time;
        }
        sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC,
                        BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
        host->dma_active = false;

        if (data->error) {
                data->bytes_xfered = 0;
                /* Abort DMA */
                if (data->flags & MMC_DATA_READ)
                        dmaengine_terminate_all(host->chan_rx);
                else
                        dmaengine_terminate_all(host->chan_tx);
        }

        return false;
}

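/*
 * Threaded interrupt bottom half: dispatch on host->wait_for to the
 * stage-specific handler, then either re-arm the timeout for the next stage
 * or complete the request.
 */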
static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
{
        struct sh_mmcif_host *host = dev_id;
        struct mmc_request *mrq;
        struct device *dev = sh_mmcif_host_to_dev(host);
        bool wait = false;
        unsigned long flags;
        int wait_work;

        spin_lock_irqsave(&host->lock, flags);
        wait_work = host->wait_for;
        spin_unlock_irqrestore(&host->lock, flags);

        cancel_delayed_work_sync(&host->timeout_work);

        mutex_lock(&host->thread_lock);

        mrq = host->mrq;
        if (!mrq) {
                dev_dbg(dev, "IRQ thread state %u, wait %u: NULL mrq!\n",
                        host->state, host->wait_for);
                mutex_unlock(&host->thread_lock);
                return IRQ_HANDLED;
        }

        /*
         * All handlers return true if processing continues, and false if the
         * request has to be completed, successfully or not
         */
        switch (wait_work) {
        case MMCIF_WAIT_FOR_REQUEST:
                /* We're too late, the timeout has already kicked in */
                mutex_unlock(&host->thread_lock);
                return IRQ_HANDLED;
        case MMCIF_WAIT_FOR_CMD:
                /* Wait for data? */
                wait = sh_mmcif_end_cmd(host);
                break;
        case MMCIF_WAIT_FOR_MREAD:
                /* Wait for more data? */
                wait = sh_mmcif_mread_block(host);
                break;
        case MMCIF_WAIT_FOR_READ:
                /* Wait for data end? */
                wait = sh_mmcif_read_block(host);
                break;
        case MMCIF_WAIT_FOR_MWRITE:
                /* Wait for more data to write? */
                wait = sh_mmcif_mwrite_block(host);
                break;
        case MMCIF_WAIT_FOR_WRITE:
                /* Wait for data end? */
                wait = sh_mmcif_write_block(host);
                break;
        case MMCIF_WAIT_FOR_STOP:
                if (host->sd_error) {
                        mrq->stop->error = sh_mmcif_error_manage(host);
                        dev_dbg(dev, "%s(): %d\n", __func__, mrq->stop->error);
                        break;
                }
                sh_mmcif_get_cmd12response(host, mrq->stop);
                mrq->stop->error = 0;
                break;
        case MMCIF_WAIT_FOR_READ_END:
        case MMCIF_WAIT_FOR_WRITE_END:
                if (host->sd_error) {
                        mrq->data->error = sh_mmcif_error_manage(host);
                        dev_dbg(dev, "%s(): %d\n", __func__, mrq->data->error);
                }
                break;
        default:
                BUG();
        }

        if (wait) {
                schedule_delayed_work(&host->timeout_work, host->timeout);
                /* Wait for more data */
                mutex_unlock(&host->thread_lock);
                return IRQ_HANDLED;
        }

        if (host->wait_for != MMCIF_WAIT_FOR_STOP) {
                struct mmc_data *data = mrq->data;
                if (!mrq->cmd->error && data && !data->error)
                        data->bytes_xfered =
                                data->blocks * data->blksz;

                if (mrq->stop && !mrq->cmd->error && (!data || !data->error)) {
                        sh_mmcif_stop_cmd(host, mrq);
                        if (!mrq->stop->error) {
                                schedule_delayed_work(&host->timeout_work, host->timeout);
                                mutex_unlock(&host->thread_lock);
                                return IRQ_HANDLED;
                        }
                }
        }

        host->wait_for = MMCIF_WAIT_FOR_REQUEST;
        host->state = STATE_IDLE;
        host->mrq = NULL;
        mmc_request_done(host->mmc, mrq);

        mutex_unlock(&host->thread_lock);

        return IRQ_HANDLED;
}

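/*
 * Hard interrupt handler: acknowledge and mask the raised interrupt sources,
 * note error conditions in host->sd_error, and wake the IRQ thread (or, on
 * an error during DMA, complete the DMA wait instead).
 */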
static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
{
        struct sh_mmcif_host *host = dev_id;
        struct device *dev = sh_mmcif_host_to_dev(host);
        u32 state, mask;

        state = sh_mmcif_readl(host->addr, MMCIF_CE_INT);
        mask = sh_mmcif_readl(host->addr, MMCIF_CE_INT_MASK);
        if (host->ccs_enable)
                sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~(state & mask));
        else
                sh_mmcif_writel(host->addr, MMCIF_CE_INT, INT_CCS | ~(state & mask));
        sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state & MASK_CLEAN);

        if (state & ~MASK_CLEAN)
                dev_dbg(dev, "IRQ state = 0x%08x incompletely cleared\n",
                        state);

        if (state & INT_ERR_STS || state & ~INT_ALL) {
                host->sd_error = true;
                dev_dbg(dev, "int err state = 0x%08x\n", state);
        }
        if (state & ~(INT_CMD12RBE | INT_CMD12CRE)) {
                if (!host->mrq)
                        dev_dbg(dev, "NULL IRQ state = 0x%08x\n", state);
                if (!host->dma_active)
                        return IRQ_WAKE_THREAD;
                else if (host->sd_error)
                        sh_mmcif_dma_complete(host);
        } else {
                dev_dbg(dev, "Unexpected IRQ 0x%x\n", state);
        }

        return IRQ_HANDLED;
}

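/*
 * Timeout work: runs if a stage did not complete within host->timeout.
 * Fails the request with the error derived by sh_mmcif_error_manage() and
 * returns the state machine to idle.
 */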
static void sh_mmcif_timeout_work(struct work_struct *work)
{
        struct delayed_work *d = to_delayed_work(work);
        struct sh_mmcif_host *host = container_of(d, struct sh_mmcif_host, timeout_work);
        struct mmc_request *mrq = host->mrq;
        struct device *dev = sh_mmcif_host_to_dev(host);
        unsigned long flags;

        if (host->dying)
                /* Don't run after mmc_remove_host() */
                return;

        spin_lock_irqsave(&host->lock, flags);
        if (host->state == STATE_IDLE) {
                spin_unlock_irqrestore(&host->lock, flags);
                return;
        }

        dev_err(dev, "Timeout waiting for %u on CMD%u\n",
                host->wait_for, mrq->cmd->opcode);

        host->state = STATE_TIMEOUT;
        spin_unlock_irqrestore(&host->lock, flags);

        /*
         * Handle races with cancel_delayed_work(), unless
         * cancel_delayed_work_sync() is used
         */
        switch (host->wait_for) {
        case MMCIF_WAIT_FOR_CMD:
                mrq->cmd->error = sh_mmcif_error_manage(host);
                break;
        case MMCIF_WAIT_FOR_STOP:
                mrq->stop->error = sh_mmcif_error_manage(host);
                break;
        case MMCIF_WAIT_FOR_MREAD:
        case MMCIF_WAIT_FOR_MWRITE:
        case MMCIF_WAIT_FOR_READ:
        case MMCIF_WAIT_FOR_WRITE:
        case MMCIF_WAIT_FOR_READ_END:
        case MMCIF_WAIT_FOR_WRITE_END:
                mrq->data->error = sh_mmcif_error_manage(host);
                break;
        default:
                BUG();
        }

        host->state = STATE_IDLE;
        host->wait_for = MMCIF_WAIT_FOR_REQUEST;
        host->mrq = NULL;
        mmc_request_done(host->mmc, mrq);
}

static void sh_mmcif_init_ocr(struct sh_mmcif_host *host)
{
        struct device *dev = sh_mmcif_host_to_dev(host);
        struct sh_mmcif_plat_data *pd = dev->platform_data;
        struct mmc_host *mmc = host->mmc;

        mmc_regulator_get_supply(mmc);

        if (!pd)
                return;

        if (!mmc->ocr_avail)
                mmc->ocr_avail = pd->ocr;
        else if (pd->ocr)
                dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");
}

static int sh_mmcif_probe(struct platform_device *pdev)
{
        int ret = 0, irq[2];
        struct mmc_host *mmc;
        struct sh_mmcif_host *host;
        struct device *dev = &pdev->dev;
        struct sh_mmcif_plat_data *pd = dev->platform_data;
        struct resource *res;
        void __iomem *reg;
        const char *name;

        irq[0] = platform_get_irq(pdev, 0);
        irq[1] = platform_get_irq(pdev, 1);
        if (irq[0] < 0) {
                dev_err(dev, "Get irq error\n");
                return -ENXIO;
        }

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        reg = devm_ioremap_resource(dev, res);
        if (IS_ERR(reg))
                return PTR_ERR(reg);

        mmc = mmc_alloc_host(sizeof(struct sh_mmcif_host), dev);
        if (!mmc)
                return -ENOMEM;

        ret = mmc_of_parse(mmc);
        if (ret < 0)
                goto err_host;

        host            = mmc_priv(mmc);
        host->mmc       = mmc;
        host->addr      = reg;
        host->timeout   = msecs_to_jiffies(10000);
        host->ccs_enable = true;
        host->clk_ctrl2_enable = false;

        host->pd = pdev;

        spin_lock_init(&host->lock);

        mmc->ops = &sh_mmcif_ops;
        sh_mmcif_init_ocr(host);

        mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_WAIT_WHILE_BUSY;
        mmc->caps2 |= MMC_CAP2_NO_SD | MMC_CAP2_NO_SDIO;
        mmc->max_busy_timeout = 10000;

        if (pd && pd->caps)
                mmc->caps |= pd->caps;
        mmc->max_segs = 32;
        mmc->max_blk_size = 512;
        mmc->max_req_size = PAGE_SIZE * mmc->max_segs;
        mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
        mmc->max_seg_size = mmc->max_req_size;

        platform_set_drvdata(pdev, host);

        host->clk = devm_clk_get(dev, NULL);
        if (IS_ERR(host->clk)) {
                ret = PTR_ERR(host->clk);
                dev_err(dev, "cannot get clock: %d\n", ret);
                goto err_host;
        }

        ret = clk_prepare_enable(host->clk);
        if (ret < 0)
                goto err_host;

        sh_mmcif_clk_setup(host);

        pm_runtime_enable(dev);
        host->power = false;

        ret = pm_runtime_get_sync(dev);
        if (ret < 0)
                goto err_clk;

        INIT_DELAYED_WORK(&host->timeout_work, sh_mmcif_timeout_work);

        sh_mmcif_sync_reset(host);
        sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);

        name = irq[1] < 0 ? dev_name(dev) : "sh_mmc:error";
        ret = devm_request_threaded_irq(dev, irq[0], sh_mmcif_intr,
                                        sh_mmcif_irqt, 0, name, host);
        if (ret) {
                dev_err(dev, "request_irq error (%s)\n", name);
                goto err_clk;
        }
        if (irq[1] >= 0) {
                ret = devm_request_threaded_irq(dev, irq[1],
                                                sh_mmcif_intr, sh_mmcif_irqt,
                                                0, "sh_mmc:int", host);
                if (ret) {
                        dev_err(dev, "request_irq error (sh_mmc:int)\n");
                        goto err_clk;
                }
        }

        mutex_init(&host->thread_lock);

        ret = mmc_add_host(mmc);
        if (ret < 0)
                goto err_clk;

        dev_pm_qos_expose_latency_limit(dev, 100);

        dev_info(dev, "Chip version 0x%04x, clock rate %luMHz\n",
                 sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0xffff,
                 clk_get_rate(host->clk) / 1000000UL);

        pm_runtime_put(dev);
        clk_disable_unprepare(host->clk);
        return ret;

err_clk:
        clk_disable_unprepare(host->clk);
        pm_runtime_put_sync(dev);
        pm_runtime_disable(dev);
err_host:
        mmc_free_host(mmc);
        return ret;
}

static int sh_mmcif_remove(struct platform_device *pdev)
{
        struct sh_mmcif_host *host = platform_get_drvdata(pdev);

        host->dying = true;
        clk_prepare_enable(host->clk);
        pm_runtime_get_sync(&pdev->dev);

        dev_pm_qos_hide_latency_limit(&pdev->dev);

        mmc_remove_host(host->mmc);
        sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);

        /*
         * FIXME: cancel_delayed_work(_sync)() and free_irq() race with the
         * mmc_remove_host() call above. But swapping order doesn't help either
         * (a query on the linux-mmc mailing list didn't bring any replies).
         */
        cancel_delayed_work_sync(&host->timeout_work);

        clk_disable_unprepare(host->clk);
        mmc_free_host(host->mmc);
        pm_runtime_put_sync(&pdev->dev);
        pm_runtime_disable(&pdev->dev);

        return 0;
}

#ifdef CONFIG_PM_SLEEP
static int sh_mmcif_suspend(struct device *dev)
{
        struct sh_mmcif_host *host = dev_get_drvdata(dev);

        pm_runtime_get_sync(dev);
        sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
        pm_runtime_put(dev);

        return 0;
}

static int sh_mmcif_resume(struct device *dev)
{
        return 0;
}
#endif

static const struct dev_pm_ops sh_mmcif_dev_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(sh_mmcif_suspend, sh_mmcif_resume)
};

static struct platform_driver sh_mmcif_driver = {
        .probe          = sh_mmcif_probe,
        .remove         = sh_mmcif_remove,
        .driver         = {
                .name   = DRIVER_NAME,
                .pm     = &sh_mmcif_dev_pm_ops,
                .of_match_table = sh_mmcif_of_match,
        },
};

module_platform_driver(sh_mmcif_driver);

MODULE_DESCRIPTION("SuperH on-chip MMC/eMMC interface driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_AUTHOR("Yusuke Goda <yusuke.goda.sx@renesas.com>");