linux/drivers/mmc/host/dw_mmc.c
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Synopsys DesignWare Multimedia Card Interface driver
   4 *  (Based on NXP driver for lpc 31xx)
   5 *
   6 * Copyright (C) 2009 NXP Semiconductors
   7 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
   8 */
   9
  10#include <linux/blkdev.h>
  11#include <linux/clk.h>
  12#include <linux/debugfs.h>
  13#include <linux/device.h>
  14#include <linux/dma-mapping.h>
  15#include <linux/err.h>
  16#include <linux/init.h>
  17#include <linux/interrupt.h>
  18#include <linux/iopoll.h>
  19#include <linux/ioport.h>
  20#include <linux/ktime.h>
  21#include <linux/module.h>
  22#include <linux/platform_device.h>
  23#include <linux/pm_runtime.h>
  24#include <linux/prandom.h>
  25#include <linux/seq_file.h>
  26#include <linux/slab.h>
  27#include <linux/stat.h>
  28#include <linux/delay.h>
  29#include <linux/irq.h>
  30#include <linux/mmc/card.h>
  31#include <linux/mmc/host.h>
  32#include <linux/mmc/mmc.h>
  33#include <linux/mmc/sd.h>
  34#include <linux/mmc/sdio.h>
  35#include <linux/bitops.h>
  36#include <linux/regulator/consumer.h>
  37#include <linux/of.h>
  38#include <linux/of_gpio.h>
  39#include <linux/mmc/slot-gpio.h>
  40
  41#include "dw_mmc.h"
  42
  43/* Common flag combinations */
  44#define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
  45                                 SDMMC_INT_HTO | SDMMC_INT_SBE  | \
  46                                 SDMMC_INT_EBE | SDMMC_INT_HLE)
  47#define DW_MCI_CMD_ERROR_FLAGS  (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
  48                                 SDMMC_INT_RESP_ERR | SDMMC_INT_HLE)
  49#define DW_MCI_ERROR_FLAGS      (DW_MCI_DATA_ERROR_FLAGS | \
  50                                 DW_MCI_CMD_ERROR_FLAGS)
  51#define DW_MCI_SEND_STATUS      1
  52#define DW_MCI_RECV_STATUS      2
  53#define DW_MCI_DMA_THRESHOLD    16
  54
   55#define DW_MCI_FREQ_MAX 200000000       /* unit: Hz */
   56#define DW_MCI_FREQ_MIN 100000          /* unit: Hz */
  57
  58#define IDMAC_INT_CLR           (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
  59                                 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
  60                                 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
  61                                 SDMMC_IDMAC_INT_TI)
  62
  63#define DESC_RING_BUF_SZ        PAGE_SIZE
  64
  65struct idmac_desc_64addr {
  66        u32             des0;   /* Control Descriptor */
  67#define IDMAC_OWN_CLR64(x) \
  68        !((x) & cpu_to_le32(IDMAC_DES0_OWN))
  69
  70        u32             des1;   /* Reserved */
  71
   72        u32             des2;   /* Buffer sizes */
  73#define IDMAC_64ADDR_SET_BUFFER1_SIZE(d, s) \
  74        ((d)->des2 = ((d)->des2 & cpu_to_le32(0x03ffe000)) | \
  75         ((cpu_to_le32(s)) & cpu_to_le32(0x1fff)))
  76
  77        u32             des3;   /* Reserved */
  78
  79        u32             des4;   /* Lower 32-bits of Buffer Address Pointer 1*/
  80        u32             des5;   /* Upper 32-bits of Buffer Address Pointer 1*/
  81
  82        u32             des6;   /* Lower 32-bits of Next Descriptor Address */
  83        u32             des7;   /* Upper 32-bits of Next Descriptor Address */
  84};
  85
  86struct idmac_desc {
  87        __le32          des0;   /* Control Descriptor */
  88#define IDMAC_DES0_DIC  BIT(1)
  89#define IDMAC_DES0_LD   BIT(2)
  90#define IDMAC_DES0_FD   BIT(3)
  91#define IDMAC_DES0_CH   BIT(4)
  92#define IDMAC_DES0_ER   BIT(5)
  93#define IDMAC_DES0_CES  BIT(30)
  94#define IDMAC_DES0_OWN  BIT(31)
  95
  96        __le32          des1;   /* Buffer sizes */
  97#define IDMAC_SET_BUFFER1_SIZE(d, s) \
  98        ((d)->des1 = ((d)->des1 & cpu_to_le32(0x03ffe000)) | (cpu_to_le32((s) & 0x1fff)))
  99
 100        __le32          des2;   /* buffer 1 physical address */
 101
 102        __le32          des3;   /* buffer 2 physical address */
 103};
 104
 105/* Each descriptor can transfer up to 4KB of data in chained mode */
 106#define DW_MCI_DESC_DATA_LENGTH 0x1000
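
/*
 * Editor's note: an illustrative sketch (not part of the driver) of how a
 * single 32-bit chained-mode descriptor is populated.  The helper name
 * fill_one_desc32() is hypothetical; it mirrors what dw_mci_prepare_desc32()
 * below does for each descriptor: claim it for the IDMAC (OWN), mark it
 * chained (CH), suppress its per-descriptor interrupt (DIC), then program
 * the buffer size (at most DW_MCI_DESC_DATA_LENGTH) and DMA address.
 */
static inline void fill_one_desc32(struct idmac_desc *desc, u32 dma_addr,
                                   unsigned int len)
{
        desc->des0 = cpu_to_le32(IDMAC_DES0_OWN | IDMAC_DES0_DIC |
                                 IDMAC_DES0_CH);
        IDMAC_SET_BUFFER1_SIZE(desc, len);      /* len <= 0x1000 */
        desc->des2 = cpu_to_le32(dma_addr);     /* buffer 1 physical address */
        /* des3 already links to the next descriptor in the ring */
}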
 107
 108#if defined(CONFIG_DEBUG_FS)
 109static int dw_mci_req_show(struct seq_file *s, void *v)
 110{
 111        struct dw_mci_slot *slot = s->private;
 112        struct mmc_request *mrq;
 113        struct mmc_command *cmd;
 114        struct mmc_command *stop;
 115        struct mmc_data *data;
 116
 117        /* Make sure we get a consistent snapshot */
 118        spin_lock_bh(&slot->host->lock);
 119        mrq = slot->mrq;
 120
 121        if (mrq) {
 122                cmd = mrq->cmd;
 123                data = mrq->data;
 124                stop = mrq->stop;
 125
 126                if (cmd)
 127                        seq_printf(s,
 128                                   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
 129                                   cmd->opcode, cmd->arg, cmd->flags,
 130                                   cmd->resp[0], cmd->resp[1], cmd->resp[2],
  131                                   cmd->resp[3], cmd->error);
 132                if (data)
 133                        seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
 134                                   data->bytes_xfered, data->blocks,
 135                                   data->blksz, data->flags, data->error);
 136                if (stop)
 137                        seq_printf(s,
 138                                   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
 139                                   stop->opcode, stop->arg, stop->flags,
 140                                   stop->resp[0], stop->resp[1], stop->resp[2],
  141                                   stop->resp[3], stop->error);
 142        }
 143
 144        spin_unlock_bh(&slot->host->lock);
 145
 146        return 0;
 147}
 148DEFINE_SHOW_ATTRIBUTE(dw_mci_req);
 149
 150static int dw_mci_regs_show(struct seq_file *s, void *v)
 151{
 152        struct dw_mci *host = s->private;
 153
 154        pm_runtime_get_sync(host->dev);
 155
 156        seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS));
 157        seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS));
 158        seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD));
 159        seq_printf(s, "CTRL:\t0x%08x\n", mci_readl(host, CTRL));
 160        seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK));
 161        seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA));
 162
 163        pm_runtime_put_autosuspend(host->dev);
 164
 165        return 0;
 166}
 167DEFINE_SHOW_ATTRIBUTE(dw_mci_regs);
 168
 169static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
 170{
 171        struct mmc_host *mmc = slot->mmc;
 172        struct dw_mci *host = slot->host;
 173        struct dentry *root;
 174
 175        root = mmc->debugfs_root;
 176        if (!root)
 177                return;
 178
 179        debugfs_create_file("regs", S_IRUSR, root, host, &dw_mci_regs_fops);
 180        debugfs_create_file("req", S_IRUSR, root, slot, &dw_mci_req_fops);
 181        debugfs_create_u32("state", S_IRUSR, root, &host->state);
 182        debugfs_create_xul("pending_events", S_IRUSR, root,
 183                           &host->pending_events);
 184        debugfs_create_xul("completed_events", S_IRUSR, root,
 185                           &host->completed_events);
 186#ifdef CONFIG_FAULT_INJECTION
 187        fault_create_debugfs_attr("fail_data_crc", root, &host->fail_data_crc);
 188#endif
 189}
 190#endif /* defined(CONFIG_DEBUG_FS) */
 191
 192static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
 193{
 194        u32 ctrl;
 195
 196        ctrl = mci_readl(host, CTRL);
 197        ctrl |= reset;
 198        mci_writel(host, CTRL, ctrl);
 199
 200        /* wait till resets clear */
 201        if (readl_poll_timeout_atomic(host->regs + SDMMC_CTRL, ctrl,
 202                                      !(ctrl & reset),
 203                                      1, 500 * USEC_PER_MSEC)) {
 204                dev_err(host->dev,
 205                        "Timeout resetting block (ctrl reset %#x)\n",
 206                        ctrl & reset);
 207                return false;
 208        }
 209
 210        return true;
 211}
 212
 213static void dw_mci_wait_while_busy(struct dw_mci *host, u32 cmd_flags)
 214{
 215        u32 status;
 216
 217        /*
 218         * Databook says that before issuing a new data transfer command
 219         * we need to check to see if the card is busy.  Data transfer commands
 220         * all have SDMMC_CMD_PRV_DAT_WAIT set, so we'll key off that.
 221         *
 222         * ...also allow sending for SDMMC_CMD_VOLT_SWITCH where busy is
 223         * expected.
 224         */
 225        if ((cmd_flags & SDMMC_CMD_PRV_DAT_WAIT) &&
 226            !(cmd_flags & SDMMC_CMD_VOLT_SWITCH)) {
 227                if (readl_poll_timeout_atomic(host->regs + SDMMC_STATUS,
 228                                              status,
 229                                              !(status & SDMMC_STATUS_BUSY),
 230                                              10, 500 * USEC_PER_MSEC))
 231                        dev_err(host->dev, "Busy; trying anyway\n");
 232        }
 233}
 234
 235static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
 236{
 237        struct dw_mci *host = slot->host;
 238        unsigned int cmd_status = 0;
 239
 240        mci_writel(host, CMDARG, arg);
 241        wmb(); /* drain writebuffer */
 242        dw_mci_wait_while_busy(host, cmd);
 243        mci_writel(host, CMD, SDMMC_CMD_START | cmd);
 244
 245        if (readl_poll_timeout_atomic(host->regs + SDMMC_CMD, cmd_status,
 246                                      !(cmd_status & SDMMC_CMD_START),
 247                                      1, 500 * USEC_PER_MSEC))
 248                dev_err(&slot->mmc->class_dev,
 249                        "Timeout sending command (cmd %#x arg %#x status %#x)\n",
 250                        cmd, arg, cmd_status);
 251}
 252
 253static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
 254{
 255        struct dw_mci_slot *slot = mmc_priv(mmc);
 256        struct dw_mci *host = slot->host;
 257        u32 cmdr;
 258
 259        cmd->error = -EINPROGRESS;
 260        cmdr = cmd->opcode;
 261
 262        if (cmd->opcode == MMC_STOP_TRANSMISSION ||
 263            cmd->opcode == MMC_GO_IDLE_STATE ||
 264            cmd->opcode == MMC_GO_INACTIVE_STATE ||
 265            (cmd->opcode == SD_IO_RW_DIRECT &&
 266             ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
 267                cmdr |= SDMMC_CMD_STOP;
 268        else if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
 269                cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
 270
 271        if (cmd->opcode == SD_SWITCH_VOLTAGE) {
 272                u32 clk_en_a;
 273
 274                /* Special bit makes CMD11 not die */
 275                cmdr |= SDMMC_CMD_VOLT_SWITCH;
 276
 277                /* Change state to continue to handle CMD11 weirdness */
 278                WARN_ON(slot->host->state != STATE_SENDING_CMD);
 279                slot->host->state = STATE_SENDING_CMD11;
 280
 281                /*
 282                 * We need to disable low power mode (automatic clock stop)
 283                 * while doing voltage switch so we don't confuse the card,
 284                 * since stopping the clock is a specific part of the UHS
 285                 * voltage change dance.
 286                 *
 287                 * Note that low power mode (SDMMC_CLKEN_LOW_PWR) will be
 288                 * unconditionally turned back on in dw_mci_setup_bus() if it's
 289                 * ever called with a non-zero clock.  That shouldn't happen
 290                 * until the voltage change is all done.
 291                 */
 292                clk_en_a = mci_readl(host, CLKENA);
 293                clk_en_a &= ~(SDMMC_CLKEN_LOW_PWR << slot->id);
 294                mci_writel(host, CLKENA, clk_en_a);
 295                mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
 296                             SDMMC_CMD_PRV_DAT_WAIT, 0);
 297        }
 298
 299        if (cmd->flags & MMC_RSP_PRESENT) {
 300                /* We expect a response, so set this bit */
 301                cmdr |= SDMMC_CMD_RESP_EXP;
 302                if (cmd->flags & MMC_RSP_136)
 303                        cmdr |= SDMMC_CMD_RESP_LONG;
 304        }
 305
 306        if (cmd->flags & MMC_RSP_CRC)
 307                cmdr |= SDMMC_CMD_RESP_CRC;
 308
 309        if (cmd->data) {
 310                cmdr |= SDMMC_CMD_DAT_EXP;
 311                if (cmd->data->flags & MMC_DATA_WRITE)
 312                        cmdr |= SDMMC_CMD_DAT_WR;
 313        }
 314
 315        if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &slot->flags))
 316                cmdr |= SDMMC_CMD_USE_HOLD_REG;
 317
 318        return cmdr;
 319}
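
/*
 * Editor's note: a hedged walk-through of dw_mci_prepare_command() for a
 * typical single-block read (CMD17 with an R1 response, data, and the hold
 * register in use).  The function above accumulates exactly these flags:
 *
 *      cmdr = MMC_READ_SINGLE_BLOCK
 *           | SDMMC_CMD_PRV_DAT_WAIT   (data command: wait for busy first)
 *           | SDMMC_CMD_RESP_EXP       (R1 response expected)
 *           | SDMMC_CMD_RESP_CRC       (R1 responses carry a CRC)
 *           | SDMMC_CMD_DAT_EXP        (a data transfer follows)
 *           | SDMMC_CMD_USE_HOLD_REG;
 */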
 320
 321static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
 322{
 323        struct mmc_command *stop;
 324        u32 cmdr;
 325
 326        if (!cmd->data)
 327                return 0;
 328
 329        stop = &host->stop_abort;
 330        cmdr = cmd->opcode;
 331        memset(stop, 0, sizeof(struct mmc_command));
 332
 333        if (cmdr == MMC_READ_SINGLE_BLOCK ||
 334            cmdr == MMC_READ_MULTIPLE_BLOCK ||
 335            cmdr == MMC_WRITE_BLOCK ||
 336            cmdr == MMC_WRITE_MULTIPLE_BLOCK ||
 337            cmdr == MMC_SEND_TUNING_BLOCK ||
 338            cmdr == MMC_SEND_TUNING_BLOCK_HS200) {
 339                stop->opcode = MMC_STOP_TRANSMISSION;
 340                stop->arg = 0;
 341                stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
 342        } else if (cmdr == SD_IO_RW_EXTENDED) {
 343                stop->opcode = SD_IO_RW_DIRECT;
 344                stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
 345                             ((cmd->arg >> 28) & 0x7);
 346                stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
 347        } else {
 348                return 0;
 349        }
 350
 351        cmdr = stop->opcode | SDMMC_CMD_STOP |
 352                SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
 353
 354        if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &host->slot->flags))
 355                cmdr |= SDMMC_CMD_USE_HOLD_REG;
 356
 357        return cmdr;
 358}
 359
 360static inline void dw_mci_set_cto(struct dw_mci *host)
 361{
 362        unsigned int cto_clks;
 363        unsigned int cto_div;
 364        unsigned int cto_ms;
 365        unsigned long irqflags;
 366
 367        cto_clks = mci_readl(host, TMOUT) & 0xff;
 368        cto_div = (mci_readl(host, CLKDIV) & 0xff) * 2;
 369        if (cto_div == 0)
 370                cto_div = 1;
 371
 372        cto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * cto_clks * cto_div,
 373                                  host->bus_hz);
 374
  375        /* add a bit of spare time */
 376        cto_ms += 10;
 377
 378        /*
 379         * The durations we're working with are fairly short so we have to be
 380         * extra careful about synchronization here.  Specifically in hardware a
 381         * command timeout is _at most_ 5.1 ms, so that means we expect an
 382         * interrupt (either command done or timeout) to come rather quickly
 383         * after the mci_writel.  ...but just in case we have a long interrupt
 384         * latency let's add a bit of paranoia.
 385         *
 386         * In general we'll assume that at least an interrupt will be asserted
 387         * in hardware by the time the cto_timer runs.  ...and if it hasn't
 388         * been asserted in hardware by that time then we'll assume it'll never
 389         * come.
 390         */
 391        spin_lock_irqsave(&host->irq_lock, irqflags);
 392        if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
 393                mod_timer(&host->cto_timer,
 394                        jiffies + msecs_to_jiffies(cto_ms) + 1);
 395        spin_unlock_irqrestore(&host->irq_lock, irqflags);
 396}
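
/*
 * Editor's note: a worked example of the timeout arithmetic above, under
 * assumed register values (cto_clks = 255, CLKDIV = 1, bus_hz = 100 MHz):
 *
 *      cto_div = 1 * 2 = 2
 *      cto_ms  = DIV_ROUND_UP(1000 * 255 * 2, 100000000) = 1  (5.1 us, rounded up)
 *      cto_ms += 10                                           (spare time)
 *
 * so the software timer fires at jiffies + msecs_to_jiffies(11) + 1, well
 * after any plausible hardware command timeout.
 */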
 397
 398static void dw_mci_start_command(struct dw_mci *host,
 399                                 struct mmc_command *cmd, u32 cmd_flags)
 400{
 401        host->cmd = cmd;
 402        dev_vdbg(host->dev,
 403                 "start command: ARGR=0x%08x CMDR=0x%08x\n",
 404                 cmd->arg, cmd_flags);
 405
 406        mci_writel(host, CMDARG, cmd->arg);
 407        wmb(); /* drain writebuffer */
 408        dw_mci_wait_while_busy(host, cmd_flags);
 409
 410        mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
 411
  412        /* only for commands that expect a response */
 413        if (cmd_flags & SDMMC_CMD_RESP_EXP)
 414                dw_mci_set_cto(host);
 415}
 416
 417static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
 418{
 419        struct mmc_command *stop = &host->stop_abort;
 420
 421        dw_mci_start_command(host, stop, host->stop_cmdr);
 422}
 423
 424/* DMA interface functions */
 425static void dw_mci_stop_dma(struct dw_mci *host)
 426{
 427        if (host->using_dma) {
 428                host->dma_ops->stop(host);
 429                host->dma_ops->cleanup(host);
 430        }
 431
 432        /* Data transfer was stopped by the interrupt handler */
 433        set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
 434}
 435
 436static void dw_mci_dma_cleanup(struct dw_mci *host)
 437{
 438        struct mmc_data *data = host->data;
 439
 440        if (data && data->host_cookie == COOKIE_MAPPED) {
 441                dma_unmap_sg(host->dev,
 442                             data->sg,
 443                             data->sg_len,
 444                             mmc_get_dma_dir(data));
 445                data->host_cookie = COOKIE_UNMAPPED;
 446        }
 447}
 448
 449static void dw_mci_idmac_reset(struct dw_mci *host)
 450{
 451        u32 bmod = mci_readl(host, BMOD);
 452        /* Software reset of DMA */
 453        bmod |= SDMMC_IDMAC_SWRESET;
 454        mci_writel(host, BMOD, bmod);
 455}
 456
 457static void dw_mci_idmac_stop_dma(struct dw_mci *host)
 458{
 459        u32 temp;
 460
 461        /* Disable and reset the IDMAC interface */
 462        temp = mci_readl(host, CTRL);
 463        temp &= ~SDMMC_CTRL_USE_IDMAC;
 464        temp |= SDMMC_CTRL_DMA_RESET;
 465        mci_writel(host, CTRL, temp);
 466
 467        /* Stop the IDMAC running */
 468        temp = mci_readl(host, BMOD);
 469        temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
 470        temp |= SDMMC_IDMAC_SWRESET;
 471        mci_writel(host, BMOD, temp);
 472}
 473
 474static void dw_mci_dmac_complete_dma(void *arg)
 475{
 476        struct dw_mci *host = arg;
 477        struct mmc_data *data = host->data;
 478
 479        dev_vdbg(host->dev, "DMA complete\n");
 480
 481        if ((host->use_dma == TRANS_MODE_EDMAC) &&
 482            data && (data->flags & MMC_DATA_READ))
 483                /* Invalidate cache after read */
 484                dma_sync_sg_for_cpu(mmc_dev(host->slot->mmc),
 485                                    data->sg,
 486                                    data->sg_len,
 487                                    DMA_FROM_DEVICE);
 488
 489        host->dma_ops->cleanup(host);
 490
 491        /*
 492         * If the card was removed, data will be NULL. No point in trying to
 493         * send the stop command or waiting for NBUSY in this case.
 494         */
 495        if (data) {
 496                set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
 497                tasklet_schedule(&host->tasklet);
 498        }
 499}
 500
 501static int dw_mci_idmac_init(struct dw_mci *host)
 502{
 503        int i;
 504
 505        if (host->dma_64bit_address == 1) {
 506                struct idmac_desc_64addr *p;
 507                /* Number of descriptors in the ring buffer */
 508                host->ring_size =
 509                        DESC_RING_BUF_SZ / sizeof(struct idmac_desc_64addr);
 510
 511                /* Forward link the descriptor list */
 512                for (i = 0, p = host->sg_cpu; i < host->ring_size - 1;
 513                                                                i++, p++) {
 514                        p->des6 = (host->sg_dma +
 515                                        (sizeof(struct idmac_desc_64addr) *
 516                                                        (i + 1))) & 0xffffffff;
 517
 518                        p->des7 = (u64)(host->sg_dma +
 519                                        (sizeof(struct idmac_desc_64addr) *
 520                                                        (i + 1))) >> 32;
 521                        /* Initialize reserved and buffer size fields to "0" */
 522                        p->des0 = 0;
 523                        p->des1 = 0;
 524                        p->des2 = 0;
 525                        p->des3 = 0;
 526                }
 527
 528                /* Set the last descriptor as the end-of-ring descriptor */
 529                p->des6 = host->sg_dma & 0xffffffff;
 530                p->des7 = (u64)host->sg_dma >> 32;
 531                p->des0 = IDMAC_DES0_ER;
 532
 533        } else {
 534                struct idmac_desc *p;
 535                /* Number of descriptors in the ring buffer */
 536                host->ring_size =
 537                        DESC_RING_BUF_SZ / sizeof(struct idmac_desc);
 538
 539                /* Forward link the descriptor list */
 540                for (i = 0, p = host->sg_cpu;
 541                     i < host->ring_size - 1;
 542                     i++, p++) {
 543                        p->des3 = cpu_to_le32(host->sg_dma +
 544                                        (sizeof(struct idmac_desc) * (i + 1)));
 545                        p->des0 = 0;
 546                        p->des1 = 0;
 547                }
 548
 549                /* Set the last descriptor as the end-of-ring descriptor */
 550                p->des3 = cpu_to_le32(host->sg_dma);
 551                p->des0 = cpu_to_le32(IDMAC_DES0_ER);
 552        }
 553
 554        dw_mci_idmac_reset(host);
 555
 556        if (host->dma_64bit_address == 1) {
 557                /* Mask out interrupts - get Tx & Rx complete only */
 558                mci_writel(host, IDSTS64, IDMAC_INT_CLR);
 559                mci_writel(host, IDINTEN64, SDMMC_IDMAC_INT_NI |
 560                                SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);
 561
 562                /* Set the descriptor base address */
 563                mci_writel(host, DBADDRL, host->sg_dma & 0xffffffff);
 564                mci_writel(host, DBADDRU, (u64)host->sg_dma >> 32);
 565
 566        } else {
 567                /* Mask out interrupts - get Tx & Rx complete only */
 568                mci_writel(host, IDSTS, IDMAC_INT_CLR);
 569                mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI |
 570                                SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);
 571
 572                /* Set the descriptor base address */
 573                mci_writel(host, DBADDR, host->sg_dma);
 574        }
 575
 576        return 0;
 577}
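
/*
 * Editor's note: a worked sizing example, assuming 4 KiB pages (PAGE_SIZE
 * varies by architecture).  sizeof(struct idmac_desc) is 16 bytes and
 * sizeof(struct idmac_desc_64addr) is 32 bytes, so the ring holds
 * 4096 / 16 = 256 descriptors (32-bit) or 4096 / 32 = 128 (64-bit); at
 * 4 KiB per descriptor that describes roughly 1 MiB or 512 KiB per chain.
 */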
 578
 579static inline int dw_mci_prepare_desc64(struct dw_mci *host,
 580                                         struct mmc_data *data,
 581                                         unsigned int sg_len)
 582{
 583        unsigned int desc_len;
 584        struct idmac_desc_64addr *desc_first, *desc_last, *desc;
 585        u32 val;
 586        int i;
 587
 588        desc_first = desc_last = desc = host->sg_cpu;
 589
 590        for (i = 0; i < sg_len; i++) {
 591                unsigned int length = sg_dma_len(&data->sg[i]);
 592
 593                u64 mem_addr = sg_dma_address(&data->sg[i]);
 594
 595                for ( ; length ; desc++) {
 596                        desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
 597                                   length : DW_MCI_DESC_DATA_LENGTH;
 598
 599                        length -= desc_len;
 600
 601                        /*
  602                         * Wait for the previous OWN-bit-clear operation
  603                         * from the IDMAC to finish, ensuring that this
  604                         * descriptor is no longer owned by the IDMAC;
  605                         * IDMAC writes and CPU reads are asynchronous.
 606                         */
 607                        if (readl_poll_timeout_atomic(&desc->des0, val,
 608                                                !(val & IDMAC_DES0_OWN),
 609                                                10, 100 * USEC_PER_MSEC))
 610                                goto err_own_bit;
 611
 612                        /*
 613                         * Set the OWN bit and disable interrupts
 614                         * for this descriptor
 615                         */
 616                        desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
 617                                                IDMAC_DES0_CH;
 618
 619                        /* Buffer length */
 620                        IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, desc_len);
 621
 622                        /* Physical address to DMA to/from */
 623                        desc->des4 = mem_addr & 0xffffffff;
 624                        desc->des5 = mem_addr >> 32;
 625
 626                        /* Update physical address for the next desc */
 627                        mem_addr += desc_len;
 628
 629                        /* Save pointer to the last descriptor */
 630                        desc_last = desc;
 631                }
 632        }
 633
 634        /* Set first descriptor */
 635        desc_first->des0 |= IDMAC_DES0_FD;
 636
 637        /* Set last descriptor */
 638        desc_last->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
 639        desc_last->des0 |= IDMAC_DES0_LD;
 640
 641        return 0;
 642err_own_bit:
 643        /* restore the descriptor chain as it's polluted */
 644        dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n");
 645        memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
 646        dw_mci_idmac_init(host);
 647        return -EINVAL;
 648}
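
/*
 * Editor's note: an illustrative split.  A single 9 KiB scatterlist entry
 * is consumed by the inner loop above as three descriptors of
 * 4 KiB + 4 KiB + 1 KiB (each capped at DW_MCI_DESC_DATA_LENGTH), with
 * mem_addr advanced by desc_len after each descriptor is filled.
 */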
 649
 650
 651static inline int dw_mci_prepare_desc32(struct dw_mci *host,
 652                                         struct mmc_data *data,
 653                                         unsigned int sg_len)
 654{
 655        unsigned int desc_len;
 656        struct idmac_desc *desc_first, *desc_last, *desc;
 657        u32 val;
 658        int i;
 659
 660        desc_first = desc_last = desc = host->sg_cpu;
 661
 662        for (i = 0; i < sg_len; i++) {
 663                unsigned int length = sg_dma_len(&data->sg[i]);
 664
 665                u32 mem_addr = sg_dma_address(&data->sg[i]);
 666
 667                for ( ; length ; desc++) {
 668                        desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
 669                                   length : DW_MCI_DESC_DATA_LENGTH;
 670
 671                        length -= desc_len;
 672
 673                        /*
  674                         * Wait for the previous OWN-bit-clear operation
  675                         * from the IDMAC to finish, ensuring that this
  676                         * descriptor is no longer owned by the IDMAC;
  677                         * IDMAC writes and CPU reads are asynchronous.
 678                         */
 679                        if (readl_poll_timeout_atomic(&desc->des0, val,
 680                                                      IDMAC_OWN_CLR64(val),
 681                                                      10,
 682                                                      100 * USEC_PER_MSEC))
 683                                goto err_own_bit;
 684
 685                        /*
 686                         * Set the OWN bit and disable interrupts
 687                         * for this descriptor
 688                         */
 689                        desc->des0 = cpu_to_le32(IDMAC_DES0_OWN |
 690                                                 IDMAC_DES0_DIC |
 691                                                 IDMAC_DES0_CH);
 692
 693                        /* Buffer length */
 694                        IDMAC_SET_BUFFER1_SIZE(desc, desc_len);
 695
 696                        /* Physical address to DMA to/from */
 697                        desc->des2 = cpu_to_le32(mem_addr);
 698
 699                        /* Update physical address for the next desc */
 700                        mem_addr += desc_len;
 701
 702                        /* Save pointer to the last descriptor */
 703                        desc_last = desc;
 704                }
 705        }
 706
 707        /* Set first descriptor */
 708        desc_first->des0 |= cpu_to_le32(IDMAC_DES0_FD);
 709
 710        /* Set last descriptor */
 711        desc_last->des0 &= cpu_to_le32(~(IDMAC_DES0_CH |
 712                                       IDMAC_DES0_DIC));
 713        desc_last->des0 |= cpu_to_le32(IDMAC_DES0_LD);
 714
 715        return 0;
 716err_own_bit:
 717        /* restore the descriptor chain as it's polluted */
 718        dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n");
 719        memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
 720        dw_mci_idmac_init(host);
 721        return -EINVAL;
 722}
 723
 724static int dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
 725{
 726        u32 temp;
 727        int ret;
 728
 729        if (host->dma_64bit_address == 1)
 730                ret = dw_mci_prepare_desc64(host, host->data, sg_len);
 731        else
 732                ret = dw_mci_prepare_desc32(host, host->data, sg_len);
 733
 734        if (ret)
 735                goto out;
 736
 737        /* drain writebuffer */
 738        wmb();
 739
 740        /* Make sure to reset DMA in case we did PIO before this */
 741        dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);
 742        dw_mci_idmac_reset(host);
 743
 744        /* Select IDMAC interface */
 745        temp = mci_readl(host, CTRL);
 746        temp |= SDMMC_CTRL_USE_IDMAC;
 747        mci_writel(host, CTRL, temp);
 748
 749        /* drain writebuffer */
 750        wmb();
 751
 752        /* Enable the IDMAC */
 753        temp = mci_readl(host, BMOD);
 754        temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
 755        mci_writel(host, BMOD, temp);
 756
 757        /* Start it running */
 758        mci_writel(host, PLDMND, 1);
 759
 760out:
 761        return ret;
 762}
 763
 764static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
 765        .init = dw_mci_idmac_init,
 766        .start = dw_mci_idmac_start_dma,
 767        .stop = dw_mci_idmac_stop_dma,
 768        .complete = dw_mci_dmac_complete_dma,
 769        .cleanup = dw_mci_dma_cleanup,
 770};
 771
 772static void dw_mci_edmac_stop_dma(struct dw_mci *host)
 773{
 774        dmaengine_terminate_async(host->dms->ch);
 775}
 776
 777static int dw_mci_edmac_start_dma(struct dw_mci *host,
 778                                            unsigned int sg_len)
 779{
 780        struct dma_slave_config cfg;
 781        struct dma_async_tx_descriptor *desc = NULL;
 782        struct scatterlist *sgl = host->data->sg;
 783        static const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
 784        u32 sg_elems = host->data->sg_len;
 785        u32 fifoth_val;
 786        u32 fifo_offset = host->fifo_reg - host->regs;
 787        int ret = 0;
 788
 789        /* Set external dma config: burst size, burst width */
 790        memset(&cfg, 0, sizeof(cfg));
 791        cfg.dst_addr = host->phy_regs + fifo_offset;
 792        cfg.src_addr = cfg.dst_addr;
 793        cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 794        cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 795
 796        /* Match burst msize with external dma config */
 797        fifoth_val = mci_readl(host, FIFOTH);
 798        cfg.dst_maxburst = mszs[(fifoth_val >> 28) & 0x7];
 799        cfg.src_maxburst = cfg.dst_maxburst;
 800
 801        if (host->data->flags & MMC_DATA_WRITE)
 802                cfg.direction = DMA_MEM_TO_DEV;
 803        else
 804                cfg.direction = DMA_DEV_TO_MEM;
 805
 806        ret = dmaengine_slave_config(host->dms->ch, &cfg);
 807        if (ret) {
 808                dev_err(host->dev, "Failed to config edmac.\n");
 809                return -EBUSY;
 810        }
 811
 812        desc = dmaengine_prep_slave_sg(host->dms->ch, sgl,
 813                                       sg_len, cfg.direction,
 814                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 815        if (!desc) {
 816                dev_err(host->dev, "Can't prepare slave sg.\n");
 817                return -EBUSY;
 818        }
 819
 820        /* Set dw_mci_dmac_complete_dma as callback */
 821        desc->callback = dw_mci_dmac_complete_dma;
 822        desc->callback_param = (void *)host;
 823        dmaengine_submit(desc);
 824
 825        /* Flush cache before write */
 826        if (host->data->flags & MMC_DATA_WRITE)
 827                dma_sync_sg_for_device(mmc_dev(host->slot->mmc), sgl,
 828                                       sg_elems, DMA_TO_DEVICE);
 829
 830        dma_async_issue_pending(host->dms->ch);
 831
 832        return 0;
 833}
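
/*
 * Editor's note: FIFOTH bits [30:28] hold the DMA multiple-transaction
 * size as an index into mszs[] above, and the slave-config burst lengths
 * are matched to it.  A worked example under an assumed FIFOTH value:
 *
 *      fifoth_val = 0x30000000
 *      (fifoth_val >> 28) & 0x7 = 3  =>  dst/src_maxburst = mszs[3] = 16
 */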
 834
 835static int dw_mci_edmac_init(struct dw_mci *host)
 836{
 837        /* Request external dma channel */
 838        host->dms = kzalloc(sizeof(struct dw_mci_dma_slave), GFP_KERNEL);
 839        if (!host->dms)
 840                return -ENOMEM;
 841
 842        host->dms->ch = dma_request_chan(host->dev, "rx-tx");
 843        if (IS_ERR(host->dms->ch)) {
 844                int ret = PTR_ERR(host->dms->ch);
 845
 846                dev_err(host->dev, "Failed to get external DMA channel.\n");
 847                kfree(host->dms);
 848                host->dms = NULL;
 849                return ret;
 850        }
 851
 852        return 0;
 853}
 854
 855static void dw_mci_edmac_exit(struct dw_mci *host)
 856{
 857        if (host->dms) {
 858                if (host->dms->ch) {
 859                        dma_release_channel(host->dms->ch);
 860                        host->dms->ch = NULL;
 861                }
 862                kfree(host->dms);
 863                host->dms = NULL;
 864        }
 865}
 866
 867static const struct dw_mci_dma_ops dw_mci_edmac_ops = {
 868        .init = dw_mci_edmac_init,
 869        .exit = dw_mci_edmac_exit,
 870        .start = dw_mci_edmac_start_dma,
 871        .stop = dw_mci_edmac_stop_dma,
 872        .complete = dw_mci_dmac_complete_dma,
 873        .cleanup = dw_mci_dma_cleanup,
 874};
 875
 876static int dw_mci_pre_dma_transfer(struct dw_mci *host,
 877                                   struct mmc_data *data,
 878                                   int cookie)
 879{
 880        struct scatterlist *sg;
 881        unsigned int i, sg_len;
 882
 883        if (data->host_cookie == COOKIE_PRE_MAPPED)
 884                return data->sg_len;
 885
 886        /*
 887         * We don't do DMA on "complex" transfers, i.e. with
 888         * non-word-aligned buffers or lengths. Also, we don't bother
 889         * with all the DMA setup overhead for short transfers.
 890         */
 891        if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
 892                return -EINVAL;
 893
 894        if (data->blksz & 3)
 895                return -EINVAL;
 896
 897        for_each_sg(data->sg, sg, data->sg_len, i) {
 898                if (sg->offset & 3 || sg->length & 3)
 899                        return -EINVAL;
 900        }
 901
 902        sg_len = dma_map_sg(host->dev,
 903                            data->sg,
 904                            data->sg_len,
 905                            mmc_get_dma_dir(data));
 906        if (sg_len == 0)
 907                return -EINVAL;
 908
 909        data->host_cookie = cookie;
 910
 911        return sg_len;
 912}
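
/*
 * Editor's note: a hedged sketch of the rule enforced above.  The helper
 * name dw_mci_sg_is_dmable() is hypothetical; it returns true only for
 * transfers dw_mci_pre_dma_transfer() would accept: at least
 * DW_MCI_DMA_THRESHOLD bytes overall, a word-aligned block size, and every
 * sg entry word-aligned in both offset and length.
 */
static inline bool dw_mci_sg_is_dmable(struct mmc_data *data)
{
        struct scatterlist *sg;
        unsigned int i;

        if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
                return false;
        if (data->blksz & 3)
                return false;
        for_each_sg(data->sg, sg, data->sg_len, i)
                if (sg->offset & 3 || sg->length & 3)
                        return false;
        return true;
}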
 913
 914static void dw_mci_pre_req(struct mmc_host *mmc,
 915                           struct mmc_request *mrq)
 916{
 917        struct dw_mci_slot *slot = mmc_priv(mmc);
 918        struct mmc_data *data = mrq->data;
 919
 920        if (!slot->host->use_dma || !data)
 921                return;
 922
 923        /* This data might be unmapped at this time */
 924        data->host_cookie = COOKIE_UNMAPPED;
 925
 926        if (dw_mci_pre_dma_transfer(slot->host, mrq->data,
 927                                COOKIE_PRE_MAPPED) < 0)
 928                data->host_cookie = COOKIE_UNMAPPED;
 929}
 930
 931static void dw_mci_post_req(struct mmc_host *mmc,
 932                            struct mmc_request *mrq,
 933                            int err)
 934{
 935        struct dw_mci_slot *slot = mmc_priv(mmc);
 936        struct mmc_data *data = mrq->data;
 937
 938        if (!slot->host->use_dma || !data)
 939                return;
 940
 941        if (data->host_cookie != COOKIE_UNMAPPED)
 942                dma_unmap_sg(slot->host->dev,
 943                             data->sg,
 944                             data->sg_len,
 945                             mmc_get_dma_dir(data));
 946        data->host_cookie = COOKIE_UNMAPPED;
 947}
 948
 949static int dw_mci_get_cd(struct mmc_host *mmc)
 950{
 951        int present;
 952        struct dw_mci_slot *slot = mmc_priv(mmc);
 953        struct dw_mci *host = slot->host;
 954        int gpio_cd = mmc_gpio_get_cd(mmc);
 955
 956        /* Use platform get_cd function, else try onboard card detect */
 957        if (((mmc->caps & MMC_CAP_NEEDS_POLL)
 958                                || !mmc_card_is_removable(mmc))) {
 959                present = 1;
 960
 961                if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
 962                        if (mmc->caps & MMC_CAP_NEEDS_POLL) {
 963                                dev_info(&mmc->class_dev,
 964                                        "card is polling.\n");
 965                        } else {
 966                                dev_info(&mmc->class_dev,
 967                                        "card is non-removable.\n");
 968                        }
 969                        set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
 970                }
 971
 972                return present;
 973        } else if (gpio_cd >= 0)
 974                present = gpio_cd;
 975        else
 976                present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
 977                        == 0 ? 1 : 0;
 978
 979        spin_lock_bh(&host->lock);
 980        if (present && !test_and_set_bit(DW_MMC_CARD_PRESENT, &slot->flags))
 981                dev_dbg(&mmc->class_dev, "card is present\n");
 982        else if (!present &&
 983                        !test_and_clear_bit(DW_MMC_CARD_PRESENT, &slot->flags))
 984                dev_dbg(&mmc->class_dev, "card is not present\n");
 985        spin_unlock_bh(&host->lock);
 986
 987        return present;
 988}
 989
 990static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
 991{
 992        unsigned int blksz = data->blksz;
 993        static const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
 994        u32 fifo_width = 1 << host->data_shift;
 995        u32 blksz_depth = blksz / fifo_width, fifoth_val;
 996        u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
 997        int idx = ARRAY_SIZE(mszs) - 1;
 998
  999        /* PIO should skip this scenario */
1000        if (!host->use_dma)
1001                return;
1002
1003        tx_wmark = (host->fifo_depth) / 2;
1004        tx_wmark_invers = host->fifo_depth - tx_wmark;
1005
1006        /*
1007         * MSIZE is '1',
1008         * if blksz is not a multiple of the FIFO width
1009         */
1010        if (blksz % fifo_width)
1011                goto done;
1012
1013        do {
1014                if (!((blksz_depth % mszs[idx]) ||
1015                     (tx_wmark_invers % mszs[idx]))) {
1016                        msize = idx;
1017                        rx_wmark = mszs[idx] - 1;
1018                        break;
1019                }
1020        } while (--idx > 0);
 1021        /*
 1022         * If idx is '0', it won't be tried.
 1023         * Thus, the initial values are used.
 1024         */
1025done:
1026        fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
1027        mci_writel(host, FIFOTH, fifoth_val);
1028}
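
/*
 * Editor's note: a worked example under assumed values (fifo_depth = 64,
 * 32-bit FIFO width, blksz = 512):
 *
 *      blksz_depth = 512 / 4 = 128, tx_wmark = 32, tx_wmark_invers = 32
 *      idx = 4 is the largest index where both 128 % mszs[idx] == 0 and
 *      32 % mszs[idx] == 0 (mszs[4] = 32)
 *      =>  msize = 4 (bursts of 32), rx_wmark = 31, tx_wmark = 32
 */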
1029
1030static void dw_mci_ctrl_thld(struct dw_mci *host, struct mmc_data *data)
1031{
1032        unsigned int blksz = data->blksz;
1033        u32 blksz_depth, fifo_depth;
1034        u16 thld_size;
1035        u8 enable;
1036
1037        /*
1038         * CDTHRCTL doesn't exist prior to 240A (in fact that register offset is
1039         * in the FIFO region, so we really shouldn't access it).
1040         */
1041        if (host->verid < DW_MMC_240A ||
1042                (host->verid < DW_MMC_280A && data->flags & MMC_DATA_WRITE))
1043                return;
1044
1045        /*
1046         * Card write Threshold is introduced since 2.80a
1047         * It's used when HS400 mode is enabled.
1048         */
1049        if (data->flags & MMC_DATA_WRITE &&
1050                host->timing != MMC_TIMING_MMC_HS400)
1051                goto disable;
1052
1053        if (data->flags & MMC_DATA_WRITE)
1054                enable = SDMMC_CARD_WR_THR_EN;
1055        else
1056                enable = SDMMC_CARD_RD_THR_EN;
1057
1058        if (host->timing != MMC_TIMING_MMC_HS200 &&
1059            host->timing != MMC_TIMING_UHS_SDR104 &&
1060            host->timing != MMC_TIMING_MMC_HS400)
1061                goto disable;
1062
1063        blksz_depth = blksz / (1 << host->data_shift);
1064        fifo_depth = host->fifo_depth;
1065
1066        if (blksz_depth > fifo_depth)
1067                goto disable;
1068
1069        /*
1070         * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
1071         * If (blksz_depth) <  (fifo_depth >> 1), should be thld_size = blksz
1072         * Currently just choose blksz.
1073         */
1074        thld_size = blksz;
1075        mci_writel(host, CDTHRCTL, SDMMC_SET_THLD(thld_size, enable));
1076        return;
1077
1078disable:
1079        mci_writel(host, CDTHRCTL, 0);
1080}
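
/*
 * Editor's note: a worked example under assumed values (an HS200 read,
 * blksz = 512, 32-bit FIFO, fifo_depth = 256):
 *
 *      blksz_depth = 512 >> 2 = 128 <= 256, so thresholding is usable
 *      CDTHRCTL = SDMMC_SET_THLD(512, SDMMC_CARD_RD_THR_EN)
 *
 * i.e. the controller only starts a block read once a full block of FIFO
 * space is available, avoiding mid-block underruns at high clock rates.
 */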
1081
1082static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
1083{
1084        unsigned long irqflags;
1085        int sg_len;
1086        u32 temp;
1087
1088        host->using_dma = 0;
1089
1090        /* If we don't have a channel, we can't do DMA */
1091        if (!host->use_dma)
1092                return -ENODEV;
1093
1094        sg_len = dw_mci_pre_dma_transfer(host, data, COOKIE_MAPPED);
1095        if (sg_len < 0) {
1096                host->dma_ops->stop(host);
1097                return sg_len;
1098        }
1099
1100        host->using_dma = 1;
1101
1102        if (host->use_dma == TRANS_MODE_IDMAC)
1103                dev_vdbg(host->dev,
1104                         "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
1105                         (unsigned long)host->sg_cpu,
1106                         (unsigned long)host->sg_dma,
1107                         sg_len);
1108
1109        /*
1110         * Decide the MSIZE and RX/TX Watermark.
1111         * If current block size is same with previous size,
1112         * no need to update fifoth.
1113         */
1114        if (host->prev_blksz != data->blksz)
1115                dw_mci_adjust_fifoth(host, data);
1116
1117        /* Enable the DMA interface */
1118        temp = mci_readl(host, CTRL);
1119        temp |= SDMMC_CTRL_DMA_ENABLE;
1120        mci_writel(host, CTRL, temp);
1121
1122        /* Disable RX/TX IRQs, let DMA handle it */
1123        spin_lock_irqsave(&host->irq_lock, irqflags);
1124        temp = mci_readl(host, INTMASK);
1125        temp  &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
1126        mci_writel(host, INTMASK, temp);
1127        spin_unlock_irqrestore(&host->irq_lock, irqflags);
1128
1129        if (host->dma_ops->start(host, sg_len)) {
1130                host->dma_ops->stop(host);
1131                /* We can't do DMA, try PIO for this one */
1132                dev_dbg(host->dev,
1133                        "%s: fall back to PIO mode for current transfer\n",
1134                        __func__);
1135                return -ENODEV;
1136        }
1137
1138        return 0;
1139}
1140
1141static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
1142{
1143        unsigned long irqflags;
1144        int flags = SG_MITER_ATOMIC;
1145        u32 temp;
1146
1147        data->error = -EINPROGRESS;
1148
1149        WARN_ON(host->data);
1150        host->sg = NULL;
1151        host->data = data;
1152
1153        if (data->flags & MMC_DATA_READ)
1154                host->dir_status = DW_MCI_RECV_STATUS;
1155        else
1156                host->dir_status = DW_MCI_SEND_STATUS;
1157
1158        dw_mci_ctrl_thld(host, data);
1159
1160        if (dw_mci_submit_data_dma(host, data)) {
1161                if (host->data->flags & MMC_DATA_READ)
1162                        flags |= SG_MITER_TO_SG;
1163                else
1164                        flags |= SG_MITER_FROM_SG;
1165
1166                sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
1167                host->sg = data->sg;
1168                host->part_buf_start = 0;
1169                host->part_buf_count = 0;
1170
1171                mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
1172
1173                spin_lock_irqsave(&host->irq_lock, irqflags);
1174                temp = mci_readl(host, INTMASK);
1175                temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
1176                mci_writel(host, INTMASK, temp);
1177                spin_unlock_irqrestore(&host->irq_lock, irqflags);
1178
1179                temp = mci_readl(host, CTRL);
1180                temp &= ~SDMMC_CTRL_DMA_ENABLE;
1181                mci_writel(host, CTRL, temp);
1182
1183                /*
 1184                 * Use the initial fifoth_val for PIO mode. If wm_aligned
 1185                 * is set, we set the watermark to match the data size.
 1186                 * Since the next transfer may be done in DMA mode,
 1187                 * prev_blksz should be invalidated.
1188                 */
1189                if (host->wm_aligned)
1190                        dw_mci_adjust_fifoth(host, data);
1191                else
1192                        mci_writel(host, FIFOTH, host->fifoth_val);
1193                host->prev_blksz = 0;
1194        } else {
1195                /*
1196                 * Keep the current block size.
1197                 * It will be used to decide whether to update
1198                 * fifoth register next time.
1199                 */
1200                host->prev_blksz = data->blksz;
1201        }
1202}
1203
1204static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
1205{
1206        struct dw_mci *host = slot->host;
1207        unsigned int clock = slot->clock;
1208        u32 div;
1209        u32 clk_en_a;
1210        u32 sdmmc_cmd_bits = SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT;
1211
1212        /* We must continue to set bit 28 in CMD until the change is complete */
1213        if (host->state == STATE_WAITING_CMD11_DONE)
1214                sdmmc_cmd_bits |= SDMMC_CMD_VOLT_SWITCH;
1215
1216        slot->mmc->actual_clock = 0;
1217
1218        if (!clock) {
1219                mci_writel(host, CLKENA, 0);
1220                mci_send_cmd(slot, sdmmc_cmd_bits, 0);
1221        } else if (clock != host->current_speed || force_clkinit) {
1222                div = host->bus_hz / clock;
1223                if (host->bus_hz % clock && host->bus_hz > clock)
1224                        /*
1225                         * move the + 1 after the divide to prevent
1226                         * over-clocking the card.
1227                         */
1228                        div += 1;
1229
1230                div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
1231
1232                if ((clock != slot->__clk_old &&
1233                        !test_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags)) ||
1234                        force_clkinit) {
 1235                        /* Silence the verbose log if called from a PM context */
 1236                        if (!force_clkinit)
 1237                                dev_info(&slot->mmc->class_dev,
 1238                                         "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHz div = %d)\n",
1239                                         slot->id, host->bus_hz, clock,
1240                                         div ? ((host->bus_hz / div) >> 1) :
1241                                         host->bus_hz, div);
1242
1243                        /*
1244                         * If card is polling, display the message only
1245                         * one time at boot time.
1246                         */
1247                        if (slot->mmc->caps & MMC_CAP_NEEDS_POLL &&
1248                                        slot->mmc->f_min == clock)
1249                                set_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags);
1250                }
1251
1252                /* disable clock */
1253                mci_writel(host, CLKENA, 0);
1254                mci_writel(host, CLKSRC, 0);
1255
1256                /* inform CIU */
1257                mci_send_cmd(slot, sdmmc_cmd_bits, 0);
1258
1259                /* set clock to desired speed */
1260                mci_writel(host, CLKDIV, div);
1261
1262                /* inform CIU */
1263                mci_send_cmd(slot, sdmmc_cmd_bits, 0);
1264
1265                /* enable clock; only low power if no SDIO */
1266                clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
1267                if (!test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags))
1268                        clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
1269                mci_writel(host, CLKENA, clk_en_a);
1270
1271                /* inform CIU */
1272                mci_send_cmd(slot, sdmmc_cmd_bits, 0);
1273
1274                /* keep the last clock value that was requested from core */
1275                slot->__clk_old = clock;
1276                slot->mmc->actual_clock = div ? ((host->bus_hz / div) >> 1) :
1277                                          host->bus_hz;
1278        }
1279
1280        host->current_speed = clock;
1281
1282        /* Set the current slot bus width */
1283        mci_writel(host, CTYPE, (slot->ctype << slot->id));
1284}
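
/*
 * Editor's note: a worked divider example under assumed rates
 * (bus_hz = 200 MHz, requested clock = 50 MHz):
 *
 *      div = 200000000 / 50000000 = 4          (no remainder, so no +1)
 *      CLKDIV = DIV_ROUND_UP(4, 2) = 2
 *      actual_clock = (bus_hz / 2) >> 1 = 50 MHz
 *
 * A rate the divider cannot hit exactly gets div bumped by one, so the
 * card's clock is rounded down and the card is never over-clocked.
 */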
1285
1286static void __dw_mci_start_request(struct dw_mci *host,
1287                                   struct dw_mci_slot *slot,
1288                                   struct mmc_command *cmd)
1289{
1290        struct mmc_request *mrq;
1291        struct mmc_data *data;
1292        u32 cmdflags;
1293
1294        mrq = slot->mrq;
1295
1296        host->mrq = mrq;
1297
1298        host->pending_events = 0;
1299        host->completed_events = 0;
1300        host->cmd_status = 0;
1301        host->data_status = 0;
1302        host->dir_status = 0;
1303
1304        data = cmd->data;
1305        if (data) {
1306                mci_writel(host, TMOUT, 0xFFFFFFFF);
 1307                mci_writel(host, BYTCNT, data->blksz * data->blocks);
1308                mci_writel(host, BLKSIZ, data->blksz);
1309        }
1310
1311        cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
1312
1313        /* this is the first command, send the initialization clock */
1314        if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
1315                cmdflags |= SDMMC_CMD_INIT;
1316
1317        if (data) {
1318                dw_mci_submit_data(host, data);
1319                wmb(); /* drain writebuffer */
1320        }
1321
1322        dw_mci_start_command(host, cmd, cmdflags);
1323
1324        if (cmd->opcode == SD_SWITCH_VOLTAGE) {
1325                unsigned long irqflags;
1326
1327                /*
1328                 * Databook says to fail after 2ms w/ no response, but evidence
1329                 * shows that sometimes the cmd11 interrupt takes over 130ms.
1330                 * We'll set to 500ms, plus an extra jiffy just in case jiffies
1331                 * is just about to roll over.
1332                 *
1333                 * We do this whole thing under spinlock and only if the
 1334                 * command hasn't already completed (indicating that the irq
1335                 * already ran so we don't want the timeout).
1336                 */
1337                spin_lock_irqsave(&host->irq_lock, irqflags);
1338                if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
1339                        mod_timer(&host->cmd11_timer,
1340                                jiffies + msecs_to_jiffies(500) + 1);
1341                spin_unlock_irqrestore(&host->irq_lock, irqflags);
1342        }
1343
1344        host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
1345}
1346
1347static void dw_mci_start_request(struct dw_mci *host,
1348                                 struct dw_mci_slot *slot)
1349{
1350        struct mmc_request *mrq = slot->mrq;
1351        struct mmc_command *cmd;
1352
1353        cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
1354        __dw_mci_start_request(host, slot, cmd);
1355}
1356
1357/* must be called with host->lock held */
1358static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
1359                                 struct mmc_request *mrq)
1360{
1361        dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
1362                 host->state);
1363
1364        slot->mrq = mrq;
1365
1366        if (host->state == STATE_WAITING_CMD11_DONE) {
1367                dev_warn(&slot->mmc->class_dev,
1368                         "Voltage change didn't complete\n");
1369                /*
1370                 * this case isn't expected to happen, so we can
1371                 * either crash here or just try to continue on
1372                 * in the closest possible state
1373                 */
1374                host->state = STATE_IDLE;
1375        }
1376
1377        if (host->state == STATE_IDLE) {
1378                host->state = STATE_SENDING_CMD;
1379                dw_mci_start_request(host, slot);
1380        } else {
1381                list_add_tail(&slot->queue_node, &host->queue);
1382        }
1383}
1384
1385static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1386{
1387        struct dw_mci_slot *slot = mmc_priv(mmc);
1388        struct dw_mci *host = slot->host;
1389
1390        WARN_ON(slot->mrq);
1391
1392        /*
1393         * The check for card presence and queueing of the request must be
1394         * atomic, otherwise the card could be removed in between and the
1395         * request wouldn't fail until another card was inserted.
1396         */
1397
1398        if (!dw_mci_get_cd(mmc)) {
1399                mrq->cmd->error = -ENOMEDIUM;
1400                mmc_request_done(mmc, mrq);
1401                return;
1402        }
1403
1404        spin_lock_bh(&host->lock);
1405
1406        dw_mci_queue_request(host, slot, mrq);
1407
1408        spin_unlock_bh(&host->lock);
1409}
1410
1411static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1412{
1413        struct dw_mci_slot *slot = mmc_priv(mmc);
1414        const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
1415        u32 regs;
1416        int ret;
1417
1418        switch (ios->bus_width) {
1419        case MMC_BUS_WIDTH_4:
1420                slot->ctype = SDMMC_CTYPE_4BIT;
1421                break;
1422        case MMC_BUS_WIDTH_8:
1423                slot->ctype = SDMMC_CTYPE_8BIT;
1424                break;
1425        default:
1426                /* set default 1 bit mode */
1427                slot->ctype = SDMMC_CTYPE_1BIT;
1428        }
1429
1430        regs = mci_readl(slot->host, UHS_REG);
1431
1432        /* DDR mode set */
1433        if (ios->timing == MMC_TIMING_MMC_DDR52 ||
1434            ios->timing == MMC_TIMING_UHS_DDR50 ||
1435            ios->timing == MMC_TIMING_MMC_HS400)
1436                regs |= ((0x1 << slot->id) << 16);
1437        else
1438                regs &= ~((0x1 << slot->id) << 16);
1439
1440        mci_writel(slot->host, UHS_REG, regs);
1441        slot->host->timing = ios->timing;
1442
1443        /*
1444         * Use mirror of ios->clock to prevent race with mmc
1445         * core ios update when finding the minimum.
1446         */
1447        slot->clock = ios->clock;
1448
1449        if (drv_data && drv_data->set_ios)
1450                drv_data->set_ios(slot->host, ios);
1451
1452        switch (ios->power_mode) {
1453        case MMC_POWER_UP:
1454                if (!IS_ERR(mmc->supply.vmmc)) {
1455                        ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
1456                                        ios->vdd);
1457                        if (ret) {
1458                                dev_err(slot->host->dev,
1459                                        "failed to enable vmmc regulator\n");
1460                                /* return if we failed to turn on vmmc */
1461                                return;
1462                        }
1463                }
1464                set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
1465                regs = mci_readl(slot->host, PWREN);
1466                regs |= (1 << slot->id);
1467                mci_writel(slot->host, PWREN, regs);
1468                break;
1469        case MMC_POWER_ON:
1470                if (!slot->host->vqmmc_enabled) {
1471                        if (!IS_ERR(mmc->supply.vqmmc)) {
1472                                ret = regulator_enable(mmc->supply.vqmmc);
1473                                if (ret < 0)
1474                                        dev_err(slot->host->dev,
1475                                                "failed to enable vqmmc\n");
1476                                else
1477                                        slot->host->vqmmc_enabled = true;
1479                        } else {
1480                                /* Keep track so we don't reset again */
1481                                slot->host->vqmmc_enabled = true;
1482                        }
1483
1484                        /* Reset our state machine after powering on */
1485                        dw_mci_ctrl_reset(slot->host,
1486                                          SDMMC_CTRL_ALL_RESET_FLAGS);
1487                }
1488
1489                /* Adjust clock / bus width after power is up */
1490                dw_mci_setup_bus(slot, false);
1491
1492                break;
1493        case MMC_POWER_OFF:
1494                /* Turn clock off before power goes down */
1495                dw_mci_setup_bus(slot, false);
1496
1497                if (!IS_ERR(mmc->supply.vmmc))
1498                        mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
1499
1500                if (!IS_ERR(mmc->supply.vqmmc) && slot->host->vqmmc_enabled)
1501                        regulator_disable(mmc->supply.vqmmc);
1502                slot->host->vqmmc_enabled = false;
1503
1504                regs = mci_readl(slot->host, PWREN);
1505                regs &= ~(1 << slot->id);
1506                mci_writel(slot->host, PWREN, regs);
1507                break;
1508        default:
1509                break;
1510        }
1511
1512        if (slot->host->state == STATE_WAITING_CMD11_DONE && ios->clock != 0)
1513                slot->host->state = STATE_IDLE;
1514}
1515
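/*
 * UHS_REG bit layout used above, per slot: bit [slot->id] selects 1.8V
 * signalling and bit [16 + slot->id] selects DDR timing, so for slot 0
 * the DDR enable is BIT(16).
 */
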
1516static int dw_mci_card_busy(struct mmc_host *mmc)
1517{
1518        struct dw_mci_slot *slot = mmc_priv(mmc);
1519        u32 status;
1520
1521        /*
1522         * Check the busy bit which is low when DAT[3:0]
1523         * (the data lines) are 0000
1524         */
1525        status = mci_readl(slot->host, STATUS);
1526
1527        return !!(status & SDMMC_STATUS_BUSY);
1528}
1529
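/*
 * Usage sketch (hypothetical helper, not part of this driver): this is
 * roughly how a caller would poll ->card_busy() while waiting for the
 * card to release the data lines.
 */
static int __maybe_unused dw_mci_wait_card_idle(struct mmc_host *mmc,
						unsigned int timeout_ms)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);

	while (mmc->ops->card_busy(mmc)) {
		if (time_after(jiffies, deadline))
			return -ETIMEDOUT;
		usleep_range(100, 200);	/* poll politely, not in a tight loop */
	}

	return 0;
}
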
1530static int dw_mci_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
1531{
1532        struct dw_mci_slot *slot = mmc_priv(mmc);
1533        struct dw_mci *host = slot->host;
1534        const struct dw_mci_drv_data *drv_data = host->drv_data;
1535        u32 uhs;
1536        u32 v18 = SDMMC_UHS_18V << slot->id;
1537        int ret;
1538
1539        if (drv_data && drv_data->switch_voltage)
1540                return drv_data->switch_voltage(mmc, ios);
1541
1542        /*
1543         * Program the voltage.  Note that some instances of dw_mmc may use
1544         * the UHS_REG for this.  For other instances (like exynos) the UHS_REG
1545         * does no harm but you need to set the regulator directly.  Try both.
1546         */
1547        uhs = mci_readl(host, UHS_REG);
1548        if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330)
1549                uhs &= ~v18;
1550        else
1551                uhs |= v18;
1552
1553        if (!IS_ERR(mmc->supply.vqmmc)) {
1554                ret = mmc_regulator_set_vqmmc(mmc, ios);
1555                if (ret < 0) {
1556                        dev_dbg(&mmc->class_dev,
1557                                         "Regulator set error %d - %s V\n",
1558                                         ret, uhs & v18 ? "1.8" : "3.3");
1559                        return ret;
1560                }
1561        }
1562        mci_writel(host, UHS_REG, uhs);
1563
1564        return 0;
1565}
1566
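/*
 * Sketch of a platform override (names are illustrative): a variant that
 * needs extra PHY or pad reprogramming can take over the whole sequence
 * through drv_data->switch_voltage instead of the generic path above.
 */
static int __maybe_unused my_variant_switch_voltage(struct mmc_host *mmc,
						    struct mmc_ios *ios)
{
	int ret;

	/* let the regulator framework do the actual supply change */
	ret = mmc_regulator_set_vqmmc(mmc, ios);
	if (ret < 0)
		return ret;

	/* ... variant-specific IO pad / PHY setup would go here ... */
	return 0;
}
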
1567static int dw_mci_get_ro(struct mmc_host *mmc)
1568{
1569        int read_only;
1570        struct dw_mci_slot *slot = mmc_priv(mmc);
1571        int gpio_ro = mmc_gpio_get_ro(mmc);
1572
1573        /* Use the platform get_ro function, else try the on-board write-protect bit */
1574        if (gpio_ro >= 0)
1575                read_only = gpio_ro;
1576        else
1577                read_only =
1578                        mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
1579
1580        dev_dbg(&mmc->class_dev, "card is %s\n",
1581                read_only ? "read-only" : "read-write");
1582
1583        return read_only;
1584}
1585
1586static void dw_mci_hw_reset(struct mmc_host *mmc)
1587{
1588        struct dw_mci_slot *slot = mmc_priv(mmc);
1589        struct dw_mci *host = slot->host;
1590        int reset;
1591
1592        if (host->use_dma == TRANS_MODE_IDMAC)
1593                dw_mci_idmac_reset(host);
1594
1595        if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET |
1596                                     SDMMC_CTRL_FIFO_RESET))
1597                return;
1598
1599        /*
1600         * According to eMMC spec, card reset procedure:
1601         * tRstW >= 1us:   RST_n pulse width
1602         * tRSCA >= 200us: RST_n to Command time
1603         * tRSTH >= 1us:   RST_n high period
1604         */
1605        reset = mci_readl(host, RST_N);
1606        reset &= ~(SDMMC_RST_HWACTIVE << slot->id);
1607        mci_writel(host, RST_N, reset);
1608        usleep_range(1, 2);
1609        reset |= SDMMC_RST_HWACTIVE << slot->id;
1610        mci_writel(host, RST_N, reset);
1611        usleep_range(200, 300);
1612}
1613
1614static void dw_mci_init_card(struct mmc_host *mmc, struct mmc_card *card)
1615{
1616        struct dw_mci_slot *slot = mmc_priv(mmc);
1617        struct dw_mci *host = slot->host;
1618
1619        /*
1620         * Low power mode will stop the card clock when idle.  According to the
1621         * description of the CLKENA register we should disable low power mode
1622         * for SDIO cards if we need SDIO interrupts to work.
1623         */
1624        if (mmc->caps & MMC_CAP_SDIO_IRQ) {
1625                const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
1626                u32 clk_en_a_old;
1627                u32 clk_en_a;
1628
1629                clk_en_a_old = mci_readl(host, CLKENA);
1630
1631                if (card->type == MMC_TYPE_SDIO ||
1632                    card->type == MMC_TYPE_SD_COMBO) {
1633                        set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
1634                        clk_en_a = clk_en_a_old & ~clken_low_pwr;
1635                } else {
1636                        clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
1637                        clk_en_a = clk_en_a_old | clken_low_pwr;
1638                }
1639
1640                if (clk_en_a != clk_en_a_old) {
1641                        mci_writel(host, CLKENA, clk_en_a);
1642                        mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
1643                                     SDMMC_CMD_PRV_DAT_WAIT, 0);
1644                }
1645        }
1646}
1647
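/*
 * A CLKENA change only takes effect once the CIU has latched it, which is
 * why the register write above is followed by a clock-update command
 * (SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT).
 */
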
1648static void __dw_mci_enable_sdio_irq(struct dw_mci_slot *slot, int enb)
1649{
1650        struct dw_mci *host = slot->host;
1651        unsigned long irqflags;
1652        u32 int_mask;
1653
1654        spin_lock_irqsave(&host->irq_lock, irqflags);
1655
1656        /* Enable/disable Slot Specific SDIO interrupt */
1657        int_mask = mci_readl(host, INTMASK);
1658        if (enb)
1659                int_mask |= SDMMC_INT_SDIO(slot->sdio_id);
1660        else
1661                int_mask &= ~SDMMC_INT_SDIO(slot->sdio_id);
1662        mci_writel(host, INTMASK, int_mask);
1663
1664        spin_unlock_irqrestore(&host->irq_lock, irqflags);
1665}
1666
1667static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
1668{
1669        struct dw_mci_slot *slot = mmc_priv(mmc);
1670        struct dw_mci *host = slot->host;
1671
1672        __dw_mci_enable_sdio_irq(slot, enb);
1673
1674        /* Avoid runtime suspending the device when SDIO IRQ is enabled */
1675        if (enb)
1676                pm_runtime_get_noresume(host->dev);
1677        else
1678                pm_runtime_put_noidle(host->dev);
1679}
1680
1681static void dw_mci_ack_sdio_irq(struct mmc_host *mmc)
1682{
1683        struct dw_mci_slot *slot = mmc_priv(mmc);
1684
1685        __dw_mci_enable_sdio_irq(slot, 1);
1686}
1687
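/*
 * SDIO interrupt flow with MMC_CAP2_SDIO_IRQ_NOTHREAD: the interrupt
 * handler masks the slot's SDIO interrupt and calls sdio_signal_irq();
 * after the core has run the function handlers it calls ->ack_sdio_irq(),
 * which lands here and unmasks the interrupt again.
 */
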
1688static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1689{
1690        struct dw_mci_slot *slot = mmc_priv(mmc);
1691        struct dw_mci *host = slot->host;
1692        const struct dw_mci_drv_data *drv_data = host->drv_data;
1693        int err = -EINVAL;
1694
1695        if (drv_data && drv_data->execute_tuning)
1696                err = drv_data->execute_tuning(slot, opcode);
1697        return err;
1698}
1699
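/*
 * Tuning is delegated entirely to the variant driver; without a
 * drv_data->execute_tuning hook the request simply fails with -EINVAL.
 */
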
1700static int dw_mci_prepare_hs400_tuning(struct mmc_host *mmc,
1701                                       struct mmc_ios *ios)
1702{
1703        struct dw_mci_slot *slot = mmc_priv(mmc);
1704        struct dw_mci *host = slot->host;
1705        const struct dw_mci_drv_data *drv_data = host->drv_data;
1706
1707        if (drv_data && drv_data->prepare_hs400_tuning)
1708                return drv_data->prepare_hs400_tuning(host, ios);
1709
1710        return 0;
1711}
1712
1713static bool dw_mci_reset(struct dw_mci *host)
1714{
1715        u32 flags = SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET;
1716        bool ret = false;
1717        u32 status = 0;
1718
1719        /*
1720         * Resetting generates a block interrupt, hence setting
1721         * the scatter-gather pointer to NULL.
1722         */
1723        if (host->sg) {
1724                sg_miter_stop(&host->sg_miter);
1725                host->sg = NULL;
1726        }
1727
1728        if (host->use_dma)
1729                flags |= SDMMC_CTRL_DMA_RESET;
1730
1731        if (dw_mci_ctrl_reset(host, flags)) {
1732                /*
1733                 * In all cases we clear the RAWINTS
1734                 * register to clear any interrupts.
1735                 */
1736                mci_writel(host, RINTSTS, 0xFFFFFFFF);
1737
1738                if (!host->use_dma) {
1739                        ret = true;
1740                        goto ciu_out;
1741                }
1742
1743                /* Wait for dma_req to be cleared */
1744                if (readl_poll_timeout_atomic(host->regs + SDMMC_STATUS,
1745                                              status,
1746                                              !(status & SDMMC_STATUS_DMA_REQ),
1747                                              1, 500 * USEC_PER_MSEC)) {
1748                        dev_err(host->dev,
1749                                "%s: Timeout waiting for dma_req to be cleared\n",
1750                                __func__);
1751                        goto ciu_out;
1752                }
1753
1754                /* when using DMA next we reset the fifo again */
1755                if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET))
1756                        goto ciu_out;
1757        } else {
1758                /* if the controller reset bit did clear, then set clock regs */
1759                if (!(mci_readl(host, CTRL) & SDMMC_CTRL_RESET)) {
1760                        dev_err(host->dev,
1761                                "%s: fifo/dma reset bits didn't clear but ciu was reset, doing clock update\n",
1762                                __func__);
1763                        goto ciu_out;
1764                }
1765        }
1766
1767        if (host->use_dma == TRANS_MODE_IDMAC)
1768                /* It is also required that we reinit idmac */
1769                dw_mci_idmac_init(host);
1770
1771        ret = true;
1772
1773ciu_out:
1774        /* After a CTRL reset we need to have CIU set clock registers  */
1775        mci_send_cmd(host->slot, SDMMC_CMD_UPD_CLK, 0);
1776
1777        return ret;
1778}
1779
1780static const struct mmc_host_ops dw_mci_ops = {
1781        .request                = dw_mci_request,
1782        .pre_req                = dw_mci_pre_req,
1783        .post_req               = dw_mci_post_req,
1784        .set_ios                = dw_mci_set_ios,
1785        .get_ro                 = dw_mci_get_ro,
1786        .get_cd                 = dw_mci_get_cd,
1787        .hw_reset               = dw_mci_hw_reset,
1788        .enable_sdio_irq        = dw_mci_enable_sdio_irq,
1789        .ack_sdio_irq           = dw_mci_ack_sdio_irq,
1790        .execute_tuning         = dw_mci_execute_tuning,
1791        .card_busy              = dw_mci_card_busy,
1792        .start_signal_voltage_switch = dw_mci_switch_voltage,
1793        .init_card              = dw_mci_init_card,
1794        .prepare_hs400_tuning   = dw_mci_prepare_hs400_tuning,
1795};
1796
1797#ifdef CONFIG_FAULT_INJECTION
1798static enum hrtimer_restart dw_mci_fault_timer(struct hrtimer *t)
1799{
1800        struct dw_mci *host = container_of(t, struct dw_mci, fault_timer);
1801        unsigned long flags;
1802
1803        spin_lock_irqsave(&host->irq_lock, flags);
1804
1805        /*
1806         * Only inject an error if we haven't already got an error or data over
1807         * interrupt.
1808         */
1809        if (!host->data_status) {
1810                host->data_status = SDMMC_INT_DCRC;
1811                set_bit(EVENT_DATA_ERROR, &host->pending_events);
1812                tasklet_schedule(&host->tasklet);
1813        }
1814
1815        spin_unlock_irqrestore(&host->irq_lock, flags);
1816
1817        return HRTIMER_NORESTART;
1818}
1819
1820static void dw_mci_start_fault_timer(struct dw_mci *host)
1821{
1822        struct mmc_data *data = host->data;
1823
1824        if (!data || data->blocks <= 1)
1825                return;
1826
1827        if (!should_fail(&host->fail_data_crc, 1))
1828                return;
1829
1830        /*
1831         * Try to inject the error at random points during the data transfer.
1832         */
1833        hrtimer_start(&host->fault_timer,
1834                      ms_to_ktime(prandom_u32() % 25),
1835                      HRTIMER_MODE_REL);
1836}
1837
1838static void dw_mci_stop_fault_timer(struct dw_mci *host)
1839{
1840        hrtimer_cancel(&host->fault_timer);
1841}
1842
1843static void dw_mci_init_fault(struct dw_mci *host)
1844{
1845        host->fail_data_crc = (struct fault_attr) FAULT_ATTR_INITIALIZER;
1846
1847        hrtimer_init(&host->fault_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1848        host->fault_timer.function = dw_mci_fault_timer;
1849}
1850#else
1851static void dw_mci_init_fault(struct dw_mci *host)
1852{
1853}
1854
1855static void dw_mci_start_fault_timer(struct dw_mci *host)
1856{
1857}
1858
1859static void dw_mci_stop_fault_timer(struct dw_mci *host)
1860{
1861}
1862#endif
1863
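/*
 * With CONFIG_FAULT_INJECTION the timer above fires at a random point
 * within the first 25 ms of a multi-block transfer and fakes a data CRC
 * error (SDMMC_INT_DCRC), exercising the error paths of the state
 * machine tasklet below.
 */
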
1864static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
1865        __releases(&host->lock)
1866        __acquires(&host->lock)
1867{
1868        struct dw_mci_slot *slot;
1869        struct mmc_host *prev_mmc = host->slot->mmc;
1870
1871        WARN_ON(host->cmd || host->data);
1872
1873        host->slot->mrq = NULL;
1874        host->mrq = NULL;
1875        if (!list_empty(&host->queue)) {
1876                slot = list_entry(host->queue.next,
1877                                  struct dw_mci_slot, queue_node);
1878                list_del(&slot->queue_node);
1879                dev_vdbg(host->dev, "list not empty: %s is next\n",
1880                         mmc_hostname(slot->mmc));
1881                host->state = STATE_SENDING_CMD;
1882                dw_mci_start_request(host, slot);
1883        } else {
1884                dev_vdbg(host->dev, "list empty\n");
1885
1886                if (host->state == STATE_SENDING_CMD11)
1887                        host->state = STATE_WAITING_CMD11_DONE;
1888                else
1889                        host->state = STATE_IDLE;
1890        }
1891
1892        spin_unlock(&host->lock);
1893        mmc_request_done(prev_mmc, mrq);
1894        spin_lock(&host->lock);
1895}
1896
1897static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
1898{
1899        u32 status = host->cmd_status;
1900
1901        host->cmd_status = 0;
1902
1903        /* Read the response from the card (up to 16 bytes) */
1904        if (cmd->flags & MMC_RSP_PRESENT) {
1905                if (cmd->flags & MMC_RSP_136) {
1906                        cmd->resp[3] = mci_readl(host, RESP0);
1907                        cmd->resp[2] = mci_readl(host, RESP1);
1908                        cmd->resp[1] = mci_readl(host, RESP2);
1909                        cmd->resp[0] = mci_readl(host, RESP3);
1910                } else {
1911                        cmd->resp[0] = mci_readl(host, RESP0);
1912                        cmd->resp[1] = 0;
1913                        cmd->resp[2] = 0;
1914                        cmd->resp[3] = 0;
1915                }
1916        }
1917
1918        if (status & SDMMC_INT_RTO)
1919                cmd->error = -ETIMEDOUT;
1920        else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
1921                cmd->error = -EILSEQ;
1922        else if (status & SDMMC_INT_RESP_ERR)
1923                cmd->error = -EIO;
1924        else
1925                cmd->error = 0;
1926
1927        return cmd->error;
1928}
1929
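/*
 * Note the reversed register order for long (R2) responses above: RESP3
 * holds the most significant word, so resp[0], bits [127:96] of the
 * response, is read from RESP3.
 */
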
1930static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
1931{
1932        u32 status = host->data_status;
1933
1934        if (status & DW_MCI_DATA_ERROR_FLAGS) {
1935                if (status & SDMMC_INT_DRTO) {
1936                        data->error = -ETIMEDOUT;
1937                } else if (status & SDMMC_INT_DCRC) {
1938                        data->error = -EILSEQ;
1939                } else if (status & SDMMC_INT_EBE) {
1940                        if (host->dir_status ==
1941                                DW_MCI_SEND_STATUS) {
1942                                /*
1943                                 * No data CRC status was returned.
1944                                 * The number of bytes transferred
1945                                 * will be exaggerated in PIO mode.
1946                                 */
1947                                data->bytes_xfered = 0;
1948                                data->error = -ETIMEDOUT;
1949                        } else if (host->dir_status ==
1950                                        DW_MCI_RECV_STATUS) {
1951                                data->error = -EILSEQ;
1952                        }
1953                } else {
1954                        /* covers the remaining errors, SDMMC_INT_SBE included */
1955                        data->error = -EILSEQ;
1956                }
1957
1958                dev_dbg(host->dev, "data error, status 0x%08x\n", status);
1959
1960                /*
1961                 * After an error, there may be data lingering
1962                 * in the FIFO
1963                 */
1964                dw_mci_reset(host);
1965        } else {
1966                data->bytes_xfered = data->blocks * data->blksz;
1967                data->error = 0;
1968        }
1969
1970        return data->error;
1971}
1972
1973static void dw_mci_set_drto(struct dw_mci *host)
1974{
1975        unsigned int drto_clks;
1976        unsigned int drto_div;
1977        unsigned int drto_ms;
1978        unsigned long irqflags;
1979
1980        drto_clks = mci_readl(host, TMOUT) >> 8;
1981        drto_div = (mci_readl(host, CLKDIV) & 0xff) * 2;
1982        if (drto_div == 0)
1983                drto_div = 1;
1984
1985        drto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * drto_clks * drto_div,
1986                                   host->bus_hz);
1987
1988        /* add a bit of spare time */
1989        drto_ms += 10;
1990
1991        spin_lock_irqsave(&host->irq_lock, irqflags);
1992        if (!test_bit(EVENT_DATA_COMPLETE, &host->pending_events))
1993                mod_timer(&host->dto_timer,
1994                          jiffies + msecs_to_jiffies(drto_ms));
1995        spin_unlock_irqrestore(&host->irq_lock, irqflags);
1996}
1997
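/*
 * Worked example for the arithmetic above: with TMOUT's data-timeout
 * field at its maximum of 0xFFFFFF clocks, CLKDIV = 0 (treated as a
 * divider of 1) and bus_hz = 50 MHz, drto_ms is
 * DIV_ROUND_UP(1000 * 16777215 * 1, 50000000) = 336 ms, plus the 10 ms
 * of slack, before the software data-read timeout fires.
 */
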
1998static bool dw_mci_clear_pending_cmd_complete(struct dw_mci *host)
1999{
2000        if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
2001                return false;
2002
2003        /*
2004         * Really be certain that the timer has stopped.  This is a bit of
2005         * paranoia and could only really happen if we had really bad
2006         * interrupt latency and the interrupt routine and timeout were
2007         * running concurrently so that the del_timer() in the interrupt
2008         * handler couldn't run.
2009         */
2010        WARN_ON(del_timer_sync(&host->cto_timer));
2011        clear_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2012
2013        return true;
2014}
2015
2016static bool dw_mci_clear_pending_data_complete(struct dw_mci *host)
2017{
2018        if (!test_bit(EVENT_DATA_COMPLETE, &host->pending_events))
2019                return false;
2020
2021        /* Extra paranoia just like dw_mci_clear_pending_cmd_complete() */
2022        WARN_ON(del_timer_sync(&host->dto_timer));
2023        clear_bit(EVENT_DATA_COMPLETE, &host->pending_events);
2024
2025        return true;
2026}
2027
2028static void dw_mci_tasklet_func(struct tasklet_struct *t)
2029{
2030        struct dw_mci *host = from_tasklet(host, t, tasklet);
2031        struct mmc_data *data;
2032        struct mmc_command *cmd;
2033        struct mmc_request *mrq;
2034        enum dw_mci_state state;
2035        enum dw_mci_state prev_state;
2036        unsigned int err;
2037
2038        spin_lock(&host->lock);
2039
2040        state = host->state;
2041        data = host->data;
2042        mrq = host->mrq;
2043
2044        do {
2045                prev_state = state;
2046
2047                switch (state) {
2048                case STATE_IDLE:
2049                case STATE_WAITING_CMD11_DONE:
2050                        break;
2051
2052                case STATE_SENDING_CMD11:
2053                case STATE_SENDING_CMD:
2054                        if (!dw_mci_clear_pending_cmd_complete(host))
2055                                break;
2056
2057                        cmd = host->cmd;
2058                        host->cmd = NULL;
2059                        set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
2060                        err = dw_mci_command_complete(host, cmd);
2061                        if (cmd == mrq->sbc && !err) {
2062                                __dw_mci_start_request(host, host->slot,
2063                                                       mrq->cmd);
2064                                goto unlock;
2065                        }
2066
2067                        if (cmd->data && err) {
2068                                /*
2069                                 * During UHS tuning sequence, sending the stop
2070                                 * command after the response CRC error would
2071                                 * throw the system into a confused state
2072                                 * causing all future tuning phases to report
2073                                 * failure.
2074                                 *
2075                                 * In such a case the controller will move into a data
2076                                 * transfer state after a response error or
2077                                 * response CRC error. Let's let that finish
2078                                 * before trying to send a stop, so we'll go to
2079                                 * STATE_SENDING_DATA.
2080                                 *
2081                                 * Although letting the data transfer take place
2082                                 * will waste a bit of time (we already know
2083                                 * the command was bad), it can't cause any
2084                                 * errors since it's possible it would have
2085                                 * taken place anyway if this tasklet got
2086                                 * delayed. Allowing the transfer to take place
2087                                 * avoids races and keeps things simple.
2088                                 */
2089                                if (err != -ETIMEDOUT) {
2090                                        state = STATE_SENDING_DATA;
2091                                        continue;
2092                                }
2093
2094                                send_stop_abort(host, data);
2095                                dw_mci_stop_dma(host);
2096                                state = STATE_SENDING_STOP;
2097                                break;
2098                        }
2099
2100                        if (!cmd->data || err) {
2101                                dw_mci_request_end(host, mrq);
2102                                goto unlock;
2103                        }
2104
2105                        prev_state = state = STATE_SENDING_DATA;
2106                        fallthrough;
2107
2108                case STATE_SENDING_DATA:
2109                        /*
2110                         * We could get a data error and never a transfer
2111                         * complete so we'd better check for it here.
2112                         *
2113                         * Note that we don't really care if we also got a
2114                         * transfer complete; stopping the DMA and sending an
2115                         * abort won't hurt.
2116                         */
2117                        if (test_and_clear_bit(EVENT_DATA_ERROR,
2118                                               &host->pending_events)) {
2119                                if (!(host->data_status & (SDMMC_INT_DRTO |
2120                                                           SDMMC_INT_EBE)))
2121                                        send_stop_abort(host, data);
2122                                dw_mci_stop_dma(host);
2123                                state = STATE_DATA_ERROR;
2124                                break;
2125                        }
2126
2127                        if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
2128                                                &host->pending_events)) {
2129                                /*
2130                                 * If no data-related interrupt arrives in time
2131                                 * while reading data, arm the data-read timeout.
2132                                 */
2133                                if (host->dir_status == DW_MCI_RECV_STATUS)
2134                                        dw_mci_set_drto(host);
2135                                break;
2136                        }
2137
2138                        set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
2139
2140                        /*
2141                         * Handle an EVENT_DATA_ERROR that might have shown up
2142                         * before the transfer completed.  This might not have
2143                         * been caught by the check above because the interrupt
2144                         * could have gone off between the previous check and
2145                         * the check for transfer complete.
2146                         *
2147                         * Technically this ought not be needed assuming we
2148                         * get a DATA_COMPLETE eventually (we'll notice the
2149                         * error and end the request), but it shouldn't hurt.
2150                         *
2151                         * This has the advantage of sending the stop command.
2152                         */
2153                        if (test_and_clear_bit(EVENT_DATA_ERROR,
2154                                               &host->pending_events)) {
2155                                if (!(host->data_status & (SDMMC_INT_DRTO |
2156                                                           SDMMC_INT_EBE)))
2157                                        send_stop_abort(host, data);
2158                                dw_mci_stop_dma(host);
2159                                state = STATE_DATA_ERROR;
2160                                break;
2161                        }
2162                        prev_state = state = STATE_DATA_BUSY;
2163
2164                        fallthrough;
2165
2166                case STATE_DATA_BUSY:
2167                        if (!dw_mci_clear_pending_data_complete(host)) {
2168                                /*
2169                                 * If the data error interrupt came but the data
2170                                 * over interrupt doesn't arrive in time while
2171                                 * reading data, arm the data-read timeout.
2172                                 */
2173                                if (host->dir_status == DW_MCI_RECV_STATUS)
2174                                        dw_mci_set_drto(host);
2175                                break;
2176                        }
2177
2178                        dw_mci_stop_fault_timer(host);
2179                        host->data = NULL;
2180                        set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
2181                        err = dw_mci_data_complete(host, data);
2182
2183                        if (!err) {
2184                                if (!data->stop || mrq->sbc) {
2185                                        if (mrq->sbc && data->stop)
2186                                                data->stop->error = 0;
2187                                        dw_mci_request_end(host, mrq);
2188                                        goto unlock;
2189                                }
2190
2191                                /* stop command for open-ended transfer */
2192                                if (data->stop)
2193                                        send_stop_abort(host, data);
2194                        } else {
2195                                /*
2196                                 * If we don't have a command complete now we'll
2197                                 * never get one since we just reset everything;
2198                                 * better end the request.
2199                                 *
2200                                 * If we do have a command complete we'll fall
2201                                 * through to the SENDING_STOP command and
2202                                 * everything will be peachy keen.
2203                                 */
2204                                if (!test_bit(EVENT_CMD_COMPLETE,
2205                                              &host->pending_events)) {
2206                                        host->cmd = NULL;
2207                                        dw_mci_request_end(host, mrq);
2208                                        goto unlock;
2209                                }
2210                        }
2211
2212                        /*
2213                         * If err is non-zero, the stop-abort command has
2214                         * already been issued.
2215                         */
2216                        prev_state = state = STATE_SENDING_STOP;
2217
2218                        fallthrough;
2219
2220                case STATE_SENDING_STOP:
2221                        if (!dw_mci_clear_pending_cmd_complete(host))
2222                                break;
2223
2224                        /* CMD error in data command */
2225                        if (mrq->cmd->error && mrq->data)
2226                                dw_mci_reset(host);
2227
2228                        dw_mci_stop_fault_timer(host);
2229                        host->cmd = NULL;
2230                        host->data = NULL;
2231
2232                        if (!mrq->sbc && mrq->stop)
2233                                dw_mci_command_complete(host, mrq->stop);
2234                        else
2235                                host->cmd_status = 0;
2236
2237                        dw_mci_request_end(host, mrq);
2238                        goto unlock;
2239
2240                case STATE_DATA_ERROR:
2241                        if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
2242                                                &host->pending_events))
2243                                break;
2244
2245                        state = STATE_DATA_BUSY;
2246                        break;
2247                }
2248        } while (state != prev_state);
2249
2250        host->state = state;
2251unlock:
2252        spin_unlock(&host->lock);
2254}
2255
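/*
 * Happy-path state progression driven by the tasklet above:
 *
 *   SENDING_CMD -> SENDING_DATA -> DATA_BUSY -> SENDING_STOP -> IDLE
 *
 * STATE_DATA_ERROR is taken as a detour when EVENT_DATA_ERROR is pending,
 * and requests without data complete straight out of SENDING_CMD via
 * dw_mci_request_end().
 */
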
2256/* push final bytes to part_buf, only use during push */
2257static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
2258{
2259        memcpy((void *)&host->part_buf, buf, cnt);
2260        host->part_buf_count = cnt;
2261}
2262
2263/* append bytes to part_buf, only use during push */
2264static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
2265{
2266        cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
2267        memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
2268        host->part_buf_count += cnt;
2269        return cnt;
2270}
2271
2272/* pull first bytes from part_buf, only use during pull */
2273static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
2274{
2275        cnt = min_t(int, cnt, host->part_buf_count);
2276        if (cnt) {
2277                memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
2278                       cnt);
2279                host->part_buf_count -= cnt;
2280                host->part_buf_start += cnt;
2281        }
2282        return cnt;
2283}
2284
2285/* pull final bytes from the part_buf, assuming it's just been filled */
2286static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
2287{
2288        memcpy(buf, &host->part_buf, cnt);
2289        host->part_buf_start = cnt;
2290        host->part_buf_count = (1 << host->data_shift) - cnt;
2291}
2292
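/*
 * Example of the partial-buffer bookkeeping below: pushing 7 bytes to a
 * 32-bit FIFO (data_shift = 2) writes one full 4-byte word and leaves the
 * remaining 3 bytes in part_buf with part_buf_count = 3; the next push,
 * or the end of the transfer, completes and flushes that word.
 */
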
2293static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
2294{
2295        struct mmc_data *data = host->data;
2296        int init_cnt = cnt;
2297
2298        /* try and push anything in the part_buf */
2299        if (unlikely(host->part_buf_count)) {
2300                int len = dw_mci_push_part_bytes(host, buf, cnt);
2301
2302                buf += len;
2303                cnt -= len;
2304                if (host->part_buf_count == 2) {
2305                        mci_fifo_writew(host->fifo_reg, host->part_buf16);
2306                        host->part_buf_count = 0;
2307                }
2308        }
2309#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2310        if (unlikely((unsigned long)buf & 0x1)) {
2311                while (cnt >= 2) {
2312                        u16 aligned_buf[64];
2313                        int len = min(cnt & -2, (int)sizeof(aligned_buf));
2314                        int items = len >> 1;
2315                        int i;
2316                        /* memcpy from input buffer into aligned buffer */
2317                        memcpy(aligned_buf, buf, len);
2318                        buf += len;
2319                        cnt -= len;
2320                        /* push data from aligned buffer into fifo */
2321                        for (i = 0; i < items; ++i)
2322                                mci_fifo_writew(host->fifo_reg, aligned_buf[i]);
2323                }
2324        } else
2325#endif
2326        {
2327                u16 *pdata = buf;
2328
2329                for (; cnt >= 2; cnt -= 2)
2330                        mci_fifo_writew(host->fifo_reg, *pdata++);
2331                buf = pdata;
2332        }
2333        /* put anything remaining in the part_buf */
2334        if (cnt) {
2335                dw_mci_set_part_bytes(host, buf, cnt);
2336                /* Push data if we have reached the expected data length */
2337                if ((data->bytes_xfered + init_cnt) ==
2338                    (data->blksz * data->blocks))
2339                        mci_fifo_writew(host->fifo_reg, host->part_buf16);
2340        }
2341}
2342
2343static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
2344{
2345#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2346        if (unlikely((unsigned long)buf & 0x1)) {
2347                while (cnt >= 2) {
2348                        /* pull data from fifo into aligned buffer */
2349                        u16 aligned_buf[64];
2350                        int len = min(cnt & -2, (int)sizeof(aligned_buf));
2351                        int items = len >> 1;
2352                        int i;
2353
2354                        for (i = 0; i < items; ++i)
2355                                aligned_buf[i] = mci_fifo_readw(host->fifo_reg);
2356                        /* memcpy from aligned buffer into output buffer */
2357                        memcpy(buf, aligned_buf, len);
2358                        buf += len;
2359                        cnt -= len;
2360                }
2361        } else
2362#endif
2363        {
2364                u16 *pdata = buf;
2365
2366                for (; cnt >= 2; cnt -= 2)
2367                        *pdata++ = mci_fifo_readw(host->fifo_reg);
2368                buf = pdata;
2369        }
2370        if (cnt) {
2371                host->part_buf16 = mci_fifo_readw(host->fifo_reg);
2372                dw_mci_pull_final_bytes(host, buf, cnt);
2373        }
2374}
2375
2376static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
2377{
2378        struct mmc_data *data = host->data;
2379        int init_cnt = cnt;
2380
2381        /* try and push anything in the part_buf */
2382        if (unlikely(host->part_buf_count)) {
2383                int len = dw_mci_push_part_bytes(host, buf, cnt);
2384
2385                buf += len;
2386                cnt -= len;
2387                if (host->part_buf_count == 4) {
2388                        mci_fifo_writel(host->fifo_reg, host->part_buf32);
2389                        host->part_buf_count = 0;
2390                }
2391        }
2392#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2393        if (unlikely((unsigned long)buf & 0x3)) {
2394                while (cnt >= 4) {
2395                        u32 aligned_buf[32];
2396                        int len = min(cnt & -4, (int)sizeof(aligned_buf));
2397                        int items = len >> 2;
2398                        int i;
2399                        /* memcpy from input buffer into aligned buffer */
2400                        memcpy(aligned_buf, buf, len);
2401                        buf += len;
2402                        cnt -= len;
2403                        /* push data from aligned buffer into fifo */
2404                        for (i = 0; i < items; ++i)
2405                                mci_fifo_writel(host->fifo_reg, aligned_buf[i]);
2406                }
2407        } else
2408#endif
2409        {
2410                u32 *pdata = buf;
2411
2412                for (; cnt >= 4; cnt -= 4)
2413                        mci_fifo_writel(host->fifo_reg, *pdata++);
2414                buf = pdata;
2415        }
2416        /* put anything remaining in the part_buf */
2417        if (cnt) {
2418                dw_mci_set_part_bytes(host, buf, cnt);
2419                /* Push data if we have reached the expected data length */
2420                if ((data->bytes_xfered + init_cnt) ==
2421                    (data->blksz * data->blocks))
2422                        mci_fifo_writel(host->fifo_reg, host->part_buf32);
2423        }
2424}
2425
2426static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
2427{
2428#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2429        if (unlikely((unsigned long)buf & 0x3)) {
2430                while (cnt >= 4) {
2431                        /* pull data from fifo into aligned buffer */
2432                        u32 aligned_buf[32];
2433                        int len = min(cnt & -4, (int)sizeof(aligned_buf));
2434                        int items = len >> 2;
2435                        int i;
2436
2437                        for (i = 0; i < items; ++i)
2438                                aligned_buf[i] = mci_fifo_readl(host->fifo_reg);
2439                        /* memcpy from aligned buffer into output buffer */
2440                        memcpy(buf, aligned_buf, len);
2441                        buf += len;
2442                        cnt -= len;
2443                }
2444        } else
2445#endif
2446        {
2447                u32 *pdata = buf;
2448
2449                for (; cnt >= 4; cnt -= 4)
2450                        *pdata++ = mci_fifo_readl(host->fifo_reg);
2451                buf = pdata;
2452        }
2453        if (cnt) {
2454                host->part_buf32 = mci_fifo_readl(host->fifo_reg);
2455                dw_mci_pull_final_bytes(host, buf, cnt);
2456        }
2457}
2458
2459static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
2460{
2461        struct mmc_data *data = host->data;
2462        int init_cnt = cnt;
2463
2464        /* try and push anything in the part_buf */
2465        if (unlikely(host->part_buf_count)) {
2466                int len = dw_mci_push_part_bytes(host, buf, cnt);
2467
2468                buf += len;
2469                cnt -= len;
2470
2471                if (host->part_buf_count == 8) {
2472                        mci_fifo_writeq(host->fifo_reg, host->part_buf);
2473                        host->part_buf_count = 0;
2474                }
2475        }
2476#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2477        if (unlikely((unsigned long)buf & 0x7)) {
2478                while (cnt >= 8) {
2479                        u64 aligned_buf[16];
2480                        int len = min(cnt & -8, (int)sizeof(aligned_buf));
2481                        int items = len >> 3;
2482                        int i;
2483                        /* memcpy from input buffer into aligned buffer */
2484                        memcpy(aligned_buf, buf, len);
2485                        buf += len;
2486                        cnt -= len;
2487                        /* push data from aligned buffer into fifo */
2488                        for (i = 0; i < items; ++i)
2489                                mci_fifo_writeq(host->fifo_reg, aligned_buf[i]);
2490                }
2491        } else
2492#endif
2493        {
2494                u64 *pdata = buf;
2495
2496                for (; cnt >= 8; cnt -= 8)
2497                        mci_fifo_writeq(host->fifo_reg, *pdata++);
2498                buf = pdata;
2499        }
2500        /* put anything remaining in the part_buf */
2501        if (cnt) {
2502                dw_mci_set_part_bytes(host, buf, cnt);
2503                /* Push data if we have reached the expected data length */
2504                if ((data->bytes_xfered + init_cnt) ==
2505                    (data->blksz * data->blocks))
2506                        mci_fifo_writeq(host->fifo_reg, host->part_buf);
2507        }
2508}
2509
2510static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
2511{
2512#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2513        if (unlikely((unsigned long)buf & 0x7)) {
2514                while (cnt >= 8) {
2515                        /* pull data from fifo into aligned buffer */
2516                        u64 aligned_buf[16];
2517                        int len = min(cnt & -8, (int)sizeof(aligned_buf));
2518                        int items = len >> 3;
2519                        int i;
2520
2521                        for (i = 0; i < items; ++i)
2522                                aligned_buf[i] = mci_fifo_readq(host->fifo_reg);
2523
2524                        /* memcpy from aligned buffer into output buffer */
2525                        memcpy(buf, aligned_buf, len);
2526                        buf += len;
2527                        cnt -= len;
2528                }
2529        } else
2530#endif
2531        {
2532                u64 *pdata = buf;
2533
2534                for (; cnt >= 8; cnt -= 8)
2535                        *pdata++ = mci_fifo_readq(host->fifo_reg);
2536                buf = pdata;
2537        }
2538        if (cnt) {
2539                host->part_buf = mci_fifo_readq(host->fifo_reg);
2540                dw_mci_pull_final_bytes(host, buf, cnt);
2541        }
2542}
2543
2544static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
2545{
2546        int len;
2547
2548        /* get remaining partial bytes */
2549        len = dw_mci_pull_part_bytes(host, buf, cnt);
2550        if (unlikely(len == cnt))
2551                return;
2552        buf += len;
2553        cnt -= len;
2554
2555        /* get the rest of the data */
2556        host->pull_data(host, buf, cnt);
2557}
2558
2559static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
2560{
2561        struct sg_mapping_iter *sg_miter = &host->sg_miter;
2562        void *buf;
2563        unsigned int offset;
2564        struct mmc_data *data = host->data;
2565        int shift = host->data_shift;
2566        u32 status;
2567        unsigned int len;
2568        unsigned int remain, fcnt;
2569
2570        do {
2571                if (!sg_miter_next(sg_miter))
2572                        goto done;
2573
2574                host->sg = sg_miter->piter.sg;
2575                buf = sg_miter->addr;
2576                remain = sg_miter->length;
2577                offset = 0;
2578
2579                do {
2580                        fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
2581                                        << shift) + host->part_buf_count;
2582                        len = min(remain, fcnt);
2583                        if (!len)
2584                                break;
2585                        dw_mci_pull_data(host, (void *)(buf + offset), len);
2586                        data->bytes_xfered += len;
2587                        offset += len;
2588                        remain -= len;
2589                } while (remain);
2590
2591                sg_miter->consumed = offset;
2592                status = mci_readl(host, MINTSTS);
2593                mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2594        /* if RXDR is still ready, read again */
2595        } while ((status & SDMMC_INT_RXDR) ||
2596                 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
2597
2598        if (!remain) {
2599                if (!sg_miter_next(sg_miter))
2600                        goto done;
2601                sg_miter->consumed = 0;
2602        }
2603        sg_miter_stop(sg_miter);
2604        return;
2605
2606done:
2607        sg_miter_stop(sg_miter);
2608        host->sg = NULL;
2609        smp_wmb(); /* drain writebuffer */
2610        set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
2611}
2612
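/*
 * FCNT above is the FIFO fill level in FIFO-width words; shifting it by
 * host->data_shift converts words to bytes (e.g. 16 words in a 32-bit
 * FIFO, data_shift = 2, is 64 bytes) before adding whatever is still
 * parked in part_buf.
 */
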
2613static void dw_mci_write_data_pio(struct dw_mci *host)
2614{
2615        struct sg_mapping_iter *sg_miter = &host->sg_miter;
2616        void *buf;
2617        unsigned int offset;
2618        struct mmc_data *data = host->data;
2619        int shift = host->data_shift;
2620        u32 status;
2621        unsigned int len;
2622        unsigned int fifo_depth = host->fifo_depth;
2623        unsigned int remain, fcnt;
2624
2625        do {
2626                if (!sg_miter_next(sg_miter))
2627                        goto done;
2628
2629                host->sg = sg_miter->piter.sg;
2630                buf = sg_miter->addr;
2631                remain = sg_miter->length;
2632                offset = 0;
2633
2634                do {
2635                        fcnt = ((fifo_depth -
2636                                 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
2637                                        << shift) - host->part_buf_count;
2638                        len = min(remain, fcnt);
2639                        if (!len)
2640                                break;
2641                        host->push_data(host, (void *)(buf + offset), len);
2642                        data->bytes_xfered += len;
2643                        offset += len;
2644                        remain -= len;
2645                } while (remain);
2646
2647                sg_miter->consumed = offset;
2648                status = mci_readl(host, MINTSTS);
2649                mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2650        } while (status & SDMMC_INT_TXDR); /* if TXDR is still set, write again */
2651
2652        if (!remain) {
2653                if (!sg_miter_next(sg_miter))
2654                        goto done;
2655                sg_miter->consumed = 0;
2656        }
2657        sg_miter_stop(sg_miter);
2658        return;
2659
2660done:
2661        sg_miter_stop(sg_miter);
2662        host->sg = NULL;
2663        smp_wmb(); /* drain writebuffer */
2664        set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
2665}
2666
2667static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
2668{
2669        del_timer(&host->cto_timer);
2670
2671        if (!host->cmd_status)
2672                host->cmd_status = status;
2673
2674        smp_wmb(); /* drain writebuffer */
2675
2676        set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2677        tasklet_schedule(&host->tasklet);
2678
2679        dw_mci_start_fault_timer(host);
2680}
2681
2682static void dw_mci_handle_cd(struct dw_mci *host)
2683{
2684        struct dw_mci_slot *slot = host->slot;
2685
2686        mmc_detect_change(slot->mmc,
2687                msecs_to_jiffies(host->pdata->detect_delay_ms));
2688}
2689
2690static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
2691{
2692        struct dw_mci *host = dev_id;
2693        u32 pending;
2694        struct dw_mci_slot *slot = host->slot;
2695
2696        pending = mci_readl(host, MINTSTS); /* read-only mask reg */
2697
2698        if (pending) {
2699                /* Check volt switch first, since it can look like an error */
2700                if ((host->state == STATE_SENDING_CMD11) &&
2701                    (pending & SDMMC_INT_VOLT_SWITCH)) {
2702                        mci_writel(host, RINTSTS, SDMMC_INT_VOLT_SWITCH);
2703                        pending &= ~SDMMC_INT_VOLT_SWITCH;
2704
2705                        /*
2706                         * Hold the lock; we know cmd11_timer can't be kicked
2707                         * off after the lock is released, so safe to delete.
2708                         */
2709                        spin_lock(&host->irq_lock);
2710                        dw_mci_cmd_interrupt(host, pending);
2711                        spin_unlock(&host->irq_lock);
2712
2713                        del_timer(&host->cmd11_timer);
2714                }
2715
2716                if (pending & DW_MCI_CMD_ERROR_FLAGS) {
2717                        spin_lock(&host->irq_lock);
2718
2719                        del_timer(&host->cto_timer);
2720                        mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
2721                        host->cmd_status = pending;
2722                        smp_wmb(); /* drain writebuffer */
2723                        set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2724
2725                        spin_unlock(&host->irq_lock);
2726                }
2727
2728                if (pending & DW_MCI_DATA_ERROR_FLAGS) {
2729                        spin_lock(&host->irq_lock);
2730
2731                        /* if there is an error, report DATA_ERROR */
2732                        mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
2733                        host->data_status = pending;
2734                        smp_wmb(); /* drain writebuffer */
2735                        set_bit(EVENT_DATA_ERROR, &host->pending_events);
2736                        tasklet_schedule(&host->tasklet);
2737
2738                        spin_unlock(&host->irq_lock);
2739                }
2740
2741                if (pending & SDMMC_INT_DATA_OVER) {
2742                        spin_lock(&host->irq_lock);
2743
2744                        del_timer(&host->dto_timer);
2745
2746                        mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
2747                        if (!host->data_status)
2748                                host->data_status = pending;
2749                        smp_wmb(); /* drain writebuffer */
2750                        if (host->dir_status == DW_MCI_RECV_STATUS) {
2751                                if (host->sg != NULL)
2752                                        dw_mci_read_data_pio(host, true);
2753                        }
2754                        set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
2755                        tasklet_schedule(&host->tasklet);
2756
2757                        spin_unlock(&host->irq_lock);
2758                }
2759
2760                if (pending & SDMMC_INT_RXDR) {
2761                        mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2762                        if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
2763                                dw_mci_read_data_pio(host, false);
2764                }
2765
2766                if (pending & SDMMC_INT_TXDR) {
2767                        mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2768                        if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
2769                                dw_mci_write_data_pio(host);
2770                }
2771
2772                if (pending & SDMMC_INT_CMD_DONE) {
2773                        spin_lock(&host->irq_lock);
2774
2775                        mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
2776                        dw_mci_cmd_interrupt(host, pending);
2777
2778                        spin_unlock(&host->irq_lock);
2779                }
2780
2781                if (pending & SDMMC_INT_CD) {
2782                        mci_writel(host, RINTSTS, SDMMC_INT_CD);
2783                        dw_mci_handle_cd(host);
2784                }
2785
2786                if (pending & SDMMC_INT_SDIO(slot->sdio_id)) {
2787                        mci_writel(host, RINTSTS,
2788                                   SDMMC_INT_SDIO(slot->sdio_id));
2789                        __dw_mci_enable_sdio_irq(slot, 0);
2790                        sdio_signal_irq(slot->mmc);
2791                }
2792
2794
2795        if (host->use_dma != TRANS_MODE_IDMAC)
2796                return IRQ_HANDLED;
2797
2798        /* Handle IDMA interrupts */
2799        if (host->dma_64bit_address == 1) {
2800                pending = mci_readl(host, IDSTS64);
2801                if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
2802                        mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_TI |
2803                                                        SDMMC_IDMAC_INT_RI);
2804                        mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_NI);
2805                        if (!test_bit(EVENT_DATA_ERROR, &host->pending_events))
2806                                host->dma_ops->complete((void *)host);
2807                }
2808        } else {
2809                pending = mci_readl(host, IDSTS);
2810                if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
2811                        mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI |
2812                                                        SDMMC_IDMAC_INT_RI);
2813                        mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
2814                        if (!test_bit(EVENT_DATA_ERROR, &host->pending_events))
2815                                host->dma_ops->complete((void *)host);
2816                }
2817        }
2818
2819        return IRQ_HANDLED;
2820}
2821
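/*
 * The handler reads MINTSTS (the masked interrupt status) to decide what
 * to service and acks each source by writing the same bits to RINTSTS;
 * the internal DMAC has its own status/ack registers (IDSTS or IDSTS64,
 * depending on the descriptor format in use).
 */
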
2822static int dw_mci_init_slot_caps(struct dw_mci_slot *slot)
2823{
2824        struct dw_mci *host = slot->host;
2825        const struct dw_mci_drv_data *drv_data = host->drv_data;
2826        struct mmc_host *mmc = slot->mmc;
2827        int ctrl_id;
2828
2829        if (host->pdata->caps)
2830                mmc->caps = host->pdata->caps;
2831
2832        if (host->pdata->pm_caps)
2833                mmc->pm_caps = host->pdata->pm_caps;
2834
2835        if (host->dev->of_node) {
2836                ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
2837                if (ctrl_id < 0)
2838                        ctrl_id = 0;
2839        } else {
2840                ctrl_id = to_platform_device(host->dev)->id;
2841        }
2842
2843        if (drv_data && drv_data->caps) {
2844                if (ctrl_id >= drv_data->num_caps) {
2845                        dev_err(host->dev, "invalid controller id %d\n",
2846                                ctrl_id);
2847                        return -EINVAL;
2848                }
2849                mmc->caps |= drv_data->caps[ctrl_id];
2850        }
2851
2852        if (host->pdata->caps2)
2853                mmc->caps2 = host->pdata->caps2;
2854
2855        mmc->f_min = DW_MCI_FREQ_MIN;
2856        if (!mmc->f_max)
2857                mmc->f_max = DW_MCI_FREQ_MAX;
2858
2859        /* Process SDIO IRQs through the sdio_irq_work. */
2860        if (mmc->caps & MMC_CAP_SDIO_IRQ)
2861                mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
2862
2863        return 0;
2864}
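/*
 * Editor's note: on non-DT platforms the caps seeded above come from platform
 * data. A hedged illustration of such a board definition (the variable name
 * and the cap/clock choices are made up for the example):
 */
#if 0   /* illustrative sketch only */
static struct dw_mci_board example_pdata = {
        .caps   = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ, /* ORed with drv_data caps */
        .caps2  = MMC_CAP2_NO_SD,
        .bus_hz = 100 * 1000 * 1000,    /* 100 MHz CIU clock */
};
#endif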
2865
2866static int dw_mci_init_slot(struct dw_mci *host)
2867{
2868        struct mmc_host *mmc;
2869        struct dw_mci_slot *slot;
2870        int ret;
2871
2872        mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
2873        if (!mmc)
2874                return -ENOMEM;
2875
2876        slot = mmc_priv(mmc);
2877        slot->id = 0;
2878        slot->sdio_id = host->sdio_id0 + slot->id;
2879        slot->mmc = mmc;
2880        slot->host = host;
2881        host->slot = slot;
2882
2883        mmc->ops = &dw_mci_ops;
2884
2885        /* If there are external regulators, get them */
2886        ret = mmc_regulator_get_supply(mmc);
2887        if (ret)
2888                goto err_host_allocated;
2889
2890        if (!mmc->ocr_avail)
2891                mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
2892
2893        ret = mmc_of_parse(mmc);
2894        if (ret)
2895                goto err_host_allocated;
2896
2897        ret = dw_mci_init_slot_caps(slot);
2898        if (ret)
2899                goto err_host_allocated;
2900
2901        /* Useful defaults if platform data is unset. */
2902        if (host->use_dma == TRANS_MODE_IDMAC) {
2903                mmc->max_segs = host->ring_size;
2904                mmc->max_blk_size = 65535;
2905                mmc->max_seg_size = 0x1000;
2906                mmc->max_req_size = mmc->max_seg_size * host->ring_size;
2907                mmc->max_blk_count = mmc->max_req_size / 512;
2908        } else if (host->use_dma == TRANS_MODE_EDMAC) {
2909                mmc->max_segs = 64;
2910                mmc->max_blk_size = 65535;
2911                mmc->max_blk_count = 65535;
2912                mmc->max_req_size =
2913                                mmc->max_blk_size * mmc->max_blk_count;
2914                mmc->max_seg_size = mmc->max_req_size;
2915        } else {
2916                /* TRANS_MODE_PIO */
2917                mmc->max_segs = 64;
2918                mmc->max_blk_size = 65535; /* BLKSIZ is 16 bits */
2919                mmc->max_blk_count = 512;
2920                mmc->max_req_size = mmc->max_blk_size *
2921                                    mmc->max_blk_count;
2922                mmc->max_seg_size = mmc->max_req_size;
2923        }
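        /*
         * Editor's note, worked example for the IDMAC case (assuming 4 KiB
         * pages and the 16-byte struct idmac_desc, so that ring_size is
         * sized elsewhere in this file as DESC_RING_BUF_SZ /
         * sizeof(struct idmac_desc) = 4096 / 16 = 256 descriptors):
         * max_req_size = 0x1000 * 256 = 1 MiB per request, and
         * max_blk_count = 1 MiB / 512 = 2048 blocks.
         */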
2924
2925        dw_mci_get_cd(mmc);
2926
2927        ret = mmc_add_host(mmc);
2928        if (ret)
2929                goto err_host_allocated;
2930
2931#if defined(CONFIG_DEBUG_FS)
2932        dw_mci_init_debugfs(slot);
2933#endif
2934
2935        return 0;
2936
2937err_host_allocated:
2938        mmc_free_host(mmc);
2939        return ret;
2940}
2941
2942static void dw_mci_cleanup_slot(struct dw_mci_slot *slot)
2943{
2944        /* Debugfs stuff is cleaned up by mmc core */
2945        mmc_remove_host(slot->mmc);
2946        slot->host->slot = NULL;
2947        mmc_free_host(slot->mmc);
2948}
2949
2950static void dw_mci_init_dma(struct dw_mci *host)
2951{
2952        int addr_config;
2953        struct device *dev = host->dev;
2954
2955        /*
2956         * Check the transfer mode from HCON[17:16].
2957         * This clears up the ambiguous description in the dw_mmc databook:
2958         * 2b'00: No DMA Interface -> actually means using the internal DMA block
2959         * 2b'01: DesignWare DMA Interface -> Synopsys DW-DMA block
2960         * 2b'10: Generic DMA Interface -> non-Synopsys generic DMA block
2961         * 2b'11: Non DW DMA Interface -> PIO only
2962         * Compared to the DesignWare DMA Interface, the Generic DMA Interface
2963         * has a simpler request/acknowledge handshake mechanism, and both are
2964         * regarded as an external DMA master by dw_mmc.
2965         */
2966        host->use_dma = SDMMC_GET_TRANS_MODE(mci_readl(host, HCON));
2967        if (host->use_dma == DMA_INTERFACE_IDMA) {
2968                host->use_dma = TRANS_MODE_IDMAC;
2969        } else if (host->use_dma == DMA_INTERFACE_DWDMA ||
2970                   host->use_dma == DMA_INTERFACE_GDMA) {
2971                host->use_dma = TRANS_MODE_EDMAC;
2972        } else {
2973                goto no_dma;
2974        }
2975
2976        /* Determine which DMA interface to use */
2977        if (host->use_dma == TRANS_MODE_IDMAC) {
2978                /*
2979                 * Check the ADDR_CONFIG bit in HCON to find the
2980                 * IDMAC address bus width.
2981                 */
2982                addr_config = SDMMC_GET_ADDR_CONFIG(mci_readl(host, HCON));
2983
2984                if (addr_config == 1) {
2985                        /* host supports IDMAC in 64-bit address mode */
2986                        host->dma_64bit_address = 1;
2987                        dev_info(host->dev,
2988                                 "IDMAC supports 64-bit address mode.\n");
2989                        if (!dma_set_mask(host->dev, DMA_BIT_MASK(64)))
2990                                dma_set_coherent_mask(host->dev,
2991                                                      DMA_BIT_MASK(64));
2992                } else {
2993                        /* host supports IDMAC in 32-bit address mode */
2994                        host->dma_64bit_address = 0;
2995                        dev_info(host->dev,
2996                                 "IDMAC supports 32-bit address mode.\n");
2997                }
2998
2999                /* Alloc memory for sg translation */
3000                host->sg_cpu = dmam_alloc_coherent(host->dev,
3001                                                   DESC_RING_BUF_SZ,
3002                                                   &host->sg_dma, GFP_KERNEL);
3003                if (!host->sg_cpu) {
3004                        dev_err(host->dev,
3005                                "%s: could not alloc DMA memory\n",
3006                                __func__);
3007                        goto no_dma;
3008                }
3009
3010                host->dma_ops = &dw_mci_idmac_ops;
3011                dev_info(host->dev, "Using internal DMA controller.\n");
3012        } else {
3013                /* TRANS_MODE_EDMAC: check dma bindings again */
3014                if ((device_property_read_string_array(dev, "dma-names",
3015                                                       NULL, 0) < 0) ||
3016                    !device_property_present(dev, "dmas")) {
3017                        goto no_dma;
3018                }
3019                host->dma_ops = &dw_mci_edmac_ops;
3020                dev_info(host->dev, "Using external DMA controller.\n");
3021        }
3022
3023        if (host->dma_ops->init && host->dma_ops->start &&
3024            host->dma_ops->stop && host->dma_ops->cleanup) {
3025                if (host->dma_ops->init(host)) {
3026                        dev_err(host->dev, "%s: Unable to initialize DMA Controller.\n",
3027                                __func__);
3028                        goto no_dma;
3029                }
3030        } else {
3031                dev_err(host->dev, "DMA initialization not found.\n");
3032                goto no_dma;
3033        }
3034
3035        return;
3036
3037no_dma:
3038        dev_info(host->dev, "Using PIO mode.\n");
3039        host->use_dma = TRANS_MODE_PIO;
3040}
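/*
 * Editor's note, example decode (value illustrative): a HCON reading of
 * 0x00010000 gives SDMMC_GET_TRANS_MODE() = (0x00010000 >> 16) & 0x3 = 1,
 * i.e. DMA_INTERFACE_DWDMA, which the code above maps to TRANS_MODE_EDMAC.
 */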
3041
3042static void dw_mci_cmd11_timer(struct timer_list *t)
3043{
3044        struct dw_mci *host = from_timer(host, t, cmd11_timer);
3045
3046        if (host->state != STATE_SENDING_CMD11) {
3047                dev_warn(host->dev, "Unexpected CMD11 timeout\n");
3048                return;
3049        }
3050
3051        host->cmd_status = SDMMC_INT_RTO;
3052        set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
3053        tasklet_schedule(&host->tasklet);
3054}
3055
3056static void dw_mci_cto_timer(struct timer_list *t)
3057{
3058        struct dw_mci *host = from_timer(host, t, cto_timer);
3059        unsigned long irqflags;
3060        u32 pending;
3061
3062        spin_lock_irqsave(&host->irq_lock, irqflags);
3063
3064        /*
3065         * If somehow we have very bad interrupt latency it's remotely possible
3066         * that the timer could fire while the interrupt is still pending or
3067         * while the interrupt is midway through running.  Let's be paranoid
3068         * and detect those two cases.  Note that this paranoia is somewhat
3069         * justified because in this function we don't actually cancel the
3070         * pending command in the controller--we just assume it will never come.
3071         */
3072        pending = mci_readl(host, MINTSTS); /* read-only mask reg */
3073        if (pending & (DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_CMD_DONE)) {
3074                /* The interrupt should fire; no need to act but we can warn */
3075                dev_warn(host->dev, "Unexpected interrupt latency\n");
3076                goto exit;
3077        }
3078        if (test_bit(EVENT_CMD_COMPLETE, &host->pending_events)) {
3079                /* Presumably interrupt handler couldn't delete the timer */
3080                dev_warn(host->dev, "CTO timeout when already completed\n");
3081                goto exit;
3082        }
3083
3084        /*
3085         * Continued paranoia to make sure we're in the state we expect.
3086         * This paranoia isn't really justified but it seems good to be safe.
3087         */
3088        switch (host->state) {
3089        case STATE_SENDING_CMD11:
3090        case STATE_SENDING_CMD:
3091        case STATE_SENDING_STOP:
3092                /*
3093                 * If CMD_DONE interrupt does NOT come in sending command
3094                 * state, we should notify the driver to terminate current
3095                 * transfer and report a command timeout to the core.
3096                 */
3097                host->cmd_status = SDMMC_INT_RTO;
3098                set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
3099                tasklet_schedule(&host->tasklet);
3100                break;
3101        default:
3102                dev_warn(host->dev, "Unexpected command timeout, state %d\n",
3103                         host->state);
3104                break;
3105        }
3106
3107exit:
3108        spin_unlock_irqrestore(&host->irq_lock, irqflags);
3109}
3110
3111static void dw_mci_dto_timer(struct timer_list *t)
3112{
3113        struct dw_mci *host = from_timer(host, t, dto_timer);
3114        unsigned long irqflags;
3115        u32 pending;
3116
3117        spin_lock_irqsave(&host->irq_lock, irqflags);
3118
3119        /*
3120         * The DTO timer is much longer than the CTO timer, so it's even less
3121         * likely that we'll hit these cases, but it pays to be paranoid.
3122         */
3123        pending = mci_readl(host, MINTSTS); /* read-only mask reg */
3124        if (pending & SDMMC_INT_DATA_OVER) {
3125                /* The interrupt should fire; no need to act but we can warn */
3126                dev_warn(host->dev, "Unexpected data interrupt latency\n");
3127                goto exit;
3128        }
3129        if (test_bit(EVENT_DATA_COMPLETE, &host->pending_events)) {
3130                /* Presumably interrupt handler couldn't delete the timer */
3131                dev_warn(host->dev, "DTO timeout when already completed\n");
3132                goto exit;
3133        }
3134
3135        /*
3136         * Continued paranoia to make sure we're in the state we expect.
3137         * This paranoia isn't really justified but it seems good to be safe.
3138         */
3139        switch (host->state) {
3140        case STATE_SENDING_DATA:
3141        case STATE_DATA_BUSY:
3142                /*
3143                 * If DTO interrupt does NOT come in sending data state,
3144                 * we should notify the driver to terminate current transfer
3145                 * and report a data timeout to the core.
3146                 */
3147                host->data_status = SDMMC_INT_DRTO;
3148                set_bit(EVENT_DATA_ERROR, &host->pending_events);
3149                set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
3150                tasklet_schedule(&host->tasklet);
3151                break;
3152        default:
3153                dev_warn(host->dev, "Unexpected data timeout, state %d\n",
3154                         host->state);
3155                break;
3156        }
3157
3158exit:
3159        spin_unlock_irqrestore(&host->irq_lock, irqflags);
3160}
3161
3162#ifdef CONFIG_OF
3163static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3164{
3165        struct dw_mci_board *pdata;
3166        struct device *dev = host->dev;
3167        const struct dw_mci_drv_data *drv_data = host->drv_data;
3168        int ret;
3169        u32 clock_frequency;
3170
3171        pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
3172        if (!pdata)
3173                return ERR_PTR(-ENOMEM);
3174
3175        /* Find the reset controller, if one exists */
3176        pdata->rstc = devm_reset_control_get_optional_exclusive(dev, "reset");
3177        if (IS_ERR(pdata->rstc))
3178                return ERR_CAST(pdata->rstc);
3179
3180        if (device_property_read_u32(dev, "fifo-depth", &pdata->fifo_depth))
3181                dev_info(dev,
3182                         "fifo-depth property not found, using value of FIFOTH register as default\n");
3183
3184        device_property_read_u32(dev, "card-detect-delay",
3185                                 &pdata->detect_delay_ms);
3186
3187        device_property_read_u32(dev, "data-addr", &host->data_addr_override);
3188
3189        if (device_property_present(dev, "fifo-watermark-aligned"))
3190                host->wm_aligned = true;
3191
3192        if (!device_property_read_u32(dev, "clock-frequency", &clock_frequency))
3193                pdata->bus_hz = clock_frequency;
3194
3195        if (drv_data && drv_data->parse_dt) {
3196                ret = drv_data->parse_dt(host);
3197                if (ret)
3198                        return ERR_PTR(ret);
3199        }
3200
3201        return pdata;
3202}
3203
3204#else /* CONFIG_OF */
3205static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3206{
3207        return ERR_PTR(-EINVAL);
3208}
3209#endif /* CONFIG_OF */
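/*
 * Editor's note: an illustrative device-tree fragment exercising the
 * properties parsed above (property names are taken from the code; the node
 * name, address and values are made up):
 *
 *      mmc@fe320000 {
 *              fifo-depth = <0x100>;
 *              card-detect-delay = <200>;
 *              fifo-watermark-aligned;
 *              clock-frequency = <100000000>;
 *              resets = <&cru 1>;
 *              reset-names = "reset";
 *      };
 */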
3210
3211static void dw_mci_enable_cd(struct dw_mci *host)
3212{
3213        unsigned long irqflags;
3214        u32 temp;
3215
3216        /*
3217         * No need for the controller's CD interrupt if the slot has a
3218         * working CD GPIO or if broken card detection forces polling.
3219         */
3220        if (host->slot->mmc->caps & MMC_CAP_NEEDS_POLL)
3221                return;
3222
3223        if (mmc_gpio_get_cd(host->slot->mmc) < 0) {
3224                spin_lock_irqsave(&host->irq_lock, irqflags);
3225                temp = mci_readl(host, INTMASK);
3226                temp |= SDMMC_INT_CD;
3227                mci_writel(host, INTMASK, temp);
3228                spin_unlock_irqrestore(&host->irq_lock, irqflags);
3229        }
3230}
3231
3232int dw_mci_probe(struct dw_mci *host)
3233{
3234        const struct dw_mci_drv_data *drv_data = host->drv_data;
3235        int width, i, ret = 0;
3236        u32 fifo_size;
3237
3238        if (!host->pdata) {
3239                host->pdata = dw_mci_parse_dt(host);
3240                if (IS_ERR(host->pdata))
3241                        return dev_err_probe(host->dev, PTR_ERR(host->pdata),
3242                                             "platform data not available\n");
3243        }
3244
3245        host->biu_clk = devm_clk_get(host->dev, "biu");
3246        if (IS_ERR(host->biu_clk)) {
3247                dev_dbg(host->dev, "biu clock not available\n");
3248        } else {
3249                ret = clk_prepare_enable(host->biu_clk);
3250                if (ret) {
3251                        dev_err(host->dev, "failed to enable biu clock\n");
3252                        return ret;
3253                }
3254        }
3255
3256        host->ciu_clk = devm_clk_get(host->dev, "ciu");
3257        if (IS_ERR(host->ciu_clk)) {
3258                dev_dbg(host->dev, "ciu clock not available\n");
3259                host->bus_hz = host->pdata->bus_hz;
3260        } else {
3261                ret = clk_prepare_enable(host->ciu_clk);
3262                if (ret) {
3263                        dev_err(host->dev, "failed to enable ciu clock\n");
3264                        goto err_clk_biu;
3265                }
3266
3267                if (host->pdata->bus_hz) {
3268                        ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
3269                        if (ret)
3270                                dev_warn(host->dev,
3271                                         "Unable to set bus rate to %uHz\n",
3272                                         host->pdata->bus_hz);
3273                }
3274                host->bus_hz = clk_get_rate(host->ciu_clk);
3275        }
3276
3277        if (!host->bus_hz) {
3278                dev_err(host->dev,
3279                        "Platform data must supply bus speed\n");
3280                ret = -ENODEV;
3281                goto err_clk_ciu;
3282        }
3283
3284        if (host->pdata->rstc) {
3285                reset_control_assert(host->pdata->rstc);
3286                usleep_range(10, 50);
3287                reset_control_deassert(host->pdata->rstc);
3288        }
3289
3290        if (drv_data && drv_data->init) {
3291                ret = drv_data->init(host);
3292                if (ret) {
3293                        dev_err(host->dev,
3294                                "implementation specific init failed\n");
3295                        goto err_clk_ciu;
3296                }
3297        }
3298
3299        timer_setup(&host->cmd11_timer, dw_mci_cmd11_timer, 0);
3300        timer_setup(&host->cto_timer, dw_mci_cto_timer, 0);
3301        timer_setup(&host->dto_timer, dw_mci_dto_timer, 0);
3302
3303        spin_lock_init(&host->lock);
3304        spin_lock_init(&host->irq_lock);
3305        INIT_LIST_HEAD(&host->queue);
3306
3307        dw_mci_init_fault(host);
3308
3309        /*
3310         * Get the host data width - this assumes that HCON has been set with
3311         * the correct values.
3312         */
3313        i = SDMMC_GET_HDATA_WIDTH(mci_readl(host, HCON));
3314        if (!i) {
3315                host->push_data = dw_mci_push_data16;
3316                host->pull_data = dw_mci_pull_data16;
3317                width = 16;
3318                host->data_shift = 1;
3319        } else if (i == 2) {
3320                host->push_data = dw_mci_push_data64;
3321                host->pull_data = dw_mci_pull_data64;
3322                width = 64;
3323                host->data_shift = 3;
3324        } else {
3325                /* Check for a reserved value, and warn if it is */
3326                WARN((i != 1),
3327                     "HCON reports a reserved host data width!\n"
3328                     "Defaulting to 32-bit access.\n");
3329                host->push_data = dw_mci_push_data32;
3330                host->pull_data = dw_mci_pull_data32;
3331                width = 32;
3332                host->data_shift = 2;
3333        }
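        /*
         * Editor's note: data_shift is log2 of the FIFO access width in
         * bytes (16-bit -> 1, 32-bit -> 2, 64-bit -> 3), so the PIO paths
         * can convert byte counts to FIFO word counts with len >> data_shift.
         */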
3334
3335        /* Reset all blocks */
3336        if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
3337                ret = -ENODEV;
3338                goto err_clk_ciu;
3339        }
3340
3341        host->dma_ops = host->pdata->dma_ops;
3342        dw_mci_init_dma(host);
3343
3344        /* Clear the interrupts for the host controller */
3345        mci_writel(host, RINTSTS, 0xFFFFFFFF);
3346        mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
3347
3348        /* Put in max timeout */
3349        mci_writel(host, TMOUT, 0xFFFFFFFF);
3350
3351        /*
3352         * FIFO threshold settings: RxMark = fifo_size / 2 - 1,
3353         * TxMark = fifo_size / 2, DMA multiple-transaction size = 8.
3354         */
3355        if (!host->pdata->fifo_depth) {
3356                /*
3357                 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
3358                 * have been overwritten by the bootloader, just like we're
3359                 * about to do, so if you know the value for your hardware, you
3360                 * should put it in the platform data.
3361                 */
3362                fifo_size = mci_readl(host, FIFOTH);
3363                fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
3364        } else {
3365                fifo_size = host->pdata->fifo_depth;
3366        }
3367        host->fifo_depth = fifo_size;
3368        host->fifoth_val =
3369                SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
3370        mci_writel(host, FIFOTH, host->fifoth_val);
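        /*
         * Editor's note, worked example (illustrative): for fifo_size = 64
         * this programs SDMMC_SET_FIFOTH(0x2, 31, 32), i.e. MSIZE = 8
         * transfers, RX_WMark = 31, TX_WMark = 32, which packs to 0x201F0020
         * assuming the usual MSIZE[30:28]/RX[27:16]/TX[11:0] layout.
         */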
3371
3372        /* disable clock to CIU */
3373        mci_writel(host, CLKENA, 0);
3374        mci_writel(host, CLKSRC, 0);
3375
3376        /*
3377         * The data register offset changed in the 2.40a spec, so check
3378         * the version ID and set the DATA register offset accordingly.
3379         */
3380        host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
3381        dev_info(host->dev, "Version ID is %04x\n", host->verid);
3382
3383        if (host->data_addr_override)
3384                host->fifo_reg = host->regs + host->data_addr_override;
3385        else if (host->verid < DW_MMC_240A)
3386                host->fifo_reg = host->regs + DATA_OFFSET;
3387        else
3388                host->fifo_reg = host->regs + DATA_240A_OFFSET;
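        /*
         * Editor's note: pre-2.40a cores expose the data FIFO at DATA_OFFSET
         * (0x100); 2.40a and later use DATA_240A_OFFSET (0x200). Offsets are
         * quoted from dw_mmc.h for illustration.
         */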
3389
3390        tasklet_setup(&host->tasklet, dw_mci_tasklet_func);
3391        ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
3392                               host->irq_flags, "dw-mci", host);
3393        if (ret)
3394                goto err_dmaunmap;
3395
3396        /*
3397         * Enable interrupts for command done, data over, data empty,
3398         * receive ready and error such as transmit, receive timeout, crc error
3399         */
3400        mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
3401                   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
3402                   DW_MCI_ERROR_FLAGS);
3403        /* Enable mci interrupt */
3404        mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
3405
3406        dev_info(host->dev,
3407                 "DW MMC controller at irq %d, %d-bit host data width, %u-deep FIFO\n",
3408                 host->irq, width, fifo_size);
3409
3410        /* We need at least one slot to succeed */
3411        ret = dw_mci_init_slot(host);
3412        if (ret) {
3413                /* Note: 'i' still holds the HCON data-width field here, not a slot id */
3414                dev_dbg(host->dev, "slot init failed\n");
3414                goto err_dmaunmap;
3415        }
3416
3417        /* Now that slots are all setup, we can enable card detect */
3418        dw_mci_enable_cd(host);
3419
3420        return 0;
3421
3422err_dmaunmap:
3423        if (host->use_dma && host->dma_ops->exit)
3424                host->dma_ops->exit(host);
3425
3426        reset_control_assert(host->pdata->rstc);
3427
3428err_clk_ciu:
3429        clk_disable_unprepare(host->ciu_clk);
3430
3431err_clk_biu:
3432        clk_disable_unprepare(host->biu_clk);
3433
3434        return ret;
3435}
3436EXPORT_SYMBOL(dw_mci_probe);
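/*
 * Editor's note: dw_mci_probe() is exported for bus-glue drivers such as
 * dw_mmc-pltfm. A minimal sketch of such glue, assuming only the fields
 * dw_mci_probe() consumes (dev, regs, irq) need filling in; the function
 * name is hypothetical:
 */
#if 0   /* illustrative sketch only */
static int example_dw_mci_pltfm_probe(struct platform_device *pdev)
{
        struct dw_mci *host;

        host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
        if (!host)
                return -ENOMEM;

        host->irq = platform_get_irq(pdev, 0);
        if (host->irq < 0)
                return host->irq;

        host->dev = &pdev->dev;
        host->irq_flags = 0;
        host->regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(host->regs))
                return PTR_ERR(host->regs);

        platform_set_drvdata(pdev, host);
        return dw_mci_probe(host);
}
#endif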
3437
3438void dw_mci_remove(struct dw_mci *host)
3439{
3440        dev_dbg(host->dev, "remove slot\n");
3441        if (host->slot)
3442                dw_mci_cleanup_slot(host->slot);
3443
3444        mci_writel(host, RINTSTS, 0xFFFFFFFF);
3445        mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
3446
3447        /* disable clock to CIU */
3448        mci_writel(host, CLKENA, 0);
3449        mci_writel(host, CLKSRC, 0);
3450
3451        if (host->use_dma && host->dma_ops->exit)
3452                host->dma_ops->exit(host);
3453
3454        reset_control_assert(host->pdata->rstc);
3455
3456        clk_disable_unprepare(host->ciu_clk);
3457        clk_disable_unprepare(host->biu_clk);
3458}
3459EXPORT_SYMBOL(dw_mci_remove);
3460
3463#ifdef CONFIG_PM
3464int dw_mci_runtime_suspend(struct device *dev)
3465{
3466        struct dw_mci *host = dev_get_drvdata(dev);
3467
3468        if (host->use_dma && host->dma_ops->exit)
3469                host->dma_ops->exit(host);
3470
3471        clk_disable_unprepare(host->ciu_clk);
3472
3473        if (host->slot &&
3474            (mmc_can_gpio_cd(host->slot->mmc) ||
3475             !mmc_card_is_removable(host->slot->mmc)))
3476                clk_disable_unprepare(host->biu_clk);
3477
3478        return 0;
3479}
3480EXPORT_SYMBOL(dw_mci_runtime_suspend);
3481
3482int dw_mci_runtime_resume(struct device *dev)
3483{
3484        int ret = 0;
3485        struct dw_mci *host = dev_get_drvdata(dev);
3486
3487        if (host->slot &&
3488            (mmc_can_gpio_cd(host->slot->mmc) ||
3489             !mmc_card_is_removable(host->slot->mmc))) {
3490                ret = clk_prepare_enable(host->biu_clk);
3491                if (ret)
3492                        return ret;
3493        }
3494
3495        ret = clk_prepare_enable(host->ciu_clk);
3496        if (ret)
3497                goto err;
3498
3499        if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
3500                clk_disable_unprepare(host->ciu_clk);
3501                ret = -ENODEV;
3502                goto err;
3503        }
3504
3505        if (host->use_dma && host->dma_ops->init)
3506                host->dma_ops->init(host);
3507
3508        /*
3509         * Restore the initial FIFOTH register value and invalidate
3510         * prev_blksz with zero.
3511         */
3512        mci_writel(host, FIFOTH, host->fifoth_val);
3513        host->prev_blksz = 0;
3514
3515        /* Put in max timeout */
3516        mci_writel(host, TMOUT, 0xFFFFFFFF);
3517
3518        mci_writel(host, RINTSTS, 0xFFFFFFFF);
3519        mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
3520                   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
3521                   DW_MCI_ERROR_FLAGS);
3522        mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
3523
3525        if (host->slot->mmc->pm_flags & MMC_PM_KEEP_POWER)
3526                dw_mci_set_ios(host->slot->mmc, &host->slot->mmc->ios);
3527
3528        /* Force setup bus to guarantee available clock output */
3529        dw_mci_setup_bus(host->slot, true);
3530
3531        /* Re-enable SDIO interrupts. */
3532        if (sdio_irq_claimed(host->slot->mmc))
3533                __dw_mci_enable_sdio_irq(host->slot, 1);
3534
3535        /* Now that slots are all setup, we can enable card detect */
3536        dw_mci_enable_cd(host);
3537
3538        return 0;
3539
3540err:
3541        if (host->slot &&
3542            (mmc_can_gpio_cd(host->slot->mmc) ||
3543             !mmc_card_is_removable(host->slot->mmc)))
3544                clk_disable_unprepare(host->biu_clk);
3545
3546        return ret;
3547}
3548EXPORT_SYMBOL(dw_mci_runtime_resume);
3549#endif /* CONFIG_PM */
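/*
 * Editor's note: glue drivers typically wire the runtime PM hooks above into
 * their dev_pm_ops roughly like this (a sketch modeled on dw_mmc-pltfm; the
 * ops variable name is hypothetical):
 */
#if 0   /* illustrative sketch only */
static const struct dev_pm_ops example_dw_mci_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
                                pm_runtime_force_resume)
        SET_RUNTIME_PM_OPS(dw_mci_runtime_suspend,
                           dw_mci_runtime_resume, NULL)
};
#endif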
3550
3551static int __init dw_mci_init(void)
3552{
3553        pr_info("Synopsys DesignWare Multimedia Card Interface Driver\n");
3554        return 0;
3555}
3556
3557static void __exit dw_mci_exit(void)
3558{
3559}
3560
3561module_init(dw_mci_init);
3562module_exit(dw_mci_exit);
3563
3564MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
3565MODULE_AUTHOR("NXP Semiconductor VietNam");
3566MODULE_AUTHOR("Imagination Technologies Ltd");
3567MODULE_LICENSE("GPL v2");
3568