linux/drivers/mmc/host/atmel-mci.c
   1/*
   2 * Atmel MultiMedia Card Interface driver
   3 *
   4 * Copyright (C) 2004-2008 Atmel Corporation
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License version 2 as
   8 * published by the Free Software Foundation.
   9 */
  10#include <linux/blkdev.h>
  11#include <linux/clk.h>
  12#include <linux/debugfs.h>
  13#include <linux/device.h>
  14#include <linux/dmaengine.h>
  15#include <linux/dma-mapping.h>
  16#include <linux/err.h>
  17#include <linux/gpio.h>
  18#include <linux/init.h>
  19#include <linux/interrupt.h>
  20#include <linux/ioport.h>
  21#include <linux/module.h>
  22#include <linux/platform_device.h>
  23#include <linux/scatterlist.h>
  24#include <linux/seq_file.h>
  25#include <linux/stat.h>
  26
  27#include <linux/mmc/host.h>
  28#include <linux/atmel-mci.h>
  29
  30#include <asm/io.h>
  31#include <asm/unaligned.h>
  32
  33#include <mach/cpu.h>
  34#include <mach/board.h>
  35
  36#include "atmel-mci-regs.h"
  37
  38#define ATMCI_DATA_ERROR_FLAGS  (MCI_DCRCE | MCI_DTOE | MCI_OVRE | MCI_UNRE)
  39#define ATMCI_DMA_THRESHOLD     16
  40
  41enum {
  42        EVENT_CMD_COMPLETE = 0,
  43        EVENT_XFER_COMPLETE,
  44        EVENT_DATA_COMPLETE,
  45        EVENT_DATA_ERROR,
  46};
  47
  48enum atmel_mci_state {
  49        STATE_IDLE = 0,
  50        STATE_SENDING_CMD,
  51        STATE_SENDING_DATA,
  52        STATE_DATA_BUSY,
  53        STATE_SENDING_STOP,
  54        STATE_DATA_ERROR,
  55};
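
/*
 * Illustrative state flow (a sketch derived from atmci_tasklet_func() below,
 * not an authoritative diagram) for a data transfer with a stop command:
 *
 *      STATE_IDLE
 *        -> STATE_SENDING_CMD    (request started)
 *        -> STATE_SENDING_DATA   (command complete, data pending)
 *        -> STATE_DATA_BUSY      (EVENT_XFER_COMPLETE)
 *        -> STATE_SENDING_STOP   (EVENT_DATA_COMPLETE, mrq->stop present)
 *        -> STATE_IDLE           (stop command complete)
 *
 * A data error moves the machine to STATE_DATA_ERROR, which waits for
 * EVENT_XFER_COMPLETE and then rejoins the flow at STATE_DATA_BUSY.
 */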
  56
  57struct atmel_mci_dma {
  58#ifdef CONFIG_MMC_ATMELMCI_DMA
  59        struct dma_chan                 *chan;
  60        struct dma_async_tx_descriptor  *data_desc;
  61#endif
  62};
  63
  64/**
  65 * struct atmel_mci - MMC controller state shared between all slots
  66 * @lock: Spinlock protecting the queue and associated data.
  67 * @regs: Pointer to MMIO registers.
  68 * @sg: Scatterlist entry currently being processed by PIO code, if any.
  69 * @pio_offset: Offset into the current scatterlist entry.
  70 * @cur_slot: The slot which is currently using the controller.
  71 * @mrq: The request currently being processed on @cur_slot,
  72 *      or NULL if the controller is idle.
  73 * @cmd: The command currently being sent to the card, or NULL.
  74 * @data: The data currently being transferred, or NULL if no data
  75 *      transfer is in progress.
  76 * @dma: DMA client state.
  77 * @data_chan: DMA channel being used for the current data transfer.
  78 * @cmd_status: Snapshot of SR taken upon completion of the current
  79 *      command. Only valid when EVENT_CMD_COMPLETE is pending.
  80 * @data_status: Snapshot of SR taken upon completion of the current
  81 *      data transfer. Only valid when EVENT_DATA_COMPLETE or
  82 *      EVENT_DATA_ERROR is pending.
  83 * @stop_cmdr: Value to be loaded into CMDR when the stop command is
  84 *      to be sent.
  85 * @tasklet: Tasklet running the request state machine.
  86 * @pending_events: Bitmask of events flagged by the interrupt handler
  87 *      to be processed by the tasklet.
  88 * @completed_events: Bitmask of events which the state machine has
  89 *      processed.
  90 * @state: Tasklet state.
  91 * @queue: List of slots waiting for access to the controller.
  92 * @need_clock_update: Update the clock rate before the next request.
  93 * @need_reset: Reset controller before next request.
  94 * @mode_reg: Value of the MR register.
  95 * @bus_hz: The rate of @mck in Hz. This forms the basis for MMC bus
  96 *      rate and timeout calculations.
  97 * @mapbase: Physical address of the MMIO registers.
  98 * @mck: The peripheral bus clock hooked up to the MMC controller.
  99 * @pdev: Platform device associated with the MMC controller.
 100 * @slot: Slots sharing this MMC controller.
 101 *
 102 * Locking
 103 * =======
 104 *
 105 * @lock is a softirq-safe spinlock protecting @queue as well as
 106 * @cur_slot, @mrq and @state. These must always be updated
 107 * at the same time while holding @lock.
 108 *
 109 * @lock also protects mode_reg and need_clock_update since these are
 110 * used to synchronize mode register updates with the queue
 111 * processing.
 112 *
 113 * The @mrq field of struct atmel_mci_slot is also protected by @lock,
 114 * and must always be written at the same time as the slot is added to
 115 * @queue.
 116 *
 117 * @pending_events and @completed_events are accessed using atomic bit
 118 * operations, so they don't need any locking.
 119 *
 120 * None of the fields touched by the interrupt handler need any
 121 * locking. However, ordering is important: Before EVENT_DATA_ERROR or
 122 * EVENT_DATA_COMPLETE is set in @pending_events, all data-related
 123 * interrupts must be disabled and @data_status updated with a
 124 * snapshot of SR. Similarly, before EVENT_CMD_COMPLETE is set, the
  125 * CMDRDY interrupt must be disabled and @cmd_status updated with a
 126 * snapshot of SR, and before EVENT_XFER_COMPLETE can be set, the
 127 * bytes_xfered field of @data must be written. This is ensured by
 128 * using barriers.
 129 */
 130struct atmel_mci {
 131        spinlock_t              lock;
 132        void __iomem            *regs;
 133
 134        struct scatterlist      *sg;
 135        unsigned int            pio_offset;
 136
 137        struct atmel_mci_slot   *cur_slot;
 138        struct mmc_request      *mrq;
 139        struct mmc_command      *cmd;
 140        struct mmc_data         *data;
 141
 142        struct atmel_mci_dma    dma;
 143        struct dma_chan         *data_chan;
 144
 145        u32                     cmd_status;
 146        u32                     data_status;
 147        u32                     stop_cmdr;
 148
 149        struct tasklet_struct   tasklet;
 150        unsigned long           pending_events;
 151        unsigned long           completed_events;
 152        enum atmel_mci_state    state;
 153        struct list_head        queue;
 154
 155        bool                    need_clock_update;
 156        bool                    need_reset;
 157        u32                     mode_reg;
 158        unsigned long           bus_hz;
 159        unsigned long           mapbase;
 160        struct clk              *mck;
 161        struct platform_device  *pdev;
 162
 163        struct atmel_mci_slot   *slot[ATMEL_MCI_MAX_NR_SLOTS];
 164};
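
/*
 * A minimal sketch of the ordering contract described above, as implemented
 * by atmci_cmd_interrupt() and atmci_tasklet_func() further down:
 *
 *      interrupt handler:
 *              host->cmd_status = status;
 *              smp_wmb();
 *              atmci_set_pending(host, EVENT_CMD_COMPLETE);
 *
 *      tasklet:
 *              if (atmci_test_and_clear_pending(host, EVENT_CMD_COMPLETE))
 *                      status = host->cmd_status;
 *
 * The smp_wmb() publishes the SR snapshot before the event bit is set;
 * test_and_clear_bit() is a value-returning atomic operation and therefore
 * provides the matching barrier on the reader side.
 */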
 165
 166/**
 167 * struct atmel_mci_slot - MMC slot state
 168 * @mmc: The mmc_host representing this slot.
 169 * @host: The MMC controller this slot is using.
 170 * @sdc_reg: Value of SDCR to be written before using this slot.
 171 * @mrq: mmc_request currently being processed or waiting to be
 172 *      processed, or NULL when the slot is idle.
 173 * @queue_node: List node for placing this node in the @queue list of
 174 *      &struct atmel_mci.
 175 * @clock: Clock rate configured by set_ios(). Protected by host->lock.
 176 * @flags: Random state bits associated with the slot.
 177 * @detect_pin: GPIO pin used for card detection, or negative if not
 178 *      available.
  179 * @wp_pin: GPIO pin used for card write protect sensing, or negative
 180 *      if not available.
 181 * @detect_is_active_high: The state of the detect pin when it is active.
 182 * @detect_timer: Timer used for debouncing @detect_pin interrupts.
 183 */
 184struct atmel_mci_slot {
 185        struct mmc_host         *mmc;
 186        struct atmel_mci        *host;
 187
 188        u32                     sdc_reg;
 189
 190        struct mmc_request      *mrq;
 191        struct list_head        queue_node;
 192
 193        unsigned int            clock;
 194        unsigned long           flags;
 195#define ATMCI_CARD_PRESENT      0
 196#define ATMCI_CARD_NEED_INIT    1
 197#define ATMCI_SHUTDOWN          2
 198
 199        int                     detect_pin;
 200        int                     wp_pin;
 201        bool                    detect_is_active_high;
 202
 203        struct timer_list       detect_timer;
 204};
 205
 206#define atmci_test_and_clear_pending(host, event)               \
 207        test_and_clear_bit(event, &host->pending_events)
 208#define atmci_set_completed(host, event)                        \
 209        set_bit(event, &host->completed_events)
 210#define atmci_set_pending(host, event)                          \
 211        set_bit(event, &host->pending_events)
 212
 213/*
 214 * Enable or disable features/registers based on
 215 * whether the processor supports them
 216 */
 217static bool mci_has_rwproof(void)
 218{
 219        if (cpu_is_at91sam9261() || cpu_is_at91rm9200())
 220                return false;
 221        else
 222                return true;
 223}
 224
 225/*
 226 * The debugfs stuff below is mostly optimized away when
 227 * CONFIG_DEBUG_FS is not set.
 228 */
 229static int atmci_req_show(struct seq_file *s, void *v)
 230{
 231        struct atmel_mci_slot   *slot = s->private;
 232        struct mmc_request      *mrq;
 233        struct mmc_command      *cmd;
 234        struct mmc_command      *stop;
 235        struct mmc_data         *data;
 236
 237        /* Make sure we get a consistent snapshot */
 238        spin_lock_bh(&slot->host->lock);
 239        mrq = slot->mrq;
 240
 241        if (mrq) {
 242                cmd = mrq->cmd;
 243                data = mrq->data;
 244                stop = mrq->stop;
 245
 246                if (cmd)
 247                        seq_printf(s,
 248                                "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
 249                                cmd->opcode, cmd->arg, cmd->flags,
 250                                cmd->resp[0], cmd->resp[1], cmd->resp[2],
  251                                cmd->resp[3], cmd->error);
 252                if (data)
 253                        seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
 254                                data->bytes_xfered, data->blocks,
 255                                data->blksz, data->flags, data->error);
 256                if (stop)
 257                        seq_printf(s,
 258                                "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
 259                                stop->opcode, stop->arg, stop->flags,
 260                                stop->resp[0], stop->resp[1], stop->resp[2],
  261                                stop->resp[3], stop->error);
 262        }
 263
 264        spin_unlock_bh(&slot->host->lock);
 265
 266        return 0;
 267}
 268
 269static int atmci_req_open(struct inode *inode, struct file *file)
 270{
 271        return single_open(file, atmci_req_show, inode->i_private);
 272}
 273
 274static const struct file_operations atmci_req_fops = {
 275        .owner          = THIS_MODULE,
 276        .open           = atmci_req_open,
 277        .read           = seq_read,
 278        .llseek         = seq_lseek,
 279        .release        = single_release,
 280};
 281
 282static void atmci_show_status_reg(struct seq_file *s,
 283                const char *regname, u32 value)
 284{
 285        static const char       *sr_bit[] = {
 286                [0]     = "CMDRDY",
 287                [1]     = "RXRDY",
 288                [2]     = "TXRDY",
 289                [3]     = "BLKE",
 290                [4]     = "DTIP",
 291                [5]     = "NOTBUSY",
 292                [6]     = "ENDRX",
 293                [7]     = "ENDTX",
 294                [8]     = "SDIOIRQA",
 295                [9]     = "SDIOIRQB",
 296                [12]    = "SDIOWAIT",
 297                [14]    = "RXBUFF",
 298                [15]    = "TXBUFE",
 299                [16]    = "RINDE",
 300                [17]    = "RDIRE",
 301                [18]    = "RCRCE",
 302                [19]    = "RENDE",
 303                [20]    = "RTOE",
 304                [21]    = "DCRCE",
 305                [22]    = "DTOE",
 306                [23]    = "CSTOE",
 307                [24]    = "BLKOVRE",
 308                [25]    = "DMADONE",
 309                [26]    = "FIFOEMPTY",
 310                [27]    = "XFRDONE",
 311                [30]    = "OVRE",
 312                [31]    = "UNRE",
 313        };
 314        unsigned int            i;
 315
 316        seq_printf(s, "%s:\t0x%08x", regname, value);
 317        for (i = 0; i < ARRAY_SIZE(sr_bit); i++) {
 318                if (value & (1 << i)) {
 319                        if (sr_bit[i])
 320                                seq_printf(s, " %s", sr_bit[i]);
 321                        else
 322                                seq_puts(s, " UNKNOWN");
 323                }
 324        }
 325        seq_putc(s, '\n');
 326}
 327
 328static int atmci_regs_show(struct seq_file *s, void *v)
 329{
 330        struct atmel_mci        *host = s->private;
 331        u32                     *buf;
 332
 333        buf = kmalloc(MCI_REGS_SIZE, GFP_KERNEL);
 334        if (!buf)
 335                return -ENOMEM;
 336
 337        /*
 338         * Grab a more or less consistent snapshot. Note that we're
 339         * not disabling interrupts, so IMR and SR may not be
 340         * consistent.
 341         */
 342        spin_lock_bh(&host->lock);
 343        clk_enable(host->mck);
 344        memcpy_fromio(buf, host->regs, MCI_REGS_SIZE);
 345        clk_disable(host->mck);
 346        spin_unlock_bh(&host->lock);
 347
 348        seq_printf(s, "MR:\t0x%08x%s%s CLKDIV=%u\n",
 349                        buf[MCI_MR / 4],
 350                        buf[MCI_MR / 4] & MCI_MR_RDPROOF ? " RDPROOF" : "",
 351                        buf[MCI_MR / 4] & MCI_MR_WRPROOF ? " WRPROOF" : "",
 352                        buf[MCI_MR / 4] & 0xff);
 353        seq_printf(s, "DTOR:\t0x%08x\n", buf[MCI_DTOR / 4]);
 354        seq_printf(s, "SDCR:\t0x%08x\n", buf[MCI_SDCR / 4]);
 355        seq_printf(s, "ARGR:\t0x%08x\n", buf[MCI_ARGR / 4]);
 356        seq_printf(s, "BLKR:\t0x%08x BCNT=%u BLKLEN=%u\n",
 357                        buf[MCI_BLKR / 4],
 358                        buf[MCI_BLKR / 4] & 0xffff,
 359                        (buf[MCI_BLKR / 4] >> 16) & 0xffff);
 360
 361        /* Don't read RSPR and RDR; it will consume the data there */
 362
 363        atmci_show_status_reg(s, "SR", buf[MCI_SR / 4]);
 364        atmci_show_status_reg(s, "IMR", buf[MCI_IMR / 4]);
 365
 366        kfree(buf);
 367
 368        return 0;
 369}
 370
 371static int atmci_regs_open(struct inode *inode, struct file *file)
 372{
 373        return single_open(file, atmci_regs_show, inode->i_private);
 374}
 375
 376static const struct file_operations atmci_regs_fops = {
 377        .owner          = THIS_MODULE,
 378        .open           = atmci_regs_open,
 379        .read           = seq_read,
 380        .llseek         = seq_lseek,
 381        .release        = single_release,
 382};
 383
 384static void atmci_init_debugfs(struct atmel_mci_slot *slot)
 385{
 386        struct mmc_host         *mmc = slot->mmc;
 387        struct atmel_mci        *host = slot->host;
 388        struct dentry           *root;
 389        struct dentry           *node;
 390
 391        root = mmc->debugfs_root;
 392        if (!root)
 393                return;
 394
 395        node = debugfs_create_file("regs", S_IRUSR, root, host,
 396                        &atmci_regs_fops);
 397        if (IS_ERR(node))
 398                return;
 399        if (!node)
 400                goto err;
 401
 402        node = debugfs_create_file("req", S_IRUSR, root, slot, &atmci_req_fops);
 403        if (!node)
 404                goto err;
 405
 406        node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
 407        if (!node)
 408                goto err;
 409
 410        node = debugfs_create_x32("pending_events", S_IRUSR, root,
 411                                     (u32 *)&host->pending_events);
 412        if (!node)
 413                goto err;
 414
 415        node = debugfs_create_x32("completed_events", S_IRUSR, root,
 416                                     (u32 *)&host->completed_events);
 417        if (!node)
 418                goto err;
 419
 420        return;
 421
 422err:
 423        dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
 424}
 425
 426static inline unsigned int ns_to_clocks(struct atmel_mci *host,
 427                                        unsigned int ns)
 428{
 429        return (ns * (host->bus_hz / 1000000) + 999) / 1000;
 430}
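
/*
 * Worked example (illustrative only, assuming bus_hz = 66 MHz): a 100 us
 * card timeout, i.e. ns = 100000, yields
 * (100000 * 66 + 999) / 1000 = 6600 MCI clock cycles.
 */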
 431
 432static void atmci_set_timeout(struct atmel_mci *host,
 433                struct atmel_mci_slot *slot, struct mmc_data *data)
 434{
 435        static unsigned dtomul_to_shift[] = {
 436                0, 4, 7, 8, 10, 12, 16, 20
 437        };
 438        unsigned        timeout;
 439        unsigned        dtocyc;
 440        unsigned        dtomul;
 441
 442        timeout = ns_to_clocks(host, data->timeout_ns) + data->timeout_clks;
 443
 444        for (dtomul = 0; dtomul < 8; dtomul++) {
 445                unsigned shift = dtomul_to_shift[dtomul];
 446                dtocyc = (timeout + (1 << shift) - 1) >> shift;
 447                if (dtocyc < 15)
 448                        break;
 449        }
 450
 451        if (dtomul >= 8) {
 452                dtomul = 7;
 453                dtocyc = 15;
 454        }
 455
 456        dev_vdbg(&slot->mmc->class_dev, "setting timeout to %u cycles\n",
 457                        dtocyc << dtomul_to_shift[dtomul]);
 458        mci_writel(host, DTOR, (MCI_DTOMUL(dtomul) | MCI_DTOCYC(dtocyc)));
 459}
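
/*
 * Continuing the example above (illustrative only): for timeout = 6600
 * cycles the loop stops at dtomul = 4 (shift 10), since
 * (6600 + 1023) >> 10 = 7 is the first value below 15. DTOR is then
 * programmed with DTOCYC = 7 and DTOMUL = 4, i.e. an effective timeout of
 * 7 << 10 = 7168 cycles.
 */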
 460
 461/*
 462 * Return mask with command flags to be enabled for this command.
 463 */
 464static u32 atmci_prepare_command(struct mmc_host *mmc,
 465                                 struct mmc_command *cmd)
 466{
 467        struct mmc_data *data;
 468        u32             cmdr;
 469
 470        cmd->error = -EINPROGRESS;
 471
 472        cmdr = MCI_CMDR_CMDNB(cmd->opcode);
 473
 474        if (cmd->flags & MMC_RSP_PRESENT) {
 475                if (cmd->flags & MMC_RSP_136)
 476                        cmdr |= MCI_CMDR_RSPTYP_136BIT;
 477                else
 478                        cmdr |= MCI_CMDR_RSPTYP_48BIT;
 479        }
 480
 481        /*
 482         * This should really be MAXLAT_5 for CMD2 and ACMD41, but
 483         * it's too difficult to determine whether this is an ACMD or
 484         * not. Better make it 64.
 485         */
 486        cmdr |= MCI_CMDR_MAXLAT_64CYC;
 487
 488        if (mmc->ios.bus_mode == MMC_BUSMODE_OPENDRAIN)
 489                cmdr |= MCI_CMDR_OPDCMD;
 490
 491        data = cmd->data;
 492        if (data) {
 493                cmdr |= MCI_CMDR_START_XFER;
 494                if (data->flags & MMC_DATA_STREAM)
 495                        cmdr |= MCI_CMDR_STREAM;
 496                else if (data->blocks > 1)
 497                        cmdr |= MCI_CMDR_MULTI_BLOCK;
 498                else
 499                        cmdr |= MCI_CMDR_BLOCK;
 500
 501                if (data->flags & MMC_DATA_READ)
 502                        cmdr |= MCI_CMDR_TRDIR_READ;
 503        }
 504
 505        return cmdr;
 506}
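
/*
 * Illustrative example (not driver code): for a single-block read such as
 * CMD17 with an R1 response on a push-pull bus, the mask returned above
 * would be
 *
 *      MCI_CMDR_CMDNB(17) | MCI_CMDR_RSPTYP_48BIT | MCI_CMDR_MAXLAT_64CYC
 *              | MCI_CMDR_START_XFER | MCI_CMDR_BLOCK | MCI_CMDR_TRDIR_READ
 */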
 507
 508static void atmci_start_command(struct atmel_mci *host,
 509                struct mmc_command *cmd, u32 cmd_flags)
 510{
 511        WARN_ON(host->cmd);
 512        host->cmd = cmd;
 513
 514        dev_vdbg(&host->pdev->dev,
 515                        "start command: ARGR=0x%08x CMDR=0x%08x\n",
 516                        cmd->arg, cmd_flags);
 517
 518        mci_writel(host, ARGR, cmd->arg);
 519        mci_writel(host, CMDR, cmd_flags);
 520}
 521
 522static void send_stop_cmd(struct atmel_mci *host, struct mmc_data *data)
 523{
 524        atmci_start_command(host, data->stop, host->stop_cmdr);
 525        mci_writel(host, IER, MCI_CMDRDY);
 526}
 527
 528#ifdef CONFIG_MMC_ATMELMCI_DMA
 529static void atmci_dma_cleanup(struct atmel_mci *host)
 530{
 531        struct mmc_data                 *data = host->data;
 532
 533        dma_unmap_sg(&host->pdev->dev, data->sg, data->sg_len,
 534                     ((data->flags & MMC_DATA_WRITE)
 535                      ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
 536}
 537
 538static void atmci_stop_dma(struct atmel_mci *host)
 539{
 540        struct dma_chan *chan = host->data_chan;
 541
 542        if (chan) {
 543                chan->device->device_terminate_all(chan);
 544                atmci_dma_cleanup(host);
 545        } else {
 546                /* Data transfer was stopped by the interrupt handler */
 547                atmci_set_pending(host, EVENT_XFER_COMPLETE);
 548                mci_writel(host, IER, MCI_NOTBUSY);
 549        }
 550}
 551
 552/* This function is called by the DMA driver from tasklet context. */
 553static void atmci_dma_complete(void *arg)
 554{
 555        struct atmel_mci        *host = arg;
 556        struct mmc_data         *data = host->data;
 557
 558        dev_vdbg(&host->pdev->dev, "DMA complete\n");
 559
 560        atmci_dma_cleanup(host);
 561
 562        /*
 563         * If the card was removed, data will be NULL. No point trying
  564         * to send the stop command or waiting for NOTBUSY in this case.
 565         */
 566        if (data) {
 567                atmci_set_pending(host, EVENT_XFER_COMPLETE);
 568                tasklet_schedule(&host->tasklet);
 569
 570                /*
 571                 * Regardless of what the documentation says, we have
 572                 * to wait for NOTBUSY even after block read
 573                 * operations.
 574                 *
 575                 * When the DMA transfer is complete, the controller
 576                 * may still be reading the CRC from the card, i.e.
 577                 * the data transfer is still in progress and we
 578                 * haven't seen all the potential error bits yet.
 579                 *
 580                 * The interrupt handler will schedule a different
 581                 * tasklet to finish things up when the data transfer
 582                 * is completely done.
 583                 *
 584                 * We may not complete the mmc request here anyway
 585                 * because the mmc layer may call back and cause us to
 586                 * violate the "don't submit new operations from the
 587                 * completion callback" rule of the dma engine
 588                 * framework.
 589                 */
 590                mci_writel(host, IER, MCI_NOTBUSY);
 591        }
 592}
 593
 594static int
 595atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data)
 596{
 597        struct dma_chan                 *chan;
 598        struct dma_async_tx_descriptor  *desc;
 599        struct scatterlist              *sg;
 600        unsigned int                    i;
 601        enum dma_data_direction         direction;
 602        unsigned int                    sglen;
 603
 604        /*
 605         * We don't do DMA on "complex" transfers, i.e. with
 606         * non-word-aligned buffers or lengths. Also, we don't bother
 607         * with all the DMA setup overhead for short transfers.
 608         */
 609        if (data->blocks * data->blksz < ATMCI_DMA_THRESHOLD)
 610                return -EINVAL;
 611        if (data->blksz & 3)
 612                return -EINVAL;
 613
 614        for_each_sg(data->sg, sg, data->sg_len, i) {
 615                if (sg->offset & 3 || sg->length & 3)
 616                        return -EINVAL;
 617        }
 618
 619        /* If we don't have a channel, we can't do DMA */
 620        chan = host->dma.chan;
 621        if (chan)
 622                host->data_chan = chan;
 623
 624        if (!chan)
 625                return -ENODEV;
 626
 627        if (data->flags & MMC_DATA_READ)
 628                direction = DMA_FROM_DEVICE;
 629        else
 630                direction = DMA_TO_DEVICE;
 631
 632        sglen = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len, direction);
 633        if (sglen != data->sg_len)
 634                goto unmap_exit;
 635        desc = chan->device->device_prep_slave_sg(chan,
 636                        data->sg, data->sg_len, direction,
 637                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 638        if (!desc)
 639                goto unmap_exit;
 640
 641        host->dma.data_desc = desc;
 642        desc->callback = atmci_dma_complete;
 643        desc->callback_param = host;
 644        desc->tx_submit(desc);
 645
 646        /* Go! */
 647        chan->device->device_issue_pending(chan);
 648
 649        return 0;
 650unmap_exit:
 651        dma_unmap_sg(&host->pdev->dev, data->sg, sglen, direction);
 652        return -ENOMEM;
 653}
 654
 655#else /* CONFIG_MMC_ATMELMCI_DMA */
 656
 657static int atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data)
 658{
 659        return -ENOSYS;
 660}
 661
 662static void atmci_stop_dma(struct atmel_mci *host)
 663{
 664        /* Data transfer was stopped by the interrupt handler */
 665        atmci_set_pending(host, EVENT_XFER_COMPLETE);
 666        mci_writel(host, IER, MCI_NOTBUSY);
 667}
 668
 669#endif /* CONFIG_MMC_ATMELMCI_DMA */
 670
 671/*
 672 * Returns a mask of interrupt flags to be enabled after the whole
 673 * request has been prepared.
 674 */
 675static u32 atmci_submit_data(struct atmel_mci *host, struct mmc_data *data)
 676{
 677        u32 iflags;
 678
 679        data->error = -EINPROGRESS;
 680
 681        WARN_ON(host->data);
 682        host->sg = NULL;
 683        host->data = data;
 684
 685        iflags = ATMCI_DATA_ERROR_FLAGS;
 686        if (atmci_submit_data_dma(host, data)) {
 687                host->data_chan = NULL;
 688
 689                /*
 690                 * Errata: MMC data write operation with less than 12
 691                 * bytes is impossible.
 692                 *
 693                 * Errata: MCI Transmit Data Register (TDR) FIFO
 694                 * corruption when length is not multiple of 4.
 695                 */
 696                if (data->blocks * data->blksz < 12
 697                                || (data->blocks * data->blksz) & 3)
 698                        host->need_reset = true;
 699
 700                host->sg = data->sg;
 701                host->pio_offset = 0;
 702                if (data->flags & MMC_DATA_READ)
 703                        iflags |= MCI_RXRDY;
 704                else
 705                        iflags |= MCI_TXRDY;
 706        }
 707
 708        return iflags;
 709}
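
/*
 * Illustrative summary: a word-aligned 512-byte single-block transfer with
 * a DMA channel available is handed off to atmci_submit_data_dma() and only
 * ATMCI_DATA_ERROR_FLAGS is returned; a transfer that is shorter than
 * ATMCI_DMA_THRESHOLD, not word-aligned, or without a channel falls back to
 * PIO and additionally enables MCI_RXRDY or MCI_TXRDY.
 */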
 710
 711static void atmci_start_request(struct atmel_mci *host,
 712                struct atmel_mci_slot *slot)
 713{
 714        struct mmc_request      *mrq;
 715        struct mmc_command      *cmd;
 716        struct mmc_data         *data;
 717        u32                     iflags;
 718        u32                     cmdflags;
 719
 720        mrq = slot->mrq;
 721        host->cur_slot = slot;
 722        host->mrq = mrq;
 723
 724        host->pending_events = 0;
 725        host->completed_events = 0;
 726        host->data_status = 0;
 727
 728        if (host->need_reset) {
 729                mci_writel(host, CR, MCI_CR_SWRST);
 730                mci_writel(host, CR, MCI_CR_MCIEN);
 731                mci_writel(host, MR, host->mode_reg);
 732                host->need_reset = false;
 733        }
 734        mci_writel(host, SDCR, slot->sdc_reg);
 735
 736        iflags = mci_readl(host, IMR);
 737        if (iflags)
 738                dev_warn(&slot->mmc->class_dev, "WARNING: IMR=0x%08x\n",
 739                                iflags);
 740
 741        if (unlikely(test_and_clear_bit(ATMCI_CARD_NEED_INIT, &slot->flags))) {
 742                /* Send init sequence (74 clock cycles) */
 743                mci_writel(host, CMDR, MCI_CMDR_SPCMD_INIT);
 744                while (!(mci_readl(host, SR) & MCI_CMDRDY))
 745                        cpu_relax();
 746        }
 747        data = mrq->data;
 748        if (data) {
 749                atmci_set_timeout(host, slot, data);
 750
 751                /* Must set block count/size before sending command */
 752                mci_writel(host, BLKR, MCI_BCNT(data->blocks)
 753                                | MCI_BLKLEN(data->blksz));
 754                dev_vdbg(&slot->mmc->class_dev, "BLKR=0x%08x\n",
 755                        MCI_BCNT(data->blocks) | MCI_BLKLEN(data->blksz));
 756        }
 757
 758        iflags = MCI_CMDRDY;
 759        cmd = mrq->cmd;
 760        cmdflags = atmci_prepare_command(slot->mmc, cmd);
 761        atmci_start_command(host, cmd, cmdflags);
 762
 763        if (data)
 764                iflags |= atmci_submit_data(host, data);
 765
 766        if (mrq->stop) {
 767                host->stop_cmdr = atmci_prepare_command(slot->mmc, mrq->stop);
 768                host->stop_cmdr |= MCI_CMDR_STOP_XFER;
 769                if (!(data->flags & MMC_DATA_WRITE))
 770                        host->stop_cmdr |= MCI_CMDR_TRDIR_READ;
 771                if (data->flags & MMC_DATA_STREAM)
 772                        host->stop_cmdr |= MCI_CMDR_STREAM;
 773                else
 774                        host->stop_cmdr |= MCI_CMDR_MULTI_BLOCK;
 775        }
 776
 777        /*
 778         * We could have enabled interrupts earlier, but I suspect
 779         * that would open up a nice can of interesting race
 780         * conditions (e.g. command and data complete, but stop not
 781         * prepared yet.)
 782         */
 783        mci_writel(host, IER, iflags);
 784}
 785
 786static void atmci_queue_request(struct atmel_mci *host,
 787                struct atmel_mci_slot *slot, struct mmc_request *mrq)
 788{
 789        dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
 790                        host->state);
 791
 792        spin_lock_bh(&host->lock);
 793        slot->mrq = mrq;
 794        if (host->state == STATE_IDLE) {
 795                host->state = STATE_SENDING_CMD;
 796                atmci_start_request(host, slot);
 797        } else {
 798                list_add_tail(&slot->queue_node, &host->queue);
 799        }
 800        spin_unlock_bh(&host->lock);
 801}
 802
 803static void atmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
 804{
 805        struct atmel_mci_slot   *slot = mmc_priv(mmc);
 806        struct atmel_mci        *host = slot->host;
 807        struct mmc_data         *data;
 808
 809        WARN_ON(slot->mrq);
 810
 811        /*
 812         * We may "know" the card is gone even though there's still an
 813         * electrical connection. If so, we really need to communicate
 814         * this to the MMC core since there won't be any more
 815         * interrupts as the card is completely removed. Otherwise,
 816         * the MMC core might believe the card is still there even
 817         * though the card was just removed very slowly.
 818         */
 819        if (!test_bit(ATMCI_CARD_PRESENT, &slot->flags)) {
 820                mrq->cmd->error = -ENOMEDIUM;
 821                mmc_request_done(mmc, mrq);
 822                return;
 823        }
 824
 825        /* We don't support multiple blocks of weird lengths. */
 826        data = mrq->data;
 827        if (data && data->blocks > 1 && data->blksz & 3) {
 828                mrq->cmd->error = -EINVAL;
  829                mmc_request_done(mmc, mrq);
                     return;
  830        }
 831
 832        atmci_queue_request(host, slot, mrq);
 833}
 834
 835static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 836{
 837        struct atmel_mci_slot   *slot = mmc_priv(mmc);
 838        struct atmel_mci        *host = slot->host;
 839        unsigned int            i;
 840
 841        slot->sdc_reg &= ~MCI_SDCBUS_MASK;
 842        switch (ios->bus_width) {
 843        case MMC_BUS_WIDTH_1:
 844                slot->sdc_reg |= MCI_SDCBUS_1BIT;
 845                break;
 846        case MMC_BUS_WIDTH_4:
 847                slot->sdc_reg |= MCI_SDCBUS_4BIT;
 848                break;
 849        }
 850
 851        if (ios->clock) {
 852                unsigned int clock_min = ~0U;
 853                u32 clkdiv;
 854
 855                spin_lock_bh(&host->lock);
 856                if (!host->mode_reg) {
 857                        clk_enable(host->mck);
 858                        mci_writel(host, CR, MCI_CR_SWRST);
 859                        mci_writel(host, CR, MCI_CR_MCIEN);
 860                }
 861
 862                /*
 863                 * Use mirror of ios->clock to prevent race with mmc
 864                 * core ios update when finding the minimum.
 865                 */
 866                slot->clock = ios->clock;
 867                for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) {
 868                        if (host->slot[i] && host->slot[i]->clock
 869                                        && host->slot[i]->clock < clock_min)
 870                                clock_min = host->slot[i]->clock;
 871                }
 872
 873                /* Calculate clock divider */
 874                clkdiv = DIV_ROUND_UP(host->bus_hz, 2 * clock_min) - 1;
 875                if (clkdiv > 255) {
 876                        dev_warn(&mmc->class_dev,
 877                                "clock %u too slow; using %lu\n",
 878                                clock_min, host->bus_hz / (2 * 256));
 879                        clkdiv = 255;
 880                }
 881
 882                host->mode_reg = MCI_MR_CLKDIV(clkdiv);
 883
 884                /*
 885                 * WRPROOF and RDPROOF prevent overruns/underruns by
 886                 * stopping the clock when the FIFO is full/empty.
 887                 * This state is not expected to last for long.
 888                 */
 889                if (mci_has_rwproof())
 890                        host->mode_reg |= (MCI_MR_WRPROOF | MCI_MR_RDPROOF);
 891
 892                if (list_empty(&host->queue))
 893                        mci_writel(host, MR, host->mode_reg);
 894                else
 895                        host->need_clock_update = true;
 896
 897                spin_unlock_bh(&host->lock);
 898        } else {
 899                bool any_slot_active = false;
 900
 901                spin_lock_bh(&host->lock);
 902                slot->clock = 0;
 903                for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) {
 904                        if (host->slot[i] && host->slot[i]->clock) {
 905                                any_slot_active = true;
 906                                break;
 907                        }
 908                }
 909                if (!any_slot_active) {
 910                        mci_writel(host, CR, MCI_CR_MCIDIS);
 911                        if (host->mode_reg) {
 912                                mci_readl(host, MR);
 913                                clk_disable(host->mck);
 914                        }
 915                        host->mode_reg = 0;
 916                }
 917                spin_unlock_bh(&host->lock);
 918        }
 919
 920        switch (ios->power_mode) {
 921        case MMC_POWER_UP:
 922                set_bit(ATMCI_CARD_NEED_INIT, &slot->flags);
 923                break;
 924        default:
 925                /*
 926                 * TODO: None of the currently available AVR32-based
 927                 * boards allow MMC power to be turned off. Implement
 928                 * power control when this can be tested properly.
 929                 *
 930                 * We also need to hook this into the clock management
 931                 * somehow so that newly inserted cards aren't
 932                 * subjected to a fast clock before we have a chance
 933                 * to figure out what the maximum rate is. Currently,
 934                 * there's no way to avoid this, and there never will
 935                 * be for boards that don't support power control.
 936                 */
 937                break;
 938        }
 939}
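
/*
 * Worked clock example (illustrative only, assuming bus_hz = 66 MHz): a
 * request for 25 MHz gives clkdiv = DIV_ROUND_UP(66000000, 2 * 25000000) - 1
 * = 1, so the card clock is 66 MHz / (2 * (1 + 1)) = 16.5 MHz, the fastest
 * rate that does not exceed the request.
 */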
 940
 941static int atmci_get_ro(struct mmc_host *mmc)
 942{
 943        int                     read_only = -ENOSYS;
 944        struct atmel_mci_slot   *slot = mmc_priv(mmc);
 945
 946        if (gpio_is_valid(slot->wp_pin)) {
 947                read_only = gpio_get_value(slot->wp_pin);
 948                dev_dbg(&mmc->class_dev, "card is %s\n",
 949                                read_only ? "read-only" : "read-write");
 950        }
 951
 952        return read_only;
 953}
 954
 955static int atmci_get_cd(struct mmc_host *mmc)
 956{
 957        int                     present = -ENOSYS;
 958        struct atmel_mci_slot   *slot = mmc_priv(mmc);
 959
 960        if (gpio_is_valid(slot->detect_pin)) {
 961                present = !(gpio_get_value(slot->detect_pin) ^
 962                            slot->detect_is_active_high);
 963                dev_dbg(&mmc->class_dev, "card is %spresent\n",
 964                                present ? "" : "not ");
 965        }
 966
 967        return present;
 968}
 969
 970static const struct mmc_host_ops atmci_ops = {
 971        .request        = atmci_request,
 972        .set_ios        = atmci_set_ios,
 973        .get_ro         = atmci_get_ro,
 974        .get_cd         = atmci_get_cd,
 975};
 976
 977/* Called with host->lock held */
 978static void atmci_request_end(struct atmel_mci *host, struct mmc_request *mrq)
 979        __releases(&host->lock)
 980        __acquires(&host->lock)
 981{
 982        struct atmel_mci_slot   *slot = NULL;
 983        struct mmc_host         *prev_mmc = host->cur_slot->mmc;
 984
 985        WARN_ON(host->cmd || host->data);
 986
 987        /*
 988         * Update the MMC clock rate if necessary. This may be
 989         * necessary if set_ios() is called when a different slot is
  990         * busy transferring data.
 991         */
 992        if (host->need_clock_update)
 993                mci_writel(host, MR, host->mode_reg);
 994
 995        host->cur_slot->mrq = NULL;
 996        host->mrq = NULL;
 997        if (!list_empty(&host->queue)) {
 998                slot = list_entry(host->queue.next,
 999                                struct atmel_mci_slot, queue_node);
1000                list_del(&slot->queue_node);
1001                dev_vdbg(&host->pdev->dev, "list not empty: %s is next\n",
1002                                mmc_hostname(slot->mmc));
1003                host->state = STATE_SENDING_CMD;
1004                atmci_start_request(host, slot);
1005        } else {
1006                dev_vdbg(&host->pdev->dev, "list empty\n");
1007                host->state = STATE_IDLE;
1008        }
1009
1010        spin_unlock(&host->lock);
1011        mmc_request_done(prev_mmc, mrq);
1012        spin_lock(&host->lock);
1013}
1014
1015static void atmci_command_complete(struct atmel_mci *host,
1016                        struct mmc_command *cmd)
1017{
1018        u32             status = host->cmd_status;
1019
1020        /* Read the response from the card (up to 16 bytes) */
1021        cmd->resp[0] = mci_readl(host, RSPR);
1022        cmd->resp[1] = mci_readl(host, RSPR);
1023        cmd->resp[2] = mci_readl(host, RSPR);
1024        cmd->resp[3] = mci_readl(host, RSPR);
1025
1026        if (status & MCI_RTOE)
1027                cmd->error = -ETIMEDOUT;
1028        else if ((cmd->flags & MMC_RSP_CRC) && (status & MCI_RCRCE))
1029                cmd->error = -EILSEQ;
1030        else if (status & (MCI_RINDE | MCI_RDIRE | MCI_RENDE))
1031                cmd->error = -EIO;
1032        else
1033                cmd->error = 0;
1034
1035        if (cmd->error) {
1036                dev_dbg(&host->pdev->dev,
1037                        "command error: status=0x%08x\n", status);
1038
1039                if (cmd->data) {
1040                        host->data = NULL;
1041                        atmci_stop_dma(host);
1042                        mci_writel(host, IDR, MCI_NOTBUSY
1043                                        | MCI_TXRDY | MCI_RXRDY
1044                                        | ATMCI_DATA_ERROR_FLAGS);
1045                }
1046        }
1047}
1048
1049static void atmci_detect_change(unsigned long data)
1050{
1051        struct atmel_mci_slot   *slot = (struct atmel_mci_slot *)data;
1052        bool                    present;
1053        bool                    present_old;
1054
1055        /*
1056         * atmci_cleanup_slot() sets the ATMCI_SHUTDOWN flag before
1057         * freeing the interrupt. We must not re-enable the interrupt
1058         * if it has been freed, and if we're shutting down, it
1059         * doesn't really matter whether the card is present or not.
1060         */
1061        smp_rmb();
1062        if (test_bit(ATMCI_SHUTDOWN, &slot->flags))
1063                return;
1064
1065        enable_irq(gpio_to_irq(slot->detect_pin));
1066        present = !(gpio_get_value(slot->detect_pin) ^
1067                    slot->detect_is_active_high);
1068        present_old = test_bit(ATMCI_CARD_PRESENT, &slot->flags);
1069
1070        dev_vdbg(&slot->mmc->class_dev, "detect change: %d (was %d)\n",
1071                        present, present_old);
1072
1073        if (present != present_old) {
1074                struct atmel_mci        *host = slot->host;
1075                struct mmc_request      *mrq;
1076
1077                dev_dbg(&slot->mmc->class_dev, "card %s\n",
1078                        present ? "inserted" : "removed");
1079
1080                spin_lock(&host->lock);
1081
1082                if (!present)
1083                        clear_bit(ATMCI_CARD_PRESENT, &slot->flags);
1084                else
1085                        set_bit(ATMCI_CARD_PRESENT, &slot->flags);
1086
1087                /* Clean up queue if present */
1088                mrq = slot->mrq;
1089                if (mrq) {
1090                        if (mrq == host->mrq) {
1091                                /*
1092                                 * Reset controller to terminate any ongoing
1093                                 * commands or data transfers.
1094                                 */
1095                                mci_writel(host, CR, MCI_CR_SWRST);
1096                                mci_writel(host, CR, MCI_CR_MCIEN);
1097                                mci_writel(host, MR, host->mode_reg);
1098
1099                                host->data = NULL;
1100                                host->cmd = NULL;
1101
1102                                switch (host->state) {
1103                                case STATE_IDLE:
1104                                        break;
1105                                case STATE_SENDING_CMD:
1106                                        mrq->cmd->error = -ENOMEDIUM;
1107                                        if (!mrq->data)
1108                                                break;
1109                                        /* fall through */
1110                                case STATE_SENDING_DATA:
1111                                        mrq->data->error = -ENOMEDIUM;
1112                                        atmci_stop_dma(host);
1113                                        break;
1114                                case STATE_DATA_BUSY:
1115                                case STATE_DATA_ERROR:
1116                                        if (mrq->data->error == -EINPROGRESS)
1117                                                mrq->data->error = -ENOMEDIUM;
1118                                        if (!mrq->stop)
1119                                                break;
1120                                        /* fall through */
1121                                case STATE_SENDING_STOP:
1122                                        mrq->stop->error = -ENOMEDIUM;
1123                                        break;
1124                                }
1125
1126                                atmci_request_end(host, mrq);
1127                        } else {
1128                                list_del(&slot->queue_node);
1129                                mrq->cmd->error = -ENOMEDIUM;
1130                                if (mrq->data)
1131                                        mrq->data->error = -ENOMEDIUM;
1132                                if (mrq->stop)
1133                                        mrq->stop->error = -ENOMEDIUM;
1134
1135                                spin_unlock(&host->lock);
1136                                mmc_request_done(slot->mmc, mrq);
1137                                spin_lock(&host->lock);
1138                        }
1139                }
1140                spin_unlock(&host->lock);
1141
1142                mmc_detect_change(slot->mmc, 0);
1143        }
1144}
1145
1146static void atmci_tasklet_func(unsigned long priv)
1147{
1148        struct atmel_mci        *host = (struct atmel_mci *)priv;
1149        struct mmc_request      *mrq = host->mrq;
1150        struct mmc_data         *data = host->data;
1151        struct mmc_command      *cmd = host->cmd;
 1152        enum atmel_mci_state    state;
1153        enum atmel_mci_state    prev_state;
1154        u32                     status;
1155
1156        spin_lock(&host->lock);
1157
1158        state = host->state;
1159
1160        dev_vdbg(&host->pdev->dev,
1161                "tasklet: state %u pending/completed/mask %lx/%lx/%x\n",
1162                state, host->pending_events, host->completed_events,
1163                mci_readl(host, IMR));
1164
1165        do {
1166                prev_state = state;
1167
1168                switch (state) {
1169                case STATE_IDLE:
1170                        break;
1171
1172                case STATE_SENDING_CMD:
1173                        if (!atmci_test_and_clear_pending(host,
1174                                                EVENT_CMD_COMPLETE))
1175                                break;
1176
1177                        host->cmd = NULL;
1178                        atmci_set_completed(host, EVENT_CMD_COMPLETE);
1179                        atmci_command_complete(host, mrq->cmd);
1180                        if (!mrq->data || cmd->error) {
1181                                atmci_request_end(host, host->mrq);
1182                                goto unlock;
1183                        }
1184
1185                        prev_state = state = STATE_SENDING_DATA;
1186                        /* fall through */
1187
1188                case STATE_SENDING_DATA:
1189                        if (atmci_test_and_clear_pending(host,
1190                                                EVENT_DATA_ERROR)) {
1191                                atmci_stop_dma(host);
1192                                if (data->stop)
1193                                        send_stop_cmd(host, data);
1194                                state = STATE_DATA_ERROR;
1195                                break;
1196                        }
1197
1198                        if (!atmci_test_and_clear_pending(host,
1199                                                EVENT_XFER_COMPLETE))
1200                                break;
1201
1202                        atmci_set_completed(host, EVENT_XFER_COMPLETE);
1203                        prev_state = state = STATE_DATA_BUSY;
1204                        /* fall through */
1205
1206                case STATE_DATA_BUSY:
1207                        if (!atmci_test_and_clear_pending(host,
1208                                                EVENT_DATA_COMPLETE))
1209                                break;
1210
1211                        host->data = NULL;
1212                        atmci_set_completed(host, EVENT_DATA_COMPLETE);
1213                        status = host->data_status;
1214                        if (unlikely(status & ATMCI_DATA_ERROR_FLAGS)) {
1215                                if (status & MCI_DTOE) {
1216                                        dev_dbg(&host->pdev->dev,
1217                                                        "data timeout error\n");
1218                                        data->error = -ETIMEDOUT;
1219                                } else if (status & MCI_DCRCE) {
1220                                        dev_dbg(&host->pdev->dev,
1221                                                        "data CRC error\n");
1222                                        data->error = -EILSEQ;
1223                                } else {
1224                                        dev_dbg(&host->pdev->dev,
1225                                                "data FIFO error (status=%08x)\n",
1226                                                status);
1227                                        data->error = -EIO;
1228                                }
1229                        } else {
1230                                data->bytes_xfered = data->blocks * data->blksz;
1231                                data->error = 0;
1232                        }
1233
1234                        if (!data->stop) {
1235                                atmci_request_end(host, host->mrq);
1236                                goto unlock;
1237                        }
1238
1239                        prev_state = state = STATE_SENDING_STOP;
1240                        if (!data->error)
1241                                send_stop_cmd(host, data);
1242                        /* fall through */
1243
1244                case STATE_SENDING_STOP:
1245                        if (!atmci_test_and_clear_pending(host,
1246                                                EVENT_CMD_COMPLETE))
1247                                break;
1248
1249                        host->cmd = NULL;
1250                        atmci_command_complete(host, mrq->stop);
1251                        atmci_request_end(host, host->mrq);
1252                        goto unlock;
1253
1254                case STATE_DATA_ERROR:
1255                        if (!atmci_test_and_clear_pending(host,
1256                                                EVENT_XFER_COMPLETE))
1257                                break;
1258
1259                        state = STATE_DATA_BUSY;
1260                        break;
1261                }
1262        } while (state != prev_state);
1263
1264        host->state = state;
1265
1266unlock:
1267        spin_unlock(&host->lock);
1268}
1269
1270static void atmci_read_data_pio(struct atmel_mci *host)
1271{
1272        struct scatterlist      *sg = host->sg;
1273        void                    *buf = sg_virt(sg);
1274        unsigned int            offset = host->pio_offset;
1275        struct mmc_data         *data = host->data;
1276        u32                     value;
1277        u32                     status;
1278        unsigned int            nbytes = 0;
1279
1280        do {
1281                value = mci_readl(host, RDR);
1282                if (likely(offset + 4 <= sg->length)) {
1283                        put_unaligned(value, (u32 *)(buf + offset));
1284
1285                        offset += 4;
1286                        nbytes += 4;
1287
1288                        if (offset == sg->length) {
1289                                flush_dcache_page(sg_page(sg));
1290                                host->sg = sg = sg_next(sg);
1291                                if (!sg)
1292                                        goto done;
1293
1294                                offset = 0;
1295                                buf = sg_virt(sg);
1296                        }
1297                } else {
1298                        unsigned int remaining = sg->length - offset;
1299                        memcpy(buf + offset, &value, remaining);
1300                        nbytes += remaining;
1301
1302                        flush_dcache_page(sg_page(sg));
1303                        host->sg = sg = sg_next(sg);
1304                        if (!sg)
1305                                goto done;
1306
1307                        offset = 4 - remaining;
1308                        buf = sg_virt(sg);
1309                        memcpy(buf, (u8 *)&value + remaining, offset);
1310                        nbytes += offset;
1311                }
1312
1313                status = mci_readl(host, SR);
1314                if (status & ATMCI_DATA_ERROR_FLAGS) {
1315                        mci_writel(host, IDR, (MCI_NOTBUSY | MCI_RXRDY
1316                                                | ATMCI_DATA_ERROR_FLAGS));
1317                        host->data_status = status;
1318                        data->bytes_xfered += nbytes;
1319                        smp_wmb();
1320                        atmci_set_pending(host, EVENT_DATA_ERROR);
1321                        tasklet_schedule(&host->tasklet);
1322                        return;
1323                }
1324        } while (status & MCI_RXRDY);
1325
1326        host->pio_offset = offset;
1327        data->bytes_xfered += nbytes;
1328
1329        return;
1330
1331done:
1332        mci_writel(host, IDR, MCI_RXRDY);
1333        mci_writel(host, IER, MCI_NOTBUSY);
1334        data->bytes_xfered += nbytes;
1335        smp_wmb();
1336        atmci_set_pending(host, EVENT_XFER_COMPLETE);
1337}
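
/*
 * Note on the unaligned tail handled above (illustrative): with
 * sg->length = 6 and offset = 4, only remaining = 2 bytes fit in the current
 * entry, so the first two bytes of the 32-bit word read from RDR (as stored
 * in memory) finish off this entry and the other two bytes are copied to the
 * start of the next scatterlist entry.
 */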
1338
1339static void atmci_write_data_pio(struct atmel_mci *host)
1340{
1341        struct scatterlist      *sg = host->sg;
1342        void                    *buf = sg_virt(sg);
1343        unsigned int            offset = host->pio_offset;
1344        struct mmc_data         *data = host->data;
1345        u32                     value;
1346        u32                     status;
1347        unsigned int            nbytes = 0;
1348
1349        do {
1350                if (likely(offset + 4 <= sg->length)) {
1351                        value = get_unaligned((u32 *)(buf + offset));
1352                        mci_writel(host, TDR, value);
1353
1354                        offset += 4;
1355                        nbytes += 4;
1356                        if (offset == sg->length) {
1357                                host->sg = sg = sg_next(sg);
1358                                if (!sg)
1359                                        goto done;
1360
1361                                offset = 0;
1362                                buf = sg_virt(sg);
1363                        }
1364                } else {
1365                        unsigned int remaining = sg->length - offset;
1366
1367                        value = 0;
1368                        memcpy(&value, buf + offset, remaining);
1369                        nbytes += remaining;
1370
1371                        host->sg = sg = sg_next(sg);
1372                        if (!sg) {
1373                                mci_writel(host, TDR, value);
1374                                goto done;
1375                        }
1376
1377                        offset = 4 - remaining;
1378                        buf = sg_virt(sg);
1379                        memcpy((u8 *)&value + remaining, buf, offset);
1380                        mci_writel(host, TDR, value);
1381                        nbytes += offset;
1382                }
1383
1384                status = mci_readl(host, SR);
1385                if (status & ATMCI_DATA_ERROR_FLAGS) {
1386                        mci_writel(host, IDR, (MCI_NOTBUSY | MCI_TXRDY
1387                                                | ATMCI_DATA_ERROR_FLAGS));
1388                        host->data_status = status;
1389                        data->bytes_xfered += nbytes;
1390                        smp_wmb();
1391                        atmci_set_pending(host, EVENT_DATA_ERROR);
1392                        tasklet_schedule(&host->tasklet);
1393                        return;
1394                }
1395        } while (status & MCI_TXRDY);
1396
1397        host->pio_offset = offset;
1398        data->bytes_xfered += nbytes;
1399
1400        return;
1401
1402done:
1403        mci_writel(host, IDR, MCI_TXRDY);
1404        mci_writel(host, IER, MCI_NOTBUSY);
1405        data->bytes_xfered += nbytes;
1406        smp_wmb();
1407        atmci_set_pending(host, EVENT_XFER_COMPLETE);
1408}
1409
1410static void atmci_cmd_interrupt(struct atmel_mci *host, u32 status)
1411{
1412        mci_writel(host, IDR, MCI_CMDRDY);
1413
1414        host->cmd_status = status;
1415        smp_wmb();
1416        atmci_set_pending(host, EVENT_CMD_COMPLETE);
1417        tasklet_schedule(&host->tasklet);
1418}
1419
1420static irqreturn_t atmci_interrupt(int irq, void *dev_id)
1421{
1422        struct atmel_mci        *host = dev_id;
1423        u32                     status, mask, pending;
1424        unsigned int            pass_count = 0;
1425
1426        do {
1427                status = mci_readl(host, SR);
1428                mask = mci_readl(host, IMR);
1429                pending = status & mask;
1430                if (!pending)
1431                        break;
1432
1433                if (pending & ATMCI_DATA_ERROR_FLAGS) {
1434                        mci_writel(host, IDR, ATMCI_DATA_ERROR_FLAGS
1435                                        | MCI_RXRDY | MCI_TXRDY);
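                        /* Drop the sources we just disabled from this pass */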
1436                        pending &= mci_readl(host, IMR);
1437
1438                        host->data_status = status;
1439                        smp_wmb();
1440                        atmci_set_pending(host, EVENT_DATA_ERROR);
1441                        tasklet_schedule(&host->tasklet);
1442                }
1443                if (pending & MCI_NOTBUSY) {
1444                        mci_writel(host, IDR,
1445                                        ATMCI_DATA_ERROR_FLAGS | MCI_NOTBUSY);
1446                        if (!host->data_status)
1447                                host->data_status = status;
1448                        smp_wmb();
1449                        atmci_set_pending(host, EVENT_DATA_COMPLETE);
1450                        tasklet_schedule(&host->tasklet);
1451                }
1452                if (pending & MCI_RXRDY)
1453                        atmci_read_data_pio(host);
1454                if (pending & MCI_TXRDY)
1455                        atmci_write_data_pio(host);
1456
1457                if (pending & MCI_CMDRDY)
1458                        atmci_cmd_interrupt(host, status);
1459        } while (pass_count++ < 5);
1460
1461        return pass_count ? IRQ_HANDLED : IRQ_NONE;
1462}
1463
1464static irqreturn_t atmci_detect_interrupt(int irq, void *dev_id)
1465{
1466        struct atmel_mci_slot   *slot = dev_id;
1467
1468        /*
1469         * Disable interrupts until the pin has stabilized, then check
1470         * its state. Use mod_timer() since we may be in the
1471         * middle of the timer routine when this interrupt triggers.
1472         */
1473        disable_irq_nosync(irq);
1474        mod_timer(&slot->detect_timer, jiffies + msecs_to_jiffies(20));
1475
1476        return IRQ_HANDLED;
1477}
1478
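/*
 * Set up one card slot: allocate its mmc_host, apply the board-supplied
 * bus width and card-detect/write-protect pins, register the host and,
 * if a detect pin is available, hook up the debounced detect interrupt.
 */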
1479static int __init atmci_init_slot(struct atmel_mci *host,
1480                struct mci_slot_pdata *slot_data, unsigned int id,
1481                u32 sdc_reg)
1482{
1483        struct mmc_host                 *mmc;
1484        struct atmel_mci_slot           *slot;
1485
1486        mmc = mmc_alloc_host(sizeof(struct atmel_mci_slot), &host->pdev->dev);
1487        if (!mmc)
1488                return -ENOMEM;
1489
1490        slot = mmc_priv(mmc);
1491        slot->mmc = mmc;
1492        slot->host = host;
1493        slot->detect_pin = slot_data->detect_pin;
1494        slot->wp_pin = slot_data->wp_pin;
1495        slot->detect_is_active_high = slot_data->detect_is_active_high;
1496        slot->sdc_reg = sdc_reg;
1497
1498        mmc->ops = &atmci_ops;
1499        mmc->f_min = DIV_ROUND_UP(host->bus_hz, 512);
1500        mmc->f_max = host->bus_hz / 2;
1501        mmc->ocr_avail  = MMC_VDD_32_33 | MMC_VDD_33_34;
1502        if (slot_data->bus_width >= 4)
1503                mmc->caps |= MMC_CAP_4_BIT_DATA;
1504
1505        mmc->max_hw_segs = 64;
1506        mmc->max_phys_segs = 64;
1507        mmc->max_req_size = 32768 * 512;
1508        mmc->max_blk_size = 32768;
1509        mmc->max_blk_count = 512;
1510
1511        /* Assume card is present initially */
1512        set_bit(ATMCI_CARD_PRESENT, &slot->flags);
1513        if (gpio_is_valid(slot->detect_pin)) {
1514                if (gpio_request(slot->detect_pin, "mmc_detect")) {
1515                        dev_dbg(&mmc->class_dev, "no detect pin available\n");
1516                        slot->detect_pin = -EBUSY;
1517                } else if (gpio_get_value(slot->detect_pin) ^
1518                                slot->detect_is_active_high) {
1519                        clear_bit(ATMCI_CARD_PRESENT, &slot->flags);
1520                }
1521        }
1522
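        /* Without a usable detect pin the MMC core has to poll for cards */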
1523        if (!gpio_is_valid(slot->detect_pin))
1524                mmc->caps |= MMC_CAP_NEEDS_POLL;
1525
1526        if (gpio_is_valid(slot->wp_pin)) {
1527                if (gpio_request(slot->wp_pin, "mmc_wp")) {
1528                        dev_dbg(&mmc->class_dev, "no WP pin available\n");
1529                        slot->wp_pin = -EBUSY;
1530                }
1531        }
1532
1533        host->slot[id] = slot;
1534        mmc_add_host(mmc);
1535
1536        if (gpio_is_valid(slot->detect_pin)) {
1537                int ret;
1538
1539                setup_timer(&slot->detect_timer, atmci_detect_change,
1540                                (unsigned long)slot);
1541
1542                ret = request_irq(gpio_to_irq(slot->detect_pin),
1543                                atmci_detect_interrupt,
1544                                IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
1545                                "mmc-detect", slot);
1546                if (ret) {
1547                        dev_dbg(&mmc->class_dev,
1548                                "could not request IRQ %d for detect pin\n",
1549                                gpio_to_irq(slot->detect_pin));
1550                        gpio_free(slot->detect_pin);
1551                        slot->detect_pin = -EBUSY;
1552                }
1553        }
1554
1555        atmci_init_debugfs(slot);
1556
1557        return 0;
1558}
1559
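/*
 * Undo atmci_init_slot(): unregister the mmc_host and release the
 * detect/WP GPIOs along with the detect IRQ and debounce timer.
 */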
1560static void __exit atmci_cleanup_slot(struct atmel_mci_slot *slot,
1561                unsigned int id)
1562{
1563        /* Debugfs stuff is cleaned up by mmc core */
1564
1565        set_bit(ATMCI_SHUTDOWN, &slot->flags);
1566        smp_wmb();
1567
1568        mmc_remove_host(slot->mmc);
1569
1570        if (gpio_is_valid(slot->detect_pin)) {
1571                int pin = slot->detect_pin;
1572
1573                free_irq(gpio_to_irq(pin), slot);
1574                del_timer_sync(&slot->detect_timer);
1575                gpio_free(pin);
1576        }
1577        if (gpio_is_valid(slot->wp_pin))
1578                gpio_free(slot->wp_pin);
1579
1580        slot->host->slot[id] = NULL;
1581        mmc_free_host(slot->mmc);
1582}
1583
1584#ifdef CONFIG_MMC_ATMELMCI_DMA
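/*
 * dmaengine channel filter: accept only channels provided by the DMA
 * controller named in the board's dw_dma_slave data and attach that
 * slave configuration via chan->private.
 */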
1585static bool filter(struct dma_chan *chan, void *slave)
1586{
1587        struct dw_dma_slave *dws = slave;
1588
1589        if (dws->dma_dev == chan->device->dev) {
1590                chan->private = dws;
1591                return true;
1592        }
1593        return false;
1594}
1595#endif
1596
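/*
 * Probe relies on the board code attaching a struct mci_platform_data
 * to the "atmel_mci" platform device, describing up to two slots.
 * Purely as an illustration (the field values and GPIO number below are
 * hypothetical, not taken from any particular board):
 *
 *	static struct mci_platform_data mci_data = {
 *		.slot[0] = {
 *			.bus_width		= 4,
 *			.detect_pin		= GPIO_PIN_EXAMPLE,
 *			.wp_pin			= -1,
 *			.detect_is_active_high	= false,
 *		},
 *	};
 *
 * Slots with bus_width == 0 are treated as absent; at least one slot
 * must initialize successfully for probe to succeed.
 */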
1597static int __init atmci_probe(struct platform_device *pdev)
1598{
1599        struct mci_platform_data        *pdata;
1600        struct atmel_mci                *host;
1601        struct resource                 *regs;
1602        unsigned int                    nr_slots;
1603        int                             irq;
1604        int                             ret;
1605
1606        regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1607        if (!regs)
1608                return -ENXIO;
1609        pdata = pdev->dev.platform_data;
1610        if (!pdata)
1611                return -ENXIO;
1612        irq = platform_get_irq(pdev, 0);
1613        if (irq < 0)
1614                return irq;
1615
1616        host = kzalloc(sizeof(struct atmel_mci), GFP_KERNEL);
1617        if (!host)
1618                return -ENOMEM;
1619
1620        host->pdev = pdev;
1621        spin_lock_init(&host->lock);
1622        INIT_LIST_HEAD(&host->queue);
1623
1624        host->mck = clk_get(&pdev->dev, "mci_clk");
1625        if (IS_ERR(host->mck)) {
1626                ret = PTR_ERR(host->mck);
1627                goto err_clk_get;
1628        }
1629
1630        ret = -ENOMEM;
1631        host->regs = ioremap(regs->start, regs->end - regs->start + 1);
1632        if (!host->regs)
1633                goto err_ioremap;
1634
1635        clk_enable(host->mck);
1636        mci_writel(host, CR, MCI_CR_SWRST);
1637        host->bus_hz = clk_get_rate(host->mck);
1638        clk_disable(host->mck);
1639
1640        host->mapbase = regs->start;
1641
1642        tasklet_init(&host->tasklet, atmci_tasklet_func, (unsigned long)host);
1643
1644        ret = request_irq(irq, atmci_interrupt, 0, dev_name(&pdev->dev), host);
1645        if (ret)
1646                goto err_request_irq;
1647
1648#ifdef CONFIG_MMC_ATMELMCI_DMA
1649        if (pdata->dma_slave.dma_dev) {
1650                struct dw_dma_slave *dws = &pdata->dma_slave;
1651                dma_cap_mask_t mask;
1652
1653                dws->tx_reg = regs->start + MCI_TDR;
1654                dws->rx_reg = regs->start + MCI_RDR;
1655
1656                /* Try to grab a DMA channel */
1657                dma_cap_zero(mask);
1658                dma_cap_set(DMA_SLAVE, mask);
1659                host->dma.chan = dma_request_channel(mask, filter, dws);
1660        }
1661        if (!host->dma.chan)
1662                dev_notice(&pdev->dev, "DMA not available, using PIO\n");
1663#endif /* CONFIG_MMC_ATMELMCI_DMA */
1664
1665        platform_set_drvdata(pdev, host);
1666
1667        /* We need at least one slot to succeed */
1668        nr_slots = 0;
1669        ret = -ENODEV;
1670        if (pdata->slot[0].bus_width) {
1671                ret = atmci_init_slot(host, &pdata->slot[0],
1672                                0, MCI_SDCSEL_SLOT_A);
1673                if (!ret)
1674                        nr_slots++;
1675        }
1676        if (pdata->slot[1].bus_width) {
1677                ret = atmci_init_slot(host, &pdata->slot[1],
1678                                1, MCI_SDCSEL_SLOT_B);
1679                if (!ret)
1680                        nr_slots++;
1681        }
1682
1683        if (!nr_slots) {
1684                dev_err(&pdev->dev, "init failed: no slot defined\n");
1685                goto err_init_slot;
1686        }
1687
1688        dev_info(&pdev->dev,
1689                        "Atmel MCI controller at 0x%08lx irq %d, %u slots\n",
1690                        host->mapbase, irq, nr_slots);
1691
1692        return 0;
1693
1694err_init_slot:
1695#ifdef CONFIG_MMC_ATMELMCI_DMA
1696        if (host->dma.chan)
1697                dma_release_channel(host->dma.chan);
1698#endif
1699        free_irq(irq, host);
1700err_request_irq:
1701        iounmap(host->regs);
1702err_ioremap:
1703        clk_put(host->mck);
1704err_clk_get:
1705        kfree(host);
1706        return ret;
1707}
1708
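/*
 * Tear down in roughly the reverse order of probe: remove the slots,
 * disable the controller (with its clock briefly enabled), then release
 * the DMA channel, IRQ, MMIO mapping, clock and host structure.
 */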
1709static int __exit atmci_remove(struct platform_device *pdev)
1710{
1711        struct atmel_mci        *host = platform_get_drvdata(pdev);
1712        unsigned int            i;
1713
1714        platform_set_drvdata(pdev, NULL);
1715
1716        for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) {
1717                if (host->slot[i])
1718                        atmci_cleanup_slot(host->slot[i], i);
1719        }
1720
1721        clk_enable(host->mck);
1722        mci_writel(host, IDR, ~0UL);
1723        mci_writel(host, CR, MCI_CR_MCIDIS);
1724        mci_readl(host, SR);
1725        clk_disable(host->mck);
1726
1727#ifdef CONFIG_MMC_ATMELMCI_DMA
1728        if (host->dma.chan)
1729                dma_release_channel(host->dma.chan);
1730#endif
1731
1732        free_irq(platform_get_irq(pdev, 0), host);
1733        iounmap(host->regs);
1734
1735        clk_put(host->mck);
1736        kfree(host);
1737
1738        return 0;
1739}
1740
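/*
 * No .probe member here: registration goes through platform_driver_probe()
 * below, which lets atmci_probe() live in the __init section.
 */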
1741static struct platform_driver atmci_driver = {
1742        .remove         = __exit_p(atmci_remove),
1743        .driver         = {
1744                .name           = "atmel_mci",
1745        },
1746};
1747
1748static int __init atmci_init(void)
1749{
1750        return platform_driver_probe(&atmci_driver, atmci_probe);
1751}
1752
1753static void __exit atmci_exit(void)
1754{
1755        platform_driver_unregister(&atmci_driver);
1756}
1757
1758late_initcall(atmci_init); /* try to load after dma driver when built-in */
1759module_exit(atmci_exit);
1760
1761MODULE_DESCRIPTION("Atmel Multimedia Card Interface driver");
1762MODULE_AUTHOR("Haavard Skinnemoen <haavard.skinnemoen@atmel.com>");
1763MODULE_LICENSE("GPL v2");
1764