linux/drivers/mmc/host/atmel-mci.c
   1/*
   2 * Atmel MultiMedia Card Interface driver
   3 *
   4 * Copyright (C) 2004-2008 Atmel Corporation
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License version 2 as
   8 * published by the Free Software Foundation.
   9 */
  10#include <linux/blkdev.h>
  11#include <linux/clk.h>
  12#include <linux/debugfs.h>
  13#include <linux/device.h>
  14#include <linux/dmaengine.h>
  15#include <linux/dma-mapping.h>
  16#include <linux/err.h>
  17#include <linux/gpio.h>
  18#include <linux/init.h>
  19#include <linux/interrupt.h>
  20#include <linux/io.h>
  21#include <linux/ioport.h>
  22#include <linux/module.h>
  23#include <linux/of.h>
  24#include <linux/of_device.h>
  25#include <linux/of_gpio.h>
  26#include <linux/platform_device.h>
  27#include <linux/scatterlist.h>
  28#include <linux/seq_file.h>
  29#include <linux/slab.h>
  30#include <linux/stat.h>
  31#include <linux/types.h>
  32#include <linux/platform_data/atmel.h>
  33
  34#include <linux/mmc/host.h>
  35#include <linux/mmc/sdio.h>
  36
  37#include <mach/atmel-mci.h>
  38#include <linux/atmel-mci.h>
  39#include <linux/atmel_pdc.h>
  40
  41#include <asm/cacheflush.h>
  42#include <asm/io.h>
  43#include <asm/unaligned.h>
  44
  45#include "atmel-mci-regs.h"
  46
  47#define ATMCI_DATA_ERROR_FLAGS  (ATMCI_DCRCE | ATMCI_DTOE | ATMCI_OVRE | ATMCI_UNRE)
  48#define ATMCI_DMA_THRESHOLD     16
  49
  50enum {
  51        EVENT_CMD_RDY = 0,
  52        EVENT_XFER_COMPLETE,
  53        EVENT_NOTBUSY,
  54        EVENT_DATA_ERROR,
  55};
  56
  57enum atmel_mci_state {
  58        STATE_IDLE = 0,
  59        STATE_SENDING_CMD,
  60        STATE_DATA_XFER,
  61        STATE_WAITING_NOTBUSY,
  62        STATE_SENDING_STOP,
  63        STATE_END_REQUEST,
  64};
  65
  66enum atmci_xfer_dir {
  67        XFER_RECEIVE = 0,
  68        XFER_TRANSMIT,
  69};
  70
  71enum atmci_pdc_buf {
  72        PDC_FIRST_BUF = 0,
  73        PDC_SECOND_BUF,
  74};
  75
  76struct atmel_mci_caps {
  77        bool    has_dma_conf_reg;
  78        bool    has_pdc;
  79        bool    has_cfg_reg;
  80        bool    has_cstor_reg;
  81        bool    has_highspeed;
  82        bool    has_rwproof;
  83        bool    has_odd_clk_div;
  84        bool    has_bad_data_ordering;
  85        bool    need_reset_after_xfer;
  86        bool    need_blksz_mul_4;
  87        bool    need_notbusy_for_read_ops;
  88};
  89
  90struct atmel_mci_dma {
  91        struct dma_chan                 *chan;
  92        struct dma_async_tx_descriptor  *data_desc;
  93};
  94
  95/**
  96 * struct atmel_mci - MMC controller state shared between all slots
  97 * @lock: Spinlock protecting the queue and associated data.
  98 * @regs: Pointer to MMIO registers.
  99 * @sg: Scatterlist entry currently being processed by PIO or PDC code.
 100 * @pio_offset: Offset into the current scatterlist entry.
 101 * @buffer: Buffer used if we don't have the r/w proof capability. We
 102 *      don't have time to switch pdc buffers, so we have to use a
 103 *      single buffer for the full transaction.
 104 * @buf_size: size of the buffer.
 105 * @buf_phys_addr: buffer address needed for pdc.
 106 * @cur_slot: The slot which is currently using the controller.
 107 * @mrq: The request currently being processed on @cur_slot,
 108 *      or NULL if the controller is idle.
 109 * @cmd: The command currently being sent to the card, or NULL.
 110 * @data: The data currently being transferred, or NULL if no data
 111 *      transfer is in progress.
 112 * @data_size: just data->blocks * data->blksz.
 113 * @dma: DMA client state.
 114 * @data_chan: DMA channel being used for the current data transfer.
 115 * @cmd_status: Snapshot of SR taken upon completion of the current
 116 *      command. Only valid when EVENT_CMD_RDY is pending.
 117 * @data_status: Snapshot of SR taken upon completion of the current
 118 *      data transfer. Only valid when EVENT_NOTBUSY or
 119 *      EVENT_DATA_ERROR is pending.
 120 * @stop_cmdr: Value to be loaded into CMDR when the stop command is
 121 *      to be sent.
 122 * @tasklet: Tasklet running the request state machine.
 123 * @pending_events: Bitmask of events flagged by the interrupt handler
 124 *      to be processed by the tasklet.
 125 * @completed_events: Bitmask of events which the state machine has
 126 *      processed.
 127 * @state: Tasklet state.
 128 * @queue: List of slots waiting for access to the controller.
 129 * @need_clock_update: Update the clock rate before the next request.
 130 * @need_reset: Reset controller before next request.
 131 * @timer: Software timeout timer, used when the hardware data timeout error flag fails to rise.
 132 * @mode_reg: Value of the MR register.
 133 * @cfg_reg: Value of the CFG register.
 134 * @bus_hz: The rate of @mck in Hz. This forms the basis for MMC bus
 135 *      rate and timeout calculations.
 136 * @mapbase: Physical address of the MMIO registers.
 137 * @mck: The peripheral bus clock hooked up to the MMC controller.
 138 * @pdev: Platform device associated with the MMC controller.
 139 * @slot: Slots sharing this MMC controller.
 140 * @caps: MCI capabilities depending on MCI version.
 141 * @prepare_data: function to set up the MCI before a data transfer;
 142 *      depends on the MCI capabilities.
 143 * @submit_data: function to start a data transfer; depends on the MCI
 144 *      capabilities.
 145 * @stop_transfer: function to stop a data transfer; depends on the MCI
 146 *      capabilities.
 147 *
 148 * Locking
 149 * =======
 150 *
 151 * @lock is a softirq-safe spinlock protecting @queue as well as
 152 * @cur_slot, @mrq and @state. These must always be updated
 153 * at the same time while holding @lock.
 154 *
 155 * @lock also protects mode_reg and need_clock_update since these are
 156 * used to synchronize mode register updates with the queue
 157 * processing.
 158 *
 159 * The @mrq field of struct atmel_mci_slot is also protected by @lock,
 160 * and must always be written at the same time as the slot is added to
 161 * @queue.
 162 *
 163 * @pending_events and @completed_events are accessed using atomic bit
 164 * operations, so they don't need any locking.
 165 *
 166 * None of the fields touched by the interrupt handler need any
 167 * locking. However, ordering is important: Before EVENT_DATA_ERROR or
 168 * EVENT_NOTBUSY is set in @pending_events, all data-related
 169 * interrupts must be disabled and @data_status updated with a
 170 * snapshot of SR. Similarly, before EVENT_CMD_RDY is set, the
 171 * CMDRDY interrupt must be disabled and @cmd_status updated with a
 172 * snapshot of SR, and before EVENT_XFER_COMPLETE can be set, the
 173 * bytes_xfered field of @data must be written. This is ensured by
 174 * using barriers.
 175 */
 176struct atmel_mci {
 177        spinlock_t              lock;
 178        void __iomem            *regs;
 179
 180        struct scatterlist      *sg;
 181        unsigned int            sg_len;
 182        unsigned int            pio_offset;
 183        unsigned int            *buffer;
 184        unsigned int            buf_size;
 185        dma_addr_t              buf_phys_addr;
 186
 187        struct atmel_mci_slot   *cur_slot;
 188        struct mmc_request      *mrq;
 189        struct mmc_command      *cmd;
 190        struct mmc_data         *data;
 191        unsigned int            data_size;
 192
 193        struct atmel_mci_dma    dma;
 194        struct dma_chan         *data_chan;
 195        struct dma_slave_config dma_conf;
 196
 197        u32                     cmd_status;
 198        u32                     data_status;
 199        u32                     stop_cmdr;
 200
 201        struct tasklet_struct   tasklet;
 202        unsigned long           pending_events;
 203        unsigned long           completed_events;
 204        enum atmel_mci_state    state;
 205        struct list_head        queue;
 206
 207        bool                    need_clock_update;
 208        bool                    need_reset;
 209        struct timer_list       timer;
 210        u32                     mode_reg;
 211        u32                     cfg_reg;
 212        unsigned long           bus_hz;
 213        unsigned long           mapbase;
 214        struct clk              *mck;
 215        struct platform_device  *pdev;
 216
 217        struct atmel_mci_slot   *slot[ATMCI_MAX_NR_SLOTS];
 218
 219        struct atmel_mci_caps   caps;
 220
 221        u32 (*prepare_data)(struct atmel_mci *host, struct mmc_data *data);
 222        void (*submit_data)(struct atmel_mci *host, struct mmc_data *data);
 223        void (*stop_transfer)(struct atmel_mci *host);
 224};
 225
 226/**
 227 * struct atmel_mci_slot - MMC slot state
 228 * @mmc: The mmc_host representing this slot.
 229 * @host: The MMC controller this slot is using.
 230 * @sdc_reg: Value of SDCR to be written before using this slot.
 231 * @sdio_irq: SDIO irq mask for this slot.
 232 * @mrq: mmc_request currently being processed or waiting to be
 233 *      processed, or NULL when the slot is idle.
 234 * @queue_node: List node for placing this node in the @queue list of
 235 *      &struct atmel_mci.
 236 * @clock: Clock rate configured by set_ios(). Protected by host->lock.
 237 * @flags: Random state bits associated with the slot.
 238 * @detect_pin: GPIO pin used for card detection, or negative if not
 239 *      available.
 240 * @wp_pin: GPIO pin used for card write protect sensing, or negative
 241 *      if not available.
 242 * @detect_is_active_high: The state of the detect pin when it is active.
 243 * @detect_timer: Timer used for debouncing @detect_pin interrupts.
 244 */
 245struct atmel_mci_slot {
 246        struct mmc_host         *mmc;
 247        struct atmel_mci        *host;
 248
 249        u32                     sdc_reg;
 250        u32                     sdio_irq;
 251
 252        struct mmc_request      *mrq;
 253        struct list_head        queue_node;
 254
 255        unsigned int            clock;
 256        unsigned long           flags;
 257#define ATMCI_CARD_PRESENT      0
 258#define ATMCI_CARD_NEED_INIT    1
 259#define ATMCI_SHUTDOWN          2
 260
 261        int                     detect_pin;
 262        int                     wp_pin;
 263        bool                    detect_is_active_high;
 264
 265        struct timer_list       detect_timer;
 266};
 267
 268#define atmci_test_and_clear_pending(host, event)               \
 269        test_and_clear_bit(event, &host->pending_events)
 270#define atmci_set_completed(host, event)                        \
 271        set_bit(event, &host->completed_events)
 272#define atmci_set_pending(host, event)                          \
 273        set_bit(event, &host->pending_events)
 274
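/*
 * Illustrative sketch (not part of the driver): the helpers above are used
 * as a producer/consumer pair. An interrupt handler or DMA callback snapshots
 * the status register, marks the event pending and schedules the tasklet; the
 * tasklet consumes events with test-and-clear. The function names below are
 * made up and only mirror the pattern used later in this file.
 */
#if 0
static void example_mark_data_error(struct atmel_mci *host, u32 status)
{
        host->data_status = status;             /* snapshot SR first */
        smp_wmb();                              /* order it before the event bit */
        atmci_set_pending(host, EVENT_DATA_ERROR);
        tasklet_schedule(&host->tasklet);
}

static void example_consume_data_error(struct atmel_mci *host)
{
        if (atmci_test_and_clear_pending(host, EVENT_DATA_ERROR))
                atmci_set_completed(host, EVENT_DATA_ERROR);
}
#endif
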
 275/*
 276 * The debugfs stuff below is mostly optimized away when
 277 * CONFIG_DEBUG_FS is not set.
 278 */
 279static int atmci_req_show(struct seq_file *s, void *v)
 280{
 281        struct atmel_mci_slot   *slot = s->private;
 282        struct mmc_request      *mrq;
 283        struct mmc_command      *cmd;
 284        struct mmc_command      *stop;
 285        struct mmc_data         *data;
 286
 287        /* Make sure we get a consistent snapshot */
 288        spin_lock_bh(&slot->host->lock);
 289        mrq = slot->mrq;
 290
 291        if (mrq) {
 292                cmd = mrq->cmd;
 293                data = mrq->data;
 294                stop = mrq->stop;
 295
 296                if (cmd)
 297                        seq_printf(s,
 298                                "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
 299                                cmd->opcode, cmd->arg, cmd->flags,
 300                                cmd->resp[0], cmd->resp[1], cmd->resp[2],
 301                                cmd->resp[3], cmd->error);
 302                if (data)
 303                        seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
 304                                data->bytes_xfered, data->blocks,
 305                                data->blksz, data->flags, data->error);
 306                if (stop)
 307                        seq_printf(s,
 308                                "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
 309                                stop->opcode, stop->arg, stop->flags,
 310                                stop->resp[0], stop->resp[1], stop->resp[2],
 311                                stop->resp[3], stop->error);
 312        }
 313
 314        spin_unlock_bh(&slot->host->lock);
 315
 316        return 0;
 317}
 318
 319static int atmci_req_open(struct inode *inode, struct file *file)
 320{
 321        return single_open(file, atmci_req_show, inode->i_private);
 322}
 323
 324static const struct file_operations atmci_req_fops = {
 325        .owner          = THIS_MODULE,
 326        .open           = atmci_req_open,
 327        .read           = seq_read,
 328        .llseek         = seq_lseek,
 329        .release        = single_release,
 330};
 331
 332static void atmci_show_status_reg(struct seq_file *s,
 333                const char *regname, u32 value)
 334{
 335        static const char       *sr_bit[] = {
 336                [0]     = "CMDRDY",
 337                [1]     = "RXRDY",
 338                [2]     = "TXRDY",
 339                [3]     = "BLKE",
 340                [4]     = "DTIP",
 341                [5]     = "NOTBUSY",
 342                [6]     = "ENDRX",
 343                [7]     = "ENDTX",
 344                [8]     = "SDIOIRQA",
 345                [9]     = "SDIOIRQB",
 346                [12]    = "SDIOWAIT",
 347                [14]    = "RXBUFF",
 348                [15]    = "TXBUFE",
 349                [16]    = "RINDE",
 350                [17]    = "RDIRE",
 351                [18]    = "RCRCE",
 352                [19]    = "RENDE",
 353                [20]    = "RTOE",
 354                [21]    = "DCRCE",
 355                [22]    = "DTOE",
 356                [23]    = "CSTOE",
 357                [24]    = "BLKOVRE",
 358                [25]    = "DMADONE",
 359                [26]    = "FIFOEMPTY",
 360                [27]    = "XFRDONE",
 361                [30]    = "OVRE",
 362                [31]    = "UNRE",
 363        };
 364        unsigned int            i;
 365
 366        seq_printf(s, "%s:\t0x%08x", regname, value);
 367        for (i = 0; i < ARRAY_SIZE(sr_bit); i++) {
 368                if (value & (1 << i)) {
 369                        if (sr_bit[i])
 370                                seq_printf(s, " %s", sr_bit[i]);
 371                        else
 372                                seq_puts(s, " UNKNOWN");
 373                }
 374        }
 375        seq_putc(s, '\n');
 376}
 377
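/*
 * For illustration (made-up value): atmci_show_status_reg(s, "SR", 0x00000021)
 * would print "SR:\t0x00000021 CMDRDY NOTBUSY", since only bits 0 and 5 are
 * set in that value.
 */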
 378static int atmci_regs_show(struct seq_file *s, void *v)
 379{
 380        struct atmel_mci        *host = s->private;
 381        u32                     *buf;
 382        int                     ret = 0;
 383
 384
 385        buf = kmalloc(ATMCI_REGS_SIZE, GFP_KERNEL);
 386        if (!buf)
 387                return -ENOMEM;
 388
 389        /*
 390         * Grab a more or less consistent snapshot. Note that we're
 391         * not disabling interrupts, so IMR and SR may not be
 392         * consistent.
 393         */
 394        ret = clk_prepare_enable(host->mck);
 395        if (ret)
 396                goto out;
 397
 398        spin_lock_bh(&host->lock);
 399        memcpy_fromio(buf, host->regs, ATMCI_REGS_SIZE);
 400        spin_unlock_bh(&host->lock);
 401
 402        clk_disable_unprepare(host->mck);
 403
 404        seq_printf(s, "MR:\t0x%08x%s%s ",
 405                        buf[ATMCI_MR / 4],
 406                        buf[ATMCI_MR / 4] & ATMCI_MR_RDPROOF ? " RDPROOF" : "",
 407                        buf[ATMCI_MR / 4] & ATMCI_MR_WRPROOF ? " WRPROOF" : "");
 408        if (host->caps.has_odd_clk_div)
 409                seq_printf(s, "{CLKDIV,CLKODD}=%u\n",
 410                                ((buf[ATMCI_MR / 4] & 0xff) << 1)
 411                                | ((buf[ATMCI_MR / 4] >> 16) & 1));
 412        else
 413                seq_printf(s, "CLKDIV=%u\n",
 414                                (buf[ATMCI_MR / 4] & 0xff));
 415        seq_printf(s, "DTOR:\t0x%08x\n", buf[ATMCI_DTOR / 4]);
 416        seq_printf(s, "SDCR:\t0x%08x\n", buf[ATMCI_SDCR / 4]);
 417        seq_printf(s, "ARGR:\t0x%08x\n", buf[ATMCI_ARGR / 4]);
 418        seq_printf(s, "BLKR:\t0x%08x BCNT=%u BLKLEN=%u\n",
 419                        buf[ATMCI_BLKR / 4],
 420                        buf[ATMCI_BLKR / 4] & 0xffff,
 421                        (buf[ATMCI_BLKR / 4] >> 16) & 0xffff);
 422        if (host->caps.has_cstor_reg)
 423                seq_printf(s, "CSTOR:\t0x%08x\n", buf[ATMCI_CSTOR / 4]);
 424
 425        /* Don't read RSPR and RDR; reading them would consume the data */
 426
 427        atmci_show_status_reg(s, "SR", buf[ATMCI_SR / 4]);
 428        atmci_show_status_reg(s, "IMR", buf[ATMCI_IMR / 4]);
 429
 430        if (host->caps.has_dma_conf_reg) {
 431                u32 val;
 432
 433                val = buf[ATMCI_DMA / 4];
 434                seq_printf(s, "DMA:\t0x%08x OFFSET=%u CHKSIZE=%u%s\n",
 435                                val, val & 3,
 436                                ((val >> 4) & 3) ?
 437                                        1 << (((val >> 4) & 3) + 1) : 1,
 438                                val & ATMCI_DMAEN ? " DMAEN" : "");
 439        }
 440        if (host->caps.has_cfg_reg) {
 441                u32 val;
 442
 443                val = buf[ATMCI_CFG / 4];
 444                seq_printf(s, "CFG:\t0x%08x%s%s%s%s\n",
 445                                val,
 446                                val & ATMCI_CFG_FIFOMODE_1DATA ? " FIFOMODE_ONE_DATA" : "",
 447                                val & ATMCI_CFG_FERRCTRL_COR ? " FERRCTRL_CLEAR_ON_READ" : "",
 448                                val & ATMCI_CFG_HSMODE ? " HSMODE" : "",
 449                                val & ATMCI_CFG_LSYNC ? " LSYNC" : "");
 450        }
 451
 452out:
 453        kfree(buf);
 454
 455        return ret;
 456}
 457
 458static int atmci_regs_open(struct inode *inode, struct file *file)
 459{
 460        return single_open(file, atmci_regs_show, inode->i_private);
 461}
 462
 463static const struct file_operations atmci_regs_fops = {
 464        .owner          = THIS_MODULE,
 465        .open           = atmci_regs_open,
 466        .read           = seq_read,
 467        .llseek         = seq_lseek,
 468        .release        = single_release,
 469};
 470
 471static void atmci_init_debugfs(struct atmel_mci_slot *slot)
 472{
 473        struct mmc_host         *mmc = slot->mmc;
 474        struct atmel_mci        *host = slot->host;
 475        struct dentry           *root;
 476        struct dentry           *node;
 477
 478        root = mmc->debugfs_root;
 479        if (!root)
 480                return;
 481
 482        node = debugfs_create_file("regs", S_IRUSR, root, host,
 483                        &atmci_regs_fops);
 484        if (IS_ERR(node))
 485                return;
 486        if (!node)
 487                goto err;
 488
 489        node = debugfs_create_file("req", S_IRUSR, root, slot, &atmci_req_fops);
 490        if (!node)
 491                goto err;
 492
 493        node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
 494        if (!node)
 495                goto err;
 496
 497        node = debugfs_create_x32("pending_events", S_IRUSR, root,
 498                                     (u32 *)&host->pending_events);
 499        if (!node)
 500                goto err;
 501
 502        node = debugfs_create_x32("completed_events", S_IRUSR, root,
 503                                     (u32 *)&host->completed_events);
 504        if (!node)
 505                goto err;
 506
 507        return;
 508
 509err:
 510        dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
 511}
 512
 513#if defined(CONFIG_OF)
 514static const struct of_device_id atmci_dt_ids[] = {
 515        { .compatible = "atmel,hsmci" },
 516        { /* sentinel */ }
 517};
 518
 519MODULE_DEVICE_TABLE(of, atmci_dt_ids);
 520
 521static struct mci_platform_data*
 522atmci_of_init(struct platform_device *pdev)
 523{
 524        struct device_node *np = pdev->dev.of_node;
 525        struct device_node *cnp;
 526        struct mci_platform_data *pdata;
 527        u32 slot_id;
 528
 529        if (!np) {
 530                dev_err(&pdev->dev, "device node not found\n");
 531                return ERR_PTR(-EINVAL);
 532        }
 533
 534        pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
 535        if (!pdata) {
 536                dev_err(&pdev->dev, "could not allocate memory for pdata\n");
 537                return ERR_PTR(-ENOMEM);
 538        }
 539
 540        for_each_child_of_node(np, cnp) {
 541                if (of_property_read_u32(cnp, "reg", &slot_id)) {
 542                        dev_warn(&pdev->dev, "reg property is missing for %s\n",
 543                                 cnp->full_name);
 544                        continue;
 545                }
 546
 547                if (slot_id >= ATMCI_MAX_NR_SLOTS) {
 548                        dev_warn(&pdev->dev, "can't have more than %d slots\n",
 549                                 ATMCI_MAX_NR_SLOTS);
 550                        break;
 551                }
 552
 553                if (of_property_read_u32(cnp, "bus-width",
 554                                         &pdata->slot[slot_id].bus_width))
 555                        pdata->slot[slot_id].bus_width = 1;
 556
 557                pdata->slot[slot_id].detect_pin =
 558                        of_get_named_gpio(cnp, "cd-gpios", 0);
 559
 560                pdata->slot[slot_id].detect_is_active_high =
 561                        of_property_read_bool(cnp, "cd-inverted");
 562
 563                pdata->slot[slot_id].wp_pin =
 564                        of_get_named_gpio(cnp, "wp-gpios", 0);
 565        }
 566
 567        return pdata;
 568}
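/*
 * Hypothetical device tree fragment accepted by atmci_of_init() above. The
 * node layout and property names follow the bindings parsed here; the unit
 * address and GPIO specifiers are made up for illustration only:
 *
 *      mmc0: mmc@f0008000 {
 *              compatible = "atmel,hsmci";
 *
 *              slot@0 {
 *                      reg = <0>;
 *                      bus-width = <4>;
 *                      cd-gpios = <&pioD 15 0>;
 *                      cd-inverted;
 *                      wp-gpios = <&pioD 16 0>;
 *              };
 *      };
 */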
 569#else /* CONFIG_OF */
 570static inline struct mci_platform_data*
 571atmci_of_init(struct platform_device *dev)
 572{
 573        return ERR_PTR(-EINVAL);
 574}
 575#endif
 576
 577static inline unsigned int atmci_get_version(struct atmel_mci *host)
 578{
 579        return atmci_readl(host, ATMCI_VERSION) & 0x00000fff;
 580}
 581
 582static void atmci_timeout_timer(unsigned long data)
 583{
 584        struct atmel_mci *host;
 585
 586        host = (struct atmel_mci *)data;
 587
 588        dev_dbg(&host->pdev->dev, "software timeout\n");
 589
 590        if (host->mrq->cmd->data) {
 591                host->mrq->cmd->data->error = -ETIMEDOUT;
 592                host->data = NULL;
 593                /*
 594                 * With some SDIO modules, the DMA transfer sometimes hangs.
 595                 * If stop_transfer() is not called, the DMA request is not
 596                 * removed and the following ones are queued but never processed.
 597                 */
 598                if (host->state == STATE_DATA_XFER)
 599                        host->stop_transfer(host);
 600        } else {
 601                host->mrq->cmd->error = -ETIMEDOUT;
 602                host->cmd = NULL;
 603        }
 604        host->need_reset = 1;
 605        host->state = STATE_END_REQUEST;
 606        smp_wmb();
 607        tasklet_schedule(&host->tasklet);
 608}
 609
 610static inline unsigned int atmci_ns_to_clocks(struct atmel_mci *host,
 611                                        unsigned int ns)
 612{
 613        /*
 614         * It is easier to use us instead of ns for the timeout here;
 615         * it prevents overflows during the calculation.
 616         */
 617        unsigned int us = DIV_ROUND_UP(ns, 1000);
 618
 619        /* Maximum clock frequency is host->bus_hz/2 */
 620        return us * (DIV_ROUND_UP(host->bus_hz, 2000000));
 621}
 622
 623static void atmci_set_timeout(struct atmel_mci *host,
 624                struct atmel_mci_slot *slot, struct mmc_data *data)
 625{
 626        static unsigned dtomul_to_shift[] = {
 627                0, 4, 7, 8, 10, 12, 16, 20
 628        };
 629        unsigned        timeout;
 630        unsigned        dtocyc;
 631        unsigned        dtomul;
 632
 633        timeout = atmci_ns_to_clocks(host, data->timeout_ns)
 634                + data->timeout_clks;
 635
 636        for (dtomul = 0; dtomul < 8; dtomul++) {
 637                unsigned shift = dtomul_to_shift[dtomul];
 638                dtocyc = (timeout + (1 << shift) - 1) >> shift;
 639                if (dtocyc < 15)
 640                        break;
 641        }
 642
 643        if (dtomul >= 8) {
 644                dtomul = 7;
 645                dtocyc = 15;
 646        }
 647
 648        dev_vdbg(&slot->mmc->class_dev, "setting timeout to %u cycles\n",
 649                        dtocyc << dtomul_to_shift[dtomul]);
 650        atmci_writel(host, ATMCI_DTOR, (ATMCI_DTOMUL(dtomul) | ATMCI_DTOCYC(dtocyc)));
 651}
 652
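/*
 * Worked example for the two helpers above (assumed numbers, and assuming
 * data->timeout_clks is 0): with host->bus_hz = 50 MHz and
 * data->timeout_ns = 100 ms, atmci_ns_to_clocks() returns
 * 100000 us * DIV_ROUND_UP(50000000, 2000000) = 100000 * 25 = 2500000 clocks.
 * The loop in atmci_set_timeout() then selects dtomul = 7 (shift 20), because
 * 2500000 >> 16 is still >= 15, which gives dtocyc = 3 and an effective
 * hardware timeout of 3 << 20 = 3145728 MCI clock cycles.
 */
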
 653/*
 654 * Return mask with command flags to be enabled for this command.
 655 */
 656static u32 atmci_prepare_command(struct mmc_host *mmc,
 657                                 struct mmc_command *cmd)
 658{
 659        struct mmc_data *data;
 660        u32             cmdr;
 661
 662        cmd->error = -EINPROGRESS;
 663
 664        cmdr = ATMCI_CMDR_CMDNB(cmd->opcode);
 665
 666        if (cmd->flags & MMC_RSP_PRESENT) {
 667                if (cmd->flags & MMC_RSP_136)
 668                        cmdr |= ATMCI_CMDR_RSPTYP_136BIT;
 669                else
 670                        cmdr |= ATMCI_CMDR_RSPTYP_48BIT;
 671        }
 672
 673        /*
 674         * This should really be MAXLAT_5 for CMD2 and ACMD41, but
 675         * it's too difficult to determine whether this is an ACMD or
 676         * not. Better make it 64.
 677         */
 678        cmdr |= ATMCI_CMDR_MAXLAT_64CYC;
 679
 680        if (mmc->ios.bus_mode == MMC_BUSMODE_OPENDRAIN)
 681                cmdr |= ATMCI_CMDR_OPDCMD;
 682
 683        data = cmd->data;
 684        if (data) {
 685                cmdr |= ATMCI_CMDR_START_XFER;
 686
 687                if (cmd->opcode == SD_IO_RW_EXTENDED) {
 688                        cmdr |= ATMCI_CMDR_SDIO_BLOCK;
 689                } else {
 690                        if (data->flags & MMC_DATA_STREAM)
 691                                cmdr |= ATMCI_CMDR_STREAM;
 692                        else if (data->blocks > 1)
 693                                cmdr |= ATMCI_CMDR_MULTI_BLOCK;
 694                        else
 695                                cmdr |= ATMCI_CMDR_BLOCK;
 696                }
 697
 698                if (data->flags & MMC_DATA_READ)
 699                        cmdr |= ATMCI_CMDR_TRDIR_READ;
 700        }
 701
 702        return cmdr;
 703}
 704
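/*
 * For illustration (assumed flags, not taken from a real trace): a single
 * block read such as CMD17 with an R1 response and the bus in push-pull mode
 * would be prepared by atmci_prepare_command() above as
 * ATMCI_CMDR_CMDNB(17) | ATMCI_CMDR_RSPTYP_48BIT | ATMCI_CMDR_MAXLAT_64CYC |
 * ATMCI_CMDR_START_XFER | ATMCI_CMDR_BLOCK | ATMCI_CMDR_TRDIR_READ.
 */
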
 705static void atmci_send_command(struct atmel_mci *host,
 706                struct mmc_command *cmd, u32 cmd_flags)
 707{
 708        WARN_ON(host->cmd);
 709        host->cmd = cmd;
 710
 711        dev_vdbg(&host->pdev->dev,
 712                        "start command: ARGR=0x%08x CMDR=0x%08x\n",
 713                        cmd->arg, cmd_flags);
 714
 715        atmci_writel(host, ATMCI_ARGR, cmd->arg);
 716        atmci_writel(host, ATMCI_CMDR, cmd_flags);
 717}
 718
 719static void atmci_send_stop_cmd(struct atmel_mci *host, struct mmc_data *data)
 720{
 721        dev_dbg(&host->pdev->dev, "send stop command\n");
 722        atmci_send_command(host, data->stop, host->stop_cmdr);
 723        atmci_writel(host, ATMCI_IER, ATMCI_CMDRDY);
 724}
 725
 726/*
 727 * Configure the given PDC buffer, taking care of alignment issues.
 728 * Update host->data_size and host->sg.
 729 */
 730static void atmci_pdc_set_single_buf(struct atmel_mci *host,
 731        enum atmci_xfer_dir dir, enum atmci_pdc_buf buf_nb)
 732{
 733        u32 pointer_reg, counter_reg;
 734        unsigned int buf_size;
 735
 736        if (dir == XFER_RECEIVE) {
 737                pointer_reg = ATMEL_PDC_RPR;
 738                counter_reg = ATMEL_PDC_RCR;
 739        } else {
 740                pointer_reg = ATMEL_PDC_TPR;
 741                counter_reg = ATMEL_PDC_TCR;
 742        }
 743
 744        if (buf_nb == PDC_SECOND_BUF) {
 745                pointer_reg += ATMEL_PDC_SCND_BUF_OFF;
 746                counter_reg += ATMEL_PDC_SCND_BUF_OFF;
 747        }
 748
 749        if (!host->caps.has_rwproof) {
 750                buf_size = host->buf_size;
 751                atmci_writel(host, pointer_reg, host->buf_phys_addr);
 752        } else {
 753                buf_size = sg_dma_len(host->sg);
 754                atmci_writel(host, pointer_reg, sg_dma_address(host->sg));
 755        }
 756
 757        if (host->data_size <= buf_size) {
 758                if (host->data_size & 0x3) {
 759                        /* If the size is not a multiple of 4, transfer bytes */
 760                        atmci_writel(host, counter_reg, host->data_size);
 761                        atmci_writel(host, ATMCI_MR, host->mode_reg | ATMCI_MR_PDCFBYTE);
 762                } else {
 763                        /* Else transfer 32-bit words */
 764                        atmci_writel(host, counter_reg, host->data_size / 4);
 765                }
 766                host->data_size = 0;
 767        } else {
 768                /* We assume the size of a page is 32-bit aligned */
 769                atmci_writel(host, counter_reg, sg_dma_len(host->sg) / 4);
 770                host->data_size -= sg_dma_len(host->sg);
 771                if (host->data_size)
 772                        host->sg = sg_next(host->sg);
 773        }
 774}
 775
 776/*
 777 * Configure the PDC buffers according to the data size, i.e. configure one or
 778 * two buffers. Don't use this function if you want to configure only the second
 779 * buffer. In this case, use atmci_pdc_set_single_buf.
 780 */
 781static void atmci_pdc_set_both_buf(struct atmel_mci *host, int dir)
 782{
 783        atmci_pdc_set_single_buf(host, dir, PDC_FIRST_BUF);
 784        if (host->data_size)
 785                atmci_pdc_set_single_buf(host, dir, PDC_SECOND_BUF);
 786}
 787
 788/*
 789 * Unmap sg lists, called when transfer is finished.
 790 */
 791static void atmci_pdc_cleanup(struct atmel_mci *host)
 792{
 793        struct mmc_data         *data = host->data;
 794
 795        if (data)
 796                dma_unmap_sg(&host->pdev->dev,
 797                                data->sg, data->sg_len,
 798                                ((data->flags & MMC_DATA_WRITE)
 799                                 ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
 800}
 801
 802/*
 803 * Disable PDC transfers. Update pending flags to EVENT_XFER_COMPLETE after
 804 * having received ATMCI_TXBUFE or ATMCI_RXBUFF interrupt. Enable ATMCI_NOTBUSY
 805 * interrupt needed for both transfer directions.
 806 */
 807static void atmci_pdc_complete(struct atmel_mci *host)
 808{
 809        int transfer_size = host->data->blocks * host->data->blksz;
 810        int i;
 811
 812        atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
 813
 814        if ((!host->caps.has_rwproof)
 815            && (host->data->flags & MMC_DATA_READ)) {
 816                if (host->caps.has_bad_data_ordering)
 817                        for (i = 0; i < transfer_size / 4; i++)
 818                                host->buffer[i] = swab32(host->buffer[i]);
 819                sg_copy_from_buffer(host->data->sg, host->data->sg_len,
 820                                    host->buffer, transfer_size);
 821        }
 822
 823        atmci_pdc_cleanup(host);
 824
 825        dev_dbg(&host->pdev->dev, "(%s) set pending xfer complete\n", __func__);
 826        atmci_set_pending(host, EVENT_XFER_COMPLETE);
 827        tasklet_schedule(&host->tasklet);
 828}
 829
 830static void atmci_dma_cleanup(struct atmel_mci *host)
 831{
 832        struct mmc_data                 *data = host->data;
 833
 834        if (data)
 835                dma_unmap_sg(host->dma.chan->device->dev,
 836                                data->sg, data->sg_len,
 837                                ((data->flags & MMC_DATA_WRITE)
 838                                 ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
 839}
 840
 841/*
 842 * This function is called by the DMA driver from tasklet context.
 843 */
 844static void atmci_dma_complete(void *arg)
 845{
 846        struct atmel_mci        *host = arg;
 847        struct mmc_data         *data = host->data;
 848
 849        dev_vdbg(&host->pdev->dev, "DMA complete\n");
 850
 851        if (host->caps.has_dma_conf_reg)
 852                /* Disable DMA hardware handshaking on MCI */
 853                atmci_writel(host, ATMCI_DMA, atmci_readl(host, ATMCI_DMA) & ~ATMCI_DMAEN);
 854
 855        atmci_dma_cleanup(host);
 856
 857        /*
 858         * If the card was removed, data will be NULL. No point trying
 859         * to send the stop command or waiting for NBUSY in this case.
 860         */
 861        if (data) {
 862                dev_dbg(&host->pdev->dev,
 863                        "(%s) set pending xfer complete\n", __func__);
 864                atmci_set_pending(host, EVENT_XFER_COMPLETE);
 865                tasklet_schedule(&host->tasklet);
 866
 867                /*
 868                 * Regardless of what the documentation says, we have
 869                 * to wait for NOTBUSY even after block read
 870                 * operations.
 871                 *
 872                 * When the DMA transfer is complete, the controller
 873                 * may still be reading the CRC from the card, i.e.
 874                 * the data transfer is still in progress and we
 875                 * haven't seen all the potential error bits yet.
 876                 *
 877                 * The interrupt handler will schedule a different
 878                 * tasklet to finish things up when the data transfer
 879                 * is completely done.
 880                 *
 881                 * We may not complete the mmc request here anyway
 882                 * because the mmc layer may call back and cause us to
 883                 * violate the "don't submit new operations from the
 884                 * completion callback" rule of the dma engine
 885                 * framework.
 886                 */
 887                atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
 888        }
 889}
 890
 891/*
 892 * Returns a mask of interrupt flags to be enabled after the whole
 893 * request has been prepared.
 894 */
 895static u32 atmci_prepare_data(struct atmel_mci *host, struct mmc_data *data)
 896{
 897        u32 iflags;
 898
 899        data->error = -EINPROGRESS;
 900
 901        host->sg = data->sg;
 902        host->sg_len = data->sg_len;
 903        host->data = data;
 904        host->data_chan = NULL;
 905
 906        iflags = ATMCI_DATA_ERROR_FLAGS;
 907
 908        /*
 909         * Errata: MMC data write operation with less than 12
 910         * bytes is impossible.
 911         *
 912         * Errata: MCI Transmit Data Register (TDR) FIFO
 913         * corruption when length is not multiple of 4.
 914         */
 915        if (data->blocks * data->blksz < 12
 916                        || (data->blocks * data->blksz) & 3)
 917                host->need_reset = true;
 918
 919        host->pio_offset = 0;
 920        if (data->flags & MMC_DATA_READ)
 921                iflags |= ATMCI_RXRDY;
 922        else
 923                iflags |= ATMCI_TXRDY;
 924
 925        return iflags;
 926}
 927
 928/*
 929 * Set interrupt flags and set the block length in the MCI mode register,
 930 * even though this value is also accessible in the MCI block register. This
 931 * seems to be necessary for MCI versions older than the High Speed one. It
 932 * also maps the sg list and configures the PDC registers.
 933 */
 934static u32
 935atmci_prepare_data_pdc(struct atmel_mci *host, struct mmc_data *data)
 936{
 937        u32 iflags, tmp;
 938        unsigned int sg_len;
 939        enum dma_data_direction dir;
 940        int i;
 941
 942        data->error = -EINPROGRESS;
 943
 944        host->data = data;
 945        host->sg = data->sg;
 946        iflags = ATMCI_DATA_ERROR_FLAGS;
 947
 948        /* Enable pdc mode */
 949        atmci_writel(host, ATMCI_MR, host->mode_reg | ATMCI_MR_PDCMODE);
 950
 951        if (data->flags & MMC_DATA_READ) {
 952                dir = DMA_FROM_DEVICE;
 953                iflags |= ATMCI_ENDRX | ATMCI_RXBUFF;
 954        } else {
 955                dir = DMA_TO_DEVICE;
 956                iflags |= ATMCI_ENDTX | ATMCI_TXBUFE | ATMCI_BLKE;
 957        }
 958
 959        /* Set BLKLEN */
 960        tmp = atmci_readl(host, ATMCI_MR);
 961        tmp &= 0x0000ffff;
 962        tmp |= ATMCI_BLKLEN(data->blksz);
 963        atmci_writel(host, ATMCI_MR, tmp);
 964
 965        /* Configure PDC */
 966        host->data_size = data->blocks * data->blksz;
 967        sg_len = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len, dir);
 968
 969        if ((!host->caps.has_rwproof)
 970            && (host->data->flags & MMC_DATA_WRITE)) {
 971                sg_copy_to_buffer(host->data->sg, host->data->sg_len,
 972                                  host->buffer, host->data_size);
 973                if (host->caps.has_bad_data_ordering)
 974                        for (i = 0; i < host->data_size / 4; i++)
 975                                host->buffer[i] = swab32(host->buffer[i]);
 976        }
 977
 978        if (host->data_size)
 979                atmci_pdc_set_both_buf(host,
 980                        ((dir == DMA_FROM_DEVICE) ? XFER_RECEIVE : XFER_TRANSMIT));
 981
 982        return iflags;
 983}
 984
 985static u32
 986atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
 987{
 988        struct dma_chan                 *chan;
 989        struct dma_async_tx_descriptor  *desc;
 990        struct scatterlist              *sg;
 991        unsigned int                    i;
 992        enum dma_data_direction         direction;
 993        enum dma_transfer_direction     slave_dirn;
 994        unsigned int                    sglen;
 995        u32                             maxburst;
 996        u32 iflags;
 997
 998        data->error = -EINPROGRESS;
 999
1000        WARN_ON(host->data);
1001        host->sg = NULL;
1002        host->data = data;
1003
1004        iflags = ATMCI_DATA_ERROR_FLAGS;
1005
1006        /*
1007         * We don't do DMA on "complex" transfers, i.e. with
1008         * non-word-aligned buffers or lengths. Also, we don't bother
1009         * with all the DMA setup overhead for short transfers.
1010         */
1011        if (data->blocks * data->blksz < ATMCI_DMA_THRESHOLD)
1012                return atmci_prepare_data(host, data);
1013        if (data->blksz & 3)
1014                return atmci_prepare_data(host, data);
1015
1016        for_each_sg(data->sg, sg, data->sg_len, i) {
1017                if (sg->offset & 3 || sg->length & 3)
1018                        return atmci_prepare_data(host, data);
1019        }
1020
1021        /* If we don't have a channel, we can't do DMA */
1022        chan = host->dma.chan;
1023        if (chan)
1024                host->data_chan = chan;
1025
1026        if (!chan)
1027                return -ENODEV;
1028
1029        if (data->flags & MMC_DATA_READ) {
1030                direction = DMA_FROM_DEVICE;
1031                host->dma_conf.direction = slave_dirn = DMA_DEV_TO_MEM;
1032                maxburst = atmci_convert_chksize(host->dma_conf.src_maxburst);
1033        } else {
1034                direction = DMA_TO_DEVICE;
1035                host->dma_conf.direction = slave_dirn = DMA_MEM_TO_DEV;
1036                maxburst = atmci_convert_chksize(host->dma_conf.dst_maxburst);
1037        }
1038
1039        if (host->caps.has_dma_conf_reg)
1040                atmci_writel(host, ATMCI_DMA, ATMCI_DMA_CHKSIZE(maxburst) |
1041                        ATMCI_DMAEN);
1042
1043        sglen = dma_map_sg(chan->device->dev, data->sg,
1044                        data->sg_len, direction);
1045
1046        dmaengine_slave_config(chan, &host->dma_conf);
1047        desc = dmaengine_prep_slave_sg(chan,
1048                        data->sg, sglen, slave_dirn,
1049                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1050        if (!desc)
1051                goto unmap_exit;
1052
1053        host->dma.data_desc = desc;
1054        desc->callback = atmci_dma_complete;
1055        desc->callback_param = host;
1056
1057        return iflags;
1058unmap_exit:
1059        dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, direction);
1060        return -ENOMEM;
1061}
1062
1063static void
1064atmci_submit_data(struct atmel_mci *host, struct mmc_data *data)
1065{
1066        return;
1067}
1068
1069/*
1070 * Start PDC according to transfer direction.
1071 */
1072static void
1073atmci_submit_data_pdc(struct atmel_mci *host, struct mmc_data *data)
1074{
1075        if (data->flags & MMC_DATA_READ)
1076                atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
1077        else
1078                atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
1079}
1080
1081static void
1082atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data)
1083{
1084        struct dma_chan                 *chan = host->data_chan;
1085        struct dma_async_tx_descriptor  *desc = host->dma.data_desc;
1086
1087        if (chan) {
1088                dmaengine_submit(desc);
1089                dma_async_issue_pending(chan);
1090        }
1091}
1092
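/*
 * Note: atmci_submit_data_dma() above is the second half of the usual
 * dmaengine flow. The descriptor is configured and prepared in
 * atmci_prepare_data_dma() (dmaengine_slave_config() +
 * dmaengine_prep_slave_sg()); here it is merely queued with
 * dmaengine_submit() and started with dma_async_issue_pending().
 */
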
1093static void atmci_stop_transfer(struct atmel_mci *host)
1094{
1095        dev_dbg(&host->pdev->dev,
1096                "(%s) set pending xfer complete\n", __func__);
1097        atmci_set_pending(host, EVENT_XFER_COMPLETE);
1098        atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
1099}
1100
1101/*
1102 * Stop data transfer because error(s) occurred.
1103 */
1104static void atmci_stop_transfer_pdc(struct atmel_mci *host)
1105{
1106        atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
1107}
1108
1109static void atmci_stop_transfer_dma(struct atmel_mci *host)
1110{
1111        struct dma_chan *chan = host->data_chan;
1112
1113        if (chan) {
1114                dmaengine_terminate_all(chan);
1115                atmci_dma_cleanup(host);
1116        } else {
1117                /* Data transfer was stopped by the interrupt handler */
1118                dev_dbg(&host->pdev->dev,
1119                        "(%s) set pending xfer complete\n", __func__);
1120                atmci_set_pending(host, EVENT_XFER_COMPLETE);
1121                atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
1122        }
1123}
1124
1125/*
1126 * Start a request: prepare data if needed, prepare the command and activate
1127 * interrupts.
1128 */
1129static void atmci_start_request(struct atmel_mci *host,
1130                struct atmel_mci_slot *slot)
1131{
1132        struct mmc_request      *mrq;
1133        struct mmc_command      *cmd;
1134        struct mmc_data         *data;
1135        u32                     iflags;
1136        u32                     cmdflags;
1137
1138        mrq = slot->mrq;
1139        host->cur_slot = slot;
1140        host->mrq = mrq;
1141
1142        host->pending_events = 0;
1143        host->completed_events = 0;
1144        host->cmd_status = 0;
1145        host->data_status = 0;
1146
1147        dev_dbg(&host->pdev->dev, "start request: cmd %u\n", mrq->cmd->opcode);
1148
1149        if (host->need_reset || host->caps.need_reset_after_xfer) {
1150                iflags = atmci_readl(host, ATMCI_IMR);
1151                iflags &= (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB);
1152                atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
1153                atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN);
1154                atmci_writel(host, ATMCI_MR, host->mode_reg);
1155                if (host->caps.has_cfg_reg)
1156                        atmci_writel(host, ATMCI_CFG, host->cfg_reg);
1157                atmci_writel(host, ATMCI_IER, iflags);
1158                host->need_reset = false;
1159        }
1160        atmci_writel(host, ATMCI_SDCR, slot->sdc_reg);
1161
1162        iflags = atmci_readl(host, ATMCI_IMR);
1163        if (iflags & ~(ATMCI_SDIOIRQA | ATMCI_SDIOIRQB))
1164                dev_dbg(&slot->mmc->class_dev, "WARNING: IMR=0x%08x\n",
1165                                iflags);
1166
1167        if (unlikely(test_and_clear_bit(ATMCI_CARD_NEED_INIT, &slot->flags))) {
1168                /* Send init sequence (74 clock cycles) */
1169                atmci_writel(host, ATMCI_CMDR, ATMCI_CMDR_SPCMD_INIT);
1170                while (!(atmci_readl(host, ATMCI_SR) & ATMCI_CMDRDY))
1171                        cpu_relax();
1172        }
1173        iflags = 0;
1174        data = mrq->data;
1175        if (data) {
1176                atmci_set_timeout(host, slot, data);
1177
1178                /* Must set block count/size before sending command */
1179                atmci_writel(host, ATMCI_BLKR, ATMCI_BCNT(data->blocks)
1180                                | ATMCI_BLKLEN(data->blksz));
1181                dev_vdbg(&slot->mmc->class_dev, "BLKR=0x%08x\n",
1182                        ATMCI_BCNT(data->blocks) | ATMCI_BLKLEN(data->blksz));
1183
1184                iflags |= host->prepare_data(host, data);
1185        }
1186
1187        iflags |= ATMCI_CMDRDY;
1188        cmd = mrq->cmd;
1189        cmdflags = atmci_prepare_command(slot->mmc, cmd);
1190
1191        /*
1192         * DMA transfer should be started before sending the command to avoid
1193         * unexpected errors especially for read operations in SDIO mode.
1194         * Unfortunately, in PDC mode, the command has to be sent before starting
1195         * the transfer.
1196         */
1197        if (host->submit_data != &atmci_submit_data_dma)
1198                atmci_send_command(host, cmd, cmdflags);
1199
1200        if (data)
1201                host->submit_data(host, data);
1202
1203        if (host->submit_data == &atmci_submit_data_dma)
1204                atmci_send_command(host, cmd, cmdflags);
1205
1206        if (mrq->stop) {
1207                host->stop_cmdr = atmci_prepare_command(slot->mmc, mrq->stop);
1208                host->stop_cmdr |= ATMCI_CMDR_STOP_XFER;
1209                if (!(data->flags & MMC_DATA_WRITE))
1210                        host->stop_cmdr |= ATMCI_CMDR_TRDIR_READ;
1211                if (data->flags & MMC_DATA_STREAM)
1212                        host->stop_cmdr |= ATMCI_CMDR_STREAM;
1213                else
1214                        host->stop_cmdr |= ATMCI_CMDR_MULTI_BLOCK;
1215        }
1216
1217        /*
1218         * We could have enabled interrupts earlier, but I suspect
1219         * that would open up a nice can of interesting race
1220         * conditions (e.g. command and data complete, but stop not
1221         * prepared yet.)
1222         */
1223        atmci_writel(host, ATMCI_IER, iflags);
1224
1225        mod_timer(&host->timer, jiffies +  msecs_to_jiffies(2000));
1226}
1227
1228static void atmci_queue_request(struct atmel_mci *host,
1229                struct atmel_mci_slot *slot, struct mmc_request *mrq)
1230{
1231        dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
1232                        host->state);
1233
1234        spin_lock_bh(&host->lock);
1235        slot->mrq = mrq;
1236        if (host->state == STATE_IDLE) {
1237                host->state = STATE_SENDING_CMD;
1238                atmci_start_request(host, slot);
1239        } else {
1240                dev_dbg(&host->pdev->dev, "queue request\n");
1241                list_add_tail(&slot->queue_node, &host->queue);
1242        }
1243        spin_unlock_bh(&host->lock);
1244}
1245
1246static void atmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1247{
1248        struct atmel_mci_slot   *slot = mmc_priv(mmc);
1249        struct atmel_mci        *host = slot->host;
1250        struct mmc_data         *data;
1251
1252        WARN_ON(slot->mrq);
1253        dev_dbg(&host->pdev->dev, "MRQ: cmd %u\n", mrq->cmd->opcode);
1254
1255        /*
1256         * We may "know" the card is gone even though there's still an
1257         * electrical connection. If so, we really need to communicate
1258         * this to the MMC core since there won't be any more
1259         * interrupts as the card is completely removed. Otherwise,
1260         * the MMC core might believe the card is still there even
1261         * though the card was just removed very slowly.
1262         */
1263        if (!test_bit(ATMCI_CARD_PRESENT, &slot->flags)) {
1264                mrq->cmd->error = -ENOMEDIUM;
1265                mmc_request_done(mmc, mrq);
1266                return;
1267        }
1268
1269        /* We don't support multiple blocks of weird lengths. */
1270        data = mrq->data;
1271        if (data && data->blocks > 1 && data->blksz & 3) {
1272                mrq->cmd->error = -EINVAL;
1273                mmc_request_done(mmc, mrq);
                    return;
1274        }
1275
1276        atmci_queue_request(host, slot, mrq);
1277}
1278
1279static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1280{
1281        struct atmel_mci_slot   *slot = mmc_priv(mmc);
1282        struct atmel_mci        *host = slot->host;
1283        unsigned int            i;
1284        bool                    unprepare_clk;
1285
1286        slot->sdc_reg &= ~ATMCI_SDCBUS_MASK;
1287        switch (ios->bus_width) {
1288        case MMC_BUS_WIDTH_1:
1289                slot->sdc_reg |= ATMCI_SDCBUS_1BIT;
1290                break;
1291        case MMC_BUS_WIDTH_4:
1292                slot->sdc_reg |= ATMCI_SDCBUS_4BIT;
1293                break;
1294        }
1295
1296        if (ios->clock) {
1297                unsigned int clock_min = ~0U;
1298                u32 clkdiv;
1299
1300                clk_prepare(host->mck);
1301                unprepare_clk = true;
1302
1303                spin_lock_bh(&host->lock);
1304                if (!host->mode_reg) {
1305                        clk_enable(host->mck);
1306                        unprepare_clk = false;
1307                        atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
1308                        atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN);
1309                        if (host->caps.has_cfg_reg)
1310                                atmci_writel(host, ATMCI_CFG, host->cfg_reg);
1311                }
1312
1313                /*
1314                 * Use mirror of ios->clock to prevent race with mmc
1315                 * core ios update when finding the minimum.
1316                 */
1317                slot->clock = ios->clock;
1318                for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
1319                        if (host->slot[i] && host->slot[i]->clock
1320                                        && host->slot[i]->clock < clock_min)
1321                                clock_min = host->slot[i]->clock;
1322                }
1323
1324                /* Calculate clock divider */
1325                if (host->caps.has_odd_clk_div) {
1326                        clkdiv = DIV_ROUND_UP(host->bus_hz, clock_min) - 2;
1327                        if (clkdiv > 511) {
1328                                dev_warn(&mmc->class_dev,
1329                                         "clock %u too slow; using %lu\n",
1330                                         clock_min, host->bus_hz / (511 + 2));
1331                                clkdiv = 511;
1332                        }
1333                        host->mode_reg = ATMCI_MR_CLKDIV(clkdiv >> 1)
1334                                         | ATMCI_MR_CLKODD(clkdiv & 1);
1335                } else {
1336                        clkdiv = DIV_ROUND_UP(host->bus_hz, 2 * clock_min) - 1;
1337                        if (clkdiv > 255) {
1338                                dev_warn(&mmc->class_dev,
1339                                         "clock %u too slow; using %lu\n",
1340                                         clock_min, host->bus_hz / (2 * 256));
1341                                clkdiv = 255;
1342                        }
1343                        host->mode_reg = ATMCI_MR_CLKDIV(clkdiv);
1344                }
1345
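                /*
                 * Worked example (assumed rates, for illustration only): with
                 * bus_hz = 132 MHz and clock_min = 25 MHz, the odd-divider
                 * path above gives clkdiv = DIV_ROUND_UP(132, 25) - 2 = 4,
                 * i.e. 132 MHz / (4 + 2) = 22 MHz, while the legacy path
                 * gives clkdiv = DIV_ROUND_UP(132, 2 * 25) - 1 = 2, i.e.
                 * 132 MHz / (2 * (2 + 1)) = 22 MHz.
                 */
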
1346                /*
1347                 * WRPROOF and RDPROOF prevent overruns/underruns by
1348                 * stopping the clock when the FIFO is full/empty.
1349                 * This state is not expected to last for long.
1350                 */
1351                if (host->caps.has_rwproof)
1352                        host->mode_reg |= (ATMCI_MR_WRPROOF | ATMCI_MR_RDPROOF);
1353
1354                if (host->caps.has_cfg_reg) {
1355                        /* set up High Speed mode according to the card capability */
1356                        if (ios->timing == MMC_TIMING_SD_HS)
1357                                host->cfg_reg |= ATMCI_CFG_HSMODE;
1358                        else
1359                                host->cfg_reg &= ~ATMCI_CFG_HSMODE;
1360                }
1361
1362                if (list_empty(&host->queue)) {
1363                        atmci_writel(host, ATMCI_MR, host->mode_reg);
1364                        if (host->caps.has_cfg_reg)
1365                                atmci_writel(host, ATMCI_CFG, host->cfg_reg);
1366                } else {
1367                        host->need_clock_update = true;
1368                }
1369
1370                spin_unlock_bh(&host->lock);
1371        } else {
1372                bool any_slot_active = false;
1373
1374                unprepare_clk = false;
1375
1376                spin_lock_bh(&host->lock);
1377                slot->clock = 0;
1378                for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
1379                        if (host->slot[i] && host->slot[i]->clock) {
1380                                any_slot_active = true;
1381                                break;
1382                        }
1383                }
1384                if (!any_slot_active) {
1385                        atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIDIS);
1386                        if (host->mode_reg) {
1387                                atmci_readl(host, ATMCI_MR);
1388                                clk_disable(host->mck);
1389                                unprepare_clk = true;
1390                        }
1391                        host->mode_reg = 0;
1392                }
1393                spin_unlock_bh(&host->lock);
1394        }
1395
1396        if (unprepare_clk)
1397                clk_unprepare(host->mck);
1398
1399        switch (ios->power_mode) {
1400        case MMC_POWER_OFF:
1401                if (!IS_ERR(mmc->supply.vmmc))
1402                        mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
1403                break;
1404        case MMC_POWER_UP:
1405                set_bit(ATMCI_CARD_NEED_INIT, &slot->flags);
1406                if (!IS_ERR(mmc->supply.vmmc))
1407                        mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
1408                break;
1409        default:
1410                /*
1411                 * TODO: None of the currently available AVR32-based
1412                 * boards allow MMC power to be turned off. Implement
1413                 * power control when this can be tested properly.
1414                 *
1415                 * We also need to hook this into the clock management
1416                 * somehow so that newly inserted cards aren't
1417                 * subjected to a fast clock before we have a chance
1418                 * to figure out what the maximum rate is. Currently,
1419                 * there's no way to avoid this, and there never will
1420                 * be for boards that don't support power control.
1421                 */
1422                break;
1423        }
1424}
1425
1426static int atmci_get_ro(struct mmc_host *mmc)
1427{
1428        int                     read_only = -ENOSYS;
1429        struct atmel_mci_slot   *slot = mmc_priv(mmc);
1430
1431        if (gpio_is_valid(slot->wp_pin)) {
1432                read_only = gpio_get_value(slot->wp_pin);
1433                dev_dbg(&mmc->class_dev, "card is %s\n",
1434                                read_only ? "read-only" : "read-write");
1435        }
1436
1437        return read_only;
1438}
1439
1440static int atmci_get_cd(struct mmc_host *mmc)
1441{
1442        int                     present = -ENOSYS;
1443        struct atmel_mci_slot   *slot = mmc_priv(mmc);
1444
1445        if (gpio_is_valid(slot->detect_pin)) {
1446                present = !(gpio_get_value(slot->detect_pin) ^
1447                            slot->detect_is_active_high);
1448                dev_dbg(&mmc->class_dev, "card is %spresent\n",
1449                                present ? "" : "not ");
1450        }
1451
1452        return present;
1453}
1454
1455static void atmci_enable_sdio_irq(struct mmc_host *mmc, int enable)
1456{
1457        struct atmel_mci_slot   *slot = mmc_priv(mmc);
1458        struct atmel_mci        *host = slot->host;
1459
1460        if (enable)
1461                atmci_writel(host, ATMCI_IER, slot->sdio_irq);
1462        else
1463                atmci_writel(host, ATMCI_IDR, slot->sdio_irq);
1464}
1465
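    /*
     * Per-slot operations handed to the MMC core. Clock and bus power are
     * handled in atmci_set_ios(); card detect and write protect state are
     * read from the optional slot GPIO lines when they are available.
     */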
1466static const struct mmc_host_ops atmci_ops = {
1467        .request        = atmci_request,
1468        .set_ios        = atmci_set_ios,
1469        .get_ro         = atmci_get_ro,
1470        .get_cd         = atmci_get_cd,
1471        .enable_sdio_irq = atmci_enable_sdio_irq,
1472};
1473
1474/* Called with host->lock held */
1475static void atmci_request_end(struct atmel_mci *host, struct mmc_request *mrq)
1476        __releases(&host->lock)
1477        __acquires(&host->lock)
1478{
1479        struct atmel_mci_slot   *slot = NULL;
1480        struct mmc_host         *prev_mmc = host->cur_slot->mmc;
1481
1482        WARN_ON(host->cmd || host->data);
1483
1484        /*
1485         * Update the MMC clock rate if necessary. This may be
1486         * needed if set_ios() was called while a different slot
1487         * was busy transferring data.
1488         */
1489        if (host->need_clock_update) {
1490                atmci_writel(host, ATMCI_MR, host->mode_reg);
1491                if (host->caps.has_cfg_reg)
1492                        atmci_writel(host, ATMCI_CFG, host->cfg_reg);
1493        }
1494
1495        host->cur_slot->mrq = NULL;
1496        host->mrq = NULL;
1497        if (!list_empty(&host->queue)) {
1498                slot = list_entry(host->queue.next,
1499                                struct atmel_mci_slot, queue_node);
1500                list_del(&slot->queue_node);
1501                dev_vdbg(&host->pdev->dev, "list not empty: %s is next\n",
1502                                mmc_hostname(slot->mmc));
1503                host->state = STATE_SENDING_CMD;
1504                atmci_start_request(host, slot);
1505        } else {
1506                dev_vdbg(&host->pdev->dev, "list empty\n");
1507                host->state = STATE_IDLE;
1508        }
1509
1510        del_timer(&host->timer);
1511
1512        spin_unlock(&host->lock);
1513        mmc_request_done(prev_mmc, mrq);
1514        spin_lock(&host->lock);
1515}
1516
1517static void atmci_command_complete(struct atmel_mci *host,
1518                        struct mmc_command *cmd)
1519{
1520        u32             status = host->cmd_status;
1521
1522        /* Read the response from the card (up to 16 bytes) */
1523        cmd->resp[0] = atmci_readl(host, ATMCI_RSPR);
1524        cmd->resp[1] = atmci_readl(host, ATMCI_RSPR);
1525        cmd->resp[2] = atmci_readl(host, ATMCI_RSPR);
1526        cmd->resp[3] = atmci_readl(host, ATMCI_RSPR);
1527
1528        if (status & ATMCI_RTOE)
1529                cmd->error = -ETIMEDOUT;
1530        else if ((cmd->flags & MMC_RSP_CRC) && (status & ATMCI_RCRCE))
1531                cmd->error = -EILSEQ;
1532        else if (status & (ATMCI_RINDE | ATMCI_RDIRE | ATMCI_RENDE))
1533                cmd->error = -EIO;
1534        else if (host->mrq->data && (host->mrq->data->blksz & 3)) {
1535                if (host->caps.need_blksz_mul_4) {
1536                        cmd->error = -EINVAL;
1537                        host->need_reset = 1;
1538                }
1539        } else
1540                cmd->error = 0;
1541}
1542
1543static void atmci_detect_change(unsigned long data)
1544{
1545        struct atmel_mci_slot   *slot = (struct atmel_mci_slot *)data;
1546        bool                    present;
1547        bool                    present_old;
1548
1549        /*
1550         * atmci_cleanup_slot() sets the ATMCI_SHUTDOWN flag before
1551         * freeing the interrupt. We must not re-enable the interrupt
1552         * if it has been freed, and if we're shutting down, it
1553         * doesn't really matter whether the card is present or not.
1554         */
1555        smp_rmb();
1556        if (test_bit(ATMCI_SHUTDOWN, &slot->flags))
1557                return;
1558
1559        enable_irq(gpio_to_irq(slot->detect_pin));
1560        present = !(gpio_get_value(slot->detect_pin) ^
1561                    slot->detect_is_active_high);
1562        present_old = test_bit(ATMCI_CARD_PRESENT, &slot->flags);
1563
1564        dev_vdbg(&slot->mmc->class_dev, "detect change: %d (was %d)\n",
1565                        present, present_old);
1566
1567        if (present != present_old) {
1568                struct atmel_mci        *host = slot->host;
1569                struct mmc_request      *mrq;
1570
1571                dev_dbg(&slot->mmc->class_dev, "card %s\n",
1572                        present ? "inserted" : "removed");
1573
1574                spin_lock(&host->lock);
1575
1576                if (!present)
1577                        clear_bit(ATMCI_CARD_PRESENT, &slot->flags);
1578                else
1579                        set_bit(ATMCI_CARD_PRESENT, &slot->flags);
1580
1581                /* Clean up queue if present */
1582                mrq = slot->mrq;
1583                if (mrq) {
1584                        if (mrq == host->mrq) {
1585                                /*
1586                                 * Reset controller to terminate any ongoing
1587                                 * commands or data transfers.
1588                                 */
1589                                atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
1590                                atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN);
1591                                atmci_writel(host, ATMCI_MR, host->mode_reg);
1592                                if (host->caps.has_cfg_reg)
1593                                        atmci_writel(host, ATMCI_CFG, host->cfg_reg);
1594
1595                                host->data = NULL;
1596                                host->cmd = NULL;
1597
1598                                switch (host->state) {
1599                                case STATE_IDLE:
1600                                        break;
1601                                case STATE_SENDING_CMD:
1602                                        mrq->cmd->error = -ENOMEDIUM;
1603                                        if (mrq->data)
1604                                                host->stop_transfer(host);
1605                                        break;
1606                                case STATE_DATA_XFER:
1607                                        mrq->data->error = -ENOMEDIUM;
1608                                        host->stop_transfer(host);
1609                                        break;
1610                                case STATE_WAITING_NOTBUSY:
1611                                        mrq->data->error = -ENOMEDIUM;
1612                                        break;
1613                                case STATE_SENDING_STOP:
1614                                        mrq->stop->error = -ENOMEDIUM;
1615                                        break;
1616                                case STATE_END_REQUEST:
1617                                        break;
1618                                }
1619
1620                                atmci_request_end(host, mrq);
1621                        } else {
1622                                list_del(&slot->queue_node);
1623                                mrq->cmd->error = -ENOMEDIUM;
1624                                if (mrq->data)
1625                                        mrq->data->error = -ENOMEDIUM;
1626                                if (mrq->stop)
1627                                        mrq->stop->error = -ENOMEDIUM;
1628
1629                                spin_unlock(&host->lock);
1630                                mmc_request_done(slot->mmc, mrq);
1631                                spin_lock(&host->lock);
1632                        }
1633                }
1634                spin_unlock(&host->lock);
1635
1636                mmc_detect_change(slot->mmc, 0);
1637        }
1638}
1639
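    /*
     * Bottom half of the interrupt handling: runs the request state
     * machine. Events marked pending by atmci_interrupt() are consumed
     * here, under host->lock, and drive the STATE_* transitions until no
     * further progress can be made.
     */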
1640static void atmci_tasklet_func(unsigned long priv)
1641{
1642        struct atmel_mci        *host = (struct atmel_mci *)priv;
1643        struct mmc_request      *mrq = host->mrq;
1644        struct mmc_data         *data = host->data;
1645        enum atmel_mci_state    state = host->state;
1646        enum atmel_mci_state    prev_state;
1647        u32                     status;
1648
1649        spin_lock(&host->lock);
1650
1651        state = host->state;
1652
1653        dev_vdbg(&host->pdev->dev,
1654                "tasklet: state %u pending/completed/mask %lx/%lx/%x\n",
1655                state, host->pending_events, host->completed_events,
1656                atmci_readl(host, ATMCI_IMR));
1657
1658        do {
1659                prev_state = state;
1660                dev_dbg(&host->pdev->dev, "FSM: state=%d\n", state);
1661
1662                switch (state) {
1663                case STATE_IDLE:
1664                        break;
1665
1666                case STATE_SENDING_CMD:
1667                        /*
1668                         * The command has been sent; we are waiting for command
1669                         * ready. Three next states are then possible:
1670                         * END_REQUEST by default, WAITING_NOTBUSY if the
1671                         * command needs it, or DATA_XFER if there is data.
1672                         */
1673                        dev_dbg(&host->pdev->dev, "FSM: cmd ready?\n");
1674                        if (!atmci_test_and_clear_pending(host,
1675                                                EVENT_CMD_RDY))
1676                                break;
1677
1678                        dev_dbg(&host->pdev->dev, "set completed cmd ready\n");
1679                        host->cmd = NULL;
1680                        atmci_set_completed(host, EVENT_CMD_RDY);
1681                        atmci_command_complete(host, mrq->cmd);
1682                        if (mrq->data) {
1683                                dev_dbg(&host->pdev->dev,
1684                                        "command with data transfer");
1685                                /*
1686                                 * If there is a command error don't start
1687                                 * data transfer.
1688                                 */
1689                                if (mrq->cmd->error) {
1690                                        host->stop_transfer(host);
1691                                        host->data = NULL;
1692                                        atmci_writel(host, ATMCI_IDR,
1693                                                     ATMCI_TXRDY | ATMCI_RXRDY
1694                                                     | ATMCI_DATA_ERROR_FLAGS);
1695                                        state = STATE_END_REQUEST;
1696                                } else
1697                                        state = STATE_DATA_XFER;
1698                        } else if ((!mrq->data) && (mrq->cmd->flags & MMC_RSP_BUSY)) {
1699                                dev_dbg(&host->pdev->dev,
1700                                        "command response needs waiting for notbusy");
1701                                atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
1702                                state = STATE_WAITING_NOTBUSY;
1703                        } else
1704                                state = STATE_END_REQUEST;
1705
1706                        break;
1707
1708                case STATE_DATA_XFER:
1709                        if (atmci_test_and_clear_pending(host,
1710                                                EVENT_DATA_ERROR)) {
1711                                dev_dbg(&host->pdev->dev, "set completed data error\n");
1712                                atmci_set_completed(host, EVENT_DATA_ERROR);
1713                                state = STATE_END_REQUEST;
1714                                break;
1715                        }
1716
1717                        /*
1718                         * A data transfer is in progress. The event that moves
1719                         * us to the next state depends on the transfer type
1720                         * (PDC or DMA). Once the transfer is done, the next
1721                         * step is WAITING_NOTBUSY for writes and directly
1722                         * SENDING_STOP for reads.
1723                         */
1724                        dev_dbg(&host->pdev->dev, "FSM: xfer complete?\n");
1725                        if (!atmci_test_and_clear_pending(host,
1726                                                EVENT_XFER_COMPLETE))
1727                                break;
1728
1729                        dev_dbg(&host->pdev->dev,
1730                                "(%s) set completed xfer complete\n",
1731                                __func__);
1732                        atmci_set_completed(host, EVENT_XFER_COMPLETE);
1733
1734                        if (host->caps.need_notbusy_for_read_ops ||
1735                           (host->data->flags & MMC_DATA_WRITE)) {
1736                                atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
1737                                state = STATE_WAITING_NOTBUSY;
1738                        } else if (host->mrq->stop) {
1739                                atmci_writel(host, ATMCI_IER, ATMCI_CMDRDY);
1740                                atmci_send_stop_cmd(host, data);
1741                                state = STATE_SENDING_STOP;
1742                        } else {
1743                                host->data = NULL;
1744                                data->bytes_xfered = data->blocks * data->blksz;
1745                                data->error = 0;
1746                                state = STATE_END_REQUEST;
1747                        }
1748                        break;
1749
1750                case STATE_WAITING_NOTBUSY:
1751                        /*
1752                         * We can be in this state for two reasons: a command
1753                         * that requires waiting for the not-busy signal (stop
1754                         * commands included) or a write operation. In the
1755                         * latter case, we need to send a stop command.
1756                         */
1757                        dev_dbg(&host->pdev->dev, "FSM: not busy?\n");
1758                        if (!atmci_test_and_clear_pending(host,
1759                                                EVENT_NOTBUSY))
1760                                break;
1761
1762                        dev_dbg(&host->pdev->dev, "set completed not busy\n");
1763                        atmci_set_completed(host, EVENT_NOTBUSY);
1764
1765                        if (host->data) {
1766                                /*
1767                                 * For some commands such as CMD53, even if
1768                                 * there is data transfer, there is no stop
1769                                 * command to send.
1770                                 */
1771                                if (host->mrq->stop) {
1772                                        atmci_writel(host, ATMCI_IER,
1773                                                     ATMCI_CMDRDY);
1774                                        atmci_send_stop_cmd(host, data);
1775                                        state = STATE_SENDING_STOP;
1776                                } else {
1777                                        host->data = NULL;
1778                                        data->bytes_xfered = data->blocks
1779                                                             * data->blksz;
1780                                        data->error = 0;
1781                                        state = STATE_END_REQUEST;
1782                                }
1783                        } else
1784                                state = STATE_END_REQUEST;
1785                        break;
1786
1787                case STATE_SENDING_STOP:
1788                        /*
1789                         * In this state, it is important to set host->data to
1790                         * NULL (which is tested in the waiting notbusy state)
1791                         * in order to go to the end request state instead of
1792                         * sending stop again.
1793                         */
1794                        dev_dbg(&host->pdev->dev, "FSM: cmd ready?\n");
1795                        if (!atmci_test_and_clear_pending(host,
1796                                                EVENT_CMD_RDY))
1797                                break;
1798
1799                        dev_dbg(&host->pdev->dev, "FSM: cmd ready\n");
1800                        host->cmd = NULL;
1801                        data->bytes_xfered = data->blocks * data->blksz;
1802                        data->error = 0;
1803                        atmci_command_complete(host, mrq->stop);
1804                        if (mrq->stop->error) {
1805                                host->stop_transfer(host);
1806                                atmci_writel(host, ATMCI_IDR,
1807                                             ATMCI_TXRDY | ATMCI_RXRDY
1808                                             | ATMCI_DATA_ERROR_FLAGS);
1809                                state = STATE_END_REQUEST;
1810                        } else {
1811                                atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
1812                                state = STATE_WAITING_NOTBUSY;
1813                        }
1814                        host->data = NULL;
1815                        break;
1816
1817                case STATE_END_REQUEST:
1818                        atmci_writel(host, ATMCI_IDR, ATMCI_TXRDY | ATMCI_RXRDY
1819                                           | ATMCI_DATA_ERROR_FLAGS);
1820                        status = host->data_status;
1821                        if (unlikely(status)) {
1822                                host->stop_transfer(host);
1823                                host->data = NULL;
1824                                if (data) {
1825                                        if (status & ATMCI_DTOE) {
1826                                                data->error = -ETIMEDOUT;
1827                                        } else if (status & ATMCI_DCRCE) {
1828                                                data->error = -EILSEQ;
1829                                        } else {
1830                                                data->error = -EIO;
1831                                        }
1832                                }
1833                        }
1834
1835                        atmci_request_end(host, host->mrq);
1836                        state = STATE_IDLE;
1837                        break;
1838                }
1839        } while (state != prev_state);
1840
1841        host->state = state;
1842
1843        spin_unlock(&host->lock);
1844}
1845
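    /*
     * PIO read path: drain the receive data register into the current
     * scatterlist entry one 32-bit word at a time. A word that straddles
     * the end of an entry is split, with the remaining bytes copied into
     * the next entry.
     */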
1846static void atmci_read_data_pio(struct atmel_mci *host)
1847{
1848        struct scatterlist      *sg = host->sg;
1849        void                    *buf = sg_virt(sg);
1850        unsigned int            offset = host->pio_offset;
1851        struct mmc_data         *data = host->data;
1852        u32                     value;
1853        u32                     status;
1854        unsigned int            nbytes = 0;
1855
1856        do {
1857                value = atmci_readl(host, ATMCI_RDR);
1858                if (likely(offset + 4 <= sg->length)) {
1859                        put_unaligned(value, (u32 *)(buf + offset));
1860
1861                        offset += 4;
1862                        nbytes += 4;
1863
1864                        if (offset == sg->length) {
1865                                flush_dcache_page(sg_page(sg));
1866                                host->sg = sg = sg_next(sg);
1867                                host->sg_len--;
1868                                if (!sg || !host->sg_len)
1869                                        goto done;
1870
1871                                offset = 0;
1872                                buf = sg_virt(sg);
1873                        }
1874                } else {
1875                        unsigned int remaining = sg->length - offset;
1876                        memcpy(buf + offset, &value, remaining);
1877                        nbytes += remaining;
1878
1879                        flush_dcache_page(sg_page(sg));
1880                        host->sg = sg = sg_next(sg);
1881                        host->sg_len--;
1882                        if (!sg || !host->sg_len)
1883                                goto done;
1884
1885                        offset = 4 - remaining;
1886                        buf = sg_virt(sg);
1887                        memcpy(buf, (u8 *)&value + remaining, offset);
1888                        nbytes += offset;
1889                }
1890
1891                status = atmci_readl(host, ATMCI_SR);
1892                if (status & ATMCI_DATA_ERROR_FLAGS) {
1893                        atmci_writel(host, ATMCI_IDR, (ATMCI_NOTBUSY | ATMCI_RXRDY
1894                                                | ATMCI_DATA_ERROR_FLAGS));
1895                        host->data_status = status;
1896                        data->bytes_xfered += nbytes;
1897                        return;
1898                }
1899        } while (status & ATMCI_RXRDY);
1900
1901        host->pio_offset = offset;
1902        data->bytes_xfered += nbytes;
1903
1904        return;
1905
1906done:
1907        atmci_writel(host, ATMCI_IDR, ATMCI_RXRDY);
1908        atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
1909        data->bytes_xfered += nbytes;
1910        smp_wmb();
1911        atmci_set_pending(host, EVENT_XFER_COMPLETE);
1912}
1913
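    /*
     * PIO write path: the mirror image of atmci_read_data_pio(), feeding
     * the transmit data register from the scatterlist 32 bits at a time
     * and stitching words together across scatterlist entry boundaries.
     */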
1914static void atmci_write_data_pio(struct atmel_mci *host)
1915{
1916        struct scatterlist      *sg = host->sg;
1917        void                    *buf = sg_virt(sg);
1918        unsigned int            offset = host->pio_offset;
1919        struct mmc_data         *data = host->data;
1920        u32                     value;
1921        u32                     status;
1922        unsigned int            nbytes = 0;
1923
1924        do {
1925                if (likely(offset + 4 <= sg->length)) {
1926                        value = get_unaligned((u32 *)(buf + offset));
1927                        atmci_writel(host, ATMCI_TDR, value);
1928
1929                        offset += 4;
1930                        nbytes += 4;
1931                        if (offset == sg->length) {
1932                                host->sg = sg = sg_next(sg);
1933                                host->sg_len--;
1934                                if (!sg || !host->sg_len)
1935                                        goto done;
1936
1937                                offset = 0;
1938                                buf = sg_virt(sg);
1939                        }
1940                } else {
1941                        unsigned int remaining = sg->length - offset;
1942
1943                        value = 0;
1944                        memcpy(&value, buf + offset, remaining);
1945                        nbytes += remaining;
1946
1947                        host->sg = sg = sg_next(sg);
1948                        host->sg_len--;
1949                        if (!sg || !host->sg_len) {
1950                                atmci_writel(host, ATMCI_TDR, value);
1951                                goto done;
1952                        }
1953
1954                        offset = 4 - remaining;
1955                        buf = sg_virt(sg);
1956                        memcpy((u8 *)&value + remaining, buf, offset);
1957                        atmci_writel(host, ATMCI_TDR, value);
1958                        nbytes += offset;
1959                }
1960
1961                status = atmci_readl(host, ATMCI_SR);
1962                if (status & ATMCI_DATA_ERROR_FLAGS) {
1963                        atmci_writel(host, ATMCI_IDR, (ATMCI_NOTBUSY | ATMCI_TXRDY
1964                                                | ATMCI_DATA_ERROR_FLAGS));
1965                        host->data_status = status;
1966                        data->bytes_xfered += nbytes;
1967                        return;
1968                }
1969        } while (status & ATMCI_TXRDY);
1970
1971        host->pio_offset = offset;
1972        data->bytes_xfered += nbytes;
1973
1974        return;
1975
1976done:
1977        atmci_writel(host, ATMCI_IDR, ATMCI_TXRDY);
1978        atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
1979        data->bytes_xfered += nbytes;
1980        smp_wmb();
1981        atmci_set_pending(host, EVENT_XFER_COMPLETE);
1982}
1983
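    /*
     * Signal an SDIO card interrupt to the MMC core for every slot whose
     * SDIO IRQ bit is set in the controller status word.
     */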
1984static void atmci_sdio_interrupt(struct atmel_mci *host, u32 status)
1985{
1986        int     i;
1987
1988        for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
1989                struct atmel_mci_slot *slot = host->slot[i];
1990                if (slot && (status & slot->sdio_irq)) {
1991                        mmc_signal_sdio_irq(slot->mmc);
1992                }
1993        }
1994}
1995
1996
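    /*
     * Top-half interrupt handler: acknowledge and mask the sources that
     * fired, record the relevant status, and defer the real work to the
     * tasklet. The status and mask registers are re-read a bounded number
     * of times so that events raised while earlier ones are being handled
     * are not lost.
     */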
1997static irqreturn_t atmci_interrupt(int irq, void *dev_id)
1998{
1999        struct atmel_mci        *host = dev_id;
2000        u32                     status, mask, pending;
2001        unsigned int            pass_count = 0;
2002
2003        do {
2004                status = atmci_readl(host, ATMCI_SR);
2005                mask = atmci_readl(host, ATMCI_IMR);
2006                pending = status & mask;
2007                if (!pending)
2008                        break;
2009
2010                if (pending & ATMCI_DATA_ERROR_FLAGS) {
2011                        dev_dbg(&host->pdev->dev, "IRQ: data error\n");
2012                        atmci_writel(host, ATMCI_IDR, ATMCI_DATA_ERROR_FLAGS
2013                                        | ATMCI_RXRDY | ATMCI_TXRDY
2014                                        | ATMCI_ENDRX | ATMCI_ENDTX
2015                                        | ATMCI_RXBUFF | ATMCI_TXBUFE);
2016
2017                        host->data_status = status;
2018                        dev_dbg(&host->pdev->dev, "set pending data error\n");
2019                        smp_wmb();
2020                        atmci_set_pending(host, EVENT_DATA_ERROR);
2021                        tasklet_schedule(&host->tasklet);
2022                }
2023
2024                if (pending & ATMCI_TXBUFE) {
2025                        dev_dbg(&host->pdev->dev, "IRQ: tx buffer empty\n");
2026                        atmci_writel(host, ATMCI_IDR, ATMCI_TXBUFE);
2027                        atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX);
2028                        /*
2029                         * We can receive this interrupt before the second PDC
2030                         * buffer has been configured, so we need to reconfigure
2031                         * the first and second buffers again.
2032                         */
2033                        if (host->data_size) {
2034                                atmci_pdc_set_both_buf(host, XFER_TRANSMIT);
2035                                atmci_writel(host, ATMCI_IER, ATMCI_ENDTX);
2036                                atmci_writel(host, ATMCI_IER, ATMCI_TXBUFE);
2037                        } else {
2038                                atmci_pdc_complete(host);
2039                        }
2040                } else if (pending & ATMCI_ENDTX) {
2041                        dev_dbg(&host->pdev->dev, "IRQ: end of tx buffer\n");
2042                        atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX);
2043
2044                        if (host->data_size) {
2045                                atmci_pdc_set_single_buf(host,
2046                                                XFER_TRANSMIT, PDC_SECOND_BUF);
2047                                atmci_writel(host, ATMCI_IER, ATMCI_ENDTX);
2048                        }
2049                }
2050
2051                if (pending & ATMCI_RXBUFF) {
2052                        dev_dbg(&host->pdev->dev, "IRQ: rx buffer full\n");
2053                        atmci_writel(host, ATMCI_IDR, ATMCI_RXBUFF);
2054                        atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX);
2055                        /*
2056                         * We can receive this interrupt before the second PDC
2057                         * buffer has been configured, so we need to reconfigure
2058                         * the first and second buffers again.
2059                         */
2060                        if (host->data_size) {
2061                                atmci_pdc_set_both_buf(host, XFER_RECEIVE);
2062                                atmci_writel(host, ATMCI_IER, ATMCI_ENDRX);
2063                                atmci_writel(host, ATMCI_IER, ATMCI_RXBUFF);
2064                        } else {
2065                                atmci_pdc_complete(host);
2066                        }
2067                } else if (pending & ATMCI_ENDRX) {
2068                        dev_dbg(&host->pdev->dev, "IRQ: end of rx buffer\n");
2069                        atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX);
2070
2071                        if (host->data_size) {
2072                                atmci_pdc_set_single_buf(host,
2073                                                XFER_RECEIVE, PDC_SECOND_BUF);
2074                                atmci_writel(host, ATMCI_IER, ATMCI_ENDRX);
2075                        }
2076                }
2077
2078                /*
2079                 * Early MCI IP revisions, mainly the ones with a PDC, have
2080                 * issues with the not-busy signal: it is not raised after a
2081                 * data transmission unless a stop command has been sent. The
2082                 * appropriate workaround is to use the BLKE signal instead.
2083                 */
2084                if (pending & ATMCI_BLKE) {
2085                        dev_dbg(&host->pdev->dev, "IRQ: blke\n");
2086                        atmci_writel(host, ATMCI_IDR, ATMCI_BLKE);
2087                        smp_wmb();
2088                        dev_dbg(&host->pdev->dev, "set pending notbusy\n");
2089                        atmci_set_pending(host, EVENT_NOTBUSY);
2090                        tasklet_schedule(&host->tasklet);
2091                }
2092
2093                if (pending & ATMCI_NOTBUSY) {
2094                        dev_dbg(&host->pdev->dev, "IRQ: not_busy\n");
2095                        atmci_writel(host, ATMCI_IDR, ATMCI_NOTBUSY);
2096                        smp_wmb();
2097                        dev_dbg(&host->pdev->dev, "set pending notbusy\n");
2098                        atmci_set_pending(host, EVENT_NOTBUSY);
2099                        tasklet_schedule(&host->tasklet);
2100                }
2101
2102                if (pending & ATMCI_RXRDY)
2103                        atmci_read_data_pio(host);
2104                if (pending & ATMCI_TXRDY)
2105                        atmci_write_data_pio(host);
2106
2107                if (pending & ATMCI_CMDRDY) {
2108                        dev_dbg(&host->pdev->dev, "IRQ: cmd ready\n");
2109                        atmci_writel(host, ATMCI_IDR, ATMCI_CMDRDY);
2110                        host->cmd_status = status;
2111                        smp_wmb();
2112                        dev_dbg(&host->pdev->dev, "set pending cmd rdy\n");
2113                        atmci_set_pending(host, EVENT_CMD_RDY);
2114                        tasklet_schedule(&host->tasklet);
2115                }
2116
2117                if (pending & (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB))
2118                        atmci_sdio_interrupt(host, status);
2119
2120        } while (pass_count++ < 5);
2121
2122        return pass_count ? IRQ_HANDLED : IRQ_NONE;
2123}
2124
2125static irqreturn_t atmci_detect_interrupt(int irq, void *dev_id)
2126{
2127        struct atmel_mci_slot   *slot = dev_id;
2128
2129        /*
2130         * Disable interrupts until the pin has stabilized and check
2131         * the state then. Use mod_timer() since we may be in the
2132         * middle of the timer routine when this interrupt triggers.
2133         */
2134        disable_irq_nosync(irq);
2135        mod_timer(&slot->detect_timer, jiffies + msecs_to_jiffies(20));
2136
2137        return IRQ_HANDLED;
2138}
2139
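    /*
     * Allocate and register one mmc_host per populated slot, wiring up the
     * optional card-detect and write-protect GPIOs and setting the per-slot
     * transfer limits according to the controller version.
     */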
2140static int __init atmci_init_slot(struct atmel_mci *host,
2141                struct mci_slot_pdata *slot_data, unsigned int id,
2142                u32 sdc_reg, u32 sdio_irq)
2143{
2144        struct mmc_host                 *mmc;
2145        struct atmel_mci_slot           *slot;
2146
2147        mmc = mmc_alloc_host(sizeof(struct atmel_mci_slot), &host->pdev->dev);
2148        if (!mmc)
2149                return -ENOMEM;
2150
2151        slot = mmc_priv(mmc);
2152        slot->mmc = mmc;
2153        slot->host = host;
2154        slot->detect_pin = slot_data->detect_pin;
2155        slot->wp_pin = slot_data->wp_pin;
2156        slot->detect_is_active_high = slot_data->detect_is_active_high;
2157        slot->sdc_reg = sdc_reg;
2158        slot->sdio_irq = sdio_irq;
2159
2160        dev_dbg(&mmc->class_dev,
2161                "slot[%u]: bus_width=%u, detect_pin=%d, "
2162                "detect_is_active_high=%s, wp_pin=%d\n",
2163                id, slot_data->bus_width, slot_data->detect_pin,
2164                slot_data->detect_is_active_high ? "true" : "false",
2165                slot_data->wp_pin);
2166
2167        mmc->ops = &atmci_ops;
2168        mmc->f_min = DIV_ROUND_UP(host->bus_hz, 512);
2169        mmc->f_max = host->bus_hz / 2;
2170        mmc->ocr_avail  = MMC_VDD_32_33 | MMC_VDD_33_34;
2171        if (sdio_irq)
2172                mmc->caps |= MMC_CAP_SDIO_IRQ;
2173        if (host->caps.has_highspeed)
2174                mmc->caps |= MMC_CAP_SD_HIGHSPEED;
2175        /*
2176         * Without the read/write proof capability, it is strongly suggested
2177         * to use only one data bit, to prevent FIFO underruns and overruns
2178         * which would corrupt data.
2179         */
2180        if ((slot_data->bus_width >= 4) && host->caps.has_rwproof)
2181                mmc->caps |= MMC_CAP_4_BIT_DATA;
2182
2183        if (atmci_get_version(host) < 0x200) {
2184                mmc->max_segs = 256;
2185                mmc->max_blk_size = 4095;
2186                mmc->max_blk_count = 256;
2187                mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
2188                mmc->max_seg_size = mmc->max_blk_size * mmc->max_segs;
2189        } else {
2190                mmc->max_segs = 64;
2191                mmc->max_req_size = 32768 * 512;
2192                mmc->max_blk_size = 32768;
2193                mmc->max_blk_count = 512;
2194        }
2195
2196        /* Assume card is present initially */
2197        set_bit(ATMCI_CARD_PRESENT, &slot->flags);
2198        if (gpio_is_valid(slot->detect_pin)) {
2199                if (devm_gpio_request(&host->pdev->dev, slot->detect_pin,
2200                                      "mmc_detect")) {
2201                        dev_dbg(&mmc->class_dev, "no detect pin available\n");
2202                        slot->detect_pin = -EBUSY;
2203                } else if (gpio_get_value(slot->detect_pin) ^
2204                                slot->detect_is_active_high) {
2205                        clear_bit(ATMCI_CARD_PRESENT, &slot->flags);
2206                }
2207        }
2208
2209        if (!gpio_is_valid(slot->detect_pin))
2210                mmc->caps |= MMC_CAP_NEEDS_POLL;
2211
2212        if (gpio_is_valid(slot->wp_pin)) {
2213                if (devm_gpio_request(&host->pdev->dev, slot->wp_pin,
2214                                      "mmc_wp")) {
2215                        dev_dbg(&mmc->class_dev, "no WP pin available\n");
2216                        slot->wp_pin = -EBUSY;
2217                }
2218        }
2219
2220        host->slot[id] = slot;
2221        mmc_regulator_get_supply(mmc);
2222        mmc_add_host(mmc);
2223
2224        if (gpio_is_valid(slot->detect_pin)) {
2225                int ret;
2226
2227                setup_timer(&slot->detect_timer, atmci_detect_change,
2228                                (unsigned long)slot);
2229
2230                ret = request_irq(gpio_to_irq(slot->detect_pin),
2231                                atmci_detect_interrupt,
2232                                IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
2233                                "mmc-detect", slot);
2234                if (ret) {
2235                        dev_dbg(&mmc->class_dev,
2236                                "could not request IRQ %d for detect pin\n",
2237                                gpio_to_irq(slot->detect_pin));
2238                        slot->detect_pin = -EBUSY;
2239                }
2240        }
2241
2242        atmci_init_debugfs(slot);
2243
2244        return 0;
2245}
2246
2247static void atmci_cleanup_slot(struct atmel_mci_slot *slot,
2248                unsigned int id)
2249{
2250        /* Debugfs stuff is cleaned up by mmc core */
2251
2252        set_bit(ATMCI_SHUTDOWN, &slot->flags);
2253        smp_wmb();
2254
2255        mmc_remove_host(slot->mmc);
2256
2257        if (gpio_is_valid(slot->detect_pin)) {
2258                int pin = slot->detect_pin;
2259
2260                free_irq(gpio_to_irq(pin), slot);
2261                del_timer_sync(&slot->detect_timer);
2262        }
2263
2264        slot->host->slot[id] = NULL;
2265        mmc_free_host(slot->mmc);
2266}
2267
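    /*
     * dmaengine filter callback used when the DMA channel is described by
     * platform data rather than device tree: accept only the channel that
     * matches the dma_slave entry and pass the slave data to the DMA
     * driver through chan->private.
     */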
2268static bool atmci_filter(struct dma_chan *chan, void *pdata)
2269{
2270        struct mci_platform_data *sl_pdata = pdata;
2271        struct mci_dma_data *sl;
2272
2273        if (!sl_pdata)
2274                return false;
2275
2276        sl = sl_pdata->dma_slave;
2277        if (sl && find_slave_dev(sl) == chan->device->dev) {
2278                chan->private = slave_data_ptr(sl);
2279                return true;
2280        } else {
2281                return false;
2282        }
2283}
2284
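    /*
     * Request a DMA slave channel and pre-fill the slave configuration
     * with the addresses of the receive and transmit data registers.
     * Returns true if data transfers can use DMA; false means falling
     * back to PDC or PIO.
     */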
2285static bool atmci_configure_dma(struct atmel_mci *host)
2286{
2287        struct mci_platform_data        *pdata;
2288        dma_cap_mask_t mask;
2289
2290        if (host == NULL)
2291                return false;
2292
2293        pdata = host->pdev->dev.platform_data;
2294
2295        dma_cap_zero(mask);
2296        dma_cap_set(DMA_SLAVE, mask);
2297
2298        host->dma.chan = dma_request_slave_channel_compat(mask, atmci_filter, pdata,
2299                                                          &host->pdev->dev, "rxtx");
2300        if (!host->dma.chan) {
2301                dev_warn(&host->pdev->dev, "no DMA channel available\n");
2302                return false;
2303        } else {
2304                dev_info(&host->pdev->dev,
2305                                        "using %s for DMA transfers\n",
2306                                        dma_chan_name(host->dma.chan));
2307
2308                host->dma_conf.src_addr = host->mapbase + ATMCI_RDR;
2309                host->dma_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
2310                host->dma_conf.src_maxburst = 1;
2311                host->dma_conf.dst_addr = host->mapbase + ATMCI_TDR;
2312                host->dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
2313                host->dma_conf.dst_maxburst = 1;
2314                host->dma_conf.device_fc = false;
2315                return true;
2316        }
2317}
2318
2319/*
2320 * The HSMCI (High Speed MCI) module is not fully compatible with the MCI
2321 * module. HSMCI provides DMA support and a new configuration register, but
2322 * no longer supports the PDC.
2323 */
2324static void __init atmci_get_cap(struct atmel_mci *host)
2325{
2326        unsigned int version;
2327
2328        version = atmci_get_version(host);
2329        dev_info(&host->pdev->dev,
2330                        "version: 0x%x\n", version);
2331
2332        host->caps.has_dma_conf_reg = 0;
2333        host->caps.has_pdc = ATMCI_PDC_CONNECTED;
2334        host->caps.has_cfg_reg = 0;
2335        host->caps.has_cstor_reg = 0;
2336        host->caps.has_highspeed = 0;
2337        host->caps.has_rwproof = 0;
2338        host->caps.has_odd_clk_div = 0;
2339        host->caps.has_bad_data_ordering = 1;
2340        host->caps.need_reset_after_xfer = 1;
2341        host->caps.need_blksz_mul_4 = 1;
2342        host->caps.need_notbusy_for_read_ops = 0;
2343
2344        /* keep only major version number */
2345        switch (version & 0xf00) {
2346        case 0x600:
2347        case 0x500:
2348                host->caps.has_odd_clk_div = 1;
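                    /* fall through */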
2349        case 0x400:
2350        case 0x300:
2351                host->caps.has_dma_conf_reg = 1;
2352                host->caps.has_pdc = 0;
2353                host->caps.has_cfg_reg = 1;
2354                host->caps.has_cstor_reg = 1;
2355                host->caps.has_highspeed = 1;
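                    /* fall through */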
2356        case 0x200:
2357                host->caps.has_rwproof = 1;
2358                host->caps.need_blksz_mul_4 = 0;
2359                host->caps.need_notbusy_for_read_ops = 1;
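                    /* fall through */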
2360        case 0x100:
2361                host->caps.has_bad_data_ordering = 0;
2362                host->caps.need_reset_after_xfer = 0;
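                    /* fall through */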
2363        case 0x0:
2364                break;
2365        default:
2366                host->caps.has_pdc = 0;
2367                dev_warn(&host->pdev->dev,
2368                                "Unmanaged mci version, set minimum capabilities\n");
2369                break;
2370        }
2371}
2372
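    /*
     * Probe: map the registers, enable the peripheral clock long enough to
     * reset the controller and read the bus clock rate, then pick the DMA,
     * PDC or PIO transfer helpers according to the detected capabilities
     * before registering the slots.
     */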
2373static int __init atmci_probe(struct platform_device *pdev)
2374{
2375        struct mci_platform_data        *pdata;
2376        struct atmel_mci                *host;
2377        struct resource                 *regs;
2378        unsigned int                    nr_slots;
2379        int                             irq;
2380        int                             ret, i;
2381
2382        regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2383        if (!regs)
2384                return -ENXIO;
2385        pdata = pdev->dev.platform_data;
2386        if (!pdata) {
2387                pdata = atmci_of_init(pdev);
2388                if (IS_ERR(pdata)) {
2389                        dev_err(&pdev->dev, "platform data not available\n");
2390                        return PTR_ERR(pdata);
2391                }
2392        }
2393
2394        irq = platform_get_irq(pdev, 0);
2395        if (irq < 0)
2396                return irq;
2397
2398        host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
2399        if (!host)
2400                return -ENOMEM;
2401
2402        host->pdev = pdev;
2403        spin_lock_init(&host->lock);
2404        INIT_LIST_HEAD(&host->queue);
2405
2406        host->mck = devm_clk_get(&pdev->dev, "mci_clk");
2407        if (IS_ERR(host->mck))
2408                return PTR_ERR(host->mck);
2409
2410        host->regs = devm_ioremap(&pdev->dev, regs->start, resource_size(regs));
2411        if (!host->regs)
2412                return -ENOMEM;
2413
2414        ret = clk_prepare_enable(host->mck);
2415        if (ret)
2416                return ret;
2417
2418        atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
2419        host->bus_hz = clk_get_rate(host->mck);
2420        clk_disable_unprepare(host->mck);
2421
2422        host->mapbase = regs->start;
2423
2424        tasklet_init(&host->tasklet, atmci_tasklet_func, (unsigned long)host);
2425
2426        ret = request_irq(irq, atmci_interrupt, 0, dev_name(&pdev->dev), host);
2427        if (ret)
2428                return ret;
2429
2430        /* Get MCI capabilities and set operations according to it */
2431        atmci_get_cap(host);
2432        if (atmci_configure_dma(host)) {
2433                host->prepare_data = &atmci_prepare_data_dma;
2434                host->submit_data = &atmci_submit_data_dma;
2435                host->stop_transfer = &atmci_stop_transfer_dma;
2436        } else if (host->caps.has_pdc) {
2437                dev_info(&pdev->dev, "using PDC\n");
2438                host->prepare_data = &atmci_prepare_data_pdc;
2439                host->submit_data = &atmci_submit_data_pdc;
2440                host->stop_transfer = &atmci_stop_transfer_pdc;
2441        } else {
2442                dev_info(&pdev->dev, "using PIO\n");
2443                host->prepare_data = &atmci_prepare_data;
2444                host->submit_data = &atmci_submit_data;
2445                host->stop_transfer = &atmci_stop_transfer;
2446        }
2447
2448        platform_set_drvdata(pdev, host);
2449
2450        setup_timer(&host->timer, atmci_timeout_timer, (unsigned long)host);
2451
2452        /* We need at least one slot to succeed */
2453        nr_slots = 0;
2454        ret = -ENODEV;
2455        if (pdata->slot[0].bus_width) {
2456                ret = atmci_init_slot(host, &pdata->slot[0],
2457                                0, ATMCI_SDCSEL_SLOT_A, ATMCI_SDIOIRQA);
2458                if (!ret) {
2459                        nr_slots++;
2460                        host->buf_size = host->slot[0]->mmc->max_req_size;
2461                }
2462        }
2463        if (pdata->slot[1].bus_width) {
2464                ret = atmci_init_slot(host, &pdata->slot[1],
2465                                1, ATMCI_SDCSEL_SLOT_B, ATMCI_SDIOIRQB);
2466                if (!ret) {
2467                        nr_slots++;
2468                        if (host->slot[1]->mmc->max_req_size > host->buf_size)
2469                                host->buf_size =
2470                                        host->slot[1]->mmc->max_req_size;
2471                }
2472        }
2473
2474        if (!nr_slots) {
2475                dev_err(&pdev->dev, "init failed: no slot defined\n");
2476                goto err_init_slot;
2477        }
2478
2479        if (!host->caps.has_rwproof) {
2480                host->buffer = dma_alloc_coherent(&pdev->dev, host->buf_size,
2481                                                  &host->buf_phys_addr,
2482                                                  GFP_KERNEL);
2483                if (!host->buffer) {
2484                        ret = -ENOMEM;
2485                        dev_err(&pdev->dev, "buffer allocation failed\n");
2486                        goto err_dma_alloc;
2487                }
2488        }
2489
2490        dev_info(&pdev->dev,
2491                        "Atmel MCI controller at 0x%08lx irq %d, %u slots\n",
2492                        host->mapbase, irq, nr_slots);
2493
2494        return 0;
2495
2496err_dma_alloc:
2497        for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
2498                if (host->slot[i])
2499                        atmci_cleanup_slot(host->slot[i], i);
2500        }
2501err_init_slot:
2502        del_timer_sync(&host->timer);
2503        if (host->dma.chan)
2504                dma_release_channel(host->dma.chan);
2505        free_irq(irq, host);
2506        return ret;
2507}
2508
2509static int __exit atmci_remove(struct platform_device *pdev)
2510{
2511        struct atmel_mci        *host = platform_get_drvdata(pdev);
2512        unsigned int            i;
2513
2514        if (host->buffer)
2515                dma_free_coherent(&pdev->dev, host->buf_size,
2516                                  host->buffer, host->buf_phys_addr);
2517
2518        for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
2519                if (host->slot[i])
2520                        atmci_cleanup_slot(host->slot[i], i);
2521        }
2522
2523        clk_prepare_enable(host->mck);
2524        atmci_writel(host, ATMCI_IDR, ~0UL);
2525        atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIDIS);
2526        atmci_readl(host, ATMCI_SR);
2527        clk_disable_unprepare(host->mck);
2528
2529        del_timer_sync(&host->timer);
2530        if (host->dma.chan)
2531                dma_release_channel(host->dma.chan);
2532
2533        free_irq(platform_get_irq(pdev, 0), host);
2534
2535        return 0;
2536}
2537
2538static struct platform_driver atmci_driver = {
2539        .remove         = __exit_p(atmci_remove),
2540        .driver         = {
2541                .name           = "atmel_mci",
2542                .of_match_table = of_match_ptr(atmci_dt_ids),
2543        },
2544};
2545
2546static int __init atmci_init(void)
2547{
2548        return platform_driver_probe(&atmci_driver, atmci_probe);
2549}
2550
2551static void __exit atmci_exit(void)
2552{
2553        platform_driver_unregister(&atmci_driver);
2554}
2555
2556late_initcall(atmci_init); /* try to load after dma driver when built-in */
2557module_exit(atmci_exit);
2558
2559MODULE_DESCRIPTION("Atmel Multimedia Card Interface driver");
2560MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
2561MODULE_LICENSE("GPL v2");
2562