   1/*
   2 *  linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
   3 *
   4 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License as published by
   8 * the Free Software Foundation; either version 2 of the License, or (at
   9 * your option) any later version.
  10 *
  11 * Thanks to the following companies for their support:
  12 *
  13 *     - JMicron (hardware and technical support)
  14 */
  15
  16#include <linux/delay.h>
  17#include <linux/highmem.h>
  18#include <linux/io.h>
  19#include <linux/dma-mapping.h>
  20#include <linux/scatterlist.h>
  21
  22#include <linux/leds.h>
  23
  24#include <linux/mmc/host.h>
  25
  26#include "sdhci.h"
  27
  28#define DRIVER_NAME "sdhci"
  29
  30#define DBG(f, x...) \
  31        pr_debug(DRIVER_NAME " [%s()]: " f, __func__,## x)
  32
  33#if defined(CONFIG_LEDS_CLASS) || (defined(CONFIG_LEDS_CLASS_MODULE) && \
  34        defined(CONFIG_MMC_SDHCI_MODULE))
  35#define SDHCI_USE_LEDS_CLASS
  36#endif
  37
  38static unsigned int debug_quirks = 0;
  39
  40static void sdhci_prepare_data(struct sdhci_host *, struct mmc_data *);
  41static void sdhci_finish_data(struct sdhci_host *);
  42
  43static void sdhci_send_command(struct sdhci_host *, struct mmc_command *);
  44static void sdhci_finish_command(struct sdhci_host *);
  45
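/*
 * Dump the controller register state to the kernel log. Called from the
 * error paths below to aid debugging of timeouts and unexpected interrupts.
 */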
  46static void sdhci_dumpregs(struct sdhci_host *host)
  47{
  48        printk(KERN_DEBUG DRIVER_NAME ": ============== REGISTER DUMP ==============\n");
  49
  50        printk(KERN_DEBUG DRIVER_NAME ": Sys addr: 0x%08x | Version:  0x%08x\n",
  51                sdhci_readl(host, SDHCI_DMA_ADDRESS),
  52                sdhci_readw(host, SDHCI_HOST_VERSION));
  53        printk(KERN_DEBUG DRIVER_NAME ": Blk size: 0x%08x | Blk cnt:  0x%08x\n",
  54                sdhci_readw(host, SDHCI_BLOCK_SIZE),
  55                sdhci_readw(host, SDHCI_BLOCK_COUNT));
  56        printk(KERN_DEBUG DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
  57                sdhci_readl(host, SDHCI_ARGUMENT),
  58                sdhci_readw(host, SDHCI_TRANSFER_MODE));
  59        printk(KERN_DEBUG DRIVER_NAME ": Present:  0x%08x | Host ctl: 0x%08x\n",
  60                sdhci_readl(host, SDHCI_PRESENT_STATE),
  61                sdhci_readb(host, SDHCI_HOST_CONTROL));
  62        printk(KERN_DEBUG DRIVER_NAME ": Power:    0x%08x | Blk gap:  0x%08x\n",
  63                sdhci_readb(host, SDHCI_POWER_CONTROL),
  64                sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
  65        printk(KERN_DEBUG DRIVER_NAME ": Wake-up:  0x%08x | Clock:    0x%08x\n",
  66                sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
  67                sdhci_readw(host, SDHCI_CLOCK_CONTROL));
  68        printk(KERN_DEBUG DRIVER_NAME ": Timeout:  0x%08x | Int stat: 0x%08x\n",
  69                sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
  70                sdhci_readl(host, SDHCI_INT_STATUS));
  71        printk(KERN_DEBUG DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
  72                sdhci_readl(host, SDHCI_INT_ENABLE),
  73                sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
  74        printk(KERN_DEBUG DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
  75                sdhci_readw(host, SDHCI_ACMD12_ERR),
  76                sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
  77        printk(KERN_DEBUG DRIVER_NAME ": Caps:     0x%08x | Max curr: 0x%08x\n",
  78                sdhci_readl(host, SDHCI_CAPABILITIES),
  79                sdhci_readl(host, SDHCI_MAX_CURRENT));
  80
  81        if (host->flags & SDHCI_USE_ADMA)
  82                printk(KERN_DEBUG DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
  83                       readl(host->ioaddr + SDHCI_ADMA_ERROR),
  84                       readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
  85
  86        printk(KERN_DEBUG DRIVER_NAME ": ===========================================\n");
  87}
  88
  89/*****************************************************************************\
  90 *                                                                           *
  91 * Low level functions                                                       *
  92 *                                                                           *
  93\*****************************************************************************/
  94
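/*
 * Update the interrupt status enable and signal enable registers in one go:
 * bits in 'clear' are masked, bits in 'set' are enabled. Both registers are
 * kept identical so every enabled status also raises an interrupt.
 */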
  95static void sdhci_clear_set_irqs(struct sdhci_host *host, u32 clear, u32 set)
  96{
  97        u32 ier;
  98
  99        ier = sdhci_readl(host, SDHCI_INT_ENABLE);
 100        ier &= ~clear;
 101        ier |= set;
 102        sdhci_writel(host, ier, SDHCI_INT_ENABLE);
 103        sdhci_writel(host, ier, SDHCI_SIGNAL_ENABLE);
 104}
 105
 106static void sdhci_unmask_irqs(struct sdhci_host *host, u32 irqs)
 107{
 108        sdhci_clear_set_irqs(host, 0, irqs);
 109}
 110
 111static void sdhci_mask_irqs(struct sdhci_host *host, u32 irqs)
 112{
 113        sdhci_clear_set_irqs(host, irqs, 0);
 114}
 115
 116static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
 117{
 118        u32 irqs = SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT;
 119
 120        if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
 121                return;
 122
 123        if (enable)
 124                sdhci_unmask_irqs(host, irqs);
 125        else
 126                sdhci_mask_irqs(host, irqs);
 127}
 128
 129static void sdhci_enable_card_detection(struct sdhci_host *host)
 130{
 131        sdhci_set_card_detection(host, true);
 132}
 133
 134static void sdhci_disable_card_detection(struct sdhci_host *host)
 135{
 136        sdhci_set_card_detection(host, false);
 137}
 138
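/*
 * Issue a software reset for the parts selected by 'mask' (CMD, DATA or ALL)
 * and poll up to 100 ms for the controller to clear the bit. Saves and
 * restores the interrupt enables for controllers with the
 * RESTORE_IRQS_AFTER_RESET quirk.
 */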
 139static void sdhci_reset(struct sdhci_host *host, u8 mask)
 140{
 141        unsigned long timeout;
 142        u32 uninitialized_var(ier);
 143
 144        if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
 145                if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) &
 146                        SDHCI_CARD_PRESENT))
 147                        return;
 148        }
 149
 150        if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET)
 151                ier = sdhci_readl(host, SDHCI_INT_ENABLE);
 152
 153        sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
 154
 155        if (mask & SDHCI_RESET_ALL)
 156                host->clock = 0;
 157
 158        /* Wait max 100 ms */
 159        timeout = 100;
 160
 161        /* hw clears the bit when it's done */
 162        while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
 163                if (timeout == 0) {
 164                        printk(KERN_ERR "%s: Reset 0x%x never completed.\n",
 165                                mmc_hostname(host->mmc), (int)mask);
 166                        sdhci_dumpregs(host);
 167                        return;
 168                }
 169                timeout--;
 170                mdelay(1);
 171        }
 172
 173        if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET)
 174                sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK, ier);
 175}
 176
 177static void sdhci_init(struct sdhci_host *host)
 178{
 179        sdhci_reset(host, SDHCI_RESET_ALL);
 180
 181        sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK,
 182                SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
 183                SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_INDEX |
 184                SDHCI_INT_END_BIT | SDHCI_INT_CRC | SDHCI_INT_TIMEOUT |
 185                SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE);
 186}
 187
 188static void sdhci_reinit(struct sdhci_host *host)
 189{
 190        sdhci_init(host);
 191        sdhci_enable_card_detection(host);
 192}
 193
 194static void sdhci_activate_led(struct sdhci_host *host)
 195{
 196        u8 ctrl;
 197
 198        ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
 199        ctrl |= SDHCI_CTRL_LED;
 200        sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
 201}
 202
 203static void sdhci_deactivate_led(struct sdhci_host *host)
 204{
 205        u8 ctrl;
 206
 207        ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
 208        ctrl &= ~SDHCI_CTRL_LED;
 209        sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
 210}
 211
 212#ifdef SDHCI_USE_LEDS_CLASS
 213static void sdhci_led_control(struct led_classdev *led,
 214        enum led_brightness brightness)
 215{
 216        struct sdhci_host *host = container_of(led, struct sdhci_host, led);
 217        unsigned long flags;
 218
 219        spin_lock_irqsave(&host->lock, flags);
 220
 221        if (brightness == LED_OFF)
 222                sdhci_deactivate_led(host);
 223        else
 224                sdhci_activate_led(host);
 225
 226        spin_unlock_irqrestore(&host->lock, flags);
 227}
 228#endif
 229
 230/*****************************************************************************\
 231 *                                                                           *
 232 * Core functions                                                            *
 233 *                                                                           *
 234\*****************************************************************************/
 235
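/*
 * PIO helpers: transfer one block at a time through the 32-bit buffer data
 * port, walking the request's scatterlist with an atomic sg_miter.
 */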
 236static void sdhci_read_block_pio(struct sdhci_host *host)
 237{
 238        unsigned long flags;
 239        size_t blksize, len, chunk;
 240        u32 uninitialized_var(scratch);
 241        u8 *buf;
 242
 243        DBG("PIO reading\n");
 244
 245        blksize = host->data->blksz;
 246        chunk = 0;
 247
 248        local_irq_save(flags);
 249
 250        while (blksize) {
 251                if (!sg_miter_next(&host->sg_miter))
 252                        BUG();
 253
 254                len = min(host->sg_miter.length, blksize);
 255
 256                blksize -= len;
 257                host->sg_miter.consumed = len;
 258
 259                buf = host->sg_miter.addr;
 260
 261                while (len) {
 262                        if (chunk == 0) {
 263                                scratch = sdhci_readl(host, SDHCI_BUFFER);
 264                                chunk = 4;
 265                        }
 266
 267                        *buf = scratch & 0xFF;
 268
 269                        buf++;
 270                        scratch >>= 8;
 271                        chunk--;
 272                        len--;
 273                }
 274        }
 275
 276        sg_miter_stop(&host->sg_miter);
 277
 278        local_irq_restore(flags);
 279}
 280
 281static void sdhci_write_block_pio(struct sdhci_host *host)
 282{
 283        unsigned long flags;
 284        size_t blksize, len, chunk;
 285        u32 scratch;
 286        u8 *buf;
 287
 288        DBG("PIO writing\n");
 289
 290        blksize = host->data->blksz;
 291        chunk = 0;
 292        scratch = 0;
 293
 294        local_irq_save(flags);
 295
 296        while (blksize) {
 297                if (!sg_miter_next(&host->sg_miter))
 298                        BUG();
 299
 300                len = min(host->sg_miter.length, blksize);
 301
 302                blksize -= len;
 303                host->sg_miter.consumed = len;
 304
 305                buf = host->sg_miter.addr;
 306
 307                while (len) {
 308                        scratch |= (u32)*buf << (chunk * 8);
 309
 310                        buf++;
 311                        chunk++;
 312                        len--;
 313
 314                        if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
 315                                sdhci_writel(host, scratch, SDHCI_BUFFER);
 316                                chunk = 0;
 317                                scratch = 0;
 318                        }
 319                }
 320        }
 321
 322        sg_miter_stop(&host->sg_miter);
 323
 324        local_irq_restore(flags);
 325}
 326
 327static void sdhci_transfer_pio(struct sdhci_host *host)
 328{
 329        u32 mask;
 330
 331        BUG_ON(!host->data);
 332
 333        if (host->blocks == 0)
 334                return;
 335
 336        if (host->data->flags & MMC_DATA_READ)
 337                mask = SDHCI_DATA_AVAILABLE;
 338        else
 339                mask = SDHCI_SPACE_AVAILABLE;
 340
 341        /*
 342         * Some controllers (JMicron JMB38x) mess up the buffer bits
 343         * for transfers < 4 bytes. As long as it is just one block,
 344         * we can ignore the bits.
 345         */
 346        if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
 347                (host->data->blocks == 1))
 348                mask = ~0;
 349
 350        while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
 351                if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
 352                        udelay(100);
 353
 354                if (host->data->flags & MMC_DATA_READ)
 355                        sdhci_read_block_pio(host);
 356                else
 357                        sdhci_write_block_pio(host);
 358
 359                host->blocks--;
 360                if (host->blocks == 0)
 361                        break;
 362        }
 363
 364        DBG("PIO transfer complete.\n");
 365}
 366
 367static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
 368{
 369        local_irq_save(*flags);
 370        return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
 371}
 372
 373static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
 374{
 375        kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
 376        local_irq_restore(*flags);
 377}
 378
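/*
 * Build the ADMA2 descriptor table for a request. Scatterlist entries that
 * are not 32-bit aligned get their leading (up to three) bytes redirected
 * through a small bounce buffer so that every descriptor address is aligned,
 * as the SDHCI specification requires.
 */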
 379static int sdhci_adma_table_pre(struct sdhci_host *host,
 380        struct mmc_data *data)
 381{
 382        int direction;
 383
 384        u8 *desc;
 385        u8 *align;
 386        dma_addr_t addr;
 387        dma_addr_t align_addr;
 388        int len, offset;
 389
 390        struct scatterlist *sg;
 391        int i;
 392        char *buffer;
 393        unsigned long flags;
 394
 395        /*
  396         * The spec does not specify the endianness of the descriptor
  397         * table. We currently assume that it is little endian.
 398         */
 399
 400        if (data->flags & MMC_DATA_READ)
 401                direction = DMA_FROM_DEVICE;
 402        else
 403                direction = DMA_TO_DEVICE;
 404
 405        /*
 406         * The ADMA descriptor table is mapped further down as we
 407         * need to fill it with data first.
 408         */
 409
 410        host->align_addr = dma_map_single(mmc_dev(host->mmc),
 411                host->align_buffer, 128 * 4, direction);
 412        if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr))
 413                goto fail;
 414        BUG_ON(host->align_addr & 0x3);
 415
 416        host->sg_count = dma_map_sg(mmc_dev(host->mmc),
 417                data->sg, data->sg_len, direction);
 418        if (host->sg_count == 0)
 419                goto unmap_align;
 420
 421        desc = host->adma_desc;
 422        align = host->align_buffer;
 423
 424        align_addr = host->align_addr;
 425
 426        for_each_sg(data->sg, sg, host->sg_count, i) {
 427                addr = sg_dma_address(sg);
 428                len = sg_dma_len(sg);
 429
 430                /*
 431                 * The SDHCI specification states that ADMA
 432                 * addresses must be 32-bit aligned. If they
 433                 * aren't, then we use a bounce buffer for
 434                 * the (up to three) bytes that screw up the
 435                 * alignment.
 436                 */
 437                offset = (4 - (addr & 0x3)) & 0x3;
 438                if (offset) {
 439                        if (data->flags & MMC_DATA_WRITE) {
 440                                buffer = sdhci_kmap_atomic(sg, &flags);
 441                                WARN_ON(((long)buffer & PAGE_MASK) > (PAGE_SIZE - 3));
 442                                memcpy(align, buffer, offset);
 443                                sdhci_kunmap_atomic(buffer, &flags);
 444                        }
 445
 446                        desc[7] = (align_addr >> 24) & 0xff;
 447                        desc[6] = (align_addr >> 16) & 0xff;
 448                        desc[5] = (align_addr >> 8) & 0xff;
 449                        desc[4] = (align_addr >> 0) & 0xff;
 450
 451                        BUG_ON(offset > 65536);
 452
 453                        desc[3] = (offset >> 8) & 0xff;
 454                        desc[2] = (offset >> 0) & 0xff;
 455
 456                        desc[1] = 0x00;
 457                        desc[0] = 0x21; /* tran, valid */
 458
 459                        align += 4;
 460                        align_addr += 4;
 461
 462                        desc += 8;
 463
 464                        addr += offset;
 465                        len -= offset;
 466                }
 467
 468                desc[7] = (addr >> 24) & 0xff;
 469                desc[6] = (addr >> 16) & 0xff;
 470                desc[5] = (addr >> 8) & 0xff;
 471                desc[4] = (addr >> 0) & 0xff;
 472
 473                BUG_ON(len > 65536);
 474
 475                desc[3] = (len >> 8) & 0xff;
 476                desc[2] = (len >> 0) & 0xff;
 477
 478                desc[1] = 0x00;
 479                desc[0] = 0x21; /* tran, valid */
 480
 481                desc += 8;
 482
 483                /*
 484                 * If this triggers then we have a calculation bug
 485                 * somewhere. :/
 486                 */
 487                WARN_ON((desc - host->adma_desc) > (128 * 2 + 1) * 4);
 488        }
 489
 490        /*
 491         * Add a terminating entry.
 492         */
 493        desc[7] = 0;
 494        desc[6] = 0;
 495        desc[5] = 0;
 496        desc[4] = 0;
 497
 498        desc[3] = 0;
 499        desc[2] = 0;
 500
 501        desc[1] = 0x00;
 502        desc[0] = 0x03; /* nop, end, valid */
 503
 504        /*
 505         * Resync align buffer as we might have changed it.
 506         */
 507        if (data->flags & MMC_DATA_WRITE) {
 508                dma_sync_single_for_device(mmc_dev(host->mmc),
 509                        host->align_addr, 128 * 4, direction);
 510        }
 511
 512        host->adma_addr = dma_map_single(mmc_dev(host->mmc),
 513                host->adma_desc, (128 * 2 + 1) * 4, DMA_TO_DEVICE);
 514        if (dma_mapping_error(mmc_dev(host->mmc), host->adma_addr))
 515                goto unmap_entries;
 516        BUG_ON(host->adma_addr & 0x3);
 517
 518        return 0;
 519
 520unmap_entries:
 521        dma_unmap_sg(mmc_dev(host->mmc), data->sg,
 522                data->sg_len, direction);
 523unmap_align:
 524        dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
 525                128 * 4, direction);
 526fail:
 527        return -EINVAL;
 528}
 529
 530static void sdhci_adma_table_post(struct sdhci_host *host,
 531        struct mmc_data *data)
 532{
 533        int direction;
 534
 535        struct scatterlist *sg;
 536        int i, size;
 537        u8 *align;
 538        char *buffer;
 539        unsigned long flags;
 540
 541        if (data->flags & MMC_DATA_READ)
 542                direction = DMA_FROM_DEVICE;
 543        else
 544                direction = DMA_TO_DEVICE;
 545
 546        dma_unmap_single(mmc_dev(host->mmc), host->adma_addr,
 547                (128 * 2 + 1) * 4, DMA_TO_DEVICE);
 548
 549        dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
 550                128 * 4, direction);
 551
 552        if (data->flags & MMC_DATA_READ) {
 553                dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
 554                        data->sg_len, direction);
 555
 556                align = host->align_buffer;
 557
 558                for_each_sg(data->sg, sg, host->sg_count, i) {
 559                        if (sg_dma_address(sg) & 0x3) {
 560                                size = 4 - (sg_dma_address(sg) & 0x3);
 561
 562                                buffer = sdhci_kmap_atomic(sg, &flags);
 563                                WARN_ON(((long)buffer & PAGE_MASK) > (PAGE_SIZE - 3));
 564                                memcpy(buffer, align, size);
 565                                sdhci_kunmap_atomic(buffer, &flags);
 566
 567                                align += 4;
 568                        }
 569                }
 570        }
 571
 572        dma_unmap_sg(mmc_dev(host->mmc), data->sg,
 573                data->sg_len, direction);
 574}
 575
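/*
 * Translate the request's timeout into the counter value written to
 * SDHCI_TIMEOUT_CONTROL. A value of 'count' makes the controller time out
 * after 2^(count + 13) timeout-clock cycles, so we double the candidate
 * timeout until it covers the target, capping the result at 0xE.
 */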
 576static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_data *data)
 577{
 578        u8 count;
 579        unsigned target_timeout, current_timeout;
 580
 581        /*
 582         * If the host controller provides us with an incorrect timeout
 583         * value, just skip the check and use 0xE.  The hardware may take
 584         * longer to time out, but that's much better than having a too-short
 585         * timeout value.
 586         */
 587        if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
 588                return 0xE;
 589
 590        /* timeout in us */
 591        target_timeout = data->timeout_ns / 1000 +
 592                data->timeout_clks / host->clock;
 593
 594        if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)
 595                host->timeout_clk = host->clock / 1000;
 596
 597        /*
 598         * Figure out needed cycles.
 599         * We do this in steps in order to fit inside a 32 bit int.
 600         * The first step is the minimum timeout, which will have a
 601         * minimum resolution of 6 bits:
 602         * (1) 2^13*1000 > 2^22,
 603         * (2) host->timeout_clk < 2^16
 604         *     =>
 605         *     (1) / (2) > 2^6
 606         */
 607        count = 0;
 608        current_timeout = (1 << 13) * 1000 / host->timeout_clk;
 609        while (current_timeout < target_timeout) {
 610                count++;
 611                current_timeout <<= 1;
 612                if (count >= 0xF)
 613                        break;
 614        }
 615
 616        if (count >= 0xF) {
 617                printk(KERN_WARNING "%s: Too large timeout requested!\n",
 618                        mmc_hostname(host->mmc));
 619                count = 0xE;
 620        }
 621
 622        return count;
 623}
 624
 625static void sdhci_set_transfer_irqs(struct sdhci_host *host)
 626{
 627        u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
 628        u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;
 629
 630        if (host->flags & SDHCI_REQ_USE_DMA)
 631                sdhci_clear_set_irqs(host, pio_irqs, dma_irqs);
 632        else
 633                sdhci_clear_set_irqs(host, dma_irqs, pio_irqs);
 634}
 635
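/*
 * Program the controller for the data portion of a request: set the data
 * timeout, decide between DMA (SDMA/ADMA) and PIO, map the scatterlist or
 * start the PIO iterator, and write the block size and block count
 * registers.
 */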
 636static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)
 637{
 638        u8 count;
 639        u8 ctrl;
 640        int ret;
 641
 642        WARN_ON(host->data);
 643
 644        if (data == NULL)
 645                return;
 646
 647        /* Sanity checks */
 648        BUG_ON(data->blksz * data->blocks > 524288);
 649        BUG_ON(data->blksz > host->mmc->max_blk_size);
 650        BUG_ON(data->blocks > 65535);
 651
 652        host->data = data;
 653        host->data_early = 0;
 654
 655        count = sdhci_calc_timeout(host, data);
 656        sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
 657
 658        if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))
 659                host->flags |= SDHCI_REQ_USE_DMA;
 660
 661        /*
 662         * FIXME: This doesn't account for merging when mapping the
 663         * scatterlist.
 664         */
 665        if (host->flags & SDHCI_REQ_USE_DMA) {
 666                int broken, i;
 667                struct scatterlist *sg;
 668
 669                broken = 0;
 670                if (host->flags & SDHCI_USE_ADMA) {
 671                        if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
 672                                broken = 1;
 673                } else {
 674                        if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
 675                                broken = 1;
 676                }
 677
 678                if (unlikely(broken)) {
 679                        for_each_sg(data->sg, sg, data->sg_len, i) {
 680                                if (sg->length & 0x3) {
 681                                        DBG("Reverting to PIO because of "
 682                                                "transfer size (%d)\n",
 683                                                sg->length);
 684                                        host->flags &= ~SDHCI_REQ_USE_DMA;
 685                                        break;
 686                                }
 687                        }
 688                }
 689        }
 690
 691        /*
  692         * The assumption here is that the alignment is the same after
  693         * translation to the device address space.
 694         */
 695        if (host->flags & SDHCI_REQ_USE_DMA) {
 696                int broken, i;
 697                struct scatterlist *sg;
 698
 699                broken = 0;
 700                if (host->flags & SDHCI_USE_ADMA) {
 701                        /*
 702                         * As we use 3 byte chunks to work around
 703                         * alignment problems, we need to check this
 704                         * quirk.
 705                         */
 706                        if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
 707                                broken = 1;
 708                } else {
 709                        if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
 710                                broken = 1;
 711                }
 712
 713                if (unlikely(broken)) {
 714                        for_each_sg(data->sg, sg, data->sg_len, i) {
 715                                if (sg->offset & 0x3) {
 716                                        DBG("Reverting to PIO because of "
 717                                                "bad alignment\n");
 718                                        host->flags &= ~SDHCI_REQ_USE_DMA;
 719                                        break;
 720                                }
 721                        }
 722                }
 723        }
 724
 725        if (host->flags & SDHCI_REQ_USE_DMA) {
 726                if (host->flags & SDHCI_USE_ADMA) {
 727                        ret = sdhci_adma_table_pre(host, data);
 728                        if (ret) {
 729                                /*
 730                                 * This only happens when someone fed
 731                                 * us an invalid request.
 732                                 */
 733                                WARN_ON(1);
 734                                host->flags &= ~SDHCI_REQ_USE_DMA;
 735                        } else {
 736                                sdhci_writel(host, host->adma_addr,
 737                                        SDHCI_ADMA_ADDRESS);
 738                        }
 739                } else {
 740                        int sg_cnt;
 741
 742                        sg_cnt = dma_map_sg(mmc_dev(host->mmc),
 743                                        data->sg, data->sg_len,
 744                                        (data->flags & MMC_DATA_READ) ?
 745                                                DMA_FROM_DEVICE :
 746                                                DMA_TO_DEVICE);
 747                        if (sg_cnt == 0) {
 748                                /*
 749                                 * This only happens when someone fed
 750                                 * us an invalid request.
 751                                 */
 752                                WARN_ON(1);
 753                                host->flags &= ~SDHCI_REQ_USE_DMA;
 754                        } else {
 755                                WARN_ON(sg_cnt != 1);
 756                                sdhci_writel(host, sg_dma_address(data->sg),
 757                                        SDHCI_DMA_ADDRESS);
 758                        }
 759                }
 760        }
 761
 762        /*
 763         * Always adjust the DMA selection as some controllers
 764         * (e.g. JMicron) can't do PIO properly when the selection
 765         * is ADMA.
 766         */
 767        if (host->version >= SDHCI_SPEC_200) {
 768                ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
 769                ctrl &= ~SDHCI_CTRL_DMA_MASK;
 770                if ((host->flags & SDHCI_REQ_USE_DMA) &&
 771                        (host->flags & SDHCI_USE_ADMA))
 772                        ctrl |= SDHCI_CTRL_ADMA32;
 773                else
 774                        ctrl |= SDHCI_CTRL_SDMA;
 775                sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
 776        }
 777
 778        if (!(host->flags & SDHCI_REQ_USE_DMA)) {
 779                int flags;
 780
 781                flags = SG_MITER_ATOMIC;
 782                if (host->data->flags & MMC_DATA_READ)
 783                        flags |= SG_MITER_TO_SG;
 784                else
 785                        flags |= SG_MITER_FROM_SG;
 786                sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
 787                host->blocks = data->blocks;
 788        }
 789
 790        sdhci_set_transfer_irqs(host);
 791
 792        /* We do not handle DMA boundaries, so set it to max (512 KiB) */
 793        sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, data->blksz), SDHCI_BLOCK_SIZE);
 794        sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
 795}
 796
 797static void sdhci_set_transfer_mode(struct sdhci_host *host,
 798        struct mmc_data *data)
 799{
 800        u16 mode;
 801
 802        if (data == NULL)
 803                return;
 804
 805        WARN_ON(!host->data);
 806
 807        mode = SDHCI_TRNS_BLK_CNT_EN;
 808        if (data->blocks > 1)
 809                mode |= SDHCI_TRNS_MULTI;
 810        if (data->flags & MMC_DATA_READ)
 811                mode |= SDHCI_TRNS_READ;
 812        if (host->flags & SDHCI_REQ_USE_DMA)
 813                mode |= SDHCI_TRNS_DMA;
 814
 815        sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
 816}
 817
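/*
 * Complete the data phase of a request: unmap DMA buffers, account the bytes
 * transferred (or zero them on error), and either issue the stop command or
 * schedule the finish tasklet.
 */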
 818static void sdhci_finish_data(struct sdhci_host *host)
 819{
 820        struct mmc_data *data;
 821
 822        BUG_ON(!host->data);
 823
 824        data = host->data;
 825        host->data = NULL;
 826
 827        if (host->flags & SDHCI_REQ_USE_DMA) {
 828                if (host->flags & SDHCI_USE_ADMA)
 829                        sdhci_adma_table_post(host, data);
 830                else {
 831                        dma_unmap_sg(mmc_dev(host->mmc), data->sg,
 832                                data->sg_len, (data->flags & MMC_DATA_READ) ?
 833                                        DMA_FROM_DEVICE : DMA_TO_DEVICE);
 834                }
 835        }
 836
 837        /*
 838         * The specification states that the block count register must
 839         * be updated, but it does not specify at what point in the
 840         * data flow. That makes the register entirely useless to read
 841         * back so we have to assume that nothing made it to the card
 842         * in the event of an error.
 843         */
 844        if (data->error)
 845                data->bytes_xfered = 0;
 846        else
 847                data->bytes_xfered = data->blksz * data->blocks;
 848
 849        if (data->stop) {
 850                /*
 851                 * The controller needs a reset of internal state machines
 852                 * upon error conditions.
 853                 */
 854                if (data->error) {
 855                        sdhci_reset(host, SDHCI_RESET_CMD);
 856                        sdhci_reset(host, SDHCI_RESET_DATA);
 857                }
 858
 859                sdhci_send_command(host, data->stop);
 860        } else
 861                tasklet_schedule(&host->finish_tasklet);
 862}
 863
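/*
 * Issue a command to the controller: wait for the CMD/DAT inhibit bits to
 * clear, prepare any data transfer, then write the argument, transfer mode
 * and command registers. Completion is reported through
 * sdhci_finish_command() from the interrupt handler.
 */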
 864static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
 865{
 866        int flags;
 867        u32 mask;
 868        unsigned long timeout;
 869
 870        WARN_ON(host->cmd);
 871
 872        /* Wait max 10 ms */
 873        timeout = 10;
 874
 875        mask = SDHCI_CMD_INHIBIT;
 876        if ((cmd->data != NULL) || (cmd->flags & MMC_RSP_BUSY))
 877                mask |= SDHCI_DATA_INHIBIT;
 878
  879        /* We shouldn't wait for data inhibit for stop commands, even
  880           though they might use busy signaling */
 881        if (host->mrq->data && (cmd == host->mrq->data->stop))
 882                mask &= ~SDHCI_DATA_INHIBIT;
 883
 884        while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
 885                if (timeout == 0) {
 886                        printk(KERN_ERR "%s: Controller never released "
 887                                "inhibit bit(s).\n", mmc_hostname(host->mmc));
 888                        sdhci_dumpregs(host);
 889                        cmd->error = -EIO;
 890                        tasklet_schedule(&host->finish_tasklet);
 891                        return;
 892                }
 893                timeout--;
 894                mdelay(1);
 895        }
 896
 897        mod_timer(&host->timer, jiffies + 10 * HZ);
 898
 899        host->cmd = cmd;
 900
 901        sdhci_prepare_data(host, cmd->data);
 902
 903        sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);
 904
 905        sdhci_set_transfer_mode(host, cmd->data);
 906
 907        if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
 908                printk(KERN_ERR "%s: Unsupported response type!\n",
 909                        mmc_hostname(host->mmc));
 910                cmd->error = -EINVAL;
 911                tasklet_schedule(&host->finish_tasklet);
 912                return;
 913        }
 914
 915        if (!(cmd->flags & MMC_RSP_PRESENT))
 916                flags = SDHCI_CMD_RESP_NONE;
 917        else if (cmd->flags & MMC_RSP_136)
 918                flags = SDHCI_CMD_RESP_LONG;
 919        else if (cmd->flags & MMC_RSP_BUSY)
 920                flags = SDHCI_CMD_RESP_SHORT_BUSY;
 921        else
 922                flags = SDHCI_CMD_RESP_SHORT;
 923
 924        if (cmd->flags & MMC_RSP_CRC)
 925                flags |= SDHCI_CMD_CRC;
 926        if (cmd->flags & MMC_RSP_OPCODE)
 927                flags |= SDHCI_CMD_INDEX;
 928        if (cmd->data)
 929                flags |= SDHCI_CMD_DATA;
 930
 931        sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
 932}
 933
 934static void sdhci_finish_command(struct sdhci_host *host)
 935{
 936        int i;
 937
 938        BUG_ON(host->cmd == NULL);
 939
 940        if (host->cmd->flags & MMC_RSP_PRESENT) {
 941                if (host->cmd->flags & MMC_RSP_136) {
 942                        /* CRC is stripped so we need to do some shifting. */
 943                        for (i = 0;i < 4;i++) {
 944                                host->cmd->resp[i] = sdhci_readl(host,
 945                                        SDHCI_RESPONSE + (3-i)*4) << 8;
 946                                if (i != 3)
 947                                        host->cmd->resp[i] |=
 948                                                sdhci_readb(host,
 949                                                SDHCI_RESPONSE + (3-i)*4-1);
 950                        }
 951                } else {
 952                        host->cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
 953                }
 954        }
 955
 956        host->cmd->error = 0;
 957
 958        if (host->data && host->data_early)
 959                sdhci_finish_data(host);
 960
 961        if (!host->cmd->data)
 962                tasklet_schedule(&host->finish_tasklet);
 963
 964        host->cmd = NULL;
 965}
 966
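/*
 * Set the SD clock frequency. The host clock is divided by a power of two
 * (1..256) to get at or below the requested rate; the internal clock is then
 * enabled and polled until stable before the card clock is switched on.
 */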
 967static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
 968{
 969        int div;
 970        u16 clk;
 971        unsigned long timeout;
 972
 973        if (clock == host->clock)
 974                return;
 975
 976        if (host->ops->set_clock) {
 977                host->ops->set_clock(host, clock);
 978                if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK)
 979                        return;
 980        }
 981
 982        sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
 983
 984        if (clock == 0)
 985                goto out;
 986
 987        for (div = 1;div < 256;div *= 2) {
 988                if ((host->max_clk / div) <= clock)
 989                        break;
 990        }
 991        div >>= 1;
 992
 993        clk = div << SDHCI_DIVIDER_SHIFT;
 994        clk |= SDHCI_CLOCK_INT_EN;
 995        sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
 996
 997        /* Wait max 20 ms */
 998        timeout = 20;
 999        while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
1000                & SDHCI_CLOCK_INT_STABLE)) {
1001                if (timeout == 0) {
1002                        printk(KERN_ERR "%s: Internal clock never "
1003                                "stabilised.\n", mmc_hostname(host->mmc));
1004                        sdhci_dumpregs(host);
1005                        return;
1006                }
1007                timeout--;
1008                mdelay(1);
1009        }
1010
1011        clk |= SDHCI_CLOCK_CARD_EN;
1012        sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1013
1014out:
1015        host->clock = clock;
1016}
1017
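/*
 * Set the bus power. 'power' is an MMC_VDD_* bit number, or (unsigned
 * short)-1 to switch the power off. Handles controllers that want the
 * voltage set before power-on and those that need a delay afterwards.
 */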
1018static void sdhci_set_power(struct sdhci_host *host, unsigned short power)
1019{
1020        u8 pwr;
1021
1022        if (power == (unsigned short)-1)
1023                pwr = 0;
1024        else {
1025                switch (1 << power) {
1026                case MMC_VDD_165_195:
1027                        pwr = SDHCI_POWER_180;
1028                        break;
1029                case MMC_VDD_29_30:
1030                case MMC_VDD_30_31:
1031                        pwr = SDHCI_POWER_300;
1032                        break;
1033                case MMC_VDD_32_33:
1034                case MMC_VDD_33_34:
1035                        pwr = SDHCI_POWER_330;
1036                        break;
1037                default:
1038                        BUG();
1039                }
1040        }
1041
1042        if (host->pwr == pwr)
1043                return;
1044
1045        host->pwr = pwr;
1046
1047        if (pwr == 0) {
1048                sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1049                return;
1050        }
1051
1052        /*
1053         * Spec says that we should clear the power reg before setting
1054         * a new value. Some controllers don't seem to like this though.
1055         */
1056        if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
1057                sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1058
1059        /*
1060         * At least the Marvell CaFe chip gets confused if we set the voltage
 1061         * and turn on the power at the same time, so set the voltage first.
1062         */
1063        if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
1064                sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
1065
1066        pwr |= SDHCI_POWER_ON;
1067
1068        sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
1069
1070        /*
 1071         * Some controllers need an extra 10ms delay before they
 1072         * can apply clock after applying power
1073         */
1074        if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
1075                mdelay(10);
1076}
1077
1078/*****************************************************************************\
1079 *                                                                           *
1080 * MMC callbacks                                                             *
1081 *                                                                           *
1082\*****************************************************************************/
1083
1084static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1085{
1086        struct sdhci_host *host;
1087        bool present;
1088        unsigned long flags;
1089
1090        host = mmc_priv(mmc);
1091
1092        spin_lock_irqsave(&host->lock, flags);
1093
1094        WARN_ON(host->mrq != NULL);
1095
1096#ifndef SDHCI_USE_LEDS_CLASS
1097        sdhci_activate_led(host);
1098#endif
1099
1100        host->mrq = mrq;
1101
1102        /* If polling, assume that the card is always present. */
1103        if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
1104                present = true;
1105        else
1106                present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
1107                                SDHCI_CARD_PRESENT;
1108
1109        if (!present || host->flags & SDHCI_DEVICE_DEAD) {
1110                host->mrq->cmd->error = -ENOMEDIUM;
1111                tasklet_schedule(&host->finish_tasklet);
1112        } else
1113                sdhci_send_command(host, mrq->cmd);
1114
1115        mmiowb();
1116        spin_unlock_irqrestore(&host->lock, flags);
1117}
1118
1119static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1120{
1121        struct sdhci_host *host;
1122        unsigned long flags;
1123        u8 ctrl;
1124
1125        host = mmc_priv(mmc);
1126
1127        spin_lock_irqsave(&host->lock, flags);
1128
1129        if (host->flags & SDHCI_DEVICE_DEAD)
1130                goto out;
1131
1132        /*
1133         * Reset the chip on each power off.
1134         * Should clear out any weird states.
1135         */
1136        if (ios->power_mode == MMC_POWER_OFF) {
1137                sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
1138                sdhci_reinit(host);
1139        }
1140
1141        sdhci_set_clock(host, ios->clock);
1142
1143        if (ios->power_mode == MMC_POWER_OFF)
1144                sdhci_set_power(host, -1);
1145        else
1146                sdhci_set_power(host, ios->vdd);
1147
1148        ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
1149
1150        if (ios->bus_width == MMC_BUS_WIDTH_4)
1151                ctrl |= SDHCI_CTRL_4BITBUS;
1152        else
1153                ctrl &= ~SDHCI_CTRL_4BITBUS;
1154
1155        if (ios->timing == MMC_TIMING_SD_HS)
1156                ctrl |= SDHCI_CTRL_HISPD;
1157        else
1158                ctrl &= ~SDHCI_CTRL_HISPD;
1159
1160        sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1161
1162        /*
1163         * Some (ENE) controllers go apeshit on some ios operation,
1164         * signalling timeout and CRC errors even on CMD0. Resetting
1165         * it on each ios seems to solve the problem.
1166         */
1167        if(host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
1168                sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
1169
1170out:
1171        mmiowb();
1172        spin_unlock_irqrestore(&host->lock, flags);
1173}
1174
1175static int sdhci_get_ro(struct mmc_host *mmc)
1176{
1177        struct sdhci_host *host;
1178        unsigned long flags;
1179        int present;
1180
1181        host = mmc_priv(mmc);
1182
1183        spin_lock_irqsave(&host->lock, flags);
1184
1185        if (host->flags & SDHCI_DEVICE_DEAD)
1186                present = 0;
1187        else
1188                present = sdhci_readl(host, SDHCI_PRESENT_STATE);
1189
1190        spin_unlock_irqrestore(&host->lock, flags);
1191
1192        if (host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT)
1193                return !!(present & SDHCI_WRITE_PROTECT);
1194        return !(present & SDHCI_WRITE_PROTECT);
1195}
1196
1197static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
1198{
1199        struct sdhci_host *host;
1200        unsigned long flags;
1201
1202        host = mmc_priv(mmc);
1203
1204        spin_lock_irqsave(&host->lock, flags);
1205
1206        if (host->flags & SDHCI_DEVICE_DEAD)
1207                goto out;
1208
1209        if (enable)
1210                sdhci_unmask_irqs(host, SDHCI_INT_CARD_INT);
1211        else
1212                sdhci_mask_irqs(host, SDHCI_INT_CARD_INT);
1213out:
1214        mmiowb();
1215
1216        spin_unlock_irqrestore(&host->lock, flags);
1217}
1218
1219static const struct mmc_host_ops sdhci_ops = {
1220        .request        = sdhci_request,
1221        .set_ios        = sdhci_set_ios,
1222        .get_ro         = sdhci_get_ro,
1223        .enable_sdio_irq = sdhci_enable_sdio_irq,
1224};
1225
1226/*****************************************************************************\
1227 *                                                                           *
1228 * Tasklets                                                                  *
1229 *                                                                           *
1230\*****************************************************************************/
1231
1232static void sdhci_tasklet_card(unsigned long param)
1233{
1234        struct sdhci_host *host;
1235        unsigned long flags;
1236
1237        host = (struct sdhci_host*)param;
1238
1239        spin_lock_irqsave(&host->lock, flags);
1240
1241        if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)) {
1242                if (host->mrq) {
1243                        printk(KERN_ERR "%s: Card removed during transfer!\n",
1244                                mmc_hostname(host->mmc));
1245                        printk(KERN_ERR "%s: Resetting controller.\n",
1246                                mmc_hostname(host->mmc));
1247
1248                        sdhci_reset(host, SDHCI_RESET_CMD);
1249                        sdhci_reset(host, SDHCI_RESET_DATA);
1250
1251                        host->mrq->cmd->error = -ENOMEDIUM;
1252                        tasklet_schedule(&host->finish_tasklet);
1253                }
1254        }
1255
1256        spin_unlock_irqrestore(&host->lock, flags);
1257
1258        mmc_detect_change(host->mmc, msecs_to_jiffies(200));
1259}
1260
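/*
 * Finish tasklet: runs after a request completes (or fails), resets the
 * controller if needed, clears the driver state and hands the request back
 * to the MMC core via mmc_request_done().
 */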
1261static void sdhci_tasklet_finish(unsigned long param)
1262{
1263        struct sdhci_host *host;
1264        unsigned long flags;
1265        struct mmc_request *mrq;
1266
1267        host = (struct sdhci_host*)param;
1268
1269        spin_lock_irqsave(&host->lock, flags);
1270
1271        del_timer(&host->timer);
1272
1273        mrq = host->mrq;
1274
1275        /*
1276         * The controller needs a reset of internal state machines
1277         * upon error conditions.
1278         */
1279        if (!(host->flags & SDHCI_DEVICE_DEAD) &&
1280                (mrq->cmd->error ||
1281                 (mrq->data && (mrq->data->error ||
1282                  (mrq->data->stop && mrq->data->stop->error))) ||
1283                   (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) {
1284
 1285                /* Some controllers need this kick, or the reset won't work here */
1286                if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) {
1287                        unsigned int clock;
1288
1289                        /* This is to force an update */
1290                        clock = host->clock;
1291                        host->clock = 0;
1292                        sdhci_set_clock(host, clock);
1293                }
1294
1295                /* Spec says we should do both at the same time, but Ricoh
1296                   controllers do not like that. */
1297                sdhci_reset(host, SDHCI_RESET_CMD);
1298                sdhci_reset(host, SDHCI_RESET_DATA);
1299        }
1300
1301        host->mrq = NULL;
1302        host->cmd = NULL;
1303        host->data = NULL;
1304
1305#ifndef SDHCI_USE_LEDS_CLASS
1306        sdhci_deactivate_led(host);
1307#endif
1308
1309        mmiowb();
1310        spin_unlock_irqrestore(&host->lock, flags);
1311
1312        mmc_request_done(host->mmc, mrq);
1313}
1314
1315static void sdhci_timeout_timer(unsigned long data)
1316{
1317        struct sdhci_host *host;
1318        unsigned long flags;
1319
1320        host = (struct sdhci_host*)data;
1321
1322        spin_lock_irqsave(&host->lock, flags);
1323
1324        if (host->mrq) {
1325                printk(KERN_ERR "%s: Timeout waiting for hardware "
1326                        "interrupt.\n", mmc_hostname(host->mmc));
1327                sdhci_dumpregs(host);
1328
1329                if (host->data) {
1330                        host->data->error = -ETIMEDOUT;
1331                        sdhci_finish_data(host);
1332                } else {
1333                        if (host->cmd)
1334                                host->cmd->error = -ETIMEDOUT;
1335                        else
1336                                host->mrq->cmd->error = -ETIMEDOUT;
1337
1338                        tasklet_schedule(&host->finish_tasklet);
1339                }
1340        }
1341
1342        mmiowb();
1343        spin_unlock_irqrestore(&host->lock, flags);
1344}
1345
1346/*****************************************************************************\
1347 *                                                                           *
1348 * Interrupt handling                                                        *
1349 *                                                                           *
1350\*****************************************************************************/
1351
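/*
 * Handle command-related interrupt status bits: record timeouts and
 * CRC/end-bit/index errors on the current command, cope with the end-of-busy
 * signalling described below, and complete the command on
 * SDHCI_INT_RESPONSE.
 */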
1352static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
1353{
1354        BUG_ON(intmask == 0);
1355
1356        if (!host->cmd) {
1357                printk(KERN_ERR "%s: Got command interrupt 0x%08x even "
1358                        "though no command operation was in progress.\n",
1359                        mmc_hostname(host->mmc), (unsigned)intmask);
1360                sdhci_dumpregs(host);
1361                return;
1362        }
1363
1364        if (intmask & SDHCI_INT_TIMEOUT)
1365                host->cmd->error = -ETIMEDOUT;
1366        else if (intmask & (SDHCI_INT_CRC | SDHCI_INT_END_BIT |
1367                        SDHCI_INT_INDEX))
1368                host->cmd->error = -EILSEQ;
1369
1370        if (host->cmd->error) {
1371                tasklet_schedule(&host->finish_tasklet);
1372                return;
1373        }
1374
1375        /*
 1376         * The host can send an interrupt when the busy state has
1377         * ended, allowing us to wait without wasting CPU cycles.
1378         * Unfortunately this is overloaded on the "data complete"
1379         * interrupt, so we need to take some care when handling
1380         * it.
1381         *
1382         * Note: The 1.0 specification is a bit ambiguous about this
1383         *       feature so there might be some problems with older
1384         *       controllers.
1385         */
1386        if (host->cmd->flags & MMC_RSP_BUSY) {
1387                if (host->cmd->data)
1388                        DBG("Cannot wait for busy signal when also "
1389                                "doing a data transfer");
1390                else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ))
1391                        return;
1392
1393                /* The controller does not support the end-of-busy IRQ,
1394                 * fall through and take the SDHCI_INT_RESPONSE */
1395        }
1396
1397        if (intmask & SDHCI_INT_RESPONSE)
1398                sdhci_finish_command(host);
1399}
1400
1401#ifdef DEBUG
1402static void sdhci_show_adma_error(struct sdhci_host *host)
1403{
1404        const char *name = mmc_hostname(host->mmc);
1405        u8 *desc = host->adma_desc;
1406        __le32 *dma;
1407        __le16 *len;
1408        u8 attr;
1409
1410        sdhci_dumpregs(host);
1411
1412        while (true) {
1413                dma = (__le32 *)(desc + 4);
1414                len = (__le16 *)(desc + 2);
1415                attr = *desc;
1416
1417                DBG("%s: %p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
1418                    name, desc, le32_to_cpu(*dma), le16_to_cpu(*len), attr);
1419
1420                desc += 8;
1421
1422                if (attr & 2)
1423                        break;
1424        }
1425}
1426#else
1427static void sdhci_show_adma_error(struct sdhci_host *host) { }
1428#endif
1429
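/*
 * Handle data-related interrupt status bits: data timeouts, CRC/end-bit and
 * ADMA errors, PIO buffer-ready events, SDMA boundary interrupts, and the
 * final data-end completion.
 */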
1430static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
1431{
1432        BUG_ON(intmask == 0);
1433
1434        if (!host->data) {
1435                /*
1436                 * The "data complete" interrupt is also used to
1437                 * indicate that a busy state has ended. See comment
1438                 * above in sdhci_cmd_irq().
1439                 */
1440                if (host->cmd && (host->cmd->flags & MMC_RSP_BUSY)) {
1441                        if (intmask & SDHCI_INT_DATA_END) {
1442                                sdhci_finish_command(host);
1443                                return;
1444                        }
1445                }
1446
1447                printk(KERN_ERR "%s: Got data interrupt 0x%08x even "
1448                        "though no data operation was in progress.\n",
1449                        mmc_hostname(host->mmc), (unsigned)intmask);
1450                sdhci_dumpregs(host);
1451
1452                return;
1453        }
1454
1455        if (intmask & SDHCI_INT_DATA_TIMEOUT)
1456                host->data->error = -ETIMEDOUT;
1457        else if (intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_END_BIT))
1458                host->data->error = -EILSEQ;
1459        else if (intmask & SDHCI_INT_ADMA_ERROR) {
1460                printk(KERN_ERR "%s: ADMA error\n", mmc_hostname(host->mmc));
1461                sdhci_show_adma_error(host);
1462                host->data->error = -EIO;
1463        }
1464
1465        if (host->data->error)
1466                sdhci_finish_data(host);
1467        else {
1468                if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
1469                        sdhci_transfer_pio(host);
1470
1471                /*
1472                 * We currently don't do anything fancy with DMA
1473                 * boundaries, but as we can't disable the feature
1474                 * we need to at least restart the transfer.
1475                 */
1476                if (intmask & SDHCI_INT_DMA_END)
1477                        sdhci_writel(host, sdhci_readl(host, SDHCI_DMA_ADDRESS),
1478                                SDHCI_DMA_ADDRESS);
1479
1480                if (intmask & SDHCI_INT_DATA_END) {
1481                        if (host->cmd) {
1482                                /*
1483                                 * Data managed to finish before the
1484                                 * command completed. Make sure we do
1485                                 * things in the proper order.
1486                                 */
1487                                host->data_early = 1;
1488                        } else {
1489                                sdhci_finish_data(host);
1490                        }
1491                }
1492        }
1493}
1494
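/*
 * Top-level interrupt handler. Reads and acknowledges the interrupt status
 * register, dispatches card-detect, command and data interrupts, and defers
 * SDIO card interrupts until after the host lock is dropped.
 */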
1495static irqreturn_t sdhci_irq(int irq, void *dev_id)
1496{
1497        irqreturn_t result;
1498        struct sdhci_host* host = dev_id;
1499        u32 intmask;
1500        int cardint = 0;
1501
1502        spin_lock(&host->lock);
1503
1504        intmask = sdhci_readl(host, SDHCI_INT_STATUS);
1505
1506        if (!intmask || intmask == 0xffffffff) {
1507                result = IRQ_NONE;
1508                goto out;
1509        }
1510
1511        DBG("*** %s got interrupt: 0x%08x\n",
1512                mmc_hostname(host->mmc), intmask);
1513
1514        if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
1515                sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
1516                        SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
1517                tasklet_schedule(&host->card_tasklet);
1518        }
1519
1520        intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);
1521
1522        if (intmask & SDHCI_INT_CMD_MASK) {
1523                sdhci_writel(host, intmask & SDHCI_INT_CMD_MASK,
1524                        SDHCI_INT_STATUS);
1525                sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
1526        }
1527
1528        if (intmask & SDHCI_INT_DATA_MASK) {
1529                sdhci_writel(host, intmask & SDHCI_INT_DATA_MASK,
1530                        SDHCI_INT_STATUS);
1531                sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
1532        }
1533
1534        intmask &= ~(SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK);
1535
1536        intmask &= ~SDHCI_INT_ERROR;
1537
1538        if (intmask & SDHCI_INT_BUS_POWER) {
1539                printk(KERN_ERR "%s: Card is consuming too much power!\n",
1540                        mmc_hostname(host->mmc));
1541                sdhci_writel(host, SDHCI_INT_BUS_POWER, SDHCI_INT_STATUS);
1542        }
1543
1544        intmask &= ~SDHCI_INT_BUS_POWER;
1545
1546        if (intmask & SDHCI_INT_CARD_INT)
1547                cardint = 1;
1548
1549        intmask &= ~SDHCI_INT_CARD_INT;
1550
1551        if (intmask) {
1552                printk(KERN_ERR "%s: Unexpected interrupt 0x%08x.\n",
1553                        mmc_hostname(host->mmc), intmask);
1554                sdhci_dumpregs(host);
1555
1556                sdhci_writel(host, intmask, SDHCI_INT_STATUS);
1557        }
1558
1559        result = IRQ_HANDLED;
1560
1561        mmiowb();
1562out:
1563        spin_unlock(&host->lock);
1564
1565        /*
1566         * We have to delay this as it calls back into the driver.
1567         */
1568        if (cardint)
1569                mmc_signal_sdio_irq(host->mmc);
1570
1571        return result;
1572}
1573
1574/*****************************************************************************\
1575 *                                                                           *
1576 * Suspend/resume                                                            *
1577 *                                                                           *
1578\*****************************************************************************/
1579
1580#ifdef CONFIG_PM
1581
1582int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state)
1583{
1584        int ret;
1585
1586        sdhci_disable_card_detection(host);
1587
1588        ret = mmc_suspend_host(host->mmc, state);
1589        if (ret)
1590                return ret;
1591
1592        free_irq(host->irq, host);
1593
1594        return 0;
1595}
1596
1597EXPORT_SYMBOL_GPL(sdhci_suspend_host);
1598
1599int sdhci_resume_host(struct sdhci_host *host)
1600{
1601        int ret;
1602
1603        if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
1604                if (host->ops->enable_dma)
1605                        host->ops->enable_dma(host);
1606        }
1607
1608        ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
1609                          mmc_hostname(host->mmc), host);
1610        if (ret)
1611                return ret;
1612
1613        sdhci_init(host);
1614        mmiowb();
1615
1616        ret = mmc_resume_host(host->mmc);
1617        if (ret)
1618                return ret;
1619
1620        sdhci_enable_card_detection(host);
1621
1622        return 0;
1623}
1624
1625EXPORT_SYMBOL_GPL(sdhci_resume_host);
1626
1627#endif /* CONFIG_PM */
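    /*
     * Usage sketch (illustrative only, not built): bus glue such as
     * sdhci-pci calls the helpers above from its own PM hooks. A minimal
     * wrapper, with assumed names, might look like this:
     */
    #if 0
    static int example_glue_suspend(struct platform_device *pdev,
                                    pm_message_t state)
    {
            struct sdhci_host *host = platform_get_drvdata(pdev);

            return sdhci_suspend_host(host, state);
    }

    static int example_glue_resume(struct platform_device *pdev)
    {
            struct sdhci_host *host = platform_get_drvdata(pdev);

            return sdhci_resume_host(host);
    }
    #endif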
1628
1629/*****************************************************************************\
1630 *                                                                           *
1631 * Device allocation/registration                                            *
1632 *                                                                           *
1633\*****************************************************************************/
1634
1635struct sdhci_host *sdhci_alloc_host(struct device *dev,
1636        size_t priv_size)
1637{
1638        struct mmc_host *mmc;
1639        struct sdhci_host *host;
1640
1641        WARN_ON(dev == NULL);
1642
1643        mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
1644        if (!mmc)
1645                return ERR_PTR(-ENOMEM);
1646
1647        host = mmc_priv(mmc);
1648        host->mmc = mmc;
1649
1650        return host;
1651}
1652
1653EXPORT_SYMBOL_GPL(sdhci_alloc_host);
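    /*
     * Usage sketch (illustrative only, not built): callers reserve private
     * space via priv_size and later retrieve it with sdhci_priv(). The
     * names below are assumptions, not part of this driver:
     */
    #if 0
    struct example_glue_priv {
            struct clk *clk;                /* whatever the glue needs */
    };

    static struct sdhci_host *example_glue_alloc(struct device *dev)
    {
            struct sdhci_host *host;
            struct example_glue_priv *priv;

            host = sdhci_alloc_host(dev, sizeof(struct example_glue_priv));
            if (IS_ERR(host))
                    return host;

            priv = sdhci_priv(host);
            priv->clk = NULL;               /* filled in by the glue driver */

            return host;
    }
    #endif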
1654
1655int sdhci_add_host(struct sdhci_host *host)
1656{
1657        struct mmc_host *mmc;
1658        unsigned int caps;
1659        int ret;
1660
1661        WARN_ON(host == NULL);
1662        if (host == NULL)
1663                return -EINVAL;
1664
1665        mmc = host->mmc;
1666
1667        if (debug_quirks)
1668                host->quirks = debug_quirks;
1669
1670        sdhci_reset(host, SDHCI_RESET_ALL);
1671
1672        host->version = sdhci_readw(host, SDHCI_HOST_VERSION);
1673        host->version = (host->version & SDHCI_SPEC_VER_MASK)
1674                                >> SDHCI_SPEC_VER_SHIFT;
1675        if (host->version > SDHCI_SPEC_200) {
1676                printk(KERN_ERR "%s: Unknown controller version (%d). "
1677                        "You may experience problems.\n", mmc_hostname(mmc),
1678                        host->version);
1679        }
1680
1681        caps = sdhci_readl(host, SDHCI_CAPABILITIES);
1682
1683        if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
1684                host->flags |= SDHCI_USE_SDMA;
1685        else if (!(caps & SDHCI_CAN_DO_SDMA))
1686                DBG("Controller doesn't have SDMA capability\n");
1687        else
1688                host->flags |= SDHCI_USE_SDMA;
1689
1690        if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
1691                (host->flags & SDHCI_USE_SDMA)) {
1692                DBG("Disabling DMA as it is marked broken\n");
1693                host->flags &= ~SDHCI_USE_SDMA;
1694        }
1695
1696        if ((host->version >= SDHCI_SPEC_200) && (caps & SDHCI_CAN_DO_ADMA2))
1697                host->flags |= SDHCI_USE_ADMA;
1698
1699        if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
1700                (host->flags & SDHCI_USE_ADMA)) {
1701                DBG("Disabling ADMA as it is marked broken\n");
1702                host->flags &= ~SDHCI_USE_ADMA;
1703        }
1704
1705        if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
1706                if (host->ops->enable_dma) {
1707                        if (host->ops->enable_dma(host)) {
1708                                printk(KERN_WARNING "%s: No suitable DMA "
1709                                        "available. Falling back to PIO.\n",
1710                                        mmc_hostname(mmc));
1711                                host->flags &=
1712                                        ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
1713                        }
1714                }
1715        }
1716
1717        if (host->flags & SDHCI_USE_ADMA) {
1718                /*
1719                 * We need to allocate descriptors for all sg entries
1720                 * (128) and potentially one alignment transfer for
1721                 * each of those entries.
1722                 */
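                    /* That is at most 128 + 128 + 1 (terminating) entries. */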
1723                host->adma_desc = kmalloc((128 * 2 + 1) * 4, GFP_KERNEL);
1724                host->align_buffer = kmalloc(128 * 4, GFP_KERNEL);
1725                if (!host->adma_desc || !host->align_buffer) {
1726                        kfree(host->adma_desc);
1727                        kfree(host->align_buffer);
1728                        printk(KERN_WARNING "%s: Unable to allocate ADMA "
1729                                "buffers. Falling back to standard DMA.\n",
1730                                mmc_hostname(mmc));
1731                        host->flags &= ~SDHCI_USE_ADMA;
1732                }
1733        }
1734
1735        /*
1736         * If we use DMA, it is up to the caller to set the DMA
1737         * mask, but PIO does not go through the DMA hardware, so
1738         * in that case we set a 64-bit mask here ourselves.
1739         */
1740        if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
1741                host->dma_mask = DMA_BIT_MASK(64);
1742                mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
1743        }
1744
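            /*
             * The base clock field in the capabilities register is given
             * in MHz; convert it to Hz.
             */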
1745        host->max_clk =
1746                (caps & SDHCI_CLOCK_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT;
1747        host->max_clk *= 1000000;
1748        if (host->max_clk == 0) {
1749                if (!host->ops->get_max_clock) {
1750                        printk(KERN_ERR
1751                               "%s: Hardware doesn't specify base clock "
1752                               "frequency.\n", mmc_hostname(mmc));
1753                        return -ENODEV;
1754                }
1755                host->max_clk = host->ops->get_max_clock(host);
1756        }
1757
1758        host->timeout_clk =
1759                (caps & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT;
1760        if (host->timeout_clk == 0) {
1761                if (host->ops->get_timeout_clock) {
1762                        host->timeout_clk = host->ops->get_timeout_clock(host);
1763                } else if (!(host->quirks &
1764                                SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
1765                        printk(KERN_ERR
1766                               "%s: Hardware doesn't specify timeout clock "
1767                               "frequency.\n", mmc_hostname(mmc));
1768                        return -ENODEV;
1769                }
1770        }
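            /*
             * The timeout clock field is in kHz unless the unit bit flags
             * it as MHz; after the adjustment below, host->timeout_clk is
             * in kHz either way.
             */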
1771        if (caps & SDHCI_TIMEOUT_CLK_UNIT)
1772                host->timeout_clk *= 1000;
1773
1774        /*
1775         * Set host parameters.
1776         */
1777        mmc->ops = &sdhci_ops;
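            /*
             * Controllers up to SDHCI 2.00 divide the base clock by a power
             * of two no larger than 256, so the standard minimum frequency
             * is max_clk / 256; nonstandard clocks can override this via
             * get_min_clock().
             */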
1778        if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK &&
1779                        host->ops->set_clock && host->ops->get_min_clock)
1780                mmc->f_min = host->ops->get_min_clock(host);
1781        else
1782                mmc->f_min = host->max_clk / 256;
1783        mmc->f_max = host->max_clk;
1784        mmc->caps = MMC_CAP_SDIO_IRQ;
1785
1786        if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
1787                mmc->caps |= MMC_CAP_4_BIT_DATA;
1788
1789        if (caps & SDHCI_CAN_DO_HISPD)
1790                mmc->caps |= MMC_CAP_SD_HIGHSPEED;
1791
1792        if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
1793                mmc->caps |= MMC_CAP_NEEDS_POLL;
1794
1795        mmc->ocr_avail = 0;
1796        if (caps & SDHCI_CAN_VDD_330)
1797                mmc->ocr_avail |= MMC_VDD_32_33|MMC_VDD_33_34;
1798        if (caps & SDHCI_CAN_VDD_300)
1799                mmc->ocr_avail |= MMC_VDD_29_30|MMC_VDD_30_31;
1800        if (caps & SDHCI_CAN_VDD_180)
1801                mmc->ocr_avail |= MMC_VDD_165_195;
1802
1803        if (mmc->ocr_avail == 0) {
1804        printk(KERN_ERR "%s: Hardware doesn't report any "
1805                "supported voltages.\n", mmc_hostname(mmc));
1806                return -ENODEV;
1807        }
1808
1809        spin_lock_init(&host->lock);
1810
1811        /*
1812         * Maximum number of segments. Depends on whether the hardware
1813         * can do scatter/gather.
1814         */
1815        if (host->flags & SDHCI_USE_ADMA)
1816                mmc->max_hw_segs = 128;
1817        else if (host->flags & SDHCI_USE_SDMA)
1818                mmc->max_hw_segs = 1;
1819        else /* PIO */
1820                mmc->max_hw_segs = 128;
1821        mmc->max_phys_segs = 128;
1822
1823        /*
1824         * Maximum request size in a single transfer. Limited by the DMA
1825         * boundary size (512 KiB).
1826         */
1827        mmc->max_req_size = 524288;
1828
1829        /*
1830         * Maximum segment size. Could be one segment with the maximum number
1831         * of bytes, but when doing hardware scatter/gather each entry
1832         * cannot be larger than 64 KiB.
1833         */
1834        if (host->flags & SDHCI_USE_ADMA)
1835                mmc->max_seg_size = 65536;
1836        else
1837                mmc->max_seg_size = mmc->max_req_size;
1838
1839        /*
1840         * Maximum block size. This varies from controller to controller and
1841         * is specified in the capabilities register.
1842         */
1843        if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
1844                mmc->max_blk_size = 2;
1845        } else {
1846                mmc->max_blk_size = (caps & SDHCI_MAX_BLOCK_MASK) >>
1847                                SDHCI_MAX_BLOCK_SHIFT;
1848                if (mmc->max_blk_size >= 3) {
1849                        printk(KERN_WARNING "%s: Invalid maximum block size, "
1850                                "assuming 512 bytes\n", mmc_hostname(mmc));
1851                        mmc->max_blk_size = 0;
1852                }
1853        }
1854
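            /* The capability field encodes 0 = 512, 1 = 1024, 2 = 2048 bytes. */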
1855        mmc->max_blk_size = 512 << mmc->max_blk_size;
1856
1857        /*
1858         * Maximum block count.
1859         */
1860        mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
1861
1862        /*
1863         * Init tasklets.
1864         */
1865        tasklet_init(&host->card_tasklet,
1866                sdhci_tasklet_card, (unsigned long)host);
1867        tasklet_init(&host->finish_tasklet,
1868                sdhci_tasklet_finish, (unsigned long)host);
1869
1870        setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host);
1871
1872        ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
1873                mmc_hostname(mmc), host);
1874        if (ret)
1875                goto untasklet;
1876
1877        sdhci_init(host);
1878
1879#ifdef CONFIG_MMC_DEBUG
1880        sdhci_dumpregs(host);
1881#endif
1882
1883#ifdef SDHCI_USE_LEDS_CLASS
1884        snprintf(host->led_name, sizeof(host->led_name),
1885                "%s::", mmc_hostname(mmc));
1886        host->led.name = host->led_name;
1887        host->led.brightness = LED_OFF;
1888        host->led.default_trigger = mmc_hostname(mmc);
1889        host->led.brightness_set = sdhci_led_control;
1890
1891        ret = led_classdev_register(mmc_dev(mmc), &host->led);
1892        if (ret)
1893                goto reset;
1894#endif
1895
1896        mmiowb();
1897
1898        mmc_add_host(mmc);
1899
1900        printk(KERN_INFO "%s: SDHCI controller on %s [%s] using %s\n",
1901                mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
1902                (host->flags & SDHCI_USE_ADMA) ? "ADMA" :
1903                (host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");
1904
1905        sdhci_enable_card_detection(host);
1906
1907        return 0;
1908
1909#ifdef SDHCI_USE_LEDS_CLASS
1910reset:
1911        sdhci_reset(host, SDHCI_RESET_ALL);
1912        free_irq(host->irq, host);
1913#endif
1914untasklet:
1915        tasklet_kill(&host->card_tasklet);
1916        tasklet_kill(&host->finish_tasklet);
1917
1918        return ret;
1919}
1920
1921EXPORT_SYMBOL_GPL(sdhci_add_host);
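    /*
     * Usage sketch (illustrative only, not built): before sdhci_add_host()
     * the caller must have filled in at least ioaddr, irq, ops and hw_name,
     * plus any quirks. A skeletal registration step, with assumed names:
     */
    #if 0
    static int example_glue_register(struct sdhci_host *host,
                                     void __iomem *ioaddr, int irq)
    {
            int ret;

            host->hw_name = "example";
            host->ops = &example_glue_sdhci_ops;    /* assumed ops table */
            host->quirks = 0;
            host->ioaddr = ioaddr;
            host->irq = irq;

            ret = sdhci_add_host(host);
            if (ret)
                    sdhci_free_host(host);

            return ret;
    }
    #endif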
1922
1923void sdhci_remove_host(struct sdhci_host *host, int dead)
1924{
1925        unsigned long flags;
1926
1927        if (dead) {
1928                spin_lock_irqsave(&host->lock, flags);
1929
1930                host->flags |= SDHCI_DEVICE_DEAD;
1931
1932                if (host->mrq) {
1933                printk(KERN_ERR "%s: Controller removed during "
1934                        "transfer!\n", mmc_hostname(host->mmc));
1935
1936                        host->mrq->cmd->error = -ENOMEDIUM;
1937                        tasklet_schedule(&host->finish_tasklet);
1938                }
1939
1940                spin_unlock_irqrestore(&host->lock, flags);
1941        }
1942
1943        sdhci_disable_card_detection(host);
1944
1945        mmc_remove_host(host->mmc);
1946
1947#ifdef SDHCI_USE_LEDS_CLASS
1948        led_classdev_unregister(&host->led);
1949#endif
1950
1951        if (!dead)
1952                sdhci_reset(host, SDHCI_RESET_ALL);
1953
1954        free_irq(host->irq, host);
1955
1956        del_timer_sync(&host->timer);
1957
1958        tasklet_kill(&host->card_tasklet);
1959        tasklet_kill(&host->finish_tasklet);
1960
1961        kfree(host->adma_desc);
1962        kfree(host->align_buffer);
1963
1964        host->adma_desc = NULL;
1965        host->align_buffer = NULL;
1966}
1967
1968EXPORT_SYMBOL_GPL(sdhci_remove_host);
1969
1970void sdhci_free_host(struct sdhci_host *host)
1971{
1972        mmc_free_host(host->mmc);
1973}
1974
1975EXPORT_SYMBOL_GPL(sdhci_free_host);
1976
1977/*****************************************************************************\
1978 *                                                                           *
1979 * Driver init/exit                                                          *
1980 *                                                                           *
1981\*****************************************************************************/
1982
1983static int __init sdhci_drv_init(void)
1984{
1985        printk(KERN_INFO DRIVER_NAME
1986                ": Secure Digital Host Controller Interface driver\n");
1987        printk(KERN_INFO DRIVER_NAME ": Copyright (c) Pierre Ossman\n");
1988
1989        return 0;
1990}
1991
1992static void __exit sdhci_drv_exit(void)
1993{
1994}
1995
1996module_init(sdhci_drv_init);
1997module_exit(sdhci_drv_exit);
1998
1999module_param(debug_quirks, uint, 0444);
2000
2001MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
2002MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
2003MODULE_LICENSE("GPL");
2004
2005MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
2006