/* linux/drivers/mmc/host/bfin_sdh.c */
   1/*
   2 * bfin_sdh.c - Analog Devices Blackfin SDH Controller
   3 *
   4 * Copyright (C) 2007-2009 Analog Device Inc.
   5 *
   6 * Licensed under the GPL-2 or later.
   7 */
   8
   9#define DRIVER_NAME     "bfin-sdh"
  10
  11#include <linux/module.h>
  12#include <linux/init.h>
  13#include <linux/ioport.h>
  14#include <linux/platform_device.h>
  15#include <linux/delay.h>
  16#include <linux/interrupt.h>
  17#include <linux/dma-mapping.h>
  18#include <linux/mmc/host.h>
  19#include <linux/proc_fs.h>
  20#include <linux/gfp.h>
  21
  22#include <asm/cacheflush.h>
  23#include <asm/dma.h>
  24#include <asm/portmux.h>
  25#include <asm/bfin_sdh.h>
  26
  27#if defined(CONFIG_BF51x) || defined(__ADSPBF60x__)
  28#define bfin_read_SDH_CLK_CTL           bfin_read_RSI_CLK_CTL
  29#define bfin_write_SDH_CLK_CTL          bfin_write_RSI_CLK_CTL
  30#define bfin_write_SDH_ARGUMENT         bfin_write_RSI_ARGUMENT
  31#define bfin_write_SDH_COMMAND          bfin_write_RSI_COMMAND
  32#define bfin_write_SDH_DATA_TIMER       bfin_write_RSI_DATA_TIMER
  33#define bfin_read_SDH_RESPONSE0         bfin_read_RSI_RESPONSE0
  34#define bfin_read_SDH_RESPONSE1         bfin_read_RSI_RESPONSE1
  35#define bfin_read_SDH_RESPONSE2         bfin_read_RSI_RESPONSE2
  36#define bfin_read_SDH_RESPONSE3         bfin_read_RSI_RESPONSE3
  37#define bfin_write_SDH_DATA_LGTH        bfin_write_RSI_DATA_LGTH
  38#define bfin_read_SDH_DATA_CTL          bfin_read_RSI_DATA_CTL
  39#define bfin_write_SDH_DATA_CTL         bfin_write_RSI_DATA_CTL
  40#define bfin_read_SDH_DATA_CNT          bfin_read_RSI_DATA_CNT
  41#define bfin_write_SDH_STATUS_CLR       bfin_write_RSI_STATUS_CLR
  42#define bfin_read_SDH_E_STATUS          bfin_read_RSI_E_STATUS
  43#define bfin_write_SDH_E_STATUS         bfin_write_RSI_E_STATUS
  44#define bfin_read_SDH_STATUS            bfin_read_RSI_STATUS
  45#define bfin_write_SDH_MASK0            bfin_write_RSI_MASK0
  46#define bfin_write_SDH_E_MASK           bfin_write_RSI_E_MASK
  47#define bfin_read_SDH_CFG               bfin_read_RSI_CFG
  48#define bfin_write_SDH_CFG              bfin_write_RSI_CFG
  49# if defined(__ADSPBF60x__)
  50#  define bfin_read_SDH_BLK_SIZE        bfin_read_RSI_BLKSZ
  51#  define bfin_write_SDH_BLK_SIZE       bfin_write_RSI_BLKSZ
  52# else
  53#  define bfin_read_SDH_PWR_CTL         bfin_read_RSI_PWR_CTL
  54#  define bfin_write_SDH_PWR_CTL        bfin_write_RSI_PWR_CTL
  55# endif
  56#endif
  57
/* Per-controller driver state, stored in the mmc_priv() area of the mmc_host. */
struct sdh_host {
	struct mmc_host		*mmc;		/* owning MMC core host */
	spinlock_t		lock;		/* protects imask and request state */
	struct resource		*res;
	void __iomem		*base;
	int			irq;		/* SDH status interrupt (drv_data->irq_int0) */
	int			stat_irq;
	int			dma_ch;		/* peripheral DMA channel (drv_data->dma_chan) */
	int			dma_dir;	/* DMA_FROM_DEVICE or DMA_TO_DEVICE for current xfer */
	struct dma_desc_array	*sg_cpu;	/* DMA descriptor array (coherent memory) */
	dma_addr_t		sg_dma;		/* bus address of sg_cpu */
	int			dma_len;	/* number of mapped scatterlist entries */

	unsigned long		sclk;		/* system clock feeding the SDH clock divider */
	unsigned int		imask;		/* software copy of enabled SDH_MASK0 bits */
	unsigned int		power_mode;	/* last ios->power_mode seen in set_ios */
	unsigned int		clk_div;	/* divider value programmed into CLK_CTL */

	struct mmc_request	*mrq;		/* request currently in flight */
	struct mmc_command	*cmd;		/* command currently in flight */
	struct mmc_data		*data;		/* data transfer currently in flight */
};
  80
  81static struct bfin_sd_host *get_sdh_data(struct platform_device *pdev)
  82{
  83        return pdev->dev.platform_data;
  84}
  85
/* Gate the SD/MMC bus clock by clearing the clock-enable bit in CLK_CTL. */
static void sdh_stop_clock(struct sdh_host *host)
{
	bfin_write_SDH_CLK_CTL(bfin_read_SDH_CLK_CTL() & ~CLK_E);
	SSYNC();
}
  91
  92static void sdh_enable_stat_irq(struct sdh_host *host, unsigned int mask)
  93{
  94        unsigned long flags;
  95
  96        spin_lock_irqsave(&host->lock, flags);
  97        host->imask |= mask;
  98        bfin_write_SDH_MASK0(mask);
  99        SSYNC();
 100        spin_unlock_irqrestore(&host->lock, flags);
 101}
 102
 103static void sdh_disable_stat_irq(struct sdh_host *host, unsigned int mask)
 104{
 105        unsigned long flags;
 106
 107        spin_lock_irqsave(&host->lock, flags);
 108        host->imask &= ~mask;
 109        bfin_write_SDH_MASK0(host->imask);
 110        SSYNC();
 111        spin_unlock_irqrestore(&host->lock, flags);
 112}
 113
/*
 * Program the SDH data path (length, block size, direction, timeout)
 * and start the DMA engine for the transfer described by @data.
 *
 * Enables the DAT_* status interrupts as a side effect.  Returns 0 on
 * success or -EINVAL for a non-power-of-2 block size.
 */
static int sdh_setup_data(struct sdh_host *host, struct mmc_data *data)
{
	unsigned int length;
	unsigned int data_ctl;
	unsigned int dma_cfg;
	unsigned int cycle_ns, timeout;

	dev_dbg(mmc_dev(host->mmc), "%s enter flags: 0x%x\n", __func__, data->flags);
	host->data = data;
	data_ctl = 0;
	dma_cfg = 0;

	/* total transfer size in bytes */
	length = data->blksz * data->blocks;
	bfin_write_SDH_DATA_LGTH(length);

	if (data->flags & MMC_DATA_STREAM)
		data_ctl |= DTX_MODE;

	if (data->flags & MMC_DATA_READ)
		data_ctl |= DTX_DIR;
	/* Only supports power-of-2 block size */
	if (data->blksz & (data->blksz - 1))
		return -EINVAL;
#ifndef RSI_BLKSZ
	/* older parts encode the block length as log2(blksz) in DATA_CTL */
	data_ctl |= ((ffs(data->blksz) - 1) << 4);
#else
	/* RSI parts take the block size directly in a dedicated register */
	bfin_write_SDH_BLK_SIZE(data->blksz);
#endif

	bfin_write_SDH_DATA_CTL(data_ctl);
	/* the time of a host clock period in ns */
	cycle_ns = 1000000000 / (host->sclk / (2 * (host->clk_div + 1)));
	timeout = data->timeout_ns / cycle_ns;
	timeout += data->timeout_clks;
	bfin_write_SDH_DATA_TIMER(timeout);
	SSYNC();

	if (data->flags & MMC_DATA_READ) {
		host->dma_dir = DMA_FROM_DEVICE;
		dma_cfg |= WNR;
	} else
		host->dma_dir = DMA_TO_DEVICE;

	sdh_enable_stat_irq(host, (DAT_CRC_FAIL | DAT_TIME_OUT | DAT_END));
	host->dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, host->dma_dir);
#if defined(CONFIG_BF54x) || defined(CONFIG_BF60x)
	/* descriptor-array DMA: one dma_desc_array entry per mapped segment */
	dma_cfg |= DMAFLOW_ARRAY | RESTART | WDSIZE_32 | DMAEN;
# ifdef RSI_BLKSZ
	dma_cfg |= PSIZE_32 | NDSIZE_3;
# else
	dma_cfg |= NDSIZE_5;
# endif
	{
		struct scatterlist *sg;
		int i;
		for_each_sg(data->sg, sg, host->dma_len, i) {
			host->sg_cpu[i].start_addr = sg_dma_address(sg);
			host->sg_cpu[i].cfg = dma_cfg;
			/* transfers are done as 32-bit words */
			host->sg_cpu[i].x_count = sg_dma_len(sg) / 4;
			host->sg_cpu[i].x_modify = 4;
			dev_dbg(mmc_dev(host->mmc), "%d: start_addr:0x%lx, "
				"cfg:0x%lx, x_count:0x%lx, x_modify:0x%lx\n",
				i, host->sg_cpu[i].start_addr,
				host->sg_cpu[i].cfg, host->sg_cpu[i].x_count,
				host->sg_cpu[i].x_modify);
		}
	}
	flush_dcache_range((unsigned int)host->sg_cpu,
		(unsigned int)host->sg_cpu +
			host->dma_len * sizeof(struct dma_desc_array));
	/* Set the last descriptor to stop mode */
	/* NOTE(review): these writes land after the flush above; presumably
	 * harmless because sg_cpu comes from dma_alloc_coherent() -- confirm */
	host->sg_cpu[host->dma_len - 1].cfg &= ~(DMAFLOW | NDSIZE);
	host->sg_cpu[host->dma_len - 1].cfg |= DI_EN;

	set_dma_curr_desc_addr(host->dma_ch, (unsigned long *)host->sg_dma);
	set_dma_x_count(host->dma_ch, 0);
	set_dma_x_modify(host->dma_ch, 0);
	SSYNC();
	set_dma_config(host->dma_ch, dma_cfg);
#elif defined(CONFIG_BF51x)
	/* RSI DMA doesn't work in array mode */
	dma_cfg |= WDSIZE_32 | DMAEN;
	set_dma_start_addr(host->dma_ch, sg_dma_address(&data->sg[0]));
	set_dma_x_count(host->dma_ch, length / 4);
	set_dma_x_modify(host->dma_ch, 4);
	SSYNC();
	set_dma_config(host->dma_ch, dma_cfg);
#endif
	/* kick off the transfer: enable the data path and its DMA request */
	bfin_write_SDH_DATA_CTL(bfin_read_SDH_DATA_CTL() | DTX_DMA_E | DTX_E);

	SSYNC();

	dev_dbg(mmc_dev(host->mmc), "%s exit\n", __func__);
	return 0;
}
 209
/*
 * Latch @cmd's argument and opcode into the controller, enable the
 * matching command-phase status interrupts, and enable the bus clock
 * so the command actually goes out.
 */
static void sdh_start_cmd(struct sdh_host *host, struct mmc_command *cmd)
{
	unsigned int sdh_cmd;
	unsigned int stat_mask;

	dev_dbg(mmc_dev(host->mmc), "%s enter cmd: 0x%p\n", __func__, cmd);
	WARN_ON(host->cmd != NULL);
	host->cmd = cmd;

	sdh_cmd = 0;
	stat_mask = 0;

	sdh_cmd |= cmd->opcode;

	/* wait for "response end" if a response is expected, else "sent" */
	if (cmd->flags & MMC_RSP_PRESENT) {
		sdh_cmd |= CMD_RSP;
		stat_mask |= CMD_RESP_END;
	} else {
		stat_mask |= CMD_SENT;
	}

	/* 136-bit (long) response format */
	if (cmd->flags & MMC_RSP_136)
		sdh_cmd |= CMD_L_RSP;

	stat_mask |= CMD_CRC_FAIL | CMD_TIME_OUT;

	sdh_enable_stat_irq(host, stat_mask);

	/* argument must be in place before the command register is armed */
	bfin_write_SDH_ARGUMENT(cmd->arg);
	bfin_write_SDH_COMMAND(sdh_cmd | CMD_E);
	bfin_write_SDH_CLK_CTL(bfin_read_SDH_CLK_CTL() | CLK_E);
	SSYNC();
}
 243
 244static void sdh_finish_request(struct sdh_host *host, struct mmc_request *mrq)
 245{
 246        dev_dbg(mmc_dev(host->mmc), "%s enter\n", __func__);
 247        host->mrq = NULL;
 248        host->cmd = NULL;
 249        host->data = NULL;
 250        mmc_request_done(host->mmc, mrq);
 251}
 252
 253static int sdh_cmd_done(struct sdh_host *host, unsigned int stat)
 254{
 255        struct mmc_command *cmd = host->cmd;
 256        int ret = 0;
 257
 258        dev_dbg(mmc_dev(host->mmc), "%s enter cmd: %p\n", __func__, cmd);
 259        if (!cmd)
 260                return 0;
 261
 262        host->cmd = NULL;
 263
 264        if (cmd->flags & MMC_RSP_PRESENT) {
 265                cmd->resp[0] = bfin_read_SDH_RESPONSE0();
 266                if (cmd->flags & MMC_RSP_136) {
 267                        cmd->resp[1] = bfin_read_SDH_RESPONSE1();
 268                        cmd->resp[2] = bfin_read_SDH_RESPONSE2();
 269                        cmd->resp[3] = bfin_read_SDH_RESPONSE3();
 270                }
 271        }
 272        if (stat & CMD_TIME_OUT)
 273                cmd->error = -ETIMEDOUT;
 274        else if (stat & CMD_CRC_FAIL && cmd->flags & MMC_RSP_CRC)
 275                cmd->error = -EILSEQ;
 276
 277        sdh_disable_stat_irq(host, (CMD_SENT | CMD_RESP_END | CMD_TIME_OUT | CMD_CRC_FAIL));
 278
 279        if (host->data && !cmd->error) {
 280                if (host->data->flags & MMC_DATA_WRITE) {
 281                        ret = sdh_setup_data(host, host->data);
 282                        if (ret)
 283                                return 0;
 284                }
 285
 286                sdh_enable_stat_irq(host, DAT_END | RX_OVERRUN | TX_UNDERRUN | DAT_TIME_OUT);
 287        } else
 288                sdh_finish_request(host, host->mrq);
 289
 290        return 1;
 291}
 292
/*
 * Handle data-phase completion status bits from SDH_STATUS.
 *
 * Stops DMA, unmaps the scatterlist, records any timeout/CRC/FIFO
 * error, acknowledges the data status bits, then either issues the
 * stop command or finishes the request.
 *
 * Returns 1 if a data transfer was in flight and handled, 0 otherwise.
 */
static int sdh_data_done(struct sdh_host *host, unsigned int stat)
{
	struct mmc_data *data = host->data;

	dev_dbg(mmc_dev(host->mmc), "%s enter stat: 0x%x\n", __func__, stat);
	if (!data)
		return 0;

	disable_dma(host->dma_ch);
	dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
		     host->dma_dir);

	if (stat & DAT_TIME_OUT)
		data->error = -ETIMEDOUT;
	else if (stat & DAT_CRC_FAIL)
		data->error = -EILSEQ;
	else if (stat & (RX_OVERRUN | TX_UNDERRUN))
		data->error = -EIO;

	/* on success the whole transfer completed; otherwise report nothing */
	if (!data->error)
		data->bytes_xfered = data->blocks * data->blksz;
	else
		data->bytes_xfered = 0;

	/* acknowledge all data-phase status bits and idle the data path */
	bfin_write_SDH_STATUS_CLR(DAT_END_STAT | DAT_TIMEOUT_STAT | \
			DAT_CRC_FAIL_STAT | DAT_BLK_END_STAT | RX_OVERRUN | TX_UNDERRUN);
	bfin_write_SDH_DATA_CTL(0);
	SSYNC();

	host->data = NULL;
	if (host->mrq->stop) {
		/* stop the clock before queueing the CMD12/stop command */
		sdh_stop_clock(host);
		sdh_start_cmd(host, host->mrq->stop);
	} else {
		sdh_finish_request(host, host->mrq);
	}

	return 1;
}
 332
 333static void sdh_request(struct mmc_host *mmc, struct mmc_request *mrq)
 334{
 335        struct sdh_host *host = mmc_priv(mmc);
 336        int ret = 0;
 337
 338        dev_dbg(mmc_dev(host->mmc), "%s enter, mrp:%p, cmd:%p\n", __func__, mrq, mrq->cmd);
 339        WARN_ON(host->mrq != NULL);
 340
 341        spin_lock(&host->lock);
 342        host->mrq = mrq;
 343        host->data = mrq->data;
 344
 345        if (mrq->data && mrq->data->flags & MMC_DATA_READ) {
 346                ret = sdh_setup_data(host, mrq->data);
 347                if (ret)
 348                        goto data_err;
 349        }
 350
 351        sdh_start_cmd(host, mrq->cmd);
 352data_err:
 353        spin_unlock(&host->lock);
 354}
 355
/*
 * mmc_host_ops.set_ios: apply the core's bus settings — bus width,
 * open-drain CMD mode, power state and bus clock divider.
 */
static void sdh_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdh_host *host;
	u16 clk_ctl = 0;
#ifndef RSI_BLKSZ
	u16 pwr_ctl = 0;
#endif
	u16 cfg;
	host = mmc_priv(mmc);

	spin_lock(&host->lock);

	/* NOTE(review): on non-RSI parts cfg is computed below but never
	 * written back (bfin_write_SDH_CFG only happens in the RSI branch)
	 * — confirm whether that is intentional */
	cfg = bfin_read_SDH_CFG();
	cfg |= MWE;
	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_4:
#ifndef RSI_BLKSZ
		cfg &= ~PD_SDDAT3;
#endif
		cfg |= PUP_SDDAT3;
		/* Enable 4 bit SDIO */
		cfg |= SD4E;
		clk_ctl |= WIDE_BUS_4;
		break;
	case MMC_BUS_WIDTH_8:
#ifndef RSI_BLKSZ
		cfg &= ~PD_SDDAT3;
#endif
		cfg |= PUP_SDDAT3;
		/* Disable 4 bit SDIO */
		cfg &= ~SD4E;
		clk_ctl |= BYTE_BUS_8;
		break;
	default:
		cfg &= ~PUP_SDDAT3;
		/* Disable 4 bit SDIO */
		cfg &= ~SD4E;
	}

	host->power_mode = ios->power_mode;
#ifndef RSI_BLKSZ
	/* older parts control power and open-drain CMD via PWR_CTL */
	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
		pwr_ctl |= ROD_CTL;
# ifndef CONFIG_SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND
		pwr_ctl |= SD_CMD_OD;
# endif
	}

	if (ios->power_mode != MMC_POWER_OFF)
		pwr_ctl |= PWR_ON;
	else
		pwr_ctl &= ~PWR_ON;

	bfin_write_SDH_PWR_CTL(pwr_ctl);
#else
	/* RSI parts moved those controls into the CFG register */
# ifndef CONFIG_SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND
	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
		cfg |= SD_CMD_OD;
	else
		cfg &= ~SD_CMD_OD;
# endif


	if (ios->power_mode != MMC_POWER_OFF)
		cfg |= PWR_ON;
	else
		cfg &= ~PWR_ON;

	bfin_write_SDH_CFG(cfg);
#endif
	SSYNC();

	if (ios->power_mode == MMC_POWER_ON && ios->clock) {
		unsigned char clk_div;
		/* bus clock = sclk / (2 * (clk_div + 1)) */
		clk_div = (get_sclk() / ios->clock - 1) / 2;
		clk_div = min_t(unsigned char, clk_div, 0xFF);
		clk_ctl |= clk_div;
		clk_ctl |= CLK_E;
		host->clk_div = clk_div;
		bfin_write_SDH_CLK_CTL(clk_ctl);

	} else
		sdh_stop_clock(host);

	/* set up sdh interrupt mask*/
	if (ios->power_mode == MMC_POWER_ON)
		bfin_write_SDH_MASK0(DAT_END | DAT_TIME_OUT | DAT_CRC_FAIL |
			RX_OVERRUN | TX_UNDERRUN | CMD_SENT | CMD_RESP_END |
			CMD_TIME_OUT | CMD_CRC_FAIL);
	else
		bfin_write_SDH_MASK0(0);
	SSYNC();

	spin_unlock(&host->lock);

	dev_dbg(mmc_dev(host->mmc), "SDH: clk_div = 0x%x actual clock:%ld expected clock:%d\n",
		host->clk_div,
		host->clk_div ? get_sclk() / (2 * (host->clk_div + 1)) : 0,
		ios->clock);
}
 456
/* Host operations handed to the MMC core via mmc->ops. */
static const struct mmc_host_ops sdh_ops = {
	.request	= sdh_request,
	.set_ios	= sdh_set_ios,
};
 461
/*
 * DMA channel interrupt: only acknowledges the channel's interrupt
 * status.  Transfer completion itself is handled from the SDH status
 * interrupt (sdh_stat_irq -> sdh_data_done).
 */
static irqreturn_t sdh_dma_irq(int irq, void *devid)
{
	struct sdh_host *host = devid;

	dev_dbg(mmc_dev(host->mmc), "%s enter, irq_stat: 0x%04lx\n", __func__,
		get_dma_curr_irqstat(host->dma_ch));
	clear_dma_irqstat(host->dma_ch);
	SSYNC();

	return IRQ_HANDLED;
}
 473
/*
 * SDH status interrupt handler: dispatches card-detect events, then
 * command-phase and data-phase completion, re-reading SDH_STATUS
 * between phases since handling a command may start a data transfer.
 */
static irqreturn_t sdh_stat_irq(int irq, void *devid)
{
	struct sdh_host *host = devid;
	unsigned int status;
	int handled = 0;

	dev_dbg(mmc_dev(host->mmc), "%s enter\n", __func__);

	spin_lock(&host->lock);

	/* card insert/remove is reported via the exception status register */
	status = bfin_read_SDH_E_STATUS();
	if (status & SD_CARD_DET) {
		mmc_detect_change(host->mmc, 0);
		bfin_write_SDH_E_STATUS(SD_CARD_DET);
	}
	status = bfin_read_SDH_STATUS();
	if (status & (CMD_SENT | CMD_RESP_END | CMD_TIME_OUT | CMD_CRC_FAIL)) {
		handled |= sdh_cmd_done(host, status);
		bfin_write_SDH_STATUS_CLR(CMD_SENT_STAT | CMD_RESP_END_STAT | \
				CMD_TIMEOUT_STAT | CMD_CRC_FAIL_STAT);
		SSYNC();
	}

	/* re-read: sdh_cmd_done() may have started/finished a data phase */
	status = bfin_read_SDH_STATUS();
	if (status & (DAT_END | DAT_TIME_OUT | DAT_CRC_FAIL | RX_OVERRUN | TX_UNDERRUN))
		handled |= sdh_data_done(host, status);

	spin_unlock(&host->lock);

	dev_dbg(mmc_dev(host->mmc), "%s exit\n\n", __func__);

	return IRQ_RETVAL(handled);
}
 507
/*
 * Bring the controller into its baseline configuration: route the
 * shared DMA channel (BF54x), enable clocks, and disable the hardware
 * card-detect pin in favour of MMC-core polling.
 */
static void sdh_reset(void)
{
#if defined(CONFIG_BF54x)
	/* Secure Digital Host shares DMA with Nand controller */
	bfin_write_DMAC1_PERIMUX(bfin_read_DMAC1_PERIMUX() | 0x1);
#endif

	bfin_write_SDH_CFG(bfin_read_SDH_CFG() | CLKS_EN);
	SSYNC();

	/* Disable card inserting detection pin. set MMC_CAP_NEEDS_POLL, and
	 * mmc stack will do the detection.
	 */
	bfin_write_SDH_CFG((bfin_read_SDH_CFG() & 0x1F) | (PUP_SDDAT | PUP_SDDAT3));
	SSYNC();
}
 524
 525static int __devinit sdh_probe(struct platform_device *pdev)
 526{
 527        struct mmc_host *mmc;
 528        struct sdh_host *host;
 529        struct bfin_sd_host *drv_data = get_sdh_data(pdev);
 530        int ret;
 531
 532        if (!drv_data) {
 533                dev_err(&pdev->dev, "missing platform driver data\n");
 534                ret = -EINVAL;
 535                goto out;
 536        }
 537
 538        mmc = mmc_alloc_host(sizeof(struct sdh_host), &pdev->dev);
 539        if (!mmc) {
 540                ret = -ENOMEM;
 541                goto out;
 542        }
 543
 544        mmc->ops = &sdh_ops;
 545#if defined(CONFIG_BF51x)
 546        mmc->max_segs = 1;
 547#else
 548        mmc->max_segs = PAGE_SIZE / sizeof(struct dma_desc_array);
 549#endif
 550#ifdef RSI_BLKSZ
 551        mmc->max_seg_size = -1;
 552#else
 553        mmc->max_seg_size = 1 << 16;
 554#endif
 555        mmc->max_blk_size = 1 << 11;
 556        mmc->max_blk_count = 1 << 11;
 557        mmc->max_req_size = PAGE_SIZE;
 558        mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
 559        mmc->f_max = get_sclk();
 560        mmc->f_min = mmc->f_max >> 9;
 561        mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_NEEDS_POLL;
 562        host = mmc_priv(mmc);
 563        host->mmc = mmc;
 564        host->sclk = get_sclk();
 565
 566        spin_lock_init(&host->lock);
 567        host->irq = drv_data->irq_int0;
 568        host->dma_ch = drv_data->dma_chan;
 569
 570        ret = request_dma(host->dma_ch, DRIVER_NAME "DMA");
 571        if (ret) {
 572                dev_err(&pdev->dev, "unable to request DMA channel\n");
 573                goto out1;
 574        }
 575
 576        ret = set_dma_callback(host->dma_ch, sdh_dma_irq, host);
 577        if (ret) {
 578                dev_err(&pdev->dev, "unable to request DMA irq\n");
 579                goto out2;
 580        }
 581
 582        host->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &host->sg_dma, GFP_KERNEL);
 583        if (host->sg_cpu == NULL) {
 584                ret = -ENOMEM;
 585                goto out2;
 586        }
 587
 588        platform_set_drvdata(pdev, mmc);
 589
 590        ret = request_irq(host->irq, sdh_stat_irq, 0, "SDH Status IRQ", host);
 591        if (ret) {
 592                dev_err(&pdev->dev, "unable to request status irq\n");
 593                goto out3;
 594        }
 595
 596        ret = peripheral_request_list(drv_data->pin_req, DRIVER_NAME);
 597        if (ret) {
 598                dev_err(&pdev->dev, "unable to request peripheral pins\n");
 599                goto out4;
 600        }
 601
 602        sdh_reset();
 603
 604        mmc_add_host(mmc);
 605        return 0;
 606
 607out4:
 608        free_irq(host->irq, host);
 609out3:
 610        mmc_remove_host(mmc);
 611        dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
 612out2:
 613        free_dma(host->dma_ch);
 614out1:
 615        mmc_free_host(mmc);
 616 out:
 617        return ret;
 618}
 619
/*
 * Platform remove: unregister from the MMC core and release the IRQ,
 * DMA channel and descriptor memory claimed in probe.
 *
 * NOTE(review): the peripheral pins requested in probe are never
 * released here — only sdh_suspend() frees them — so they leak on a
 * plain remove.  An unconditional peripheral_free_list() here would
 * double-free after a suspend, so this needs state tracking to fix;
 * flagged rather than changed.
 */
static int __devexit sdh_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);

	platform_set_drvdata(pdev, NULL);

	if (mmc) {
		struct sdh_host *host = mmc_priv(mmc);

		mmc_remove_host(mmc);

		sdh_stop_clock(host);
		free_irq(host->irq, host);
		free_dma(host->dma_ch);
		dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);

		mmc_free_host(mmc);
	}

	return 0;
}
 641
 642#ifdef CONFIG_PM
/*
 * Legacy platform suspend: let the MMC core quiesce the card, then
 * release the peripheral pins (re-acquired in sdh_resume()).
 */
static int sdh_suspend(struct platform_device *dev, pm_message_t state)
{
	struct mmc_host *mmc = platform_get_drvdata(dev);
	struct bfin_sd_host *drv_data = get_sdh_data(dev);
	int ret = 0;

	if (mmc)
		ret = mmc_suspend_host(mmc);

	peripheral_free_list(drv_data->pin_req);

	return ret;
}
 656
/*
 * Legacy platform resume: re-acquire the peripheral pins freed in
 * sdh_suspend(), re-initialize the controller and let the MMC core
 * restore the card.
 */
static int sdh_resume(struct platform_device *dev)
{
	struct mmc_host *mmc = platform_get_drvdata(dev);
	struct bfin_sd_host *drv_data = get_sdh_data(dev);
	int ret = 0;

	ret = peripheral_request_list(drv_data->pin_req, DRIVER_NAME);
	if (ret) {
		dev_err(&dev->dev, "unable to request peripheral pins\n");
		return ret;
	}

	sdh_reset();

	if (mmc)
		ret = mmc_resume_host(mmc);

	return ret;
}
 676#else
 677# define sdh_suspend NULL
 678# define sdh_resume  NULL
 679#endif
 680
/* Platform driver glue; uses the legacy suspend/resume PM callbacks. */
static struct platform_driver sdh_driver = {
	.probe   = sdh_probe,
	.remove  = __devexit_p(sdh_remove),
	.suspend = sdh_suspend,
	.resume  = sdh_resume,
	.driver  = {
		.name = DRIVER_NAME,
	},
};

module_platform_driver(sdh_driver);

MODULE_DESCRIPTION("Blackfin Secure Digital Host Driver");
MODULE_AUTHOR("Cliff Cai, Roy Huang");
MODULE_LICENSE("GPL");
 696