linux/drivers/mtd/nand/lpc32xx_mlc.c
/*
 * Driver for NAND MLC Controller in LPC32xx
 *
 * Author: Roland Stigge <stigge@antcom.de>
 *
 * Copyright © 2011 WORK Microwave GmbH
 * Copyright © 2011, 2012 Roland Stigge
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 *
 * NAND Flash Controller Operation:
 * - Read: Auto Decode
 * - Write: Auto Encode
 * - Tested Page Sizes: 2048, 4096
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/mtd/lpc32xx_mlc.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mtd/nand_ecc.h>

#define DRV_NAME "lpc32xx_mlc"

/**********************************************************************
* MLC NAND controller register offsets
**********************************************************************/

#define MLC_BUFF(x)                     (x + 0x00000)
#define MLC_DATA(x)                     (x + 0x08000)
#define MLC_CMD(x)                      (x + 0x10000)
#define MLC_ADDR(x)                     (x + 0x10004)
#define MLC_ECC_ENC_REG(x)              (x + 0x10008)
#define MLC_ECC_DEC_REG(x)              (x + 0x1000C)
#define MLC_ECC_AUTO_ENC_REG(x)         (x + 0x10010)
#define MLC_ECC_AUTO_DEC_REG(x)         (x + 0x10014)
#define MLC_RPR(x)                      (x + 0x10018)
#define MLC_WPR(x)                      (x + 0x1001C)
#define MLC_RUBP(x)                     (x + 0x10020)
#define MLC_ROBP(x)                     (x + 0x10024)
#define MLC_SW_WP_ADD_LOW(x)            (x + 0x10028)
#define MLC_SW_WP_ADD_HIG(x)            (x + 0x1002C)
#define MLC_ICR(x)                      (x + 0x10030)
#define MLC_TIME_REG(x)                 (x + 0x10034)
#define MLC_IRQ_MR(x)                   (x + 0x10038)
#define MLC_IRQ_SR(x)                   (x + 0x1003C)
#define MLC_LOCK_PR(x)                  (x + 0x10044)
#define MLC_ISR(x)                      (x + 0x10048)
#define MLC_CEH(x)                      (x + 0x1004C)

/**********************************************************************
* MLC_CMD bit definitions
**********************************************************************/
#define MLCCMD_RESET                    0xFF

/**********************************************************************
* MLC_ICR bit definitions
**********************************************************************/
#define MLCICR_WPROT                    (1 << 3)
#define MLCICR_LARGEBLOCK               (1 << 2)
#define MLCICR_LONGADDR                 (1 << 1)
#define MLCICR_16BIT                    (1 << 0)  /* unsupported by LPC32x0! */

/**********************************************************************
* MLC_TIME_REG bit definitions
**********************************************************************/
#define MLCTIMEREG_TCEA_DELAY(n)        (((n) & 0x03) << 24)
#define MLCTIMEREG_BUSY_DELAY(n)        (((n) & 0x1F) << 19)
#define MLCTIMEREG_NAND_TA(n)           (((n) & 0x07) << 16)
#define MLCTIMEREG_RD_HIGH(n)           (((n) & 0x0F) << 12)
#define MLCTIMEREG_RD_LOW(n)            (((n) & 0x0F) << 8)
#define MLCTIMEREG_WR_HIGH(n)           (((n) & 0x0F) << 4)
#define MLCTIMEREG_WR_LOW(n)            (((n) & 0x0F) << 0)

/**********************************************************************
* MLC_IRQ_MR and MLC_IRQ_SR bit definitions
**********************************************************************/
#define MLCIRQ_NAND_READY               (1 << 5)
#define MLCIRQ_CONTROLLER_READY         (1 << 4)
#define MLCIRQ_DECODE_FAILURE           (1 << 3)
#define MLCIRQ_DECODE_ERROR             (1 << 2)
#define MLCIRQ_ECC_READY                (1 << 1)
#define MLCIRQ_WRPROT_FAULT             (1 << 0)

/**********************************************************************
* MLC_LOCK_PR bit definitions
**********************************************************************/
#define MLCLOCKPR_MAGIC                 0xA25E

/**********************************************************************
* MLC_ISR bit definitions
**********************************************************************/
#define MLCISR_DECODER_FAILURE          (1 << 6)
#define MLCISR_ERRORS                   ((1 << 4) | (1 << 5))
#define MLCISR_ERRORS_DETECTED          (1 << 3)
#define MLCISR_ECC_READY                (1 << 2)
#define MLCISR_CONTROLLER_READY         (1 << 1)
#define MLCISR_NAND_READY               (1 << 0)

/**********************************************************************
* MLC_CEH bit definitions
**********************************************************************/
#define MLCCEH_NORMAL                   (1 << 0)

struct lpc32xx_nand_cfg_mlc {
        uint32_t tcea_delay;
        uint32_t busy_delay;
        uint32_t nand_ta;
        uint32_t rd_high;
        uint32_t rd_low;
        uint32_t wr_high;
        uint32_t wr_low;
        int wp_gpio;
        struct mtd_partition *parts;
        unsigned num_parts;
};

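/*
 * OOB layout used by the MLC controller (descriptive note, derived from the
 * code below and from ecc.bytes = 10 set in probe): the OOB area is split
 * into 16-byte chunks, one per 512-byte subpage.  The last ecc.bytes (10)
 * bytes of each chunk hold the ECC generated by the hardware, the first
 * 16 - ecc.bytes (6) bytes are free for software use.
 */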
static int lpc32xx_ooblayout_ecc(struct mtd_info *mtd, int section,
                                 struct mtd_oob_region *oobregion)
{
        struct nand_chip *nand_chip = mtd_to_nand(mtd);

        if (section >= nand_chip->ecc.steps)
                return -ERANGE;

        oobregion->offset = ((section + 1) * 16) - nand_chip->ecc.bytes;
        oobregion->length = nand_chip->ecc.bytes;

        return 0;
}

static int lpc32xx_ooblayout_free(struct mtd_info *mtd, int section,
                                  struct mtd_oob_region *oobregion)
{
        struct nand_chip *nand_chip = mtd_to_nand(mtd);

        if (section >= nand_chip->ecc.steps)
                return -ERANGE;

        oobregion->offset = 16 * section;
        oobregion->length = 16 - nand_chip->ecc.bytes;

        return 0;
}

static const struct mtd_ooblayout_ops lpc32xx_ooblayout_ops = {
        .ecc = lpc32xx_ooblayout_ecc,
        .free = lpc32xx_ooblayout_free,
};

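/*
 * Bad block tables live at fixed absolute pages (NAND_BBT_ABSPAGE),
 * apparently placed at the first page of the last two 64-page blocks of a
 * 524288-page device (524224 = 524288 - 64, 524160 = 524288 - 128).
 */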
static struct nand_bbt_descr lpc32xx_nand_bbt = {
        .options = NAND_BBT_ABSPAGE | NAND_BBT_2BIT | NAND_BBT_NO_OOB |
                   NAND_BBT_WRITE,
        .pages = { 524224, 0, 0, 0, 0, 0, 0, 0 },
};

static struct nand_bbt_descr lpc32xx_nand_bbt_mirror = {
        .options = NAND_BBT_ABSPAGE | NAND_BBT_2BIT | NAND_BBT_NO_OOB |
                   NAND_BBT_WRITE,
        .pages = { 524160, 0, 0, 0, 0, 0, 0, 0 },
};

struct lpc32xx_nand_host {
        struct nand_chip        nand_chip;
        struct lpc32xx_mlc_platform_data *pdata;
        struct clk              *clk;
        void __iomem            *io_base;
        int                     irq;
        struct lpc32xx_nand_cfg_mlc     *ncfg;
        struct completion       comp_nand;
        struct completion       comp_controller;
        uint32_t llptr;
        /*
         * Physical addresses of ECC buffer, DMA data buffers, OOB data buffer
         */
        dma_addr_t              oob_buf_phy;
        /*
         * Virtual addresses of ECC buffer, DMA data buffers, OOB data buffer
         */
        uint8_t                 *oob_buf;
        /* Physical address of DMA base address */
        dma_addr_t              io_base_phy;

        struct completion       comp_dma;
        struct dma_chan         *dma_chan;
        struct dma_slave_config dma_slave_config;
        struct scatterlist      sgl;
        uint8_t                 *dma_buf;
        uint8_t                 *dummy_buf;
        int                     mlcsubpages; /* number of 512bytes-subpages */
};

/*
 * Activate/Deactivate DMA Operation:
 *
 * Using the PL080 DMA Controller for transferring the 512 byte subpages
 * instead of doing readl() / writel() in a loop slows it down significantly.
 * Measurements via getnstimeofday() upon 512 byte subpage reads reveal:
 *
 * - readl() of 128 x 32 bits in a loop: ~20us
 * - DMA read of 512 bytes (32 bit, 4...128 words bursts): ~60us
 * - DMA read of 512 bytes (32 bit, no bursts): ~100us
 *
 * This applies to the transfer itself. In the DMA case: only the
 * wait_for_completion() (DMA setup _not_ included).
 *
 * Note that the 512 bytes subpage transfer is done directly from/to a
 * FIFO/buffer inside the NAND controller. Most of the time (~400-800us for a
 * 2048 bytes page) is spent waiting for the NAND IRQ, anyway. (The NAND
 * controller transferring data between its internal buffer to/from the NAND
 * chip.)
 *
 * Therefore, using the PL080 DMA is disabled by default, for now.
 *
 */
static int use_dma;
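/*
 * Note: use_dma is a build-time switch only; no module parameter is wired up
 * for it in this version, and DMA additionally requires platform data that
 * provides a dma_filter callback (see lpc32xx_dma_setup()).
 */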

static void lpc32xx_nand_setup(struct lpc32xx_nand_host *host)
{
        uint32_t clkrate, tmp;

        /* Reset MLC controller */
        writel(MLCCMD_RESET, MLC_CMD(host->io_base));
        udelay(1000);

        /* Get base clock for MLC block */
        clkrate = clk_get_rate(host->clk);
        if (clkrate == 0)
                clkrate = 104000000;

        /* Unlock MLC_ICR
         * (among others, will be locked again automatically) */
        writew(MLCLOCKPR_MAGIC, MLC_LOCK_PR(host->io_base));

        /* Configure MLC Controller: Large Block, 5 Byte Address */
        tmp = MLCICR_LARGEBLOCK | MLCICR_LONGADDR;
        writel(tmp, MLC_ICR(host->io_base));

        /* Unlock MLC_TIME_REG
         * (among others, will be locked again automatically) */
        writew(MLCLOCKPR_MAGIC, MLC_LOCK_PR(host->io_base));

        /* Compute clock setup values, see LPC and NAND manual */
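        /*
         * Interpretation (not from the original source): the nxp,* DT values
         * appear to encode each timing parameter as a frequency (1/t), so
         * clkrate / value yields the corresponding number of HCLK cycles and
         * the "+ 1" rounds the result up.  See the LPC32x0 user manual for
         * the authoritative MLC_TIME_REG field definitions.
         */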
        tmp = 0;
        tmp |= MLCTIMEREG_TCEA_DELAY(clkrate / host->ncfg->tcea_delay + 1);
        tmp |= MLCTIMEREG_BUSY_DELAY(clkrate / host->ncfg->busy_delay + 1);
        tmp |= MLCTIMEREG_NAND_TA(clkrate / host->ncfg->nand_ta + 1);
        tmp |= MLCTIMEREG_RD_HIGH(clkrate / host->ncfg->rd_high + 1);
        tmp |= MLCTIMEREG_RD_LOW(clkrate / host->ncfg->rd_low);
        tmp |= MLCTIMEREG_WR_HIGH(clkrate / host->ncfg->wr_high + 1);
        tmp |= MLCTIMEREG_WR_LOW(clkrate / host->ncfg->wr_low);
        writel(tmp, MLC_TIME_REG(host->io_base));

        /* Enable IRQ for CONTROLLER_READY and NAND_READY */
        writeb(MLCIRQ_CONTROLLER_READY | MLCIRQ_NAND_READY,
                        MLC_IRQ_MR(host->io_base));

        /* Normal nCE operation: nCE controlled by controller */
        writel(MLCCEH_NORMAL, MLC_CEH(host->io_base));
}

/*
 * Hardware specific access to control lines
 */
static void lpc32xx_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
                                  unsigned int ctrl)
{
        struct nand_chip *nand_chip = mtd_to_nand(mtd);
        struct lpc32xx_nand_host *host = nand_get_controller_data(nand_chip);

        if (cmd != NAND_CMD_NONE) {
                if (ctrl & NAND_CLE)
                        writel(cmd, MLC_CMD(host->io_base));
                else
                        writel(cmd, MLC_ADDR(host->io_base));
        }
}

/*
 * Read Device Ready (NAND device _and_ controller ready)
 */
static int lpc32xx_nand_device_ready(struct mtd_info *mtd)
{
        struct nand_chip *nand_chip = mtd_to_nand(mtd);
        struct lpc32xx_nand_host *host = nand_get_controller_data(nand_chip);

        if ((readb(MLC_ISR(host->io_base)) &
             (MLCISR_CONTROLLER_READY | MLCISR_NAND_READY)) ==
            (MLCISR_CONTROLLER_READY | MLCISR_NAND_READY))
                return 1;

        return 0;
}

static irqreturn_t lpc3xxx_nand_irq(int irq, struct lpc32xx_nand_host *host)
{
        uint8_t sr;

        /* Clear interrupt flag by reading status */
        sr = readb(MLC_IRQ_SR(host->io_base));
        if (sr & MLCIRQ_NAND_READY)
                complete(&host->comp_nand);
        if (sr & MLCIRQ_CONTROLLER_READY)
                complete(&host->comp_controller);

        return IRQ_HANDLED;
}

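/*
 * Wait helpers: each first checks MLC_ISR, then sleeps on the completion
 * signalled from the interrupt handler, and finally re-polls MLC_ISR since
 * the ready bit is occasionally reported late by the controller.
 */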
static int lpc32xx_waitfunc_nand(struct mtd_info *mtd, struct nand_chip *chip)
{
        struct lpc32xx_nand_host *host = nand_get_controller_data(chip);

        if (readb(MLC_ISR(host->io_base)) & MLCISR_NAND_READY)
                goto exit;

        wait_for_completion(&host->comp_nand);

        while (!(readb(MLC_ISR(host->io_base)) & MLCISR_NAND_READY)) {
                /* Seems to be delayed sometimes by controller */
                dev_dbg(&mtd->dev, "Warning: NAND not ready.\n");
                cpu_relax();
        }

exit:
        return NAND_STATUS_READY;
}

static int lpc32xx_waitfunc_controller(struct mtd_info *mtd,
                                       struct nand_chip *chip)
{
        struct lpc32xx_nand_host *host = nand_get_controller_data(chip);

        if (readb(MLC_ISR(host->io_base)) & MLCISR_CONTROLLER_READY)
                goto exit;

        wait_for_completion(&host->comp_controller);

        while (!(readb(MLC_ISR(host->io_base)) &
                 MLCISR_CONTROLLER_READY)) {
                dev_dbg(&mtd->dev, "Warning: Controller not ready.\n");
                cpu_relax();
        }

exit:
        return NAND_STATUS_READY;
}

static int lpc32xx_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
{
        lpc32xx_waitfunc_nand(mtd, chip);
        lpc32xx_waitfunc_controller(mtd, chip);

        return NAND_STATUS_READY;
}

/*
 * Enable NAND write protect
 */
static void lpc32xx_wp_enable(struct lpc32xx_nand_host *host)
{
        if (gpio_is_valid(host->ncfg->wp_gpio))
                gpio_set_value(host->ncfg->wp_gpio, 0);
}

/*
 * Disable NAND write protect
 */
static void lpc32xx_wp_disable(struct lpc32xx_nand_host *host)
{
        if (gpio_is_valid(host->ncfg->wp_gpio))
                gpio_set_value(host->ncfg->wp_gpio, 1);
}

static void lpc32xx_dma_complete_func(void *completion)
{
        complete(completion);
}

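/*
 * Transfer one 512 byte subpage between memory and the MLC data buffer via
 * dmaengine.  The buffer is mapped bidirectionally for simplicity; note that
 * an expired 1 s completion timeout is not treated as an error here.
 */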
static int lpc32xx_xmit_dma(struct mtd_info *mtd, void *mem, int len,
                            enum dma_transfer_direction dir)
{
        struct nand_chip *chip = mtd_to_nand(mtd);
        struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
        struct dma_async_tx_descriptor *desc;
        int flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
        int res;

        sg_init_one(&host->sgl, mem, len);

        res = dma_map_sg(host->dma_chan->device->dev, &host->sgl, 1,
                         DMA_BIDIRECTIONAL);
        if (res != 1) {
                dev_err(mtd->dev.parent, "Failed to map sg list\n");
                return -ENXIO;
        }
        desc = dmaengine_prep_slave_sg(host->dma_chan, &host->sgl, 1, dir,
                                       flags);
        if (!desc) {
                dev_err(mtd->dev.parent, "Failed to prepare slave sg\n");
                goto out1;
        }

        init_completion(&host->comp_dma);
        desc->callback = lpc32xx_dma_complete_func;
        desc->callback_param = &host->comp_dma;

        dmaengine_submit(desc);
        dma_async_issue_pending(host->dma_chan);

        wait_for_completion_timeout(&host->comp_dma, msecs_to_jiffies(1000));

        dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
                     DMA_BIDIRECTIONAL);
        return 0;
out1:
        dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
                     DMA_BIDIRECTIONAL);
        return -ENXIO;
}

static int lpc32xx_read_page(struct mtd_info *mtd, struct nand_chip *chip,
                             uint8_t *buf, int oob_required, int page)
{
        struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
        int i, j;
        uint8_t *oobbuf = chip->oob_poi;
        uint32_t mlc_isr;
        int res;
        uint8_t *dma_buf;
        bool dma_mapped;

        if ((void *)buf <= high_memory) {
                dma_buf = buf;
                dma_mapped = true;
        } else {
                dma_buf = host->dma_buf;
                dma_mapped = false;
        }

        /* Writing Command and Address */
        chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);

        /* For all sub-pages */
        for (i = 0; i < host->mlcsubpages; i++) {
                /* Start Auto Decode Command */
                writeb(0x00, MLC_ECC_AUTO_DEC_REG(host->io_base));

                /* Wait for Controller Ready */
                lpc32xx_waitfunc_controller(mtd, chip);

                /* Check ECC Error status */
                mlc_isr = readl(MLC_ISR(host->io_base));
                if (mlc_isr & MLCISR_DECODER_FAILURE) {
                        mtd->ecc_stats.failed++;
                        dev_warn(&mtd->dev, "%s: DECODER_FAILURE\n", __func__);
                } else if (mlc_isr & MLCISR_ERRORS_DETECTED) {
                        mtd->ecc_stats.corrected += ((mlc_isr >> 4) & 0x3) + 1;
                }

                /* Read 512 + 16 Bytes */
                if (use_dma) {
                        res = lpc32xx_xmit_dma(mtd, dma_buf + i * 512, 512,
                                               DMA_DEV_TO_MEM);
                        if (res)
                                return res;
                } else {
                        for (j = 0; j < (512 >> 2); j++) {
                                *((uint32_t *)(buf)) =
                                        readl(MLC_BUFF(host->io_base));
                                buf += 4;
                        }
                }
                for (j = 0; j < (16 >> 2); j++) {
                        *((uint32_t *)(oobbuf)) =
                                readl(MLC_BUFF(host->io_base));
                        oobbuf += 4;
                }
        }

        if (use_dma && !dma_mapped)
                memcpy(buf, dma_buf, mtd->writesize);

        return 0;
}

static int lpc32xx_write_page_lowlevel(struct mtd_info *mtd,
                                       struct nand_chip *chip,
                                       const uint8_t *buf, int oob_required,
                                       int page)
{
        struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
        const uint8_t *oobbuf = chip->oob_poi;
        uint8_t *dma_buf = (uint8_t *)buf;
        int res;
        int i, j;

        if (use_dma && (void *)buf >= high_memory) {
                dma_buf = host->dma_buf;
                memcpy(dma_buf, buf, mtd->writesize);
        }

        for (i = 0; i < host->mlcsubpages; i++) {
                /* Start Encode */
                writeb(0x00, MLC_ECC_ENC_REG(host->io_base));

                /* Write 512 + 6 Bytes to Buffer */
                if (use_dma) {
                        res = lpc32xx_xmit_dma(mtd, dma_buf + i * 512, 512,
                                               DMA_MEM_TO_DEV);
                        if (res)
                                return res;
                } else {
                        for (j = 0; j < (512 >> 2); j++) {
                                writel(*((uint32_t *)(buf)),
                                       MLC_BUFF(host->io_base));
                                buf += 4;
                        }
                }
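                /*
                 * Write the 6 free OOB bytes of this subpage; the remaining
                 * 10 bytes of each 16-byte OOB chunk are skipped here and
                 * filled with ECC by the controller during auto-encode below.
                 */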
                writel(*((uint32_t *)(oobbuf)), MLC_BUFF(host->io_base));
                oobbuf += 4;
                writew(*((uint16_t *)(oobbuf)), MLC_BUFF(host->io_base));
                oobbuf += 12;

                /* Auto Encode w/ Bit 8 = 0 (see LPC MLC Controller manual) */
                writeb(0x00, MLC_ECC_AUTO_ENC_REG(host->io_base));

                /* Wait for Controller Ready */
                lpc32xx_waitfunc_controller(mtd, chip);
        }
        return 0;
}

static int lpc32xx_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
                            int page)
{
        struct lpc32xx_nand_host *host = nand_get_controller_data(chip);

        /* Read whole page - necessary with MLC controller! */
        lpc32xx_read_page(mtd, chip, host->dummy_buf, 1, page);

        return 0;
}

static int lpc32xx_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
                             int page)
{
        /* None, write_oob conflicts with the automatic LPC MLC ECC decoder! */
        return 0;
}

/* Prepares MLC for transfers with H/W ECC enabled: always enabled anyway */
static void lpc32xx_ecc_enable(struct mtd_info *mtd, int mode)
{
        /* Always enabled! */
}

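/*
 * Request and configure a dmaengine slave channel for the MLC data buffer.
 * This relies on platform data supplying a dma_filter callback, so DMA is
 * only usable on boards that provide it.
 */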
static int lpc32xx_dma_setup(struct lpc32xx_nand_host *host)
{
        struct mtd_info *mtd = nand_to_mtd(&host->nand_chip);
        dma_cap_mask_t mask;

        if (!host->pdata || !host->pdata->dma_filter) {
                dev_err(mtd->dev.parent, "no DMA platform data\n");
                return -ENOENT;
        }

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);
        host->dma_chan = dma_request_channel(mask, host->pdata->dma_filter,
                                             "nand-mlc");
        if (!host->dma_chan) {
                dev_err(mtd->dev.parent, "Failed to request DMA channel\n");
                return -EBUSY;
        }

        /*
         * Set direction to a sensible value even if the dmaengine driver
         * should ignore it. With the default (DMA_MEM_TO_MEM), the amba-pl08x
         * driver criticizes it as "alien transfer direction".
         */
        host->dma_slave_config.direction = DMA_DEV_TO_MEM;
        host->dma_slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        host->dma_slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        host->dma_slave_config.src_maxburst = 128;
        host->dma_slave_config.dst_maxburst = 128;
        /* DMA controller does flow control: */
        host->dma_slave_config.device_fc = false;
        host->dma_slave_config.src_addr = MLC_BUFF(host->io_base_phy);
        host->dma_slave_config.dst_addr = MLC_BUFF(host->io_base_phy);
        if (dmaengine_slave_config(host->dma_chan, &host->dma_slave_config)) {
                dev_err(mtd->dev.parent, "Failed to setup DMA slave\n");
                goto out1;
        }

        return 0;
out1:
        dma_release_channel(host->dma_chan);
        return -ENXIO;
}

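/*
 * Parse the controller configuration from the device tree.  The node layout
 * below is an illustrative sketch for the "nxp,lpc3220-mlc" binding, not an
 * authoritative example: the register base, timing values and WP GPIO are
 * board specific, and the interrupt/partition properties are omitted.
 *
 *      mlc: flash@200a8000 {
 *              compatible = "nxp,lpc3220-mlc";
 *              reg = <0x200a8000 0x11000>;
 *              nxp,tcea-delay = <333333333>;
 *              nxp,busy-delay = <10000000>;
 *              nxp,nand-ta = <18181818>;
 *              nxp,rd-high = <31250000>;
 *              nxp,rd-low = <33333333>;
 *              nxp,wr-high = <40000000>;
 *              nxp,wr-low = <83333333>;
 *              gpios = <&gpio 5 19 1>;
 *      };
 */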
static struct lpc32xx_nand_cfg_mlc *lpc32xx_parse_dt(struct device *dev)
{
        struct lpc32xx_nand_cfg_mlc *ncfg;
        struct device_node *np = dev->of_node;

        ncfg = devm_kzalloc(dev, sizeof(*ncfg), GFP_KERNEL);
        if (!ncfg)
                return NULL;

        of_property_read_u32(np, "nxp,tcea-delay", &ncfg->tcea_delay);
        of_property_read_u32(np, "nxp,busy-delay", &ncfg->busy_delay);
        of_property_read_u32(np, "nxp,nand-ta", &ncfg->nand_ta);
        of_property_read_u32(np, "nxp,rd-high", &ncfg->rd_high);
        of_property_read_u32(np, "nxp,rd-low", &ncfg->rd_low);
        of_property_read_u32(np, "nxp,wr-high", &ncfg->wr_high);
        of_property_read_u32(np, "nxp,wr-low", &ncfg->wr_low);

        if (!ncfg->tcea_delay || !ncfg->busy_delay || !ncfg->nand_ta ||
            !ncfg->rd_high || !ncfg->rd_low || !ncfg->wr_high ||
            !ncfg->wr_low) {
                dev_err(dev, "chip parameters not specified correctly\n");
                return NULL;
        }

        ncfg->wp_gpio = of_get_named_gpio(np, "gpios", 0);

        return ncfg;
}

/*
 * Probe for NAND controller
 */
static int lpc32xx_nand_probe(struct platform_device *pdev)
{
        struct lpc32xx_nand_host *host;
        struct mtd_info *mtd;
        struct nand_chip *nand_chip;
        struct resource *rc;
        int res;

        /* Allocate memory for the device structure (and zero it) */
        host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
        if (!host)
                return -ENOMEM;

        rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        host->io_base = devm_ioremap_resource(&pdev->dev, rc);
        if (IS_ERR(host->io_base))
                return PTR_ERR(host->io_base);

        host->io_base_phy = rc->start;

        nand_chip = &host->nand_chip;
        mtd = nand_to_mtd(nand_chip);
        if (pdev->dev.of_node)
                host->ncfg = lpc32xx_parse_dt(&pdev->dev);
        if (!host->ncfg) {
                dev_err(&pdev->dev,
                        "Missing or bad NAND config from device tree\n");
                return -ENOENT;
        }
        if (host->ncfg->wp_gpio == -EPROBE_DEFER)
                return -EPROBE_DEFER;
        if (gpio_is_valid(host->ncfg->wp_gpio) &&
                        gpio_request(host->ncfg->wp_gpio, "NAND WP")) {
                dev_err(&pdev->dev, "GPIO not available\n");
                return -EBUSY;
        }
        lpc32xx_wp_disable(host);

        host->pdata = dev_get_platdata(&pdev->dev);

        /* link the private data structures */
        nand_set_controller_data(nand_chip, host);
        nand_set_flash_node(nand_chip, pdev->dev.of_node);
        mtd->dev.parent = &pdev->dev;

        /* Get NAND clock */
        host->clk = clk_get(&pdev->dev, NULL);
        if (IS_ERR(host->clk)) {
                dev_err(&pdev->dev, "Clock initialization failure\n");
                res = -ENOENT;
                goto err_exit1;
        }
        res = clk_prepare_enable(host->clk);
        if (res)
                goto err_put_clk;

        nand_chip->cmd_ctrl = lpc32xx_nand_cmd_ctrl;
        nand_chip->dev_ready = lpc32xx_nand_device_ready;
        nand_chip->chip_delay = 25; /* us */
        nand_chip->IO_ADDR_R = MLC_DATA(host->io_base);
        nand_chip->IO_ADDR_W = MLC_DATA(host->io_base);

        /* Init NAND controller */
        lpc32xx_nand_setup(host);

        platform_set_drvdata(pdev, host);

        /* Initialize function pointers */
        nand_chip->ecc.hwctl = lpc32xx_ecc_enable;
        nand_chip->ecc.read_page_raw = lpc32xx_read_page;
        nand_chip->ecc.read_page = lpc32xx_read_page;
        nand_chip->ecc.write_page_raw = lpc32xx_write_page_lowlevel;
        nand_chip->ecc.write_page = lpc32xx_write_page_lowlevel;
        nand_chip->ecc.write_oob = lpc32xx_write_oob;
        nand_chip->ecc.read_oob = lpc32xx_read_oob;
        nand_chip->ecc.strength = 4;
        nand_chip->ecc.bytes = 10;
        nand_chip->waitfunc = lpc32xx_waitfunc;

        nand_chip->options = NAND_NO_SUBPAGE_WRITE;
        nand_chip->bbt_options = NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
        nand_chip->bbt_td = &lpc32xx_nand_bbt;
        nand_chip->bbt_md = &lpc32xx_nand_bbt_mirror;

        if (use_dma) {
                res = lpc32xx_dma_setup(host);
                if (res) {
                        res = -EIO;
                        goto err_exit2;
                }
        }

        /*
         * Scan to find the existence of the device and to get the type of
         * NAND device: SMALL block or LARGE block.
         */
        res = nand_scan_ident(mtd, 1, NULL);
        if (res)
                goto err_exit3;

        host->dma_buf = devm_kzalloc(&pdev->dev, mtd->writesize, GFP_KERNEL);
        if (!host->dma_buf) {
                res = -ENOMEM;
                goto err_exit3;
        }

        host->dummy_buf = devm_kzalloc(&pdev->dev, mtd->writesize, GFP_KERNEL);
        if (!host->dummy_buf) {
                res = -ENOMEM;
                goto err_exit3;
        }

        nand_chip->ecc.mode = NAND_ECC_HW;
        nand_chip->ecc.size = 512;
        mtd_set_ooblayout(mtd, &lpc32xx_ooblayout_ops);
        host->mlcsubpages = mtd->writesize / 512;

        /* initially clear interrupt status */
        readb(MLC_IRQ_SR(host->io_base));

        init_completion(&host->comp_nand);
        init_completion(&host->comp_controller);

        host->irq = platform_get_irq(pdev, 0);
        if (host->irq < 0) {
                dev_err(&pdev->dev, "failed to get platform irq\n");
                res = -EINVAL;
                goto err_exit3;
        }

        if (request_irq(host->irq, (irq_handler_t)&lpc3xxx_nand_irq,
                        IRQF_TRIGGER_HIGH, DRV_NAME, host)) {
                dev_err(&pdev->dev, "Error requesting NAND IRQ\n");
                res = -ENXIO;
                goto err_exit3;
        }

        /*
         * Fill in all the uninitialized function pointers with the defaults
         * and scan for a bad block table if appropriate.
         */
        res = nand_scan_tail(mtd);
        if (res)
                goto err_exit4;

        mtd->name = DRV_NAME;

        res = mtd_device_register(mtd, host->ncfg->parts,
                                  host->ncfg->num_parts);
        if (!res)
                return res;

        nand_release(mtd);

err_exit4:
        free_irq(host->irq, host);
err_exit3:
        if (use_dma)
                dma_release_channel(host->dma_chan);
err_exit2:
        clk_disable_unprepare(host->clk);
err_put_clk:
        clk_put(host->clk);
err_exit1:
        lpc32xx_wp_enable(host);
        gpio_free(host->ncfg->wp_gpio);

        return res;
}

/*
 * Remove NAND device
 */
static int lpc32xx_nand_remove(struct platform_device *pdev)
{
        struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
        struct mtd_info *mtd = nand_to_mtd(&host->nand_chip);

        nand_release(mtd);
        free_irq(host->irq, host);
        if (use_dma)
                dma_release_channel(host->dma_chan);

        clk_disable_unprepare(host->clk);
        clk_put(host->clk);

        lpc32xx_wp_enable(host);
        gpio_free(host->ncfg->wp_gpio);

        return 0;
}

#ifdef CONFIG_PM
static int lpc32xx_nand_resume(struct platform_device *pdev)
{
        struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
        int ret;

        /* Re-enable NAND clock */
        ret = clk_prepare_enable(host->clk);
        if (ret)
                return ret;

        /* Fresh init of NAND controller */
        lpc32xx_nand_setup(host);

        /* Disable write protect */
        lpc32xx_wp_disable(host);

        return 0;
}

static int lpc32xx_nand_suspend(struct platform_device *pdev, pm_message_t pm)
{
        struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);

        /* Enable write protect for safety */
        lpc32xx_wp_enable(host);

        /* Disable clock */
        clk_disable_unprepare(host->clk);
        return 0;
}

#else
#define lpc32xx_nand_resume NULL
#define lpc32xx_nand_suspend NULL
#endif

static const struct of_device_id lpc32xx_nand_match[] = {
        { .compatible = "nxp,lpc3220-mlc" },
        { /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, lpc32xx_nand_match);

static struct platform_driver lpc32xx_nand_driver = {
        .probe          = lpc32xx_nand_probe,
        .remove         = lpc32xx_nand_remove,
        .resume         = lpc32xx_nand_resume,
        .suspend        = lpc32xx_nand_suspend,
        .driver         = {
                .name   = DRV_NAME,
                .of_match_table = lpc32xx_nand_match,
        },
};

module_platform_driver(lpc32xx_nand_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
MODULE_DESCRIPTION("NAND driver for the NXP LPC32XX MLC controller");