linux/drivers/mtd/nand/raw/lpc32xx_mlc.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for NAND MLC Controller in LPC32xx
 *
 * Author: Roland Stigge <stigge@antcom.de>
 *
 * Copyright © 2011 WORK Microwave GmbH
 * Copyright © 2011, 2012 Roland Stigge
 *
 * NAND Flash Controller Operation:
 * - Read: Auto Decode
 * - Write: Auto Encode
 * - Tested Page Sizes: 2048, 4096
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/mtd/lpc32xx_mlc.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>

#define DRV_NAME "lpc32xx_mlc"

/**********************************************************************
* MLC NAND controller register offsets
**********************************************************************/

#define MLC_BUFF(x)			((x) + 0x00000)
#define MLC_DATA(x)			((x) + 0x08000)
#define MLC_CMD(x)			((x) + 0x10000)
#define MLC_ADDR(x)			((x) + 0x10004)
#define MLC_ECC_ENC_REG(x)		((x) + 0x10008)
#define MLC_ECC_DEC_REG(x)		((x) + 0x1000C)
#define MLC_ECC_AUTO_ENC_REG(x)		((x) + 0x10010)
#define MLC_ECC_AUTO_DEC_REG(x)		((x) + 0x10014)
#define MLC_RPR(x)			((x) + 0x10018)
#define MLC_WPR(x)			((x) + 0x1001C)
#define MLC_RUBP(x)			((x) + 0x10020)
#define MLC_ROBP(x)			((x) + 0x10024)
#define MLC_SW_WP_ADD_LOW(x)		((x) + 0x10028)
#define MLC_SW_WP_ADD_HIG(x)		((x) + 0x1002C)
#define MLC_ICR(x)			((x) + 0x10030)
#define MLC_TIME_REG(x)			((x) + 0x10034)
#define MLC_IRQ_MR(x)			((x) + 0x10038)
#define MLC_IRQ_SR(x)			((x) + 0x1003C)
#define MLC_LOCK_PR(x)			((x) + 0x10044)
#define MLC_ISR(x)			((x) + 0x10048)
#define MLC_CEH(x)			((x) + 0x1004C)
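
/*
 * Note on the address map implied above: the data buffer window sits at
 * offset 0x00000, the raw data port at 0x08000, and the control/status
 * registers from 0x10000 upward within the MLC register space.
 */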

/**********************************************************************
* MLC_CMD bit definitions
**********************************************************************/
#define MLCCMD_RESET			0xFF

/**********************************************************************
* MLC_ICR bit definitions
**********************************************************************/
#define MLCICR_WPROT			(1 << 3)
#define MLCICR_LARGEBLOCK		(1 << 2)
#define MLCICR_LONGADDR			(1 << 1)
#define MLCICR_16BIT			(1 << 0)  /* unsupported by LPC32x0! */

/**********************************************************************
* MLC_TIME_REG bit definitions
**********************************************************************/
#define MLCTIMEREG_TCEA_DELAY(n)	(((n) & 0x03) << 24)
#define MLCTIMEREG_BUSY_DELAY(n)	(((n) & 0x1F) << 19)
#define MLCTIMEREG_NAND_TA(n)		(((n) & 0x07) << 16)
#define MLCTIMEREG_RD_HIGH(n)		(((n) & 0x0F) << 12)
#define MLCTIMEREG_RD_LOW(n)		(((n) & 0x0F) << 8)
#define MLCTIMEREG_WR_HIGH(n)		(((n) & 0x0F) << 4)
#define MLCTIMEREG_WR_LOW(n)		(((n) & 0x0F) << 0)

/**********************************************************************
* MLC_IRQ_MR and MLC_IRQ_SR bit definitions
**********************************************************************/
#define MLCIRQ_NAND_READY		(1 << 5)
#define MLCIRQ_CONTROLLER_READY		(1 << 4)
#define MLCIRQ_DECODE_FAILURE		(1 << 3)
#define MLCIRQ_DECODE_ERROR		(1 << 2)
#define MLCIRQ_ECC_READY		(1 << 1)
#define MLCIRQ_WRPROT_FAULT		(1 << 0)

/**********************************************************************
* MLC_LOCK_PR bit definitions
**********************************************************************/
#define MLCLOCKPR_MAGIC			0xA25E

/**********************************************************************
* MLC_ISR bit definitions
**********************************************************************/
#define MLCISR_DECODER_FAILURE		(1 << 6)
#define MLCISR_ERRORS			((1 << 4) | (1 << 5))
#define MLCISR_ERRORS_DETECTED		(1 << 3)
#define MLCISR_ECC_READY		(1 << 2)
#define MLCISR_CONTROLLER_READY		(1 << 1)
#define MLCISR_NAND_READY		(1 << 0)

/**********************************************************************
* MLC_CEH bit definitions
**********************************************************************/
#define MLCCEH_NORMAL			(1 << 0)

struct lpc32xx_nand_cfg_mlc {
	uint32_t tcea_delay;
	uint32_t busy_delay;
	uint32_t nand_ta;
	uint32_t rd_high;
	uint32_t rd_low;
	uint32_t wr_high;
	uint32_t wr_low;
	int wp_gpio;
	struct mtd_partition *parts;
	unsigned int num_parts;
};

static int lpc32xx_ooblayout_ecc(struct mtd_info *mtd, int section,
				 struct mtd_oob_region *oobregion)
{
	struct nand_chip *nand_chip = mtd_to_nand(mtd);

	if (section >= nand_chip->ecc.steps)
		return -ERANGE;

	oobregion->offset = ((section + 1) * 16) - nand_chip->ecc.bytes;
	oobregion->length = nand_chip->ecc.bytes;

	return 0;
}

static int lpc32xx_ooblayout_free(struct mtd_info *mtd, int section,
				  struct mtd_oob_region *oobregion)
{
	struct nand_chip *nand_chip = mtd_to_nand(mtd);

	if (section >= nand_chip->ecc.steps)
		return -ERANGE;

	oobregion->offset = 16 * section;
	oobregion->length = 16 - nand_chip->ecc.bytes;

	return 0;
}

static const struct mtd_ooblayout_ops lpc32xx_ooblayout_ops = {
	.ecc = lpc32xx_ooblayout_ecc,
	.free = lpc32xx_ooblayout_free,
};
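
/*
 * Worked example of the layout above (illustration only): each 512-byte
 * subpage owns a 16-byte OOB chunk, and ecc.bytes is 10 (set in
 * lpc32xx_nand_attach_chip() below). For a 2048-byte page (4 subpages):
 *
 *   free: OOB bytes  0..5,  16..21, 32..37, 48..53
 *   ECC:  OOB bytes  6..15, 22..31, 38..47, 54..63
 */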

static struct nand_bbt_descr lpc32xx_nand_bbt = {
	.options = NAND_BBT_ABSPAGE | NAND_BBT_2BIT | NAND_BBT_NO_OOB |
		   NAND_BBT_WRITE,
	.pages = { 524224, 0, 0, 0, 0, 0, 0, 0 },
};

static struct nand_bbt_descr lpc32xx_nand_bbt_mirror = {
	.options = NAND_BBT_ABSPAGE | NAND_BBT_2BIT | NAND_BBT_NO_OOB |
		   NAND_BBT_WRITE,
	.pages = { 524160, 0, 0, 0, 0, 0, 0, 0 },
};
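
/*
 * The absolute BBT pages above appear to target the last two 64-page
 * blocks of a 1 GiB chip with 2048-byte pages (524288 pages total:
 * 524224 = 524288 - 64, 524160 = 524288 - 128). Other geometries would
 * need different values.
 */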

struct lpc32xx_nand_host {
	struct platform_device	*pdev;
	struct nand_chip	nand_chip;
	struct lpc32xx_mlc_platform_data *pdata;
	struct clk		*clk;
	void __iomem		*io_base;
	int			irq;
	struct lpc32xx_nand_cfg_mlc	*ncfg;
	struct completion	comp_nand;
	struct completion	comp_controller;
	uint32_t llptr;
	/*
	 * Physical addresses of ECC buffer, DMA data buffers, OOB data buffer
	 */
	dma_addr_t		oob_buf_phy;
	/*
	 * Virtual addresses of ECC buffer, DMA data buffers, OOB data buffer
	 */
	uint8_t			*oob_buf;
	/* Physical address of DMA base address */
	dma_addr_t		io_base_phy;

	struct completion	comp_dma;
	struct dma_chan		*dma_chan;
	struct dma_slave_config	dma_slave_config;
	struct scatterlist	sgl;
	uint8_t			*dma_buf;
	uint8_t			*dummy_buf;
	int			mlcsubpages; /* number of 512-byte subpages */
};

/*
 * Activate/Deactivate DMA Operation:
 *
 * Using the PL080 DMA controller to transfer the 512-byte subpages,
 * instead of doing readl() / writel() in a loop, actually slows things
 * down significantly. Measurements via getnstimeofday() of 512-byte
 * subpage reads show:
 *
 * - readl() of 128 x 32 bits in a loop: ~20us
 * - DMA read of 512 bytes (32 bit, 4...128 word bursts): ~60us
 * - DMA read of 512 bytes (32 bit, no bursts): ~100us
 *
 * These numbers cover the transfer itself; in the DMA case, only the
 * wait_for_completion() is counted (DMA setup _not_ included).
 *
 * Note that the 512-byte subpage transfer is done directly from/to a
 * FIFO/buffer inside the NAND controller. Most of the time (~400-800us
 * for a 2048-byte page) is spent waiting for the NAND IRQ anyway (the
 * NAND controller transferring data between its internal buffer and the
 * NAND chip).
 *
 * Therefore, use of the PL080 DMA is disabled by default, for now.
 */
static int use_dma;
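/* Build-time switch: change the initializer above to 1 to re-enable DMA. */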

static void lpc32xx_nand_setup(struct lpc32xx_nand_host *host)
{
	uint32_t clkrate, tmp;

	/* Reset MLC controller */
	writel(MLCCMD_RESET, MLC_CMD(host->io_base));
	udelay(1000);

	/* Get base clock for MLC block */
	clkrate = clk_get_rate(host->clk);
	if (clkrate == 0)
		clkrate = 104000000;

	/* Unlock MLC_ICR
	 * (among others, will be locked again automatically) */
	writew(MLCLOCKPR_MAGIC, MLC_LOCK_PR(host->io_base));

	/* Configure MLC Controller: Large Block, 5 Byte Address */
	tmp = MLCICR_LARGEBLOCK | MLCICR_LONGADDR;
	writel(tmp, MLC_ICR(host->io_base));

	/* Unlock MLC_TIME_REG
	 * (among others, will be locked again automatically) */
	writew(MLCLOCKPR_MAGIC, MLC_LOCK_PR(host->io_base));

	/* Compute clock setup values, see LPC and NAND manual */
	tmp = 0;
	tmp |= MLCTIMEREG_TCEA_DELAY(clkrate / host->ncfg->tcea_delay + 1);
	tmp |= MLCTIMEREG_BUSY_DELAY(clkrate / host->ncfg->busy_delay + 1);
	tmp |= MLCTIMEREG_NAND_TA(clkrate / host->ncfg->nand_ta + 1);
	tmp |= MLCTIMEREG_RD_HIGH(clkrate / host->ncfg->rd_high + 1);
	tmp |= MLCTIMEREG_RD_LOW(clkrate / host->ncfg->rd_low);
	tmp |= MLCTIMEREG_WR_HIGH(clkrate / host->ncfg->wr_high + 1);
	tmp |= MLCTIMEREG_WR_LOW(clkrate / host->ncfg->wr_low);
	writel(tmp, MLC_TIME_REG(host->io_base));

	/* Enable IRQ for CONTROLLER_READY and NAND_READY */
	writeb(MLCIRQ_CONTROLLER_READY | MLCIRQ_NAND_READY,
			MLC_IRQ_MR(host->io_base));

	/* Normal nCE operation: nCE controlled by controller */
	writel(MLCCEH_NORMAL, MLC_CEH(host->io_base));
}
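
/*
 * Timing arithmetic, worked through for illustration: since each field
 * above is derived as clkrate / property, the DT timing properties are
 * frequencies in Hz (the inverse of the desired delay), and the fields
 * hold the delay in HCLK cycles. E.g., assuming a 104 MHz HCLK and a
 * hypothetical nxp,rd-low = <45454545> (~22 ns):
 * 104000000 / 45454545 = 2 cycles.
 */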

/*
 * Hardware specific access to control lines
 */
static void lpc32xx_nand_cmd_ctrl(struct nand_chip *nand_chip, int cmd,
				  unsigned int ctrl)
{
	struct lpc32xx_nand_host *host = nand_get_controller_data(nand_chip);

	if (cmd != NAND_CMD_NONE) {
		if (ctrl & NAND_CLE)
			writel(cmd, MLC_CMD(host->io_base));
		else
			writel(cmd, MLC_ADDR(host->io_base));
	}
}

/*
 * Read Device Ready (NAND device _and_ controller ready)
 */
static int lpc32xx_nand_device_ready(struct nand_chip *nand_chip)
{
	struct lpc32xx_nand_host *host = nand_get_controller_data(nand_chip);

	if ((readb(MLC_ISR(host->io_base)) &
	     (MLCISR_CONTROLLER_READY | MLCISR_NAND_READY)) ==
	    (MLCISR_CONTROLLER_READY | MLCISR_NAND_READY))
		return 1;

	return 0;
}

static irqreturn_t lpc3xxx_nand_irq(int irq, void *data)
{
	struct lpc32xx_nand_host *host = data;
	uint8_t sr;

	/* Clear interrupt flag by reading status */
	sr = readb(MLC_IRQ_SR(host->io_base));
	if (sr & MLCIRQ_NAND_READY)
		complete(&host->comp_nand);
	if (sr & MLCIRQ_CONTROLLER_READY)
		complete(&host->comp_controller);

	return IRQ_HANDLED;
}

static int lpc32xx_waitfunc_nand(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);

	if (readb(MLC_ISR(host->io_base)) & MLCISR_NAND_READY)
		goto exit;

	wait_for_completion(&host->comp_nand);

	while (!(readb(MLC_ISR(host->io_base)) & MLCISR_NAND_READY)) {
		/* Seems to be delayed sometimes by controller */
		dev_dbg(&mtd->dev, "Warning: NAND not ready.\n");
		cpu_relax();
	}

exit:
	return NAND_STATUS_READY;
}

static int lpc32xx_waitfunc_controller(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);

	if (readb(MLC_ISR(host->io_base)) & MLCISR_CONTROLLER_READY)
		goto exit;

	wait_for_completion(&host->comp_controller);

	while (!(readb(MLC_ISR(host->io_base)) &
		 MLCISR_CONTROLLER_READY)) {
		dev_dbg(&mtd->dev, "Warning: Controller not ready.\n");
		cpu_relax();
	}

exit:
	return NAND_STATUS_READY;
}

static int lpc32xx_waitfunc(struct nand_chip *chip)
{
	lpc32xx_waitfunc_nand(chip);
	lpc32xx_waitfunc_controller(chip);

	return NAND_STATUS_READY;
}

/*
 * Enable NAND write protect
 */
static void lpc32xx_wp_enable(struct lpc32xx_nand_host *host)
{
	if (gpio_is_valid(host->ncfg->wp_gpio))
		gpio_set_value(host->ncfg->wp_gpio, 0);
}

/*
 * Disable NAND write protect
 */
static void lpc32xx_wp_disable(struct lpc32xx_nand_host *host)
{
	if (gpio_is_valid(host->ncfg->wp_gpio))
		gpio_set_value(host->ncfg->wp_gpio, 1);
}

static void lpc32xx_dma_complete_func(void *completion)
{
	complete(completion);
}

static int lpc32xx_xmit_dma(struct mtd_info *mtd, void *mem, int len,
			    enum dma_transfer_direction dir)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
	struct dma_async_tx_descriptor *desc;
	int flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
	int res;

	sg_init_one(&host->sgl, mem, len);
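
	/*
	 * Map the buffer DMA_BIDIRECTIONAL: this one helper serves both the
	 * read (DMA_DEV_TO_MEM) and write (DMA_MEM_TO_DEV) paths.
	 */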
	res = dma_map_sg(host->dma_chan->device->dev, &host->sgl, 1,
			 DMA_BIDIRECTIONAL);
	if (res != 1) {
		dev_err(mtd->dev.parent, "Failed to map sg list\n");
		return -ENXIO;
	}
	desc = dmaengine_prep_slave_sg(host->dma_chan, &host->sgl, 1, dir,
				       flags);
	if (!desc) {
		dev_err(mtd->dev.parent, "Failed to prepare slave sg\n");
		goto out1;
	}

	init_completion(&host->comp_dma);
	desc->callback = lpc32xx_dma_complete_func;
	desc->callback_param = &host->comp_dma;

	dmaengine_submit(desc);
	dma_async_issue_pending(host->dma_chan);

	/* Bail out instead of touching a buffer the DMA may still access */
	if (!wait_for_completion_timeout(&host->comp_dma,
					 msecs_to_jiffies(1000))) {
		dev_err(mtd->dev.parent, "DMA transfer timed out\n");
		dmaengine_terminate_all(host->dma_chan);
		goto out1;
	}

	dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
		     DMA_BIDIRECTIONAL);
	return 0;
out1:
	dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
		     DMA_BIDIRECTIONAL);
	return -ENXIO;
}

static int lpc32xx_read_page(struct nand_chip *chip, uint8_t *buf,
			     int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
	int i, j;
	uint8_t *oobbuf = chip->oob_poi;
	uint32_t mlc_isr;
	int res;
	uint8_t *dma_buf;
	bool dma_mapped;
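
	/*
	 * Lowmem buffers can be DMA-mapped directly; buffers above
	 * high_memory (e.g. vmalloc) are bounced through host->dma_buf.
	 */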
	if ((void *)buf <= high_memory) {
		dma_buf = buf;
		dma_mapped = true;
	} else {
		dma_buf = host->dma_buf;
		dma_mapped = false;
	}

	/* Writing Command and Address */
	nand_read_page_op(chip, page, 0, NULL, 0);

	/* For all sub-pages */
	for (i = 0; i < host->mlcsubpages; i++) {
		/* Start Auto Decode Command */
		writeb(0x00, MLC_ECC_AUTO_DEC_REG(host->io_base));

		/* Wait for Controller Ready */
		lpc32xx_waitfunc_controller(chip);

		/* Check ECC Error status */
		mlc_isr = readl(MLC_ISR(host->io_base));
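		/* ISR bits [5:4] encode the number of corrected symbols - 1 */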
		if (mlc_isr & MLCISR_DECODER_FAILURE) {
			mtd->ecc_stats.failed++;
			dev_warn(&mtd->dev, "%s: DECODER_FAILURE\n", __func__);
		} else if (mlc_isr & MLCISR_ERRORS_DETECTED) {
			mtd->ecc_stats.corrected += ((mlc_isr >> 4) & 0x3) + 1;
		}

		/* Read 512 + 16 Bytes */
		if (use_dma) {
			res = lpc32xx_xmit_dma(mtd, dma_buf + i * 512, 512,
					       DMA_DEV_TO_MEM);
			if (res)
				return res;
		} else {
			for (j = 0; j < (512 >> 2); j++) {
				*((uint32_t *)(buf)) =
					readl(MLC_BUFF(host->io_base));
				buf += 4;
			}
		}
		for (j = 0; j < (16 >> 2); j++) {
			*((uint32_t *)(oobbuf)) =
				readl(MLC_BUFF(host->io_base));
			oobbuf += 4;
		}
	}

	if (use_dma && !dma_mapped)
		memcpy(buf, dma_buf, mtd->writesize);

	return 0;
}

static int lpc32xx_write_page_lowlevel(struct nand_chip *chip,
				       const uint8_t *buf, int oob_required,
				       int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
	const uint8_t *oobbuf = chip->oob_poi;
	uint8_t *dma_buf = (uint8_t *)buf;
	int res;
	int i, j;
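
	/*
	 * Buffers above high_memory (e.g. vmalloc) cannot be DMA-mapped
	 * directly; copy them into the DMA-safe bounce buffer first.
	 */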
	if (use_dma && (void *)buf >= high_memory) {
		dma_buf = host->dma_buf;
		memcpy(dma_buf, buf, mtd->writesize);
	}

	nand_prog_page_begin_op(chip, page, 0, NULL, 0);

	for (i = 0; i < host->mlcsubpages; i++) {
		/* Start Encode */
		writeb(0x00, MLC_ECC_ENC_REG(host->io_base));

		/* Write 512 + 6 Bytes to Buffer */
		if (use_dma) {
			res = lpc32xx_xmit_dma(mtd, dma_buf + i * 512, 512,
					       DMA_MEM_TO_DEV);
			if (res)
				return res;
		} else {
			for (j = 0; j < (512 >> 2); j++) {
				writel(*((uint32_t *)(buf)),
				       MLC_BUFF(host->io_base));
				buf += 4;
			}
		}
		writel(*((uint32_t *)(oobbuf)), MLC_BUFF(host->io_base));
		oobbuf += 4;
		writew(*((uint16_t *)(oobbuf)), MLC_BUFF(host->io_base));
		oobbuf += 12;

		/* Auto Encode w/ Bit 8 = 0 (see LPC MLC Controller manual) */
		writeb(0x00, MLC_ECC_AUTO_ENC_REG(host->io_base));

		/* Wait for Controller Ready */
		lpc32xx_waitfunc_controller(chip);
	}

	return nand_prog_page_end_op(chip);
}

static int lpc32xx_read_oob(struct nand_chip *chip, int page)
{
	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);

	/* Read whole page - necessary with MLC controller! */
	lpc32xx_read_page(chip, host->dummy_buf, 1, page);

	return 0;
}

static int lpc32xx_write_oob(struct nand_chip *chip, int page)
{
	/* None, write_oob conflicts with the automatic LPC MLC ECC decoder! */
	return 0;
}

/* Prepares MLC for transfers with H/W ECC enabled: always enabled anyway */
static void lpc32xx_ecc_enable(struct nand_chip *chip, int mode)
{
	/* Always enabled! */
}

static int lpc32xx_dma_setup(struct lpc32xx_nand_host *host)
{
	struct mtd_info *mtd = nand_to_mtd(&host->nand_chip);
	dma_cap_mask_t mask;

	if (!host->pdata || !host->pdata->dma_filter) {
		dev_err(mtd->dev.parent, "no DMA platform data\n");
		return -ENOENT;
	}

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	host->dma_chan = dma_request_channel(mask, host->pdata->dma_filter,
					     "nand-mlc");
	if (!host->dma_chan) {
		dev_err(mtd->dev.parent, "Failed to request DMA channel\n");
		return -EBUSY;
	}

	/*
	 * Set direction to a sensible value even if the dmaengine driver
	 * should ignore it. With the default (DMA_MEM_TO_MEM), the amba-pl08x
	 * driver criticizes it as "alien transfer direction".
	 */
	host->dma_slave_config.direction = DMA_DEV_TO_MEM;
	host->dma_slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	host->dma_slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	host->dma_slave_config.src_maxburst = 128;
	host->dma_slave_config.dst_maxburst = 128;
	/* DMA controller does flow control: */
	host->dma_slave_config.device_fc = false;
	host->dma_slave_config.src_addr = MLC_BUFF(host->io_base_phy);
	host->dma_slave_config.dst_addr = MLC_BUFF(host->io_base_phy);
	if (dmaengine_slave_config(host->dma_chan, &host->dma_slave_config)) {
		dev_err(mtd->dev.parent, "Failed to setup DMA slave\n");
		goto out1;
	}

	return 0;
out1:
	dma_release_channel(host->dma_chan);
	return -ENXIO;
}

static struct lpc32xx_nand_cfg_mlc *lpc32xx_parse_dt(struct device *dev)
{
	struct lpc32xx_nand_cfg_mlc *ncfg;
	struct device_node *np = dev->of_node;

	ncfg = devm_kzalloc(dev, sizeof(*ncfg), GFP_KERNEL);
	if (!ncfg)
		return NULL;

	of_property_read_u32(np, "nxp,tcea-delay", &ncfg->tcea_delay);
	of_property_read_u32(np, "nxp,busy-delay", &ncfg->busy_delay);
	of_property_read_u32(np, "nxp,nand-ta", &ncfg->nand_ta);
	of_property_read_u32(np, "nxp,rd-high", &ncfg->rd_high);
	of_property_read_u32(np, "nxp,rd-low", &ncfg->rd_low);
	of_property_read_u32(np, "nxp,wr-high", &ncfg->wr_high);
	of_property_read_u32(np, "nxp,wr-low", &ncfg->wr_low);

	if (!ncfg->tcea_delay || !ncfg->busy_delay || !ncfg->nand_ta ||
	    !ncfg->rd_high || !ncfg->rd_low || !ncfg->wr_high ||
	    !ncfg->wr_low) {
		dev_err(dev, "chip parameters not specified correctly\n");
		return NULL;
	}

	ncfg->wp_gpio = of_get_named_gpio(np, "gpios", 0);

	return ncfg;
}
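
/*
 * Sketch of a matching device tree node (placeholder values, for
 * illustration only; the base address and timings must come from the
 * board's real configuration):
 *
 *	flash@200a8000 {
 *		compatible = "nxp,lpc3220-mlc";
 *		reg = <0x200a8000 0x11000>;
 *		nxp,tcea-delay = <333333333>;
 *		nxp,busy-delay = <10000000>;
 *		nxp,nand-ta = <18181818>;
 *		nxp,rd-high = <31250000>;
 *		nxp,rd-low = <45454545>;
 *		nxp,wr-high = <40000000>;
 *		nxp,wr-low = <83333333>;
 *		gpios = <...>;	// optional write-protect GPIO
 *	};
 */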

static int lpc32xx_nand_attach_chip(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
	struct device *dev = &host->pdev->dev;

	if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
		return 0;

	host->dma_buf = devm_kzalloc(dev, mtd->writesize, GFP_KERNEL);
	if (!host->dma_buf)
		return -ENOMEM;

	host->dummy_buf = devm_kzalloc(dev, mtd->writesize, GFP_KERNEL);
	if (!host->dummy_buf)
		return -ENOMEM;

	chip->ecc.size = 512;
	chip->ecc.hwctl = lpc32xx_ecc_enable;
	chip->ecc.read_page_raw = lpc32xx_read_page;
	chip->ecc.read_page = lpc32xx_read_page;
	chip->ecc.write_page_raw = lpc32xx_write_page_lowlevel;
	chip->ecc.write_page = lpc32xx_write_page_lowlevel;
	chip->ecc.write_oob = lpc32xx_write_oob;
	chip->ecc.read_oob = lpc32xx_read_oob;
	chip->ecc.strength = 4;
	chip->ecc.bytes = 10;

	mtd_set_ooblayout(mtd, &lpc32xx_ooblayout_ops);
	host->mlcsubpages = mtd->writesize / 512;

	return 0;
}

static const struct nand_controller_ops lpc32xx_nand_controller_ops = {
	.attach_chip = lpc32xx_nand_attach_chip,
};

/*
 * Probe for NAND controller
 */
static int lpc32xx_nand_probe(struct platform_device *pdev)
{
	struct lpc32xx_nand_host *host;
	struct mtd_info *mtd;
	struct nand_chip *nand_chip;
	struct resource *rc;
	int res;

	/* Allocate memory for the device structure (and zero it) */
	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	host->pdev = pdev;

	rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	host->io_base = devm_ioremap_resource(&pdev->dev, rc);
	if (IS_ERR(host->io_base))
		return PTR_ERR(host->io_base);

	host->io_base_phy = rc->start;

	nand_chip = &host->nand_chip;
	mtd = nand_to_mtd(nand_chip);
	if (pdev->dev.of_node)
		host->ncfg = lpc32xx_parse_dt(&pdev->dev);
	if (!host->ncfg) {
		dev_err(&pdev->dev,
			"Missing or bad NAND config from device tree\n");
		return -ENOENT;
	}
	if (host->ncfg->wp_gpio == -EPROBE_DEFER)
		return -EPROBE_DEFER;
	if (gpio_is_valid(host->ncfg->wp_gpio) &&
			gpio_request(host->ncfg->wp_gpio, "NAND WP")) {
		dev_err(&pdev->dev, "GPIO not available\n");
		return -EBUSY;
	}
	lpc32xx_wp_disable(host);

	host->pdata = dev_get_platdata(&pdev->dev);

	/* link the private data structures */
	nand_set_controller_data(nand_chip, host);
	nand_set_flash_node(nand_chip, pdev->dev.of_node);
	mtd->dev.parent = &pdev->dev;

	/* Get NAND clock */
	host->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(host->clk)) {
		dev_err(&pdev->dev, "Clock initialization failure\n");
		res = -ENOENT;
		goto free_gpio;
	}
	res = clk_prepare_enable(host->clk);
	if (res)
		goto put_clk;

	nand_chip->legacy.cmd_ctrl = lpc32xx_nand_cmd_ctrl;
	nand_chip->legacy.dev_ready = lpc32xx_nand_device_ready;
	nand_chip->legacy.chip_delay = 25; /* us */
	nand_chip->legacy.IO_ADDR_R = MLC_DATA(host->io_base);
	nand_chip->legacy.IO_ADDR_W = MLC_DATA(host->io_base);

	/* Init NAND controller */
	lpc32xx_nand_setup(host);

	platform_set_drvdata(pdev, host);

	/* Initialize function pointers */
	nand_chip->legacy.waitfunc = lpc32xx_waitfunc;

	nand_chip->options = NAND_NO_SUBPAGE_WRITE;
	nand_chip->bbt_options = NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
	nand_chip->bbt_td = &lpc32xx_nand_bbt;
	nand_chip->bbt_md = &lpc32xx_nand_bbt_mirror;

	if (use_dma) {
		res = lpc32xx_dma_setup(host);
		if (res) {
			res = -EIO;
			goto unprepare_clk;
		}
	}

	/* initially clear interrupt status */
	readb(MLC_IRQ_SR(host->io_base));

	init_completion(&host->comp_nand);
	init_completion(&host->comp_controller);

	host->irq = platform_get_irq(pdev, 0);
	if (host->irq < 0) {
		res = host->irq;
		goto release_dma_chan;
	}

	if (request_irq(host->irq, &lpc3xxx_nand_irq,
			IRQF_TRIGGER_HIGH, DRV_NAME, host)) {
		dev_err(&pdev->dev, "Error requesting NAND IRQ\n");
		res = -ENXIO;
		goto release_dma_chan;
	}

	/*
	 * Scan to find the device and determine whether it is a
	 * small- or large-block NAND chip.
	 */
	nand_chip->legacy.dummy_controller.ops = &lpc32xx_nand_controller_ops;
	res = nand_scan(nand_chip, 1);
	if (res)
		goto free_irq;

	mtd->name = DRV_NAME;

	res = mtd_device_register(mtd, host->ncfg->parts,
				  host->ncfg->num_parts);
	if (res)
		goto cleanup_nand;

	return 0;

cleanup_nand:
	nand_cleanup(nand_chip);
free_irq:
	free_irq(host->irq, host);
release_dma_chan:
	if (use_dma)
		dma_release_channel(host->dma_chan);
unprepare_clk:
	clk_disable_unprepare(host->clk);
put_clk:
	clk_put(host->clk);
free_gpio:
	lpc32xx_wp_enable(host);
	gpio_free(host->ncfg->wp_gpio);

	return res;
}

/*
 * Remove NAND device
 */
static int lpc32xx_nand_remove(struct platform_device *pdev)
{
	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
	struct nand_chip *chip = &host->nand_chip;
	int ret;

	ret = mtd_device_unregister(nand_to_mtd(chip));
	WARN_ON(ret);
	nand_cleanup(chip);

	free_irq(host->irq, host);
	if (use_dma)
		dma_release_channel(host->dma_chan);

	clk_disable_unprepare(host->clk);
	clk_put(host->clk);

	lpc32xx_wp_enable(host);
	gpio_free(host->ncfg->wp_gpio);

	return 0;
}

#ifdef CONFIG_PM
static int lpc32xx_nand_resume(struct platform_device *pdev)
{
	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
	int ret;

	/* Re-enable NAND clock */
	ret = clk_prepare_enable(host->clk);
	if (ret)
		return ret;

	/* Fresh init of NAND controller */
	lpc32xx_nand_setup(host);

	/* Disable write protect */
	lpc32xx_wp_disable(host);

	return 0;
}

static int lpc32xx_nand_suspend(struct platform_device *pdev, pm_message_t pm)
{
	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);

	/* Enable write protect for safety */
	lpc32xx_wp_enable(host);

	/* Disable clock */
	clk_disable_unprepare(host->clk);
	return 0;
}

#else
#define lpc32xx_nand_resume NULL
#define lpc32xx_nand_suspend NULL
#endif

static const struct of_device_id lpc32xx_nand_match[] = {
	{ .compatible = "nxp,lpc3220-mlc" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, lpc32xx_nand_match);

static struct platform_driver lpc32xx_nand_driver = {
	.probe		= lpc32xx_nand_probe,
	.remove		= lpc32xx_nand_remove,
	.resume		= lpc32xx_nand_resume,
	.suspend	= lpc32xx_nand_suspend,
	.driver		= {
		.name	= DRV_NAME,
		.of_match_table = lpc32xx_nand_match,
	},
};

module_platform_driver(lpc32xx_nand_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
MODULE_DESCRIPTION("NAND driver for the NXP LPC32XX MLC controller");