/*
 * drivers/mtd/nand/pxa3xx_nand.c
 *
 * Copyright © 2005 Intel Corporation
 * Copyright © 2006 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * See Documentation/mtd/nand/pxa3xx-nand.txt for more details.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dma/pxa-dma.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_data/mtd-nand-pxa3xx.h>

#define CHIP_DELAY_TIMEOUT      msecs_to_jiffies(200)
#define NAND_STOP_DELAY         msecs_to_jiffies(40)
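/*
 * The controller transfers at most PAGE_CHUNK_SIZE bytes of page data
 * per command; larger pages are accessed as a sequence of chunks (see
 * the extended command handling below).
 */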
#define PAGE_CHUNK_SIZE         (2048)

/*
 * Define a buffer size for the initial command that detects the flash device:
 * STATUS, READID and PARAM.
 * ONFI param page is 256 bytes, and there are three redundant copies
 * to be read. JEDEC param page is 512 bytes, and there are also three
 * redundant copies to be read.
 * Hence this buffer should be at least 512 x 3. Let's pick 2048.
 */
#define INIT_BUFFER_SIZE        2048

/* registers and bit definitions */
#define NDCR            (0x00) /* Control register */
#define NDTR0CS0        (0x04) /* Timing Parameter 0 for CS0 */
#define NDTR1CS0        (0x0C) /* Timing Parameter 1 for CS0 */
#define NDSR            (0x14) /* Status Register */
#define NDPCR           (0x18) /* Page Count Register */
#define NDBDR0          (0x1C) /* Bad Block Register 0 */
#define NDBDR1          (0x20) /* Bad Block Register 1 */
#define NDECCCTRL       (0x28) /* ECC control */
#define NDDB            (0x40) /* Data Buffer */
#define NDCB0           (0x48) /* Command Buffer0 */
#define NDCB1           (0x4C) /* Command Buffer1 */
#define NDCB2           (0x50) /* Command Buffer2 */

#define NDCR_SPARE_EN           (0x1 << 31)
#define NDCR_ECC_EN             (0x1 << 30)
#define NDCR_DMA_EN             (0x1 << 29)
#define NDCR_ND_RUN             (0x1 << 28)
#define NDCR_DWIDTH_C           (0x1 << 27)
#define NDCR_DWIDTH_M           (0x1 << 26)
#define NDCR_PAGE_SZ            (0x1 << 24)
#define NDCR_NCSX               (0x1 << 23)
#define NDCR_ND_MODE            (0x3 << 21)
#define NDCR_NAND_MODE          (0x0)
#define NDCR_CLR_PG_CNT         (0x1 << 20)
#define NFCV1_NDCR_ARB_CNTL     (0x1 << 19)
#define NFCV2_NDCR_STOP_ON_UNCOR        (0x1 << 19)
#define NDCR_RD_ID_CNT_MASK     (0x7 << 16)
#define NDCR_RD_ID_CNT(x)       (((x) << 16) & NDCR_RD_ID_CNT_MASK)

#define NDCR_RA_START           (0x1 << 15)
#define NDCR_PG_PER_BLK         (0x1 << 14)
#define NDCR_ND_ARB_EN          (0x1 << 12)
#define NDCR_INT_MASK           (0xFFF)

#define NDSR_MASK               (0xfff)
#define NDSR_ERR_CNT_OFF        (16)
#define NDSR_ERR_CNT_MASK       (0x1f)
#define NDSR_ERR_CNT(sr)        (((sr) >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
#define NDSR_RDY                (0x1 << 12)
#define NDSR_FLASH_RDY          (0x1 << 11)
#define NDSR_CS0_PAGED          (0x1 << 10)
#define NDSR_CS1_PAGED          (0x1 << 9)
#define NDSR_CS0_CMDD           (0x1 << 8)
#define NDSR_CS1_CMDD           (0x1 << 7)
#define NDSR_CS0_BBD            (0x1 << 6)
#define NDSR_CS1_BBD            (0x1 << 5)
#define NDSR_UNCORERR           (0x1 << 4)
#define NDSR_CORERR             (0x1 << 3)
#define NDSR_WRDREQ             (0x1 << 2)
#define NDSR_RDDREQ             (0x1 << 1)
#define NDSR_WRCMDREQ           (0x1)

#define NDCB0_LEN_OVRD          (0x1 << 28)
#define NDCB0_ST_ROW_EN         (0x1 << 26)
#define NDCB0_AUTO_RS           (0x1 << 25)
#define NDCB0_CSEL              (0x1 << 24)
#define NDCB0_EXT_CMD_TYPE_MASK (0x7 << 29)
#define NDCB0_EXT_CMD_TYPE(x)   (((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
#define NDCB0_CMD_TYPE_MASK     (0x7 << 21)
#define NDCB0_CMD_TYPE(x)       (((x) << 21) & NDCB0_CMD_TYPE_MASK)
#define NDCB0_NC                (0x1 << 20)
#define NDCB0_DBC               (0x1 << 19)
#define NDCB0_ADDR_CYC_MASK     (0x7 << 16)
#define NDCB0_ADDR_CYC(x)       (((x) << 16) & NDCB0_ADDR_CYC_MASK)
#define NDCB0_CMD2_MASK         (0xff << 8)
#define NDCB0_CMD1_MASK         (0xff)
#define NDCB0_ADDR_CYC_SHIFT    (16)

#define EXT_CMD_TYPE_DISPATCH   6 /* Command dispatch */
#define EXT_CMD_TYPE_NAKED_RW   5 /* Naked read or Naked write */
#define EXT_CMD_TYPE_READ       4 /* Read */
#define EXT_CMD_TYPE_DISP_WR    4 /* Command dispatch with write */
#define EXT_CMD_TYPE_FINAL      3 /* Final command */
#define EXT_CMD_TYPE_LAST_RW    1 /* Last naked read/write */
#define EXT_CMD_TYPE_MONO       0 /* Monolithic read/write */
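/*
 * The extended command types are used by the NFCv2 (Armada 370/XP)
 * controller to split page accesses spanning several chunks into a
 * sequence of naked reads/writes closed by a last/dispatch command.
 */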

/*
 * This should be large enough to read 'ONFI' and 'JEDEC'.
 * Let's use 7 bytes, which is the maximum ID count supported
 * by the controller (see NDCR_RD_ID_CNT_MASK).
 */
#define READ_ID_BYTES           7

/* macros for registers read/write */
#define nand_writel(info, off, val)                                     \
        do {                                                            \
                dev_vdbg(&info->pdev->dev,                              \
                         "%s():%d nand_writel(0x%x, 0x%04x)\n",         \
                         __func__, __LINE__, (val), (off));             \
                writel_relaxed((val), (info)->mmio_base + (off));       \
        } while (0)

#define nand_readl(info, off)                                           \
        ({                                                              \
                unsigned int _v;                                        \
                _v = readl_relaxed((info)->mmio_base + (off));          \
                dev_vdbg(&info->pdev->dev,                              \
                         "%s():%d nand_readl(0x%04x) = 0x%x\n",         \
                         __func__, __LINE__, (off), _v);                \
                _v;                                                     \
        })

/* error code and state */
enum {
        ERR_NONE        = 0,
        ERR_DMABUSERR   = -1,
        ERR_SENDCMD     = -2,
        ERR_UNCORERR    = -3,
        ERR_BBERR       = -4,
        ERR_CORERR      = -5,
};

enum {
        STATE_IDLE = 0,
        STATE_PREPARED,
        STATE_CMD_HANDLE,
        STATE_DMA_READING,
        STATE_DMA_WRITING,
        STATE_DMA_DONE,
        STATE_PIO_READING,
        STATE_PIO_WRITING,
        STATE_CMD_DONE,
        STATE_READY,
};

enum pxa3xx_nand_variant {
        PXA3XX_NAND_VARIANT_PXA,
        PXA3XX_NAND_VARIANT_ARMADA370,
};

struct pxa3xx_nand_host {
        struct nand_chip        chip;
        void                    *info_data;

        /* page size of attached chip */
        int                     use_ecc;
        int                     cs;

        /* calculated from pxa3xx_nand_flash data */
        unsigned int            col_addr_cycles;
        unsigned int            row_addr_cycles;
};

struct pxa3xx_nand_info {
        struct nand_hw_control  controller;
        struct platform_device  *pdev;

        struct clk              *clk;
        void __iomem            *mmio_base;
        unsigned long           mmio_phys;
        struct completion       cmd_complete, dev_ready;

        unsigned int            buf_start;
        unsigned int            buf_count;
        unsigned int            buf_size;
        unsigned int            data_buff_pos;
        unsigned int            oob_buff_pos;

        /* DMA information */
        struct scatterlist      sg;
        enum dma_data_direction dma_dir;
        struct dma_chan         *dma_chan;
        dma_cookie_t            dma_cookie;
        int                     drcmr_dat;

        unsigned char           *data_buff;
        unsigned char           *oob_buff;
        dma_addr_t              data_buff_phys;
        int                     data_dma_ch;

        struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
        unsigned int            state;

        /*
         * This driver supports NFCv1 (as found in PXA SoC)
         * and NFCv2 (as found in Armada 370/XP SoC).
         */
        enum pxa3xx_nand_variant variant;

        int                     cs;
        int                     use_ecc;        /* use HW ECC ? */
        int                     ecc_bch;        /* using BCH ECC? */
        int                     use_dma;        /* use DMA ? */
        int                     use_spare;      /* use spare ? */
        int                     need_wait;

        /* Amount of real data per full chunk */
        unsigned int            chunk_size;

        /* Amount of spare data per full chunk */
        unsigned int            spare_size;

        /* Number of full chunks (i.e. chunk_size + spare_size) */
        unsigned int            nfullchunks;

        /*
         * Total number of chunks. If equal to nfullchunks, then there
         * are only full chunks. Otherwise, there is one last chunk of
         * size (last_chunk_size + last_spare_size).
         */
        unsigned int            ntotalchunks;

        /* Amount of real data in the last chunk */
        unsigned int            last_chunk_size;

        /* Amount of spare data in the last chunk */
        unsigned int            last_spare_size;

        unsigned int            ecc_size;
        unsigned int            ecc_err_cnt;
        unsigned int            max_bitflips;
        int                     retcode;

        /*
         * Variables only valid during command execution.
         * step_chunk_size and step_spare_size are the amounts of real
         * data and spare data in the current chunk. cur_chunk is the
         * chunk currently being read or programmed.
         */
        unsigned int            step_chunk_size;
        unsigned int            step_spare_size;
        unsigned int            cur_chunk;

        /* cached register value */
        uint32_t                reg_ndcr;
        uint32_t                ndtr0cs0;
        uint32_t                ndtr1cs0;

        /* generated NDCBx register values */
        uint32_t                ndcb0;
        uint32_t                ndcb1;
        uint32_t                ndcb2;
        uint32_t                ndcb3;
};

static bool use_dma = true;
module_param(use_dma, bool, 0444);
MODULE_PARM_DESC(use_dma, "enable DMA for data transfers to/from the NAND HW");

struct pxa3xx_nand_timing {
        unsigned int    tCH;  /* Enable signal hold time */
        unsigned int    tCS;  /* Enable signal setup time */
        unsigned int    tWH;  /* ND_nWE high duration */
        unsigned int    tWP;  /* ND_nWE pulse time */
        unsigned int    tRH;  /* ND_nRE high duration */
        unsigned int    tRP;  /* ND_nRE pulse width */
        unsigned int    tR;   /* ND_nWE high to ND_nRE low for read */
        unsigned int    tWHR; /* ND_nWE high to ND_nRE low for status read */
        unsigned int    tAR;  /* ND_ALE low to ND_nRE low delay */
};

struct pxa3xx_nand_flash {
        uint32_t        chip_id;
        unsigned int    flash_width;    /* Width of Flash memory (DWIDTH_M) */
        unsigned int    dfc_width;      /* Width of flash controller (DWIDTH_C) */
        struct pxa3xx_nand_timing *timing;      /* NAND Flash timing */
};

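/* Values below are tCH, tCS, tWH, tWP, tRH, tRP, tR, tWHR, tAR (all in ns) */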
static struct pxa3xx_nand_timing timing[] = {
        { 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
        { 10,  0, 20,  40, 30,  40, 11123, 110, 10, },
        { 10, 25, 15,  25, 15,  30, 25000,  60, 10, },
        { 10, 35, 15,  25, 15,  25, 25000,  60, 10, },
};

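/*
 * chip_id packs the first two READID bytes: manufacturer ID in the low
 * byte, device ID in the high byte (see pxa3xx_nand_init_timings_compat()).
 */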
static struct pxa3xx_nand_flash builtin_flash_types[] = {
        { 0x46ec, 16, 16, &timing[1] },
        { 0xdaec,  8,  8, &timing[1] },
        { 0xd7ec,  8,  8, &timing[1] },
        { 0xa12c,  8,  8, &timing[2] },
        { 0xb12c, 16, 16, &timing[2] },
        { 0xdc2c,  8,  8, &timing[2] },
        { 0xcc2c, 16, 16, &timing[2] },
        { 0xba20, 16, 16, &timing[3] },
};

static int pxa3xx_ooblayout_ecc(struct mtd_info *mtd, int section,
                                struct mtd_oob_region *oobregion)
{
        struct nand_chip *chip = mtd_to_nand(mtd);
        struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
        struct pxa3xx_nand_info *info = host->info_data;
        int nchunks = mtd->writesize / info->chunk_size;

        if (section >= nchunks)
                return -ERANGE;

        oobregion->offset = ((info->ecc_size + info->spare_size) * section) +
                            info->spare_size;
        oobregion->length = info->ecc_size;

        return 0;
}

static int pxa3xx_ooblayout_free(struct mtd_info *mtd, int section,
                                 struct mtd_oob_region *oobregion)
{
        struct nand_chip *chip = mtd_to_nand(mtd);
        struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
        struct pxa3xx_nand_info *info = host->info_data;
        int nchunks = mtd->writesize / info->chunk_size;

        if (section >= nchunks)
                return -ERANGE;

        if (!info->spare_size)
                return 0;

        oobregion->offset = section * (info->ecc_size + info->spare_size);
        oobregion->length = info->spare_size;
        if (!section) {
                /*
                 * Bootrom looks in bytes 0 & 5 for bad blocks for the
                 * 4KB page / 4bit BCH combination.
                 */
                if (mtd->writesize == 4096 && info->chunk_size == 2048) {
                        oobregion->offset += 6;
                        oobregion->length -= 6;
                } else {
                        oobregion->offset += 2;
                        oobregion->length -= 2;
                }
        }

        return 0;
}

static const struct mtd_ooblayout_ops pxa3xx_ooblayout_ops = {
        .ecc = pxa3xx_ooblayout_ecc,
        .free = pxa3xx_ooblayout_free,
};

static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };

static struct nand_bbt_descr bbt_main_descr = {
        .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
                | NAND_BBT_2BIT | NAND_BBT_VERSION,
        .offs = 8,
        .len = 6,
        .veroffs = 14,
        .maxblocks = 8,         /* Last 8 blocks in each chip */
        .pattern = bbt_pattern
};

static struct nand_bbt_descr bbt_mirror_descr = {
        .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
                | NAND_BBT_2BIT | NAND_BBT_VERSION,
        .offs = 8,
        .len = 6,
        .veroffs = 14,
        .maxblocks = 8,         /* Last 8 blocks in each chip */
        .pattern = bbt_mirror_pattern
};

#define NDTR0_tCH(c)    (min((c), 7) << 19)
#define NDTR0_tCS(c)    (min((c), 7) << 16)
#define NDTR0_tWH(c)    (min((c), 7) << 11)
#define NDTR0_tWP(c)    (min((c), 7) << 8)
#define NDTR0_tRH(c)    (min((c), 7) << 3)
#define NDTR0_tRP(c)    (min((c), 7) << 0)

#define NDTR1_tR(c)     (min((c), 65535) << 16)
#define NDTR1_tWHR(c)   (min((c), 15) << 4)
#define NDTR1_tAR(c)    (min((c), 15) << 0)

/*
 * Convert nanoseconds to NAND flash controller clock cycles. Note that
 * the integer division truncates, so the result may round down by one
 * cycle.
 */
#define ns2cycle(ns, clk)       (int)((ns) * ((clk) / 1000000) / 1000)

static const struct of_device_id pxa3xx_nand_dt_ids[] = {
        {
                .compatible = "marvell,pxa3xx-nand",
                .data       = (void *)PXA3XX_NAND_VARIANT_PXA,
        },
        {
                .compatible = "marvell,armada370-nand",
                .data       = (void *)PXA3XX_NAND_VARIANT_ARMADA370,
        },
        {}
};
MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);

static enum pxa3xx_nand_variant
pxa3xx_nand_get_variant(struct platform_device *pdev)
{
        const struct of_device_id *of_id =
                        of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
        if (!of_id)
                return PXA3XX_NAND_VARIANT_PXA;
        return (enum pxa3xx_nand_variant)of_id->data;
}

static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
                                   const struct pxa3xx_nand_timing *t)
{
        struct pxa3xx_nand_info *info = host->info_data;
        unsigned long nand_clk = clk_get_rate(info->clk);
        uint32_t ndtr0, ndtr1;

        ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
                NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
                NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
                NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
                NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
                NDTR0_tRP(ns2cycle(t->tRP, nand_clk));

        ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
                NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
                NDTR1_tAR(ns2cycle(t->tAR, nand_clk));

        info->ndtr0cs0 = ndtr0;
        info->ndtr1cs0 = ndtr1;
        nand_writel(info, NDTR0CS0, ndtr0);
        nand_writel(info, NDTR1CS0, ndtr1);
}

static void pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host *host,
                                       const struct nand_sdr_timings *t)
{
        struct pxa3xx_nand_info *info = host->info_data;
        struct nand_chip *chip = &host->chip;
        unsigned long nand_clk = clk_get_rate(info->clk);
        uint32_t ndtr0, ndtr1;

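        /* nand_sdr_timings values are in picoseconds: convert them to ns */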
        u32 tCH_min = DIV_ROUND_UP(t->tCH_min, 1000);
        u32 tCS_min = DIV_ROUND_UP(t->tCS_min, 1000);
        u32 tWH_min = DIV_ROUND_UP(t->tWH_min, 1000);
        u32 tWP_min = DIV_ROUND_UP(t->tWC_min - t->tWH_min, 1000);
        u32 tREH_min = DIV_ROUND_UP(t->tREH_min, 1000);
        u32 tRP_min = DIV_ROUND_UP(t->tRC_min - t->tREH_min, 1000);
        u32 tR = chip->chip_delay * 1000;
        u32 tWHR_min = DIV_ROUND_UP(t->tWHR_min, 1000);
        u32 tAR_min = DIV_ROUND_UP(t->tAR_min, 1000);

        /* fall back to a default value if tR = 0 */
        if (!tR)
                tR = 20000;

        ndtr0 = NDTR0_tCH(ns2cycle(tCH_min, nand_clk)) |
                NDTR0_tCS(ns2cycle(tCS_min, nand_clk)) |
                NDTR0_tWH(ns2cycle(tWH_min, nand_clk)) |
                NDTR0_tWP(ns2cycle(tWP_min, nand_clk)) |
                NDTR0_tRH(ns2cycle(tREH_min, nand_clk)) |
                NDTR0_tRP(ns2cycle(tRP_min, nand_clk));

        ndtr1 = NDTR1_tR(ns2cycle(tR, nand_clk)) |
                NDTR1_tWHR(ns2cycle(tWHR_min, nand_clk)) |
                NDTR1_tAR(ns2cycle(tAR_min, nand_clk));

        info->ndtr0cs0 = ndtr0;
        info->ndtr1cs0 = ndtr1;
        nand_writel(info, NDTR0CS0, ndtr0);
        nand_writel(info, NDTR1CS0, ndtr1);
}

static int pxa3xx_nand_init_timings_compat(struct pxa3xx_nand_host *host,
                                           unsigned int *flash_width,
                                           unsigned int *dfc_width)
{
        struct nand_chip *chip = &host->chip;
        struct pxa3xx_nand_info *info = host->info_data;
        const struct pxa3xx_nand_flash *f = NULL;
        struct mtd_info *mtd = nand_to_mtd(&host->chip);
        int i, id, ntypes;

        ntypes = ARRAY_SIZE(builtin_flash_types);

        chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);

        id = chip->read_byte(mtd);
        id |= chip->read_byte(mtd) << 0x8;

        for (i = 0; i < ntypes; i++) {
                f = &builtin_flash_types[i];

                if (f->chip_id == id)
                        break;
        }

        if (i == ntypes) {
                dev_err(&info->pdev->dev, "Error: timings not found\n");
                return -EINVAL;
        }

        pxa3xx_nand_set_timing(host, f->timing);

        *flash_width = f->flash_width;
        *dfc_width = f->dfc_width;

        return 0;
}

static int pxa3xx_nand_init_timings_onfi(struct pxa3xx_nand_host *host,
                                         int mode)
{
        const struct nand_sdr_timings *timings;

        mode = fls(mode) - 1;
        if (mode < 0)
                mode = 0;

        timings = onfi_async_timing_mode_to_sdr_timings(mode);
        if (IS_ERR(timings))
                return PTR_ERR(timings);

        pxa3xx_nand_set_sdr_timing(host, timings);

        return 0;
}

static int pxa3xx_nand_init(struct pxa3xx_nand_host *host)
{
        struct nand_chip *chip = &host->chip;
        struct pxa3xx_nand_info *info = host->info_data;
        unsigned int flash_width = 0, dfc_width = 0;
        int mode, err;

        mode = onfi_get_async_timing_mode(chip);
        if (mode == ONFI_TIMING_MODE_UNKNOWN) {
                err = pxa3xx_nand_init_timings_compat(host, &flash_width,
                                                      &dfc_width);
                if (err)
                        return err;

                if (flash_width == 16) {
                        info->reg_ndcr |= NDCR_DWIDTH_M;
                        chip->options |= NAND_BUSWIDTH_16;
                }

                info->reg_ndcr |= (dfc_width == 16) ? NDCR_DWIDTH_C : 0;
        } else {
                err = pxa3xx_nand_init_timings_onfi(host, mode);
                if (err)
                        return err;
        }

        return 0;
}

/*
 * NOTE: ND_RUN must be set first, and only then may the command buffer
 * be written; otherwise the controller does not start. All interrupts
 * are enabled at the same time, and pxa3xx_nand_irq() handles the rest
 * of the logic.
 */
static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
{
        uint32_t ndcr;

        ndcr = info->reg_ndcr;

        if (info->use_ecc) {
                ndcr |= NDCR_ECC_EN;
                if (info->ecc_bch)
                        nand_writel(info, NDECCCTRL, 0x1);
        } else {
                ndcr &= ~NDCR_ECC_EN;
                if (info->ecc_bch)
                        nand_writel(info, NDECCCTRL, 0x0);
        }

        if (info->use_dma)
                ndcr |= NDCR_DMA_EN;
        else
                ndcr &= ~NDCR_DMA_EN;

        if (info->use_spare)
                ndcr |= NDCR_SPARE_EN;
        else
                ndcr &= ~NDCR_SPARE_EN;

        ndcr |= NDCR_ND_RUN;

        /* clear status bits and run */
        nand_writel(info, NDSR, NDSR_MASK);
        nand_writel(info, NDCR, 0);
        nand_writel(info, NDCR, ndcr);
}

static void pxa3xx_nand_stop(struct pxa3xx_nand_info *info)
{
        uint32_t ndcr;
        int timeout = NAND_STOP_DELAY;

        /* wait for the RUN bit in NDCR to become 0 */
        ndcr = nand_readl(info, NDCR);
        while ((ndcr & NDCR_ND_RUN) && (timeout-- > 0)) {
                ndcr = nand_readl(info, NDCR);
                udelay(1);
        }

        if (timeout <= 0) {
                ndcr &= ~NDCR_ND_RUN;
                nand_writel(info, NDCR, ndcr);
        }
        if (info->dma_chan)
                dmaengine_terminate_all(info->dma_chan);

        /* clear status bits */
        nand_writel(info, NDSR, NDSR_MASK);
}

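/*
 * In NDCR the interrupt bits act as masks: a set bit disables the
 * corresponding interrupt source. Hence enabling interrupts means
 * clearing bits, and disabling them means setting bits.
 */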
static void __maybe_unused
enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
{
        uint32_t ndcr;

        ndcr = nand_readl(info, NDCR);
        nand_writel(info, NDCR, ndcr & ~int_mask);
}

static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
{
        uint32_t ndcr;

        ndcr = nand_readl(info, NDCR);
        nand_writel(info, NDCR, ndcr | int_mask);
}

static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
{
        if (info->ecc_bch) {
                u32 val;
                int ret;

                /*
                 * According to the datasheet, when reading from NDDB
                 * with BCH enabled, after each 32 bytes read, we have
                 * to make sure that the NDSR.RDDREQ bit is set.
                 *
                 * Drain the FIFO eight 32-bit words at a time, and skip
                 * the polling on the last read. Note that len counts
                 * 32-bit words, not bytes.
                 */
                while (len > 8) {
                        ioread32_rep(info->mmio_base + NDDB, data, 8);

                        ret = readl_relaxed_poll_timeout(info->mmio_base + NDSR, val,
                                                         val & NDSR_RDDREQ, 1000, 5000);
                        if (ret) {
                                dev_err(&info->pdev->dev,
                                        "Timeout on RDDREQ while draining the FIFO\n");
                                return;
                        }

                        data += 32;
                        len -= 8;
                }
        }

        ioread32_rep(info->mmio_base + NDDB, data, len);
}

static void handle_data_pio(struct pxa3xx_nand_info *info)
{
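        /*
         * Lengths passed to writesl()/drain_fifo() are in 32-bit words,
         * as the data FIFO (NDDB) is accessed one word at a time.
         */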
        switch (info->state) {
        case STATE_PIO_WRITING:
                if (info->step_chunk_size)
                        writesl(info->mmio_base + NDDB,
                                info->data_buff + info->data_buff_pos,
                                DIV_ROUND_UP(info->step_chunk_size, 4));

                if (info->step_spare_size)
                        writesl(info->mmio_base + NDDB,
                                info->oob_buff + info->oob_buff_pos,
                                DIV_ROUND_UP(info->step_spare_size, 4));
                break;
        case STATE_PIO_READING:
                if (info->step_chunk_size)
                        drain_fifo(info,
                                   info->data_buff + info->data_buff_pos,
                                   DIV_ROUND_UP(info->step_chunk_size, 4));

                if (info->step_spare_size)
                        drain_fifo(info,
                                   info->oob_buff + info->oob_buff_pos,
                                   DIV_ROUND_UP(info->step_spare_size, 4));
                break;
        default:
                dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
                                info->state);
                BUG();
        }

        /* Update buffer pointers for multi-page read/write */
        info->data_buff_pos += info->step_chunk_size;
        info->oob_buff_pos += info->step_spare_size;
}

static void pxa3xx_nand_data_dma_irq(void *data)
{
        struct pxa3xx_nand_info *info = data;
        struct dma_tx_state state;
        enum dma_status status;

        status = dmaengine_tx_status(info->dma_chan, info->dma_cookie, &state);
        if (likely(status == DMA_COMPLETE)) {
                info->state = STATE_DMA_DONE;
        } else {
                dev_err(&info->pdev->dev, "DMA error on data channel\n");
                info->retcode = ERR_DMABUSERR;
        }
        dma_unmap_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir);

        nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
        enable_int(info, NDCR_INT_MASK);
}

static void start_data_dma(struct pxa3xx_nand_info *info)
{
        enum dma_transfer_direction direction;
        struct dma_async_tx_descriptor *tx;

        switch (info->state) {
        case STATE_DMA_WRITING:
                info->dma_dir = DMA_TO_DEVICE;
                direction = DMA_MEM_TO_DEV;
                break;
        case STATE_DMA_READING:
                info->dma_dir = DMA_FROM_DEVICE;
                direction = DMA_DEV_TO_MEM;
                break;
        default:
                dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
                                info->state);
                BUG();
        }
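        /* Transfer the data chunk, plus spare and ECC bytes when enabled */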
        info->sg.length = info->chunk_size;
        if (info->use_spare)
                info->sg.length += info->spare_size + info->ecc_size;
        dma_map_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir);

        tx = dmaengine_prep_slave_sg(info->dma_chan, &info->sg, 1, direction,
                                     DMA_PREP_INTERRUPT);
        if (!tx) {
                dev_err(&info->pdev->dev, "prep_slave_sg() failed\n");
                return;
        }
        tx->callback = pxa3xx_nand_data_dma_irq;
        tx->callback_param = info;
        info->dma_cookie = dmaengine_submit(tx);
        dma_async_issue_pending(info->dma_chan);
        dev_dbg(&info->pdev->dev, "%s(dir=%d cookie=%x size=%u)\n",
                __func__, direction, info->dma_cookie, info->sg.length);
}

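/*
 * PIO data transfers run in the threaded half of the interrupt handler,
 * since drain_fifo() may sleep while polling NDSR.RDDREQ.
 */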
static irqreturn_t pxa3xx_nand_irq_thread(int irq, void *data)
{
        struct pxa3xx_nand_info *info = data;

        handle_data_pio(info);

        info->state = STATE_CMD_DONE;
        nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);

        return IRQ_HANDLED;
}

static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
{
        struct pxa3xx_nand_info *info = devid;
        unsigned int status, is_completed = 0, is_ready = 0;
        unsigned int ready, cmd_done;
        irqreturn_t ret = IRQ_HANDLED;

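        /* CS0 and CS1 report ready and command-done via different NDSR bits */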
        if (info->cs == 0) {
                ready           = NDSR_FLASH_RDY;
                cmd_done        = NDSR_CS0_CMDD;
        } else {
                ready           = NDSR_RDY;
                cmd_done        = NDSR_CS1_CMDD;
        }

        status = nand_readl(info, NDSR);

        if (status & NDSR_UNCORERR)
                info->retcode = ERR_UNCORERR;
        if (status & NDSR_CORERR) {
                info->retcode = ERR_CORERR;
                if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
                    info->ecc_bch)
                        info->ecc_err_cnt = NDSR_ERR_CNT(status);
                else
                        info->ecc_err_cnt = 1;

                /*
                 * Each chunk composing a page is corrected independently,
                 * and we need to store the maximum number of corrected
                 * bitflips to return it to the MTD layer in ecc.read_page().
                 */
                info->max_bitflips = max_t(unsigned int,
                                           info->max_bitflips,
                                           info->ecc_err_cnt);
        }
        if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
                /* decide whether to use DMA or PIO for the data transfer */
                if (info->use_dma) {
                        disable_int(info, NDCR_INT_MASK);
                        info->state = (status & NDSR_RDDREQ) ?
                                      STATE_DMA_READING : STATE_DMA_WRITING;
                        start_data_dma(info);
                        goto NORMAL_IRQ_EXIT;
                } else {
                        info->state = (status & NDSR_RDDREQ) ?
                                      STATE_PIO_READING : STATE_PIO_WRITING;
                        ret = IRQ_WAKE_THREAD;
                        goto NORMAL_IRQ_EXIT;
                }
        }
        if (status & cmd_done) {
                info->state = STATE_CMD_DONE;
                is_completed = 1;
        }
        if (status & ready) {
                info->state = STATE_READY;
                is_ready = 1;
        }

        /*
         * Clear all status bits before issuing the next command, which
         * can and will alter the status bits and will deserve a new
         * interrupt on its own. This also lets the controller de-assert
         * the IRQ line.
         */
        nand_writel(info, NDSR, status);

        if (status & NDSR_WRCMDREQ) {
                status &= ~NDSR_WRCMDREQ;
                info->state = STATE_CMD_HANDLE;

                /*
                 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
                 * must be loaded by writing either 12 or 16 bytes directly
                 * to NDCB0, four bytes at a time.
                 *
                 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored,
                 * but each NDCBx register can be read.
                 */
                nand_writel(info, NDCB0, info->ndcb0);
                nand_writel(info, NDCB0, info->ndcb1);
                nand_writel(info, NDCB0, info->ndcb2);

                /* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
                if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
                        nand_writel(info, NDCB0, info->ndcb3);
        }

        if (is_completed)
                complete(&info->cmd_complete);
        if (is_ready)
                complete(&info->dev_ready);
NORMAL_IRQ_EXIT:
        return ret;
}

static inline int is_buf_blank(uint8_t *buf, size_t len)
{
        for (; len > 0; len--)
                if (*buf++ != 0xff)
                        return 0;
        return 1;
}

static void set_command_address(struct pxa3xx_nand_info *info,
                unsigned int page_size, uint16_t column, int page_addr)
{
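        /*
         * Large pages: NDCB1 holds the 16-bit column and the low 16 bits
         * of the page address; NDCB2 holds page address bits [23:16].
         * Small pages: NDCB1 holds the 8-bit column and up to 24 bits of
         * page address.
         */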
        /* small page addr setting */
        if (page_size < PAGE_CHUNK_SIZE) {
                info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
                                | (column & 0xFF);

                info->ndcb2 = 0;
        } else {
                info->ndcb1 = ((page_addr & 0xFFFF) << 16)
                                | (column & 0xFFFF);

                if (page_addr & 0xFF0000)
                        info->ndcb2 = (page_addr & 0xFF0000) >> 16;
                else
                        info->ndcb2 = 0;
        }
}

static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
{
        struct pxa3xx_nand_host *host = info->host[info->cs];
        struct mtd_info *mtd = nand_to_mtd(&host->chip);

        /* reset the data and OOB column pointers */
        info->buf_start         = 0;
        info->buf_count         = 0;
        info->data_buff_pos     = 0;
        info->oob_buff_pos      = 0;
        info->step_chunk_size   = 0;
        info->step_spare_size   = 0;
        info->cur_chunk         = 0;
        info->use_ecc           = 0;
        info->use_spare         = 1;
        info->retcode           = ERR_NONE;
        info->ecc_err_cnt       = 0;
        info->ndcb3             = 0;
        info->need_wait         = 0;

        switch (command) {
        case NAND_CMD_READ0:
        case NAND_CMD_PAGEPROG:
                info->use_ecc = 1;
                break;
        case NAND_CMD_PARAM:
                info->use_spare = 0;
                break;
        default:
                info->ndcb1 = 0;
                info->ndcb2 = 0;
                break;
        }

        /*
         * If we are about to issue a read command, or about to set
         * the write address, then clean the data buffer.
         */
        if (command == NAND_CMD_READ0 ||
            command == NAND_CMD_READOOB ||
            command == NAND_CMD_SEQIN) {
                info->buf_count = mtd->writesize + mtd->oobsize;
                memset(info->data_buff, 0xFF, info->buf_count);
        }
}

static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
                int ext_cmd_type, uint16_t column, int page_addr)
{
        int addr_cycle, exec_cmd;
        struct pxa3xx_nand_host *host;
        struct mtd_info *mtd;

        host = info->host[info->cs];
        mtd = nand_to_mtd(&host->chip);
        addr_cycle = 0;
        exec_cmd = 1;

        if (info->cs != 0)
                info->ndcb0 = NDCB0_CSEL;
        else
                info->ndcb0 = 0;

        if (command == NAND_CMD_SEQIN)
                exec_cmd = 0;

        addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
                                    + host->col_addr_cycles);

        switch (command) {
        case NAND_CMD_READOOB:
        case NAND_CMD_READ0:
                info->buf_start = column;
                info->ndcb0 |= NDCB0_CMD_TYPE(0)
                                | addr_cycle
                                | NAND_CMD_READ0;

                if (command == NAND_CMD_READOOB)
                        info->buf_start += mtd->writesize;

                if (info->cur_chunk < info->nfullchunks) {
                        info->step_chunk_size = info->chunk_size;
                        info->step_spare_size = info->spare_size;
                } else {
                        info->step_chunk_size = info->last_chunk_size;
                        info->step_spare_size = info->last_spare_size;
                }

                /*
                 * Multiple page read needs an 'extended command type' field,
                 * which is either naked-read or last-read according to the
                 * state.
                 */
                if (mtd->writesize == PAGE_CHUNK_SIZE) {
                        info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
                } else if (mtd->writesize > PAGE_CHUNK_SIZE) {
                        info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
                                        | NDCB0_LEN_OVRD
                                        | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
                        info->ndcb3 = info->step_chunk_size +
                                info->step_spare_size;
                }

                set_command_address(info, mtd->writesize, column, page_addr);
                break;

        case NAND_CMD_SEQIN:

                info->buf_start = column;
                set_command_address(info, mtd->writesize, 0, page_addr);

                /*
                 * Multiple page programming needs to execute the initial
                 * SEQIN command that sets the page address.
                 */
                if (mtd->writesize > PAGE_CHUNK_SIZE) {
                        info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
                                | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
                                | addr_cycle
                                | command;
                        exec_cmd = 1;
                }
                break;

        case NAND_CMD_PAGEPROG:
                if (is_buf_blank(info->data_buff,
                                        (mtd->writesize + mtd->oobsize))) {
                        exec_cmd = 0;
                        break;
                }

                if (info->cur_chunk < info->nfullchunks) {
                        info->step_chunk_size = info->chunk_size;
                        info->step_spare_size = info->spare_size;
                } else {
                        info->step_chunk_size = info->last_chunk_size;
                        info->step_spare_size = info->last_spare_size;
                }

                /* Second command setting for large pages */
                if (mtd->writesize > PAGE_CHUNK_SIZE) {
                        /*
                         * Multiple page write uses the 'extended command'
                         * field. This can be used to issue a command dispatch
                         * or a naked-write depending on the current stage.
                         */
                        info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
                                        | NDCB0_LEN_OVRD
                                        | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
                        info->ndcb3 = info->step_chunk_size +
                                      info->step_spare_size;

                        /*
                         * This is the command dispatch that completes a chunked
                         * page program operation.
                         */
                        if (info->cur_chunk == info->ntotalchunks) {
                                info->ndcb0 = NDCB0_CMD_TYPE(0x1)
                                        | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
                                        | command;
                                info->ndcb1 = 0;
                                info->ndcb2 = 0;
                                info->ndcb3 = 0;
                        }
                } else {
                        info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
                                        | NDCB0_AUTO_RS
                                        | NDCB0_ST_ROW_EN
                                        | NDCB0_DBC
                                        | (NAND_CMD_PAGEPROG << 8)
                                        | NAND_CMD_SEQIN
                                        | addr_cycle;
                }
                break;

        case NAND_CMD_PARAM:
                info->buf_count = INIT_BUFFER_SIZE;
                info->ndcb0 |= NDCB0_CMD_TYPE(0)
                                | NDCB0_ADDR_CYC(1)
                                | NDCB0_LEN_OVRD
                                | command;
                info->ndcb1 = (column & 0xFF);
                info->ndcb3 = INIT_BUFFER_SIZE;
                info->step_chunk_size = INIT_BUFFER_SIZE;
                break;

        case NAND_CMD_READID:
                info->buf_count = READ_ID_BYTES;
                info->ndcb0 |= NDCB0_CMD_TYPE(3)
                                | NDCB0_ADDR_CYC(1)
                                | command;
                info->ndcb1 = (column & 0xFF);

                info->step_chunk_size = 8;
                break;
        case NAND_CMD_STATUS:
                info->buf_count = 1;
                info->ndcb0 |= NDCB0_CMD_TYPE(4)
                                | NDCB0_ADDR_CYC(1)
                                | command;

                info->step_chunk_size = 8;
                break;

        case NAND_CMD_ERASE1:
                info->ndcb0 |= NDCB0_CMD_TYPE(2)
                                | NDCB0_AUTO_RS
                                | NDCB0_ADDR_CYC(3)
                                | NDCB0_DBC
                                | (NAND_CMD_ERASE2 << 8)
                                | NAND_CMD_ERASE1;
                info->ndcb1 = page_addr;
                info->ndcb2 = 0;

                break;
        case NAND_CMD_RESET:
                info->ndcb0 |= NDCB0_CMD_TYPE(5)
                                | command;

                break;

        case NAND_CMD_ERASE2:
                exec_cmd = 0;
                break;

        default:
                exec_cmd = 0;
                dev_err(&info->pdev->dev, "unsupported command %x\n",
                                command);
                break;
        }

        return exec_cmd;
}

static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
                         int column, int page_addr)
{
        struct nand_chip *chip = mtd_to_nand(mtd);
        struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
        struct pxa3xx_nand_info *info = host->info_data;
        int exec_cmd;

        /*
         * If this is an x16 device, then convert the input
         * "byte" address into a "word" address appropriate
         * for indexing a word-oriented device.
         */
        if (info->reg_ndcr & NDCR_DWIDTH_M)
                column /= 2;

        /*
         * Different NAND chips may be hooked to different chip selects,
         * so check whether the chip select has changed; if so, reload
         * the timings.
         */
        if (info->cs != host->cs) {
                info->cs = host->cs;
                nand_writel(info, NDTR0CS0, info->ndtr0cs0);
                nand_writel(info, NDTR1CS0, info->ndtr1cs0);
        }

        prepare_start_command(info, command);

        info->state = STATE_PREPARED;
        exec_cmd = prepare_set_command(info, command, 0, column, page_addr);

        if (exec_cmd) {
                init_completion(&info->cmd_complete);
                init_completion(&info->dev_ready);
                info->need_wait = 1;
                pxa3xx_nand_start(info);

                if (!wait_for_completion_timeout(&info->cmd_complete,
                    CHIP_DELAY_TIMEOUT)) {
                        dev_err(&info->pdev->dev, "Wait timed out!\n");
                        /* Stop the state machine for the next command cycle */
                        pxa3xx_nand_stop(info);
                }
        }
        info->state = STATE_IDLE;
}

static void nand_cmdfunc_extended(struct mtd_info *mtd,
                                  const unsigned command,
                                  int column, int page_addr)
{
        struct nand_chip *chip = mtd_to_nand(mtd);
        struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
        struct pxa3xx_nand_info *info = host->info_data;
        int exec_cmd, ext_cmd_type;

        /*
         * If this is an x16 device, then convert the input
         * "byte" address into a "word" address appropriate
         * for indexing a word-oriented device.
         */
        if (info->reg_ndcr & NDCR_DWIDTH_M)
                column /= 2;

        /*
         * Different NAND chips may be hooked to different chip selects,
         * so check whether the chip select has changed; if so, reload
         * the timings.
         */
        if (info->cs != host->cs) {
                info->cs = host->cs;
                nand_writel(info, NDTR0CS0, info->ndtr0cs0);
                nand_writel(info, NDTR1CS0, info->ndtr1cs0);
        }

        /* Select the extended command for the first command */
        switch (command) {
        case NAND_CMD_READ0:
        case NAND_CMD_READOOB:
                ext_cmd_type = EXT_CMD_TYPE_MONO;
                break;
        case NAND_CMD_SEQIN:
                ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
                break;
        case NAND_CMD_PAGEPROG:
                ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
                break;
        default:
                ext_cmd_type = 0;
                break;
        }

        prepare_start_command(info, command);

        /*
         * Prepare the "is ready" completion before starting a command
         * transaction sequence. If the command is not executed the
         * completion will be completed, see below.
         *
         * We can do that outside the loop because the command variable
         * is invariant and thus so is exec_cmd.
         */
        info->need_wait = 1;
        init_completion(&info->dev_ready);
        do {
                info->state = STATE_PREPARED;

                exec_cmd = prepare_set_command(info, command, ext_cmd_type,
                                               column, page_addr);
                if (!exec_cmd) {
                        info->need_wait = 0;
                        complete(&info->dev_ready);
                        break;
                }

                init_completion(&info->cmd_complete);
                pxa3xx_nand_start(info);

                if (!wait_for_completion_timeout(&info->cmd_complete,
                    CHIP_DELAY_TIMEOUT)) {
                        dev_err(&info->pdev->dev, "Wait timed out!\n");
                        /* Stop the state machine for the next command cycle */
                        pxa3xx_nand_stop(info);
                        break;
                }

                /* Only a few commands need several steps */
                if (command != NAND_CMD_PAGEPROG &&
                    command != NAND_CMD_READ0    &&
                    command != NAND_CMD_READOOB)
                        break;

                info->cur_chunk++;

                /* Check if the sequence is complete */
                if (info->cur_chunk == info->ntotalchunks &&
                    command != NAND_CMD_PAGEPROG)
                        break;

                /*
                 * After a split program command sequence has issued
                 * the command dispatch, the command sequence is complete.
                 */
                if (info->cur_chunk == (info->ntotalchunks + 1) &&
                    command == NAND_CMD_PAGEPROG &&
                    ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
                        break;

                if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
                        /* Last read: issue a 'last naked read' */
                        if (info->cur_chunk == info->ntotalchunks - 1)
                                ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
                        else
                                ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;

                /*
                 * If a split program command has no more data to transfer,
                 * the command dispatch must be issued to complete it.
                 */
                } else if (command == NAND_CMD_PAGEPROG &&
                           info->cur_chunk == info->ntotalchunks) {
                                ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
                }
        } while (1);

        info->state = STATE_IDLE;
}

static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
                struct nand_chip *chip, const uint8_t *buf, int oob_required,
                int page)
{
        chip->write_buf(mtd, buf, mtd->writesize);
        chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);

        return 0;
}

static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
                struct nand_chip *chip, uint8_t *buf, int oob_required,
                int page)
{
        struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
        struct pxa3xx_nand_info *info = host->info_data;

        chip->read_buf(mtd, buf, mtd->writesize);
        chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);

        if (info->retcode == ERR_CORERR && info->use_ecc) {
                mtd->ecc_stats.corrected += info->ecc_err_cnt;
        } else if (info->retcode == ERR_UNCORERR) {
                /*
                 * For a blank page (all 0xff) the HW calculates its ECC
                 * as 0, which differs from the ECC information within
                 * the OOB area: ignore such uncorrectable errors.
                 */
                if (is_buf_blank(buf, mtd->writesize))
                        info->retcode = ERR_NONE;
                else
                        mtd->ecc_stats.failed++;
        }

        return info->max_bitflips;
}

static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
{
        struct nand_chip *chip = mtd_to_nand(mtd);
        struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
        struct pxa3xx_nand_info *info = host->info_data;
        char retval = 0xFF;

        /* Return data only if a command has just filled the buffer */
        if (info->buf_start < info->buf_count)
                retval = info->data_buff[info->buf_start++];

        return retval;
}

static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
{
        struct nand_chip *chip = mtd_to_nand(mtd);
        struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
        struct pxa3xx_nand_info *info = host->info_data;
        u16 retval = 0xFFFF;

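        /* Only 16-bit-aligned reads from the internal buffer are supported */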
        if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
                retval = *((u16 *)(info->data_buff+info->buf_start));
                info->buf_start += 2;
        }
        return retval;
}
1401
1402static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1403{
1404        struct nand_chip *chip = mtd_to_nand(mtd);
1405        struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1406        struct pxa3xx_nand_info *info = host->info_data;
1407        int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1408
1409        memcpy(buf, info->data_buff + info->buf_start, real_len);
1410        info->buf_start += real_len;
1411}
1412
1413static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
1414                const uint8_t *buf, int len)
1415{
1416        struct nand_chip *chip = mtd_to_nand(mtd);
1417        struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1418        struct pxa3xx_nand_info *info = host->info_data;
1419        int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1420
1421        memcpy(info->data_buff + info->buf_start, buf, real_len);
1422        info->buf_start += real_len;
1423}
1424
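/*
 * Chip selection is handled through info->cs when a command is started,
 * so there is nothing to do here.
 */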
1425static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
1426{
1428}
1429
1430static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
1431{
1432        struct nand_chip *chip = mtd_to_nand(mtd);
1433        struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1434        struct pxa3xx_nand_info *info = host->info_data;
1435
1436        if (info->need_wait) {
1437                info->need_wait = 0;
1438                if (!wait_for_completion_timeout(&info->dev_ready,
1439                    CHIP_DELAY_TIMEOUT)) {
1440                        dev_err(&info->pdev->dev, "Ready timeout!\n");
1441                        return NAND_STATUS_FAIL;
1442                }
1443        }
1444
1445        /* pxa3xx_nand_send_command has already waited for command completion */
1446        if (this->state == FL_WRITING || this->state == FL_ERASING) {
1447                if (info->retcode == ERR_NONE)
1448                        return 0;
1449                else
1450                        return NAND_STATUS_FAIL;
1451        }
1452
1453        return NAND_STATUS_READY;
1454}
1455
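/*
 * Program safe defaults that are good enough to identify the flash: spare
 * area enabled, READ_ID_BYTES of ID data, and the slowest common timings.
 * The real configuration is applied once the chip is known.
 */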
1456static int pxa3xx_nand_config_ident(struct pxa3xx_nand_info *info)
1457{
1458        struct pxa3xx_nand_host *host = info->host[info->cs];
1459        struct platform_device *pdev = info->pdev;
1460        struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
1461        const struct nand_sdr_timings *timings;
1462
1463        /* Configure default flash values */
1464        info->chunk_size = PAGE_CHUNK_SIZE;
1465        info->reg_ndcr = 0x0; /* enable all interrupts */
1466        info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1467        info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
1468        info->reg_ndcr |= NDCR_SPARE_EN;
1469
1470        /* Use the common (ONFI mode 0) timings as a first try */
1471        timings = onfi_async_timing_mode_to_sdr_timings(0);
1472        if (IS_ERR(timings))
1473                return PTR_ERR(timings);
1474
1475        pxa3xx_nand_set_sdr_timing(host, timings);
1476        return 0;
1477}
1478
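/*
 * Complete NDCR with the geometry discovered during identification:
 * row address start position, pages per block and page size.
 */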
1479static void pxa3xx_nand_config_tail(struct pxa3xx_nand_info *info)
1480{
1481        struct pxa3xx_nand_host *host = info->host[info->cs];
1482        struct nand_chip *chip = &host->chip;
1483        struct mtd_info *mtd = nand_to_mtd(chip);
1484
1485        info->reg_ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
1486        info->reg_ndcr |= (chip->page_shift == 6) ? NDCR_PG_PER_BLK : 0;
1487        info->reg_ndcr |= (mtd->writesize == 2048) ? NDCR_PAGE_SZ : 0;
1488}
1489
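/*
 * With the "marvell,nand-keep-config" property, the configuration left by
 * the bootloader is trusted: read NDCR and the timing registers back
 * instead of reprogramming them.
 */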
1490static void pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
1491{
1492        struct platform_device *pdev = info->pdev;
1493        struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
1494        uint32_t ndcr = nand_readl(info, NDCR);
1495
1496        /* Set an initial chunk size */
1497        info->chunk_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
1498        info->reg_ndcr = ndcr &
1499                ~(NDCR_INT_MASK | NDCR_ND_ARB_EN | NFCV1_NDCR_ARB_CNTL);
1500        info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1501        info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
1502        info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
1503}
1504
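/*
 * Allocate the data + OOB bounce buffer and, when DMA is enabled, request
 * and configure the pxa DMA channel. The same NDDB FIFO address is used as
 * source and destination since the transfer direction is selected per
 * operation.
 */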
1505static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
1506{
1507        struct platform_device *pdev = info->pdev;
1508        struct dma_slave_config config;
1509        dma_cap_mask_t mask;
1510        struct pxad_param param;
1511        int ret;
1512
1513        info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1514        if (info->data_buff == NULL)
1515                return -ENOMEM;
1516        if (use_dma == 0)
1517                return 0;
1518
1519        ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1520        if (ret)
1521                return ret;
1522
1523        sg_init_one(&info->sg, info->data_buff, info->buf_size);
1524        dma_cap_zero(mask);
1525        dma_cap_set(DMA_SLAVE, mask);
1526        param.prio = PXAD_PRIO_LOWEST;
1527        param.drcmr = info->drcmr_dat;
1528        info->dma_chan = dma_request_slave_channel_compat(mask, pxad_filter_fn,
1529                                                          &param, &pdev->dev,
1530                                                          "data");
1531        if (!info->dma_chan) {
1532                dev_err(&pdev->dev, "unable to request data dma channel\n");
1533                return -ENODEV;
1534        }
1535
1536        memset(&config, 0, sizeof(config));
1537        config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1538        config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1539        config.src_addr = info->mmio_phys + NDDB;
1540        config.dst_addr = info->mmio_phys + NDDB;
1541        config.src_maxburst = 32;
1542        config.dst_maxburst = 32;
1543        ret = dmaengine_slave_config(info->dma_chan, &config);
1544        if (ret < 0) {
1545                dev_err(&info->pdev->dev,
1546                        "dma channel configuration failed: %d\n",
1547                        ret);
1548                return ret;
1549        }
1550
1551        /*
1552         * Now that DMA buffers are allocated, we turn on
1553         * DMA proper for I/O operations.
1554         */
1555        info->use_dma = 1;
1556        return 0;
1557}
1558
1559static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
1560{
1561        if (info->use_dma) {
1562                dmaengine_terminate_all(info->dma_chan);
1563                dma_release_channel(info->dma_chan);
1564        }
1565        kfree(info->data_buff);
1566}
1567
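/*
 * Map the required (strength, step size, page size) triplet onto one of the
 * fixed layouts the controller supports. For instance, 8-bit/512B on a 4KiB
 * page is served by splitting the page into four 1024-byte data chunks plus
 * a fifth, spare-only chunk (nfullchunks = 4, ntotalchunks = 5), corrected
 * at 16 bits per chunk.
 */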
1568static int pxa_ecc_init(struct pxa3xx_nand_info *info,
1569                        struct mtd_info *mtd,
1570                        int strength, int ecc_stepsize, int page_size)
1571{
1572        struct nand_chip *chip = mtd_to_nand(mtd);
1573        struct nand_ecc_ctrl *ecc = &chip->ecc;
1574
1575        if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
1576                info->nfullchunks = 1;
1577                info->ntotalchunks = 1;
1578                info->chunk_size = 2048;
1579                info->spare_size = 40;
1580                info->ecc_size = 24;
1581                ecc->mode = NAND_ECC_HW;
1582                ecc->size = 512;
1583                ecc->strength = 1;
1584
1585        } else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
1586                info->nfullchunks = 1;
1587                info->ntotalchunks = 1;
1588                info->chunk_size = 512;
1589                info->spare_size = 8;
1590                info->ecc_size = 8;
1591                ecc->mode = NAND_ECC_HW;
1592                ecc->size = 512;
1593                ecc->strength = 1;
1594
1595        /*
1596         * Required ECC: 4-bit correction per 512 bytes
1597         * Select: 16-bit correction per 2048 bytes
1598         */
1599        } else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
1600                info->ecc_bch = 1;
1601                info->nfullchunks = 1;
1602                info->ntotalchunks = 1;
1603                info->chunk_size = 2048;
1604                info->spare_size = 32;
1605                info->ecc_size = 32;
1606                ecc->mode = NAND_ECC_HW;
1607                ecc->size = info->chunk_size;
1608                mtd_set_ooblayout(mtd, &pxa3xx_ooblayout_ops);
1609                ecc->strength = 16;
1610
1611        } else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
1612                info->ecc_bch = 1;
1613                info->nfullchunks = 2;
1614                info->ntotalchunks = 2;
1615                info->chunk_size = 2048;
1616                info->spare_size = 32;
1617                info->ecc_size = 32;
1618                ecc->mode = NAND_ECC_HW;
1619                ecc->size = info->chunk_size;
1620                mtd_set_ooblayout(mtd, &pxa3xx_ooblayout_ops);
1621                ecc->strength = 16;
1622
1623        /*
1624         * Required ECC: 8-bit correction per 512 bytes
1625         * Select: 16-bit correction per 1024 bytes
1626         */
1627        } else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
1628                info->ecc_bch = 1;
1629                info->nfullchunks = 4;
1630                info->ntotalchunks = 5;
1631                info->chunk_size = 1024;
1632                info->spare_size = 0;
1633                info->last_chunk_size = 0;
1634                info->last_spare_size = 64;
1635                info->ecc_size = 32;
1636                ecc->mode = NAND_ECC_HW;
1637                ecc->size = info->chunk_size;
1638                mtd_set_ooblayout(mtd, &pxa3xx_ooblayout_ops);
1639                ecc->strength = 16;
1640        } else {
1641                dev_err(&info->pdev->dev,
1642                        "ECC strength %d at page size %d is not supported\n",
1643                        strength, page_size);
1644                return -ENODEV;
1645        }
1646
1647        dev_info(&info->pdev->dev, "ECC strength %d, ECC step size %d\n",
1648                 ecc->strength, ecc->size);
1649        return 0;
1650}
1651
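/*
 * Controller/chip bring-up: apply (or detect) the identification config,
 * run nand_scan_ident(), program the real timings, pick an ECC layout,
 * replace the small detection buffer with the full data + OOB one, and
 * finish with nand_scan_tail().
 */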
1652static int pxa3xx_nand_scan(struct mtd_info *mtd)
1653{
1654        struct nand_chip *chip = mtd_to_nand(mtd);
1655        struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1656        struct pxa3xx_nand_info *info = host->info_data;
1657        struct platform_device *pdev = info->pdev;
1658        struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
1659        int ret;
1660        uint16_t ecc_strength, ecc_step;
1661
1662        if (pdata->keep_config) {
1663                pxa3xx_nand_detect_config(info);
1664        } else {
1665                ret = pxa3xx_nand_config_ident(info);
1666                if (ret)
1667                        return ret;
1668        }
1669
1670        if (info->reg_ndcr & NDCR_DWIDTH_M)
1671                chip->options |= NAND_BUSWIDTH_16;
1672
1673        /* Device detection must be done with ECC disabled */
1674        if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
1675                nand_writel(info, NDECCCTRL, 0x0);
1676
1677        if (pdata->flash_bbt)
1678                chip->bbt_options |= NAND_BBT_USE_FLASH;
1679
1680        chip->ecc.strength = pdata->ecc_strength;
1681        chip->ecc.size = pdata->ecc_step_size;
1682
1683        ret = nand_scan_ident(mtd, 1, NULL);
1684        if (ret)
1685                return ret;
1686
1687        if (!pdata->keep_config) {
1688                ret = pxa3xx_nand_init(host);
1689                if (ret) {
1690                        dev_err(&info->pdev->dev, "Failed to init nand: %d\n",
1691                                ret);
1692                        return ret;
1693                }
1694        }
1695
1696        if (chip->bbt_options & NAND_BBT_USE_FLASH) {
1697                /*
1698                 * We'll use a bad block table stored in-flash and won't
1699                 * allow writing the bad block marker to the flash.
1700                 */
1701                chip->bbt_options |= NAND_BBT_NO_OOB_BBM;
1702                chip->bbt_td = &bbt_main_descr;
1703                chip->bbt_md = &bbt_mirror_descr;
1704        }
1705
1706        /*
1707         * If the page size is bigger than the FIFO size, let's check
1708         * we are given the right variant and then switch to the extended
1709         * (aka split) command handling.
1710         */
1711        if (mtd->writesize > PAGE_CHUNK_SIZE) {
1712                if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
1713                        chip->cmdfunc = nand_cmdfunc_extended;
1714                } else {
1715                        dev_err(&info->pdev->dev,
1716                                "unsupported page size on this variant\n");
1717                        return -ENODEV;
1718                }
1719        }
1720
1721        ecc_strength = chip->ecc.strength;
1722        ecc_step = chip->ecc.size;
1723        if (!ecc_strength || !ecc_step) {
1724                ecc_strength = chip->ecc_strength_ds;
1725                ecc_step = chip->ecc_step_ds;
1726        }
1727
1728        /* Set default ECC strength requirements on non-ONFI devices */
1729        if (ecc_strength < 1 && ecc_step < 1) {
1730                ecc_strength = 1;
1731                ecc_step = 512;
1732        }
1733
1734        ret = pxa_ecc_init(info, mtd, ecc_strength,
1735                           ecc_step, mtd->writesize);
1736        if (ret)
1737                return ret;
1738
1739        /* calculate addressing information */
1740        if (mtd->writesize >= 2048)
1741                host->col_addr_cycles = 2;
1742        else
1743                host->col_addr_cycles = 1;
1744
1745        /* release the initial buffer */
1746        kfree(info->data_buff);
1747
1748        /* allocate the real data + oob buffer */
1749        info->buf_size = mtd->writesize + mtd->oobsize;
1750        ret = pxa3xx_nand_init_buff(info);
1751        if (ret)
1752                return ret;
1753        info->oob_buff = info->data_buff + mtd->writesize;
1754
1755        if ((mtd->size >> chip->page_shift) > 65536)
1756                host->row_addr_cycles = 3;
1757        else
1758                host->row_addr_cycles = 2;
1759
1760        if (!pdata->keep_config)
1761                pxa3xx_nand_config_tail(info);
1762
1763        return nand_scan_tail(mtd);
1764}
1765
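/*
 * The info structure and the per-chip-select hosts are carved out of a
 * single allocation; the hosts live directly behind the info structure,
 * which is what the &info[1] pointer arithmetic below relies on.
 */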
1766static int alloc_nand_resource(struct platform_device *pdev)
1767{
1768        struct device_node *np = pdev->dev.of_node;
1769        struct pxa3xx_nand_platform_data *pdata;
1770        struct pxa3xx_nand_info *info;
1771        struct pxa3xx_nand_host *host;
1772        struct nand_chip *chip = NULL;
1773        struct mtd_info *mtd;
1774        struct resource *r;
1775        int ret, irq, cs;
1776
1777        pdata = dev_get_platdata(&pdev->dev);
1778        if (pdata->num_cs <= 0) {
1779                dev_err(&pdev->dev, "invalid number of chip selects\n");
1780                return -ENODEV;
1781        }
1782
1783        info = devm_kzalloc(&pdev->dev,
1784                            sizeof(*info) + sizeof(*host) * pdata->num_cs,
1785                            GFP_KERNEL);
1786        if (!info)
1787                return -ENOMEM;
1788
1789        info->pdev = pdev;
1790        info->variant = pxa3xx_nand_get_variant(pdev);
1791        for (cs = 0; cs < pdata->num_cs; cs++) {
1792                host = (void *)&info[1] + sizeof(*host) * cs;
1793                chip = &host->chip;
1794                nand_set_controller_data(chip, host);
1795                mtd = nand_to_mtd(chip);
1796                info->host[cs] = host;
1797                host->cs = cs;
1798                host->info_data = info;
1799                mtd->dev.parent = &pdev->dev;
1800                /* FIXME: all chips use the same device tree partitions */
1801                nand_set_flash_node(chip, np);
1802
1804                chip->ecc.read_page     = pxa3xx_nand_read_page_hwecc;
1805                chip->ecc.write_page    = pxa3xx_nand_write_page_hwecc;
1806                chip->controller        = &info->controller;
1807                chip->waitfunc          = pxa3xx_nand_waitfunc;
1808                chip->select_chip       = pxa3xx_nand_select_chip;
1809                chip->read_word         = pxa3xx_nand_read_word;
1810                chip->read_byte         = pxa3xx_nand_read_byte;
1811                chip->read_buf          = pxa3xx_nand_read_buf;
1812                chip->write_buf         = pxa3xx_nand_write_buf;
1813                chip->options           |= NAND_NO_SUBPAGE_WRITE;
1814                chip->cmdfunc           = nand_cmdfunc;
1815                chip->onfi_set_features = nand_onfi_get_set_features_notsupp;
1816                chip->onfi_get_features = nand_onfi_get_set_features_notsupp;
1817        }
1818
1819        nand_hw_control_init(chip->controller);
1820        info->clk = devm_clk_get(&pdev->dev, NULL);
1821        if (IS_ERR(info->clk)) {
1822                ret = PTR_ERR(info->clk);
1823                dev_err(&pdev->dev, "failed to get nand clock: %d\n", ret);
1824                return ret;
1825        }
1826        ret = clk_prepare_enable(info->clk);
1827        if (ret < 0)
1828                return ret;
1829
1830        if (!np && use_dma) {
1831                r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1832                if (r == NULL) {
1833                        dev_err(&pdev->dev,
1834                                "no resource defined for data DMA\n");
1835                        ret = -ENXIO;
1836                        goto fail_disable_clk;
1837                }
1838                info->drcmr_dat = r->start;
1839        }
1840
1841        irq = platform_get_irq(pdev, 0);
1842        if (irq < 0) {
1843                dev_err(&pdev->dev, "no IRQ resource defined\n");
1844                ret = -ENXIO;
1845                goto fail_disable_clk;
1846        }
1847
1848        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1849        info->mmio_base = devm_ioremap_resource(&pdev->dev, r);
1850        if (IS_ERR(info->mmio_base)) {
1851                ret = PTR_ERR(info->mmio_base);
1852                dev_err(&pdev->dev, "failed to map register space: %d\n", ret);
1853                goto fail_disable_clk;
1854        }
1855        info->mmio_phys = r->start;
1856
1857        /* Allocate a buffer to allow flash detection */
1858        info->buf_size = INIT_BUFFER_SIZE;
1859        info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1860        if (info->data_buff == NULL) {
1861                ret = -ENOMEM;
1862                goto fail_disable_clk;
1863        }
1864
1865        /* start with all interrupts disabled */
1866        disable_int(info, NDSR_MASK);
1867
1868        ret = request_threaded_irq(irq, pxa3xx_nand_irq,
1869                                   pxa3xx_nand_irq_thread, IRQF_ONESHOT,
1870                                   pdev->name, info);
1871        if (ret < 0) {
1872                dev_err(&pdev->dev, "failed to request IRQ: %d\n", ret);
1873                goto fail_free_buf;
1874        }
1875
1876        platform_set_drvdata(pdev, info);
1877
1878        return 0;
1879
1880fail_free_buf:
1881        /* the IRQ request failed, so there is no IRQ to free here */
1882        kfree(info->data_buff);
1883fail_disable_clk:
1884        clk_disable_unprepare(info->clk);
1885        return ret;
1886}
1887
1888static int pxa3xx_nand_remove(struct platform_device *pdev)
1889{
1890        struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
1891        struct pxa3xx_nand_platform_data *pdata;
1892        int irq, cs;
1893
1894        if (!info)
1895                return 0;
1896
1897        pdata = dev_get_platdata(&pdev->dev);
1898
1899        irq = platform_get_irq(pdev, 0);
1900        if (irq >= 0)
1901                free_irq(irq, info);
1902        pxa3xx_nand_free_buff(info);
1903
1904        /*
1905         * In the pxa3xx case, the DFI bus is shared between the SMC and NFC.
1906         * In order to prevent a lockup of the system bus, the DFI bus
1907         * arbitration is granted to SMC upon driver removal. This is done by
1908         * setting the x_ARB_CNTL bit, which also prevents the NAND from
1909         * accessing the bus anymore.
1910         */
1911        nand_writel(info, NDCR,
1912                    (nand_readl(info, NDCR) & ~NDCR_ND_ARB_EN) |
1913                    NFCV1_NDCR_ARB_CNTL);
1914        clk_disable_unprepare(info->clk);
1915
1916        for (cs = 0; cs < pdata->num_cs; cs++)
1917                nand_release(nand_to_mtd(&info->host[cs]->chip));
1918        return 0;
1919}
1920
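/*
 * Build platform data from the device tree. A return value of 0 without an
 * OF match means legacy board files must provide the platform data instead.
 *
 * A minimal, purely illustrative node (addresses are board-specific):
 *
 *	nand@43100000 {
 *		compatible = "marvell,pxa3xx-nand";
 *		marvell,nand-keep-config;
 *		num-cs = <1>;
 *	};
 */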
1921static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
1922{
1923        struct pxa3xx_nand_platform_data *pdata;
1924        struct device_node *np = pdev->dev.of_node;
1925        const struct of_device_id *of_id =
1926                        of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
1927
1928        if (!of_id)
1929                return 0;
1930
1931        pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1932        if (!pdata)
1933                return -ENOMEM;
1934
1935        if (of_get_property(np, "marvell,nand-enable-arbiter", NULL))
1936                pdata->enable_arbiter = 1;
1937        if (of_get_property(np, "marvell,nand-keep-config", NULL))
1938                pdata->keep_config = 1;
1939        of_property_read_u32(np, "num-cs", &pdata->num_cs);
1940
1941        pdev->dev.platform_data = pdata;
1942
1943        return 0;
1944}
1945
1946static int pxa3xx_nand_probe(struct platform_device *pdev)
1947{
1948        struct pxa3xx_nand_platform_data *pdata;
1949        struct pxa3xx_nand_info *info;
1950        int ret, cs, probe_success, dma_available;
1951
1952        dma_available = IS_ENABLED(CONFIG_ARM) &&
1953                (IS_ENABLED(CONFIG_ARCH_PXA) || IS_ENABLED(CONFIG_ARCH_MMP));
1954        if (use_dma && !dma_available) {
1955                use_dma = 0;
1956                dev_warn(&pdev->dev,
1957                         "This platform can't do DMA on this device\n");
1958        }
1959
1960        ret = pxa3xx_nand_probe_dt(pdev);
1961        if (ret)
1962                return ret;
1963
1964        pdata = dev_get_platdata(&pdev->dev);
1965        if (!pdata) {
1966                dev_err(&pdev->dev, "no platform data defined\n");
1967                return -ENODEV;
1968        }
1969
1970        ret = alloc_nand_resource(pdev);
1971        if (ret)
1972                return ret;
1973
1974        info = platform_get_drvdata(pdev);
1975        probe_success = 0;
1976        for (cs = 0; cs < pdata->num_cs; cs++) {
1977                struct mtd_info *mtd = nand_to_mtd(&info->host[cs]->chip);
1978
1979                /*
1980                 * The mtd name matches the one used in the 'mtdparts' kernel
1981                 * parameter. This name cannot be changed, or otherwise the
1982                 * user's mtd partition configuration would break.
1983                 */
1984                mtd->name = "pxa3xx_nand-0";
1985                info->cs = cs;
1986                ret = pxa3xx_nand_scan(mtd);
1987                if (ret) {
1988                        dev_warn(&pdev->dev, "failed to scan nand at cs %d\n",
1989                                cs);
1990                        continue;
1991                }
1992
1993                ret = mtd_device_register(mtd, pdata->parts[cs],
1994                                          pdata->nr_parts[cs]);
1995                if (!ret)
1996                        probe_success = 1;
1997        }
1998
1999        if (!probe_success) {
2000                pxa3xx_nand_remove(pdev);
2001                return -ENODEV;
2002        }
2003
2004        return 0;
2005}
2006
2007#ifdef CONFIG_PM
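/*
 * Refuse to suspend while a command is in flight; info->state is only
 * zero (STATE_IDLE) when the controller is quiescent.
 */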
2008static int pxa3xx_nand_suspend(struct device *dev)
2009{
2010        struct pxa3xx_nand_info *info = dev_get_drvdata(dev);
2011
2012        if (info->state) {
2013                dev_err(dev, "driver busy, state = %d\n", info->state);
2014                return -EAGAIN;
2015        }
2016
2017        clk_disable(info->clk);
2018        return 0;
2019}
2020
2021static int pxa3xx_nand_resume(struct device *dev)
2022{
2023        struct pxa3xx_nand_info *info = dev_get_drvdata(dev);
2024        int ret;
2025
2026        ret = clk_enable(info->clk);
2027        if (ret < 0)
2028                return ret;
2029
2030        /* We don't want to handle interrupts outside of an mtd operation */
2031        disable_int(info, NDCR_INT_MASK);
2032
2033        /*
2034         * Directly set the chip select to an invalid value so that
2035         * the driver reprograms the timings for the current chip
2036         * select at the beginning of cmdfunc.
2037         */
2038        info->cs = 0xff;
2039
2040        /*
2041         * As the spec says, NDSR is updated to 0x1800 when the nand
2042         * clock is disabled and re-enabled.
2043         * To prevent this from confusing the driver's state machine,
2044         * clear all status bits before resuming.
2045         */
2046        nand_writel(info, NDSR, NDSR_MASK);
2047
2048        return 0;
2049}
2050#else
2051#define pxa3xx_nand_suspend     NULL
2052#define pxa3xx_nand_resume      NULL
2053#endif
2054
2055static const struct dev_pm_ops pxa3xx_nand_pm_ops = {
2056        .suspend        = pxa3xx_nand_suspend,
2057        .resume         = pxa3xx_nand_resume,
2058};
2059
2060static struct platform_driver pxa3xx_nand_driver = {
2061        .driver = {
2062                .name   = "pxa3xx-nand",
2063                .of_match_table = pxa3xx_nand_dt_ids,
2064                .pm     = &pxa3xx_nand_pm_ops,
2065        },
2066        .probe          = pxa3xx_nand_probe,
2067        .remove         = pxa3xx_nand_remove,
2068};
2069
2070module_platform_driver(pxa3xx_nand_driver);
2071
2072MODULE_LICENSE("GPL");
2073MODULE_DESCRIPTION("PXA3xx NAND controller driver");
2074