/*
 * NAND Flash Controller Device Driver
 * Copyright © 2009-2010, Intel Corporation and its suppliers.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/bitfield.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "denali.h"

MODULE_LICENSE("GPL");

#define DENALI_NAND_NAME    "denali-nand"

/* for Indexed Addressing */
#define DENALI_INDEXED_CTRL	0x00
#define DENALI_INDEXED_DATA	0x10

#define DENALI_MAP00		(0 << 26)	/* direct access to buffer */
#define DENALI_MAP01		(1 << 26)	/* read/write pages in PIO */
#define DENALI_MAP10		(2 << 26)	/* high-level control plane */
#define DENALI_MAP11		(3 << 26)	/* direct controller access */

/* MAP11 access cycle type */
#define DENALI_MAP11_CMD	((DENALI_MAP11) | 0)	/* command cycle */
#define DENALI_MAP11_ADDR	((DENALI_MAP11) | 1)	/* address cycle */
#define DENALI_MAP11_DATA	((DENALI_MAP11) | 2)	/* data cycle */

/* MAP10 commands */
#define DENALI_ERASE		0x01

#define DENALI_BANK(denali)	((denali)->active_bank << 24)

#define DENALI_INVALID_BANK	-1
#define DENALI_NR_BANKS		4

  54static inline struct denali_nand_info *mtd_to_denali(struct mtd_info *mtd)
  55{
  56        return container_of(mtd_to_nand(mtd), struct denali_nand_info, nand);
  57}

/*
 * Direct Addressing - the slave address forms the control information (command
 * type, bank, block, and page address).  The slave data is the actual data to
 * be transferred.  This mode requires 28 bits of address region allocated.
 */
  64static u32 denali_direct_read(struct denali_nand_info *denali, u32 addr)
  65{
  66        return ioread32(denali->host + addr);
  67}
  68
  69static void denali_direct_write(struct denali_nand_info *denali, u32 addr,
  70                                u32 data)
  71{
  72        iowrite32(data, denali->host + addr);
  73}

/*
 * Indexed Addressing - address translation module intervenes in passing the
 * control information.  This mode reduces the required address range.  The
 * control information and transferred data are latched by the registers in
 * the translation module.
 */
  81static u32 denali_indexed_read(struct denali_nand_info *denali, u32 addr)
  82{
  83        iowrite32(addr, denali->host + DENALI_INDEXED_CTRL);
  84        return ioread32(denali->host + DENALI_INDEXED_DATA);
  85}
  86
  87static void denali_indexed_write(struct denali_nand_info *denali, u32 addr,
  88                                 u32 data)
  89{
  90        iowrite32(addr, denali->host + DENALI_INDEXED_CTRL);
  91        iowrite32(data, denali->host + DENALI_INDEXED_DATA);
  92}

/*
 * Use the configuration feature register to determine the maximum number of
 * banks that the hardware supports.
 */
  98static void denali_detect_max_banks(struct denali_nand_info *denali)
  99{
 100        uint32_t features = ioread32(denali->reg + FEATURES);
 101
 102        denali->max_banks = 1 << FIELD_GET(FEATURES__N_BANKS, features);
 103
 104        /* the encoding changed from rev 5.0 to 5.1 */
 105        if (denali->revision < 0x0501)
 106                denali->max_banks <<= 1;
 107}
 108
 109static void denali_enable_irq(struct denali_nand_info *denali)
 110{
 111        int i;
 112
 113        for (i = 0; i < DENALI_NR_BANKS; i++)
 114                iowrite32(U32_MAX, denali->reg + INTR_EN(i));
 115        iowrite32(GLOBAL_INT_EN_FLAG, denali->reg + GLOBAL_INT_ENABLE);
 116}
 117
 118static void denali_disable_irq(struct denali_nand_info *denali)
 119{
 120        int i;
 121
 122        for (i = 0; i < DENALI_NR_BANKS; i++)
 123                iowrite32(0, denali->reg + INTR_EN(i));
 124        iowrite32(0, denali->reg + GLOBAL_INT_ENABLE);
 125}
 126
 127static void denali_clear_irq(struct denali_nand_info *denali,
 128                             int bank, uint32_t irq_status)
 129{
 130        /* write one to clear bits */
 131        iowrite32(irq_status, denali->reg + INTR_STATUS(bank));
 132}
 133
 134static void denali_clear_irq_all(struct denali_nand_info *denali)
 135{
 136        int i;
 137
 138        for (i = 0; i < DENALI_NR_BANKS; i++)
 139                denali_clear_irq(denali, i, U32_MAX);
 140}
 141
/*
 * Interrupt handler shared by all banks: scan each bank's status register,
 * acknowledge everything observed, and accumulate the bits of the currently
 * active bank into denali->irq_status for denali_wait_for_irq() to consume.
 */
static irqreturn_t denali_isr(int irq, void *dev_id)
{
	struct denali_nand_info *denali = dev_id;
	irqreturn_t ret = IRQ_NONE;
	uint32_t irq_status;
	int i;

	spin_lock(&denali->irq_lock);

	for (i = 0; i < DENALI_NR_BANKS; i++) {
		irq_status = ioread32(denali->reg + INTR_STATUS(i));
		if (irq_status)
			ret = IRQ_HANDLED;

		/* acknowledge exactly the bits we just read */
		denali_clear_irq(denali, i, irq_status);

		/* only the active bank feeds waiters */
		if (i != denali->active_bank)
			continue;

		denali->irq_status |= irq_status;

		/* wake denali_wait_for_irq() once an awaited bit arrived */
		if (denali->irq_status & denali->irq_mask)
			complete(&denali->complete);
	}

	spin_unlock(&denali->irq_lock);

	return ret;
}
 171
 172static void denali_reset_irq(struct denali_nand_info *denali)
 173{
 174        unsigned long flags;
 175
 176        spin_lock_irqsave(&denali->irq_lock, flags);
 177        denali->irq_status = 0;
 178        denali->irq_mask = 0;
 179        spin_unlock_irqrestore(&denali->irq_lock, flags);
 180}
 181
/*
 * Wait until any bit of @irq_mask has been accumulated by the ISR.
 *
 * Returns the accumulated interrupt status, or 0 on a one-second timeout.
 * Callers must call denali_reset_irq() before kicking the operation they
 * intend to wait for.
 */
static uint32_t denali_wait_for_irq(struct denali_nand_info *denali,
				    uint32_t irq_mask)
{
	unsigned long time_left, flags;
	uint32_t irq_status;

	spin_lock_irqsave(&denali->irq_lock, flags);

	irq_status = denali->irq_status;

	if (irq_mask & irq_status) {
		/* return immediately if the IRQ has already happened. */
		spin_unlock_irqrestore(&denali->irq_lock, flags);
		return irq_status;
	}

	/*
	 * Publish the mask and rearm the completion while still holding the
	 * lock, so the ISR cannot signal between the check above and
	 * reinit_completion().
	 */
	denali->irq_mask = irq_mask;
	reinit_completion(&denali->complete);
	spin_unlock_irqrestore(&denali->irq_lock, flags);

	time_left = wait_for_completion_timeout(&denali->complete,
						msecs_to_jiffies(1000));
	if (!time_left) {
		dev_err(denali->dev, "timeout while waiting for irq 0x%x\n",
			irq_mask);
		return 0;
	}

	return denali->irq_status;
}
 212
 213static uint32_t denali_check_irq(struct denali_nand_info *denali)
 214{
 215        unsigned long flags;
 216        uint32_t irq_status;
 217
 218        spin_lock_irqsave(&denali->irq_lock, flags);
 219        irq_status = denali->irq_status;
 220        spin_unlock_irqrestore(&denali->irq_lock, flags);
 221
 222        return irq_status;
 223}
 224
 225static void denali_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
 226{
 227        struct denali_nand_info *denali = mtd_to_denali(mtd);
 228        u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
 229        int i;
 230
 231        for (i = 0; i < len; i++)
 232                buf[i] = denali->host_read(denali, addr);
 233}
 234
 235static void denali_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
 236{
 237        struct denali_nand_info *denali = mtd_to_denali(mtd);
 238        u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
 239        int i;
 240
 241        for (i = 0; i < len; i++)
 242                denali->host_write(denali, addr, buf[i]);
 243}
 244
 245static void denali_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
 246{
 247        struct denali_nand_info *denali = mtd_to_denali(mtd);
 248        u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
 249        uint16_t *buf16 = (uint16_t *)buf;
 250        int i;
 251
 252        for (i = 0; i < len / 2; i++)
 253                buf16[i] = denali->host_read(denali, addr);
 254}
 255
 256static void denali_write_buf16(struct mtd_info *mtd, const uint8_t *buf,
 257                               int len)
 258{
 259        struct denali_nand_info *denali = mtd_to_denali(mtd);
 260        u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
 261        const uint16_t *buf16 = (const uint16_t *)buf;
 262        int i;
 263
 264        for (i = 0; i < len / 2; i++)
 265                denali->host_write(denali, addr, buf16[i]);
 266}
 267
 268static uint8_t denali_read_byte(struct mtd_info *mtd)
 269{
 270        uint8_t byte;
 271
 272        denali_read_buf(mtd, &byte, 1);
 273
 274        return byte;
 275}
 276
 277static void denali_write_byte(struct mtd_info *mtd, uint8_t byte)
 278{
 279        denali_write_buf(mtd, &byte, 1);
 280}
 281
 282static uint16_t denali_read_word(struct mtd_info *mtd)
 283{
 284        uint16_t word;
 285
 286        denali_read_buf16(mtd, (uint8_t *)&word, 2);
 287
 288        return word;
 289}
 290
/*
 * ->cmd_ctrl hook: issue one command or address cycle on the data/command
 * interface (MAP11).  Cycles with neither CLE nor ALE set are ignored here;
 * data cycles go through ->read_buf/->write_buf instead.
 */
static void denali_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	uint32_t type;

	if (ctrl & NAND_CLE)
		type = DENALI_MAP11_CMD;
	else if (ctrl & NAND_ALE)
		type = DENALI_MAP11_ADDR;
	else
		return;

	/*
	 * Some commands are followed by chip->dev_ready or chip->waitfunc.
	 * irq_status must be cleared here to catch the R/B# interrupt later.
	 */
	if (ctrl & NAND_CTRL_CHANGE)
		denali_reset_irq(denali);

	denali->host_write(denali, DENALI_BANK(denali) | type, dat);
}
 312
 313static int denali_dev_ready(struct mtd_info *mtd)
 314{
 315        struct denali_nand_info *denali = mtd_to_denali(mtd);
 316
 317        return !!(denali_check_irq(denali) & INTR__INT_ACT);
 318}
 319
/*
 * Re-examine sectors flagged as uncorrectable against the erased-page
 * pattern (all 0xff): a truly erased sector is not an ECC failure.
 *
 * @uncor_ecc_flags: bitmap with one bit per ECC step flagged uncorrectable
 * @max_bitflips: running per-sector bitflip maximum so far
 *
 * Returns the updated max_bitflips.  Updates mtd->ecc_stats: .failed for
 * genuine failures, .corrected for bitflips found in nearly-erased sectors.
 */
static int denali_check_erased_page(struct mtd_info *mtd,
				    struct nand_chip *chip, uint8_t *buf,
				    unsigned long uncor_ecc_flags,
				    unsigned int max_bitflips)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	/* ECC codes start in the OOB area, right after the skip bytes */
	uint8_t *ecc_code = chip->oob_poi + denali->oob_skip_bytes;
	int ecc_steps = chip->ecc.steps;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	int i, stat;

	for (i = 0; i < ecc_steps; i++) {
		if (!(uncor_ecc_flags & BIT(i)))
			continue;

		stat = nand_check_erased_ecc_chunk(buf, ecc_size,
						  ecc_code, ecc_bytes,
						  NULL, 0,
						  chip->ecc.strength);
		if (stat < 0) {
			/* genuinely uncorrectable */
			mtd->ecc_stats.failed++;
		} else {
			/* erased sector with up to ecc.strength bitflips */
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}

		/* NOTE: advanced unconditionally, also for clean sectors */
		buf += ecc_size;
		ecc_code += ecc_bytes;
	}

	return max_bitflips;
}
 353
/*
 * ECC status collection for controllers that correct data in-flight
 * (DENALI_CAP_HW_ECC_FIXUP): read the per-bank correction-info register
 * instead of walking an error list.
 *
 * Returns the maximum per-sector bitflip count, or 0 with every bit of
 * *uncor_ecc_flags set when the hardware reported an uncorrectable error
 * (the caller must then run the erased-page check on all sectors).
 */
static int denali_hw_ecc_fixup(struct mtd_info *mtd,
			       struct denali_nand_info *denali,
			       unsigned long *uncor_ecc_flags)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int bank = denali->active_bank;
	uint32_t ecc_cor;
	unsigned int max_bitflips;

	ecc_cor = ioread32(denali->reg + ECC_COR_INFO(bank));
	ecc_cor >>= ECC_COR_INFO__SHIFT(bank);

	if (ecc_cor & ECC_COR_INFO__UNCOR_ERR) {
		/*
		 * This flag is set when uncorrectable error occurs at least in
		 * one ECC sector.  We can not know "how many sectors", or
		 * "which sector(s)".  We need erase-page check for all sectors.
		 */
		*uncor_ecc_flags = GENMASK(chip->ecc.steps - 1, 0);
		return 0;
	}

	max_bitflips = FIELD_GET(ECC_COR_INFO__MAX_ERRORS, ecc_cor);

	/*
	 * The register holds the maximum of per-sector corrected bitflips.
	 * This is suitable for the return value of the ->read_page() callback.
	 * Unfortunately, we can not know the total number of corrected bits in
	 * the page.  Increase the stats by max_bitflips. (compromised solution)
	 */
	mtd->ecc_stats.corrected += max_bitflips;

	return max_bitflips;
}
 388
/*
 * ECC fixup for controllers that report errors through the
 * ECC_ERROR_ADDRESS / ERR_CORRECTION_INFO register pair: walk the error
 * list and flip the bad bits in @buf by hand.
 *
 * Sets bits in *uncor_ecc_flags for sectors flagged uncorrectable.
 * Returns the maximum per-sector bitflip count, or -EIO if the
 * ECC_TRANSACTION_DONE interrupt never arrives.
 */
static int denali_sw_ecc_fixup(struct mtd_info *mtd,
			       struct denali_nand_info *denali,
			       unsigned long *uncor_ecc_flags, uint8_t *buf)
{
	unsigned int ecc_size = denali->nand.ecc.size;
	unsigned int bitflips = 0;
	unsigned int max_bitflips = 0;
	uint32_t err_addr, err_cor_info;
	unsigned int err_byte, err_sector, err_device;
	uint8_t err_cor_value;
	unsigned int prev_sector = 0;
	uint32_t irq_status;

	denali_reset_irq(denali);

	do {
		/* reading ERR_CORRECTION_INFO pops the next list entry */
		err_addr = ioread32(denali->reg + ECC_ERROR_ADDRESS);
		err_sector = FIELD_GET(ECC_ERROR_ADDRESS__SECTOR, err_addr);
		err_byte = FIELD_GET(ECC_ERROR_ADDRESS__OFFSET, err_addr);

		err_cor_info = ioread32(denali->reg + ERR_CORRECTION_INFO);
		err_cor_value = FIELD_GET(ERR_CORRECTION_INFO__BYTE,
					  err_cor_info);
		err_device = FIELD_GET(ERR_CORRECTION_INFO__DEVICE,
				       err_cor_info);

		/* reset the bitflip counter when crossing ECC sector */
		if (err_sector != prev_sector)
			bitflips = 0;

		if (err_cor_info & ERR_CORRECTION_INFO__UNCOR) {
			/*
			 * Check later if this is a real ECC error, or
			 * an erased sector.
			 */
			*uncor_ecc_flags |= BIT(err_sector);
		} else if (err_byte < ecc_size) {
			/*
			 * If err_byte is larger than ecc_size, the error
			 * happened in the OOB area, so ignore it.  err_device
			 * identifies which of the interleaved NAND devices
			 * the error belongs to when more than one NAND is
			 * connected.
			 */
			int offset;
			unsigned int flips_in_byte;

			offset = (err_sector * ecc_size + err_byte) *
					denali->devs_per_cs + err_device;

			/* correct the ECC error */
			flips_in_byte = hweight8(buf[offset] ^ err_cor_value);
			buf[offset] ^= err_cor_value;
			mtd->ecc_stats.corrected += flips_in_byte;
			bitflips += flips_in_byte;

			max_bitflips = max(max_bitflips, bitflips);
		}

		prev_sector = err_sector;
	} while (!(err_cor_info & ERR_CORRECTION_INFO__LAST_ERR));

	/*
	 * Once handle all ECC errors, controller will trigger an
	 * ECC_TRANSACTION_DONE interrupt.
	 */
	irq_status = denali_wait_for_irq(denali, INTR__ECC_TRANSACTION_DONE);
	if (!(irq_status & INTR__ECC_TRANSACTION_DONE))
		return -EIO;

	return max_bitflips;
}
 461
/*
 * Kick a DMA transfer using the 64-bit command protocol: three MAP10
 * writes to the same mode address.  @write is 1 for program, 0 for read.
 */
static void denali_setup_dma64(struct denali_nand_info *denali,
			       dma_addr_t dma_addr, int page, int write)
{
	uint32_t mode;
	const int page_count = 1;

	mode = DENALI_MAP10 | DENALI_BANK(denali) | page;

	/* DMA is a three step process */

	/*
	 * 1. setup transfer type, interrupt when complete,
	 *    burst len = 64 bytes, the number of pages
	 */
	denali->host_write(denali, mode,
			   0x01002000 | (64 << 16) | (write << 8) | page_count);

	/* 2. set memory low address */
	denali->host_write(denali, mode, lower_32_bits(dma_addr));

	/* 3. set memory high address */
	denali->host_write(denali, mode, upper_32_bits(dma_addr));
}
 485
/*
 * Kick a DMA transfer using the 32-bit command protocol: four MAP10
 * writes, each selecting its sub-command via the data word (0x2N00).
 * @write is 1 for program, 0 for read.
 */
static void denali_setup_dma32(struct denali_nand_info *denali,
			       dma_addr_t dma_addr, int page, int write)
{
	uint32_t mode;
	const int page_count = 1;

	mode = DENALI_MAP10 | DENALI_BANK(denali);

	/* DMA is a four step process */

	/* 1. setup transfer type and # of pages */
	denali->host_write(denali, mode | page,
			   0x2000 | (write << 8) | page_count);

	/* 2. set memory high address bits 23:8 */
	denali->host_write(denali, mode | ((dma_addr >> 16) << 8), 0x2200);

	/* 3. set memory low address bits 23:8 */
	denali->host_write(denali, mode | ((dma_addr & 0xffff) << 8), 0x2300);

	/* 4. interrupt when complete, burst len = 64 bytes */
	denali->host_write(denali, mode | 0x14000, 0x2400);
}
 509
/*
 * Read one page over MAP01 by PIO.
 *
 * Returns 0 on success, -EIO if the PAGE_XFER_INC interrupt never arrives,
 * -EBADMSG on an ECC error.  A page the controller flags as erased is
 * reported as all-0xff data.
 */
static int denali_pio_read(struct denali_nand_info *denali, void *buf,
			   size_t size, int page, int raw)
{
	u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
	uint32_t *buf32 = (uint32_t *)buf;
	uint32_t irq_status, ecc_err_mask;
	int i;

	/* HW-fixup controllers only flag uncorrectable errors */
	if (denali->caps & DENALI_CAP_HW_ECC_FIXUP)
		ecc_err_mask = INTR__ECC_UNCOR_ERR;
	else
		ecc_err_mask = INTR__ECC_ERR;

	denali_reset_irq(denali);

	for (i = 0; i < size / 4; i++)
		*buf32++ = denali->host_read(denali, addr);

	irq_status = denali_wait_for_irq(denali, INTR__PAGE_XFER_INC);
	if (!(irq_status & INTR__PAGE_XFER_INC))
		return -EIO;

	if (irq_status & INTR__ERASED_PAGE)
		memset(buf, 0xff, size);

	return irq_status & ecc_err_mask ? -EBADMSG : 0;
}
 537
 538static int denali_pio_write(struct denali_nand_info *denali,
 539                            const void *buf, size_t size, int page, int raw)
 540{
 541        u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
 542        const uint32_t *buf32 = (uint32_t *)buf;
 543        uint32_t irq_status;
 544        int i;
 545
 546        denali_reset_irq(denali);
 547
 548        for (i = 0; i < size / 4; i++)
 549                denali->host_write(denali, addr, *buf32++);
 550
 551        irq_status = denali_wait_for_irq(denali,
 552                                INTR__PROGRAM_COMP | INTR__PROGRAM_FAIL);
 553        if (!(irq_status & INTR__PROGRAM_COMP))
 554                return -EIO;
 555
 556        return 0;
 557}
 558
 559static int denali_pio_xfer(struct denali_nand_info *denali, void *buf,
 560                           size_t size, int page, int raw, int write)
 561{
 562        if (write)
 563                return denali_pio_write(denali, buf, size, page, raw);
 564        else
 565                return denali_pio_read(denali, buf, size, page, raw);
 566}
 567
/*
 * Transfer one page by DMA, falling back to PIO if the buffer cannot be
 * mapped.  Returns 0 on success, -EIO on a missing DMA completion,
 * -EBADMSG on ECC errors.
 */
static int denali_dma_xfer(struct denali_nand_info *denali, void *buf,
			   size_t size, int page, int raw, int write)
{
	dma_addr_t dma_addr;
	uint32_t irq_mask, irq_status, ecc_err_mask;
	enum dma_data_direction dir = write ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	int ret = 0;

	dma_addr = dma_map_single(denali->dev, buf, size, dir);
	if (dma_mapping_error(denali->dev, dma_addr)) {
		dev_dbg(denali->dev, "Failed to DMA-map buffer. Trying PIO.\n");
		return denali_pio_xfer(denali, buf, size, page, raw, write);
	}

	if (write) {
		/*
		 * INTR__PROGRAM_COMP is never asserted for the DMA transfer.
		 * We can use INTR__DMA_CMD_COMP instead.  This flag is asserted
		 * when the page program is completed.
		 */
		irq_mask = INTR__DMA_CMD_COMP | INTR__PROGRAM_FAIL;
		ecc_err_mask = 0;
	} else if (denali->caps & DENALI_CAP_HW_ECC_FIXUP) {
		irq_mask = INTR__DMA_CMD_COMP;
		ecc_err_mask = INTR__ECC_UNCOR_ERR;
	} else {
		irq_mask = INTR__DMA_CMD_COMP;
		ecc_err_mask = INTR__ECC_ERR;
	}

	iowrite32(DMA_ENABLE__FLAG, denali->reg + DMA_ENABLE);
	/*
	 * The ->setup_dma() hook kicks DMA by using the data/command
	 * interface, which belongs to a different AXI port from the
	 * register interface.  Read back the register to avoid a race.
	 */
	ioread32(denali->reg + DMA_ENABLE);

	denali_reset_irq(denali);
	denali->setup_dma(denali, dma_addr, page, write);

	irq_status = denali_wait_for_irq(denali, irq_mask);
	if (!(irq_status & INTR__DMA_CMD_COMP))
		ret = -EIO;
	else if (irq_status & ecc_err_mask)
		ret = -EBADMSG;

	iowrite32(0, denali->reg + DMA_ENABLE);

	dma_unmap_single(denali->dev, dma_addr, size, dir);

	/* an erased page is reported as all-0xff data */
	if (irq_status & INTR__ERASED_PAGE)
		memset(buf, 0xff, size);

	return ret;
}
 624
 625static int denali_data_xfer(struct denali_nand_info *denali, void *buf,
 626                            size_t size, int page, int raw, int write)
 627{
 628        iowrite32(raw ? 0 : ECC_ENABLE__FLAG, denali->reg + ECC_ENABLE);
 629        iowrite32(raw ? TRANSFER_SPARE_REG__FLAG : 0,
 630                  denali->reg + TRANSFER_SPARE_REG);
 631
 632        if (denali->dma_avail)
 633                return denali_dma_xfer(denali, buf, size, page, raw, write);
 634        else
 635                return denali_pio_xfer(denali, buf, size, page, raw, write);
 636}
 637
/*
 * Read or write only the OOB area of @page (@write selects the direction).
 *
 * On the device, data and ECC are interleaved:
 *   [BBM/skip bytes][sector0][ecc0][sector1][ecc1]...[free OOB]
 * Walk that layout with column-change operations so that chip->oob_poi is
 * seen as contiguous OOB: skip bytes first, then all ECC, then free bytes.
 */
static void denali_oob_xfer(struct mtd_info *mtd, struct nand_chip *chip,
			    int page, int write)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	int writesize = mtd->writesize;
	int oobsize = mtd->oobsize;
	uint8_t *bufpoi = chip->oob_poi;
	int ecc_steps = chip->ecc.steps;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	int oob_skip = denali->oob_skip_bytes;
	size_t size = writesize + oobsize;
	int i, pos, len;

	/* BBM at the beginning of the OOB area */
	if (write)
		nand_prog_page_begin_op(chip, page, writesize, bufpoi,
					oob_skip);
	else
		nand_read_page_op(chip, page, writesize, bufpoi, oob_skip);
	bufpoi += oob_skip;

	/* OOB ECC */
	for (i = 0; i < ecc_steps; i++) {
		pos = ecc_size + i * (ecc_size + ecc_bytes);
		len = ecc_bytes;

		/* account for the skip bytes once pos crosses writesize */
		if (pos >= writesize)
			pos += oob_skip;
		else if (pos + len > writesize)
			len = writesize - pos;

		if (write)
			nand_change_write_column_op(chip, pos, bufpoi, len,
						    false);
		else
			nand_change_read_column_op(chip, pos, bufpoi, len,
						   false);
		bufpoi += len;
		/* ECC chunk straddling the writesize boundary: do the rest */
		if (len < ecc_bytes) {
			len = ecc_bytes - len;
			if (write)
				nand_change_write_column_op(chip, writesize +
							    oob_skip, bufpoi,
							    len, false);
			else
				nand_change_read_column_op(chip, writesize +
							   oob_skip, bufpoi,
							   len, false);
			bufpoi += len;
		}
	}

	/* OOB free */
	len = oobsize - (bufpoi - chip->oob_poi);
	if (write)
		nand_change_write_column_op(chip, size - len, bufpoi, len,
					    false);
	else
		nand_change_read_column_op(chip, size - len, bufpoi, len,
					   false);
}
 700
/*
 * ->read_page_raw hook: transfer the whole page + OOB (no ECC) into the
 * scratch buffer, then de-interleave the on-device syndrome layout
 * ([sector][ecc]... plus skip bytes) into @buf and chip->oob_poi.
 *
 * Returns 0 on success or the error from the raw transfer.
 */
static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
				uint8_t *buf, int oob_required, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	int writesize = mtd->writesize;
	int oobsize = mtd->oobsize;
	int ecc_steps = chip->ecc.steps;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	void *tmp_buf = denali->buf;
	int oob_skip = denali->oob_skip_bytes;
	size_t size = writesize + oobsize;
	int ret, i, pos, len;

	ret = denali_data_xfer(denali, tmp_buf, size, page, 1, 0);
	if (ret)
		return ret;

	/* Arrange the buffer for syndrome payload/ecc layout */
	if (buf) {
		for (i = 0; i < ecc_steps; i++) {
			pos = i * (ecc_size + ecc_bytes);
			len = ecc_size;

			/* account for the skip bytes past writesize */
			if (pos >= writesize)
				pos += oob_skip;
			else if (pos + len > writesize)
				len = writesize - pos;

			memcpy(buf, tmp_buf + pos, len);
			buf += len;
			/* payload chunk straddling the boundary: copy rest */
			if (len < ecc_size) {
				len = ecc_size - len;
				memcpy(buf, tmp_buf + writesize + oob_skip,
				       len);
				buf += len;
			}
		}
	}

	if (oob_required) {
		uint8_t *oob = chip->oob_poi;

		/* BBM at the beginning of the OOB area */
		memcpy(oob, tmp_buf + writesize, oob_skip);
		oob += oob_skip;

		/* OOB ECC */
		for (i = 0; i < ecc_steps; i++) {
			pos = ecc_size + i * (ecc_size + ecc_bytes);
			len = ecc_bytes;

			if (pos >= writesize)
				pos += oob_skip;
			else if (pos + len > writesize)
				len = writesize - pos;

			memcpy(oob, tmp_buf + pos, len);
			oob += len;
			/* ECC chunk straddling the boundary: copy the rest */
			if (len < ecc_bytes) {
				len = ecc_bytes - len;
				memcpy(oob, tmp_buf + writesize + oob_skip,
				       len);
				oob += len;
			}
		}

		/* OOB free */
		len = oobsize - (oob - chip->oob_poi);
		memcpy(oob, tmp_buf + size - len, len);
	}

	return 0;
}
 775
 776static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
 777                           int page)
 778{
 779        denali_oob_xfer(mtd, chip, page, 0);
 780
 781        return 0;
 782}
 783
/*
 * ->write_oob hook: program only the OOB area of @page, then finish the
 * program operation and return its status.
 */
static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
			    int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);

	/* clear stale interrupt state before kicking the program op */
	denali_reset_irq(denali);

	denali_oob_xfer(mtd, chip, page, 1);

	return nand_prog_page_end_op(chip);
}
 795
/*
 * ->read_page hook with ECC.  Returns the maximum bitflip count for the
 * page, or a negative error.  Sectors flagged uncorrectable are re-checked
 * against the erased-page pattern before being counted as failures.
 */
static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
			    uint8_t *buf, int oob_required, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	unsigned long uncor_ecc_flags = 0;
	int stat = 0;
	int ret;

	ret = denali_data_xfer(denali, buf, mtd->writesize, page, 0, 0);
	if (ret && ret != -EBADMSG)
		return ret;

	if (denali->caps & DENALI_CAP_HW_ECC_FIXUP)
		stat = denali_hw_ecc_fixup(mtd, denali, &uncor_ecc_flags);
	else if (ret == -EBADMSG)
		stat = denali_sw_ecc_fixup(mtd, denali, &uncor_ecc_flags, buf);

	if (stat < 0)
		return stat;

	if (uncor_ecc_flags) {
		/* fetch the OOB so the erased-page check can see the ECC */
		ret = denali_read_oob(mtd, chip, page);
		if (ret)
			return ret;

		stat = denali_check_erased_page(mtd, chip, buf,
						uncor_ecc_flags, stat);
	}

	return stat;
}
 827
/*
 * ecc->write_page_raw hook: write a full page (payload + OOB) with ECC
 * disabled.
 *
 * The controller stores pages in a syndrome layout: each ECC step's
 * payload chunk is immediately followed by its ECC bytes, and
 * oob_skip_bytes are reserved at the start of the spare area for the
 * bad block marker.  This function rearranges the framework's linear
 * view (buf for payload, chip->oob_poi for OOB) into that on-flash
 * layout inside denali->buf, then transfers the whole thing raw.
 */
static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
				 const uint8_t *buf, int oob_required, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	int writesize = mtd->writesize;
	int oobsize = mtd->oobsize;
	int ecc_steps = chip->ecc.steps;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	void *tmp_buf = denali->buf;
	int oob_skip = denali->oob_skip_bytes;
	size_t size = writesize + oobsize;
	int i, pos, len;

	/*
	 * Fill the buffer with 0xff first except the full page transfer.
	 * This simplifies the logic.
	 */
	if (!buf || !oob_required)
		memset(tmp_buf, 0xff, size);

	/* Arrange the buffer for syndrome payload/ecc layout */
	if (buf) {
		for (i = 0; i < ecc_steps; i++) {
			pos = i * (ecc_size + ecc_bytes);
			len = ecc_size;

			if (pos >= writesize)
				pos += oob_skip;
			else if (pos + len > writesize)
				len = writesize - pos;

			memcpy(tmp_buf + pos, buf, len);
			buf += len;
			if (len < ecc_size) {
				/* chunk straddles the main/spare boundary */
				len = ecc_size - len;
				memcpy(tmp_buf + writesize + oob_skip, buf,
				       len);
				buf += len;
			}
		}
	}

	if (oob_required) {
		const uint8_t *oob = chip->oob_poi;

		/* BBM at the beginning of the OOB area */
		memcpy(tmp_buf + writesize, oob, oob_skip);
		oob += oob_skip;

		/* OOB ECC */
		for (i = 0; i < ecc_steps; i++) {
			pos = ecc_size + i * (ecc_size + ecc_bytes);
			len = ecc_bytes;

			if (pos >= writesize)
				pos += oob_skip;
			else if (pos + len > writesize)
				len = writesize - pos;

			memcpy(tmp_buf + pos, oob, len);
			oob += len;
			if (len < ecc_bytes) {
				/* ECC chunk straddles the boundary, too */
				len = ecc_bytes - len;
				memcpy(tmp_buf + writesize + oob_skip, oob,
				       len);
				oob += len;
			}
		}

		/* OOB free */
		len = oobsize - (oob - chip->oob_poi);
		memcpy(tmp_buf + size - len, oob, len);
	}

	return denali_data_xfer(denali, tmp_buf, size, page, 1, 1);
}
 905
 906static int denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
 907                             const uint8_t *buf, int oob_required, int page)
 908{
 909        struct denali_nand_info *denali = mtd_to_denali(mtd);
 910
 911        return denali_data_xfer(denali, (void *)buf, mtd->writesize,
 912                                page, 0, 1);
 913}
 914
 915static void denali_select_chip(struct mtd_info *mtd, int chip)
 916{
 917        struct denali_nand_info *denali = mtd_to_denali(mtd);
 918
 919        denali->active_bank = chip;
 920}
 921
 922static int denali_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
 923{
 924        struct denali_nand_info *denali = mtd_to_denali(mtd);
 925        uint32_t irq_status;
 926
 927        /* R/B# pin transitioned from low to high? */
 928        irq_status = denali_wait_for_irq(denali, INTR__INT_ACT);
 929
 930        return irq_status & INTR__INT_ACT ? 0 : NAND_STATUS_FAIL;
 931}
 932
 933static int denali_erase(struct mtd_info *mtd, int page)
 934{
 935        struct denali_nand_info *denali = mtd_to_denali(mtd);
 936        uint32_t irq_status;
 937
 938        denali_reset_irq(denali);
 939
 940        denali->host_write(denali, DENALI_MAP10 | DENALI_BANK(denali) | page,
 941                           DENALI_ERASE);
 942
 943        /* wait for erase to complete or failure to occur */
 944        irq_status = denali_wait_for_irq(denali,
 945                                         INTR__ERASE_COMP | INTR__ERASE_FAIL);
 946
 947        return irq_status & INTR__ERASE_COMP ? 0 : -EIO;
 948}
 949
/*
 * chip->setup_data_interface hook: translate the SDR timings chosen by the
 * core into the controller's clock-count registers.
 *
 * All conversions follow the same pattern: divide the timing (picoseconds)
 * by the bus clock period t_x, round up, clamp to the register field width,
 * then read-modify-write the field.  If @chipnr is
 * NAND_DATA_IFACE_CHECK_ONLY, only validate the clock setup and return.
 */
static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
				       const struct nand_data_interface *conf)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	const struct nand_sdr_timings *timings;
	unsigned long t_x, mult_x;
	int acc_clks, re_2_we, re_2_re, we_2_re, addr_2_data;
	int rdwr_en_lo, rdwr_en_hi, rdwr_en_lo_hi, cs_setup;
	int addr_2_data_mask;
	uint32_t tmp;

	timings = nand_get_sdr_timings(conf);
	if (IS_ERR(timings))
		return PTR_ERR(timings);

	/* clk_x period in picoseconds */
	t_x = DIV_ROUND_DOWN_ULL(1000000000000ULL, denali->clk_x_rate);
	if (!t_x)
		return -EINVAL;

	/*
	 * The bus interface clock, clk_x, is phase aligned with the core clock.
	 * The clk_x is an integral multiple N of the core clk.  The value N is
	 * configured at IP delivery time, and its available value is 4, 5, 6.
	 */
	mult_x = DIV_ROUND_CLOSEST_ULL(denali->clk_x_rate, denali->clk_rate);
	if (mult_x < 4 || mult_x > 6)
		return -EINVAL;

	if (chipnr == NAND_DATA_IFACE_CHECK_ONLY)
		return 0;

	/* tREA -> ACC_CLKS */
	acc_clks = DIV_ROUND_UP(timings->tREA_max, t_x);
	acc_clks = min_t(int, acc_clks, ACC_CLKS__VALUE);

	tmp = ioread32(denali->reg + ACC_CLKS);
	tmp &= ~ACC_CLKS__VALUE;
	tmp |= FIELD_PREP(ACC_CLKS__VALUE, acc_clks);
	iowrite32(tmp, denali->reg + ACC_CLKS);

	/* tRHW -> RE_2_WE */
	re_2_we = DIV_ROUND_UP(timings->tRHW_min, t_x);
	re_2_we = min_t(int, re_2_we, RE_2_WE__VALUE);

	tmp = ioread32(denali->reg + RE_2_WE);
	tmp &= ~RE_2_WE__VALUE;
	tmp |= FIELD_PREP(RE_2_WE__VALUE, re_2_we);
	iowrite32(tmp, denali->reg + RE_2_WE);

	/* tRHZ -> RE_2_RE */
	re_2_re = DIV_ROUND_UP(timings->tRHZ_max, t_x);
	re_2_re = min_t(int, re_2_re, RE_2_RE__VALUE);

	tmp = ioread32(denali->reg + RE_2_RE);
	tmp &= ~RE_2_RE__VALUE;
	tmp |= FIELD_PREP(RE_2_RE__VALUE, re_2_re);
	iowrite32(tmp, denali->reg + RE_2_RE);

	/*
	 * tCCS, tWHR -> WE_2_RE
	 *
	 * With WE_2_RE properly set, the Denali controller automatically takes
	 * care of the delay; the driver need not set NAND_WAIT_TCCS.
	 */
	we_2_re = DIV_ROUND_UP(max(timings->tCCS_min, timings->tWHR_min), t_x);
	we_2_re = min_t(int, we_2_re, TWHR2_AND_WE_2_RE__WE_2_RE);

	tmp = ioread32(denali->reg + TWHR2_AND_WE_2_RE);
	tmp &= ~TWHR2_AND_WE_2_RE__WE_2_RE;
	tmp |= FIELD_PREP(TWHR2_AND_WE_2_RE__WE_2_RE, we_2_re);
	iowrite32(tmp, denali->reg + TWHR2_AND_WE_2_RE);

	/* tADL -> ADDR_2_DATA */

	/* for older versions, ADDR_2_DATA is only 6 bit wide */
	addr_2_data_mask = TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA;
	if (denali->revision < 0x0501)
		addr_2_data_mask >>= 1;

	addr_2_data = DIV_ROUND_UP(timings->tADL_min, t_x);
	addr_2_data = min_t(int, addr_2_data, addr_2_data_mask);

	tmp = ioread32(denali->reg + TCWAW_AND_ADDR_2_DATA);
	tmp &= ~TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA;
	tmp |= FIELD_PREP(TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA, addr_2_data);
	iowrite32(tmp, denali->reg + TCWAW_AND_ADDR_2_DATA);

	/* tREH, tWH -> RDWR_EN_HI_CNT */
	rdwr_en_hi = DIV_ROUND_UP(max(timings->tREH_min, timings->tWH_min),
				  t_x);
	rdwr_en_hi = min_t(int, rdwr_en_hi, RDWR_EN_HI_CNT__VALUE);

	tmp = ioread32(denali->reg + RDWR_EN_HI_CNT);
	tmp &= ~RDWR_EN_HI_CNT__VALUE;
	tmp |= FIELD_PREP(RDWR_EN_HI_CNT__VALUE, rdwr_en_hi);
	iowrite32(tmp, denali->reg + RDWR_EN_HI_CNT);

	/* tRP, tWP -> RDWR_EN_LO_CNT */
	rdwr_en_lo = DIV_ROUND_UP(max(timings->tRP_min, timings->tWP_min), t_x);
	/* the low pulse must also stretch to honour the full cycle tRC/tWC */
	rdwr_en_lo_hi = DIV_ROUND_UP(max(timings->tRC_min, timings->tWC_min),
				     t_x);
	rdwr_en_lo_hi = max_t(int, rdwr_en_lo_hi, mult_x);
	rdwr_en_lo = max(rdwr_en_lo, rdwr_en_lo_hi - rdwr_en_hi);
	rdwr_en_lo = min_t(int, rdwr_en_lo, RDWR_EN_LO_CNT__VALUE);

	tmp = ioread32(denali->reg + RDWR_EN_LO_CNT);
	tmp &= ~RDWR_EN_LO_CNT__VALUE;
	tmp |= FIELD_PREP(RDWR_EN_LO_CNT__VALUE, rdwr_en_lo);
	iowrite32(tmp, denali->reg + RDWR_EN_LO_CNT);

	/* tCS, tCEA -> CS_SETUP_CNT */
	cs_setup = max3((int)DIV_ROUND_UP(timings->tCS_min, t_x) - rdwr_en_lo,
			(int)DIV_ROUND_UP(timings->tCEA_max, t_x) - acc_clks,
			0);
	cs_setup = min_t(int, cs_setup, CS_SETUP_CNT__VALUE);

	tmp = ioread32(denali->reg + CS_SETUP_CNT);
	tmp &= ~CS_SETUP_CNT__VALUE;
	tmp |= FIELD_PREP(CS_SETUP_CNT__VALUE, cs_setup);
	iowrite32(tmp, denali->reg + CS_SETUP_CNT);

	return 0;
}
1074
/*
 * Reset each bank in turn to probe how many chips are actually connected.
 *
 * A bank that has a chip behind it raises INTR__INT_ACT on reset; the
 * first bank that does not respond marks the end of the populated banks,
 * and denali->max_banks is trimmed down to that count.
 */
static void denali_reset_banks(struct denali_nand_info *denali)
{
	u32 irq_status;
	int i;

	for (i = 0; i < denali->max_banks; i++) {
		denali->active_bank = i;

		denali_reset_irq(denali);

		iowrite32(DEVICE_RESET__BANK(i),
			  denali->reg + DEVICE_RESET);

		irq_status = denali_wait_for_irq(denali,
			INTR__RST_COMP | INTR__INT_ACT | INTR__TIME_OUT);
		if (!(irq_status & INTR__INT_ACT))
			break;
	}

	dev_dbg(denali->dev, "%d chips connected\n", i);
	denali->max_banks = i;
}
1097
/*
 * One-time controller initialization: pick up the IP revision and the
 * firmware-programmed spare-area skip count, probe the bank count, and
 * set the basic enable registers.
 */
static void denali_hw_init(struct denali_nand_info *denali)
{
	/*
	 * The REVISION register may not be reliable.  Platforms are allowed to
	 * override it.
	 */
	if (!denali->revision)
		denali->revision = swab16(ioread32(denali->reg + REVISION));

	/*
	 * tell driver how many bit controller will skip before
	 * writing ECC code in OOB, this register may be already
	 * set by firmware. So we read this value out.
	 * if this value is 0, just let it be.
	 */
	denali->oob_skip_bytes = ioread32(denali->reg + SPARE_AREA_SKIP_BYTES);
	denali_detect_max_banks(denali);
	/* enable R/B# monitoring on all four possible banks */
	iowrite32(0x0F, denali->reg + RB_PIN_ENABLED);
	iowrite32(CHIP_EN_DONT_CARE__FLAG, denali->reg + CHIP_ENABLE_DONT_CARE);

	iowrite32(0xffff, denali->reg + SPARE_AREA_MARKER);
}
1120
/*
 * Return the number of ECC bytes needed per step of @step_size data bytes
 * to correct @strength bitflips: the BCH code takes
 * strength * fls(step_size * 8) parity bits, rounded up to an even byte
 * count because Denali requires ecc.bytes to be a multiple of 2.
 */
int denali_calc_ecc_bytes(int step_size, int strength)
{
	/* BCH code.  Denali requires ecc.bytes to be multiple of 2 */
	return DIV_ROUND_UP(strength * fls(step_size * 8), 16) * 2;
}
EXPORT_SYMBOL(denali_calc_ecc_bytes);
1127
/*
 * OOB layout, ECC part: a single region of ecc.total bytes placed right
 * after the oob_skip_bytes reserved at the start of the spare area.
 */
static int denali_ooblayout_ecc(struct mtd_info *mtd, int section,
				struct mtd_oob_region *oobregion)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section)
		return -ERANGE;

	oobregion->offset = denali->oob_skip_bytes;
	oobregion->length = chip->ecc.total;

	return 0;
}
1142
/*
 * OOB layout, free part: whatever remains of the spare area after the
 * skip bytes and the ECC region.
 */
static int denali_ooblayout_free(struct mtd_info *mtd, int section,
				 struct mtd_oob_region *oobregion)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section)
		return -ERANGE;

	oobregion->offset = chip->ecc.total + denali->oob_skip_bytes;
	oobregion->length = mtd->oobsize - oobregion->offset;

	return 0;
}
1157
/* OOB layout description registered via mtd_set_ooblayout() */
static const struct mtd_ooblayout_ops denali_ooblayout_ops = {
	.ecc = denali_ooblayout_ecc,
	.free = denali_ooblayout_free,
};
1162
/*
 * Adjust the logical chip geometry when two x8 devices share one chip
 * select (DEVICES_CONNECTED == 2): every per-page/per-block quantity the
 * core framework sees must be doubled.  Only 1 or 2 devices are supported.
 */
static int denali_multidev_fixup(struct denali_nand_info *denali)
{
	struct nand_chip *chip = &denali->nand;
	struct mtd_info *mtd = nand_to_mtd(chip);

	/*
	 * Support for multi device:
	 * When the IP configuration is x16 capable and two x8 chips are
	 * connected in parallel, DEVICES_CONNECTED should be set to 2.
	 * In this case, the core framework knows nothing about this fact,
	 * so we should tell it the _logical_ pagesize and anything necessary.
	 */
	denali->devs_per_cs = ioread32(denali->reg + DEVICES_CONNECTED);

	/*
	 * On some SoCs, DEVICES_CONNECTED is not auto-detected.
	 * For those, DEVICES_CONNECTED is left to 0.  Set 1 if it is the case.
	 */
	if (denali->devs_per_cs == 0) {
		denali->devs_per_cs = 1;
		iowrite32(1, denali->reg + DEVICES_CONNECTED);
	}

	if (denali->devs_per_cs == 1)
		return 0;

	if (denali->devs_per_cs != 2) {
		dev_err(denali->dev, "unsupported number of devices %d\n",
			denali->devs_per_cs);
		return -EINVAL;
	}

	/* 2 chips in parallel */
	mtd->size <<= 1;
	mtd->erasesize <<= 1;
	mtd->writesize <<= 1;
	mtd->oobsize <<= 1;
	chip->chipsize <<= 1;
	chip->page_shift += 1;
	chip->phys_erase_shift += 1;
	chip->bbt_erase_shift += 1;
	chip->chip_shift += 1;
	chip->pagemask <<= 1;
	chip->ecc.size <<= 1;
	chip->ecc.bytes <<= 1;
	chip->ecc.strength <<= 1;
	denali->oob_skip_bytes <<= 1;

	return 0;
}
1213
/*
 * nand_controller_ops .attach_chip hook, called by nand_scan() after chip
 * identification: probe DMA capability, choose the ECC configuration,
 * program the controller's geometry/ECC registers, install the page
 * access hooks, and allocate the raw-transfer bounce buffer.
 */
static int denali_attach_chip(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	int ret;

	if (ioread32(denali->reg + FEATURES) & FEATURES__DMA)
		denali->dma_avail = 1;

	if (denali->dma_avail) {
		int dma_bit = denali->caps & DENALI_CAP_DMA_64BIT ? 64 : 32;

		/* fall back to PIO if the DMA mask cannot be satisfied */
		ret = dma_set_mask(denali->dev, DMA_BIT_MASK(dma_bit));
		if (ret) {
			dev_info(denali->dev,
				 "Failed to set DMA mask. Disabling DMA.\n");
			denali->dma_avail = 0;
		}
	}

	if (denali->dma_avail) {
		chip->options |= NAND_USE_BOUNCE_BUFFER;
		chip->buf_align = 16;
		if (denali->caps & DENALI_CAP_DMA_64BIT)
			denali->setup_dma = denali_setup_dma64;
		else
			denali->setup_dma = denali_setup_dma32;
	}

	chip->bbt_options |= NAND_BBT_USE_FLASH;
	chip->bbt_options |= NAND_BBT_NO_OOB;
	chip->ecc.mode = NAND_ECC_HW_SYNDROME;
	chip->options |= NAND_NO_SUBPAGE_WRITE;

	/* pick step size/strength fitting the usable OOB space */
	ret = nand_ecc_choose_conf(chip, denali->ecc_caps,
				   mtd->oobsize - denali->oob_skip_bytes);
	if (ret) {
		dev_err(denali->dev, "Failed to setup ECC settings.\n");
		return ret;
	}

	dev_dbg(denali->dev,
		"chosen ECC settings: step=%d, strength=%d, bytes=%d\n",
		chip->ecc.size, chip->ecc.strength, chip->ecc.bytes);

	iowrite32(FIELD_PREP(ECC_CORRECTION__ERASE_THRESHOLD, 1) |
		  FIELD_PREP(ECC_CORRECTION__VALUE, chip->ecc.strength),
		  denali->reg + ECC_CORRECTION);
	iowrite32(mtd->erasesize / mtd->writesize,
		  denali->reg + PAGES_PER_BLOCK);
	iowrite32(chip->options & NAND_BUSWIDTH_16 ? 1 : 0,
		  denali->reg + DEVICE_WIDTH);
	iowrite32(chip->options & NAND_ROW_ADDR_3 ? 0 : TWO_ROW_ADDR_CYCLES__FLAG,
		  denali->reg + TWO_ROW_ADDR_CYCLES);
	iowrite32(mtd->writesize, denali->reg + DEVICE_MAIN_AREA_SIZE);
	iowrite32(mtd->oobsize, denali->reg + DEVICE_SPARE_AREA_SIZE);

	iowrite32(chip->ecc.size, denali->reg + CFG_DATA_BLOCK_SIZE);
	iowrite32(chip->ecc.size, denali->reg + CFG_LAST_DATA_BLOCK_SIZE);
	/* chip->ecc.steps is set by nand_scan_tail(); not available here */
	iowrite32(mtd->writesize / chip->ecc.size,
		  denali->reg + CFG_NUM_DATA_BLOCKS);

	mtd_set_ooblayout(mtd, &denali_ooblayout_ops);

	if (chip->options & NAND_BUSWIDTH_16) {
		chip->read_buf = denali_read_buf16;
		chip->write_buf = denali_write_buf16;
	} else {
		chip->read_buf = denali_read_buf;
		chip->write_buf = denali_write_buf;
	}
	chip->ecc.read_page = denali_read_page;
	chip->ecc.read_page_raw = denali_read_page_raw;
	chip->ecc.write_page = denali_write_page;
	chip->ecc.write_page_raw = denali_write_page_raw;
	chip->ecc.read_oob = denali_read_oob;
	chip->ecc.write_oob = denali_write_oob;
	chip->erase = denali_erase;

	ret = denali_multidev_fixup(denali);
	if (ret)
		return ret;

	/*
	 * This buffer is DMA-mapped by denali_{read,write}_page_raw.  Do not
	 * use devm_kmalloc() because the memory allocated by devm_ does not
	 * guarantee DMA-safe alignment.
	 */
	denali->buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
	if (!denali->buf)
		return -ENOMEM;

	return 0;
}
1309
1310static void denali_detach_chip(struct nand_chip *chip)
1311{
1312        struct mtd_info *mtd = nand_to_mtd(chip);
1313        struct denali_nand_info *denali = mtd_to_denali(mtd);
1314
1315        kfree(denali->buf);
1316}
1317
/* hooks invoked by nand_scan() around chip identification */
static const struct nand_controller_ops denali_controller_ops = {
	.attach_chip = denali_attach_chip,
	.detach_chip = denali_detach_chip,
};
1322
/*
 * Common probe entry point shared by the platform/PCI front-ends: initialize
 * the hardware and IRQ handling, install the nand_chip hooks, run
 * nand_scan(), and register the resulting MTD device.
 *
 * The caller must have filled in denali->reg, ->host, ->irq, ->dev and
 * (optionally) the clock rates before calling.  Returns 0 or a negative
 * errno; on failure all acquired resources are released.
 */
int denali_init(struct denali_nand_info *denali)
{
	struct nand_chip *chip = &denali->nand;
	struct mtd_info *mtd = nand_to_mtd(chip);
	u32 features = ioread32(denali->reg + FEATURES);
	int ret;

	mtd->dev.parent = denali->dev;
	denali_hw_init(denali);

	init_completion(&denali->complete);
	spin_lock_init(&denali->irq_lock);

	denali_clear_irq_all(denali);

	ret = devm_request_irq(denali->dev, denali->irq, denali_isr,
			       IRQF_SHARED, DENALI_NAND_NAME, denali);
	if (ret) {
		dev_err(denali->dev, "Unable to request IRQ\n");
		return ret;
	}

	denali_enable_irq(denali);
	denali_reset_banks(denali);
	if (!denali->max_banks) {
		/* Error out earlier if no chip is found for some reasons. */
		ret = -ENODEV;
		goto disable_irq;
	}

	denali->active_bank = DENALI_INVALID_BANK;

	nand_set_flash_node(chip, denali->dev->of_node);
	/* Fallback to the default name if DT did not give "label" property */
	if (!mtd->name)
		mtd->name = "denali-nand";

	chip->select_chip = denali_select_chip;
	chip->read_byte = denali_read_byte;
	chip->write_byte = denali_write_byte;
	chip->read_word = denali_read_word;
	chip->cmd_ctrl = denali_cmd_ctrl;
	chip->dev_ready = denali_dev_ready;
	chip->waitfunc = denali_waitfunc;

	/* choose indexed or direct host register access per the IP config */
	if (features & FEATURES__INDEX_ADDR) {
		denali->host_read = denali_indexed_read;
		denali->host_write = denali_indexed_write;
	} else {
		denali->host_read = denali_direct_read;
		denali->host_write = denali_direct_write;
	}

	/* clk rate info is needed for setup_data_interface */
	if (denali->clk_rate && denali->clk_x_rate)
		chip->setup_data_interface = denali_setup_data_interface;

	chip->dummy_controller.ops = &denali_controller_ops;
	ret = nand_scan(mtd, denali->max_banks);
	if (ret)
		goto disable_irq;

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret) {
		dev_err(denali->dev, "Failed to register MTD: %d\n", ret);
		goto cleanup_nand;
	}

	return 0;

cleanup_nand:
	nand_cleanup(chip);
disable_irq:
	denali_disable_irq(denali);

	return ret;
}
EXPORT_SYMBOL(denali_init);
1401
1402void denali_remove(struct denali_nand_info *denali)
1403{
1404        struct mtd_info *mtd = nand_to_mtd(&denali->nand);
1405
1406        nand_release(mtd);
1407        denali_disable_irq(denali);
1408}
1409EXPORT_SYMBOL(denali_remove);
1410