/* linux/drivers/mtd/nand/denali.c */
   1/*
   2 * NAND Flash Controller Device Driver
   3 * Copyright © 2009-2010, Intel Corporation and its suppliers.
   4 *
   5 * This program is free software; you can redistribute it and/or modify it
   6 * under the terms and conditions of the GNU General Public License,
   7 * version 2, as published by the Free Software Foundation.
   8 *
   9 * This program is distributed in the hope it will be useful, but WITHOUT
  10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  12 * more details.
  13 */
  14
  15#include <linux/bitfield.h>
  16#include <linux/completion.h>
  17#include <linux/dma-mapping.h>
  18#include <linux/interrupt.h>
  19#include <linux/io.h>
  20#include <linux/module.h>
  21#include <linux/mtd/mtd.h>
  22#include <linux/mtd/rawnand.h>
  23#include <linux/slab.h>
  24#include <linux/spinlock.h>
  25
  26#include "denali.h"
  27
  28MODULE_LICENSE("GPL");
  29
  30#define DENALI_NAND_NAME    "denali-nand"
  31
  32/* for Indexed Addressing */
  33#define DENALI_INDEXED_CTRL     0x00
  34#define DENALI_INDEXED_DATA     0x10
  35
  36#define DENALI_MAP00            (0 << 26)       /* direct access to buffer */
  37#define DENALI_MAP01            (1 << 26)       /* read/write pages in PIO */
  38#define DENALI_MAP10            (2 << 26)       /* high-level control plane */
  39#define DENALI_MAP11            (3 << 26)       /* direct controller access */
  40
  41/* MAP11 access cycle type */
  42#define DENALI_MAP11_CMD        ((DENALI_MAP11) | 0)    /* command cycle */
  43#define DENALI_MAP11_ADDR       ((DENALI_MAP11) | 1)    /* address cycle */
  44#define DENALI_MAP11_DATA       ((DENALI_MAP11) | 2)    /* data cycle */
  45
  46/* MAP10 commands */
  47#define DENALI_ERASE            0x01
  48
  49#define DENALI_BANK(denali)     ((denali)->active_bank << 24)
  50
  51#define DENALI_INVALID_BANK     -1
  52#define DENALI_NR_BANKS         4
  53
  54/*
  55 * The bus interface clock, clk_x, is phase aligned with the core clock.  The
  56 * clk_x is an integral multiple N of the core clk.  The value N is configured
  57 * at IP delivery time, and its available value is 4, 5, or 6.  We need to align
  58 * to the largest value to make it work with any possible configuration.
  59 */
  60#define DENALI_CLK_X_MULT       6
  61
  62static inline struct denali_nand_info *mtd_to_denali(struct mtd_info *mtd)
  63{
  64        return container_of(mtd_to_nand(mtd), struct denali_nand_info, nand);
  65}
  66
  67/*
  68 * Direct Addressing - the slave address forms the control information (command
  69 * type, bank, block, and page address).  The slave data is the actual data to
  70 * be transferred.  This mode requires 28 bits of address region allocated.
  71 */
  72static u32 denali_direct_read(struct denali_nand_info *denali, u32 addr)
  73{
  74        return ioread32(denali->host + addr);
  75}
  76
  77static void denali_direct_write(struct denali_nand_info *denali, u32 addr,
  78                                u32 data)
  79{
  80        iowrite32(data, denali->host + addr);
  81}
  82
/*
 * Indexed Addressing - address translation module intervenes in passing the
 * control information.  This mode reduces the required address range.  The
 * control information and transferred data are latched by the registers in
 * the translation module.
 */
static u32 denali_indexed_read(struct denali_nand_info *denali, u32 addr)
{
	/* latch the control word first, then fetch the data it selects */
	iowrite32(addr, denali->host + DENALI_INDEXED_CTRL);
	return ioread32(denali->host + DENALI_INDEXED_DATA);
}
  94
/* Indexed Addressing store: latch the control word, then write the data. */
static void denali_indexed_write(struct denali_nand_info *denali, u32 addr,
				 u32 data)
{
	iowrite32(addr, denali->host + DENALI_INDEXED_CTRL);
	iowrite32(data, denali->host + DENALI_INDEXED_DATA);
}
 101
 102/*
 103 * Use the configuration feature register to determine the maximum number of
 104 * banks that the hardware supports.
 105 */
 106static void denali_detect_max_banks(struct denali_nand_info *denali)
 107{
 108        uint32_t features = ioread32(denali->reg + FEATURES);
 109
 110        denali->max_banks = 1 << FIELD_GET(FEATURES__N_BANKS, features);
 111
 112        /* the encoding changed from rev 5.0 to 5.1 */
 113        if (denali->revision < 0x0501)
 114                denali->max_banks <<= 1;
 115}
 116
 117static void denali_enable_irq(struct denali_nand_info *denali)
 118{
 119        int i;
 120
 121        for (i = 0; i < DENALI_NR_BANKS; i++)
 122                iowrite32(U32_MAX, denali->reg + INTR_EN(i));
 123        iowrite32(GLOBAL_INT_EN_FLAG, denali->reg + GLOBAL_INT_ENABLE);
 124}
 125
 126static void denali_disable_irq(struct denali_nand_info *denali)
 127{
 128        int i;
 129
 130        for (i = 0; i < DENALI_NR_BANKS; i++)
 131                iowrite32(0, denali->reg + INTR_EN(i));
 132        iowrite32(0, denali->reg + GLOBAL_INT_ENABLE);
 133}
 134
/* Acknowledge the given interrupt bits for one bank. */
static void denali_clear_irq(struct denali_nand_info *denali,
			     int bank, uint32_t irq_status)
{
	/* write one to clear bits */
	iowrite32(irq_status, denali->reg + INTR_STATUS(bank));
}
 141
 142static void denali_clear_irq_all(struct denali_nand_info *denali)
 143{
 144        int i;
 145
 146        for (i = 0; i < DENALI_NR_BANKS; i++)
 147                denali_clear_irq(denali, i, U32_MAX);
 148}
 149
/*
 * Interrupt handler: scan all banks, acknowledge what fired, and latch
 * events for the active bank into denali->irq_status so that
 * denali_wait_for_irq() can pick them up.
 */
static irqreturn_t denali_isr(int irq, void *dev_id)
{
	struct denali_nand_info *denali = dev_id;
	irqreturn_t ret = IRQ_NONE;
	uint32_t irq_status;
	int i;

	spin_lock(&denali->irq_lock);

	for (i = 0; i < DENALI_NR_BANKS; i++) {
		irq_status = ioread32(denali->reg + INTR_STATUS(i));
		if (irq_status)
			ret = IRQ_HANDLED;

		/* acknowledge in hardware (write-one-to-clear) */
		denali_clear_irq(denali, i, irq_status);

		/* events on banks other than the active one are dropped */
		if (i != denali->active_bank)
			continue;

		denali->irq_status |= irq_status;

		/* wake the waiter once the bits it asked for have arrived */
		if (denali->irq_status & denali->irq_mask)
			complete(&denali->complete);
	}

	spin_unlock(&denali->irq_lock);

	return ret;
}
 179
 180static void denali_reset_irq(struct denali_nand_info *denali)
 181{
 182        unsigned long flags;
 183
 184        spin_lock_irqsave(&denali->irq_lock, flags);
 185        denali->irq_status = 0;
 186        denali->irq_mask = 0;
 187        spin_unlock_irqrestore(&denali->irq_lock, flags);
 188}
 189
/*
 * Wait (up to one second) for any of the bits in @irq_mask to be latched by
 * the ISR.  Returns the accumulated irq_status, or 0 on timeout.
 *
 * The check of irq_status and the arming of irq_mask happen under
 * irq_lock so an event delivered between "check" and "wait" cannot be
 * lost: either it is already latched here, or the ISR sees the mask and
 * signals the completion.
 */
static uint32_t denali_wait_for_irq(struct denali_nand_info *denali,
				    uint32_t irq_mask)
{
	unsigned long time_left, flags;
	uint32_t irq_status;

	spin_lock_irqsave(&denali->irq_lock, flags);

	irq_status = denali->irq_status;

	if (irq_mask & irq_status) {
		/* return immediately if the IRQ has already happened. */
		spin_unlock_irqrestore(&denali->irq_lock, flags);
		return irq_status;
	}

	denali->irq_mask = irq_mask;
	reinit_completion(&denali->complete);
	spin_unlock_irqrestore(&denali->irq_lock, flags);

	time_left = wait_for_completion_timeout(&denali->complete,
						msecs_to_jiffies(1000));
	if (!time_left) {
		dev_err(denali->dev, "timeout while waiting for irq 0x%x\n",
			irq_mask);
		return 0;
	}

	return denali->irq_status;
}
 220
 221static uint32_t denali_check_irq(struct denali_nand_info *denali)
 222{
 223        unsigned long flags;
 224        uint32_t irq_status;
 225
 226        spin_lock_irqsave(&denali->irq_lock, flags);
 227        irq_status = denali->irq_status;
 228        spin_unlock_irqrestore(&denali->irq_lock, flags);
 229
 230        return irq_status;
 231}
 232
 233static void denali_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
 234{
 235        struct denali_nand_info *denali = mtd_to_denali(mtd);
 236        u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
 237        int i;
 238
 239        for (i = 0; i < len; i++)
 240                buf[i] = denali->host_read(denali, addr);
 241}
 242
 243static void denali_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
 244{
 245        struct denali_nand_info *denali = mtd_to_denali(mtd);
 246        u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
 247        int i;
 248
 249        for (i = 0; i < len; i++)
 250                denali->host_write(denali, addr, buf[i]);
 251}
 252
 253static void denali_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
 254{
 255        struct denali_nand_info *denali = mtd_to_denali(mtd);
 256        u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
 257        uint16_t *buf16 = (uint16_t *)buf;
 258        int i;
 259
 260        for (i = 0; i < len / 2; i++)
 261                buf16[i] = denali->host_read(denali, addr);
 262}
 263
 264static void denali_write_buf16(struct mtd_info *mtd, const uint8_t *buf,
 265                               int len)
 266{
 267        struct denali_nand_info *denali = mtd_to_denali(mtd);
 268        u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
 269        const uint16_t *buf16 = (const uint16_t *)buf;
 270        int i;
 271
 272        for (i = 0; i < len / 2; i++)
 273                denali->host_write(denali, addr, buf16[i]);
 274}
 275
 276static uint8_t denali_read_byte(struct mtd_info *mtd)
 277{
 278        uint8_t byte;
 279
 280        denali_read_buf(mtd, &byte, 1);
 281
 282        return byte;
 283}
 284
/* ->write_byte() hook: emit a single byte through denali_write_buf(). */
static void denali_write_byte(struct mtd_info *mtd, uint8_t byte)
{
	denali_write_buf(mtd, &byte, 1);
}
 289
 290static uint16_t denali_read_word(struct mtd_info *mtd)
 291{
 292        uint16_t word;
 293
 294        denali_read_buf16(mtd, (uint8_t *)&word, 2);
 295
 296        return word;
 297}
 298
/*
 * ->cmd_ctrl() hook: issue one raw command or address cycle to the active
 * bank via MAP11.  Cycles with neither CLE nor ALE set are ignored.
 */
static void denali_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	uint32_t type;

	if (ctrl & NAND_CLE)
		type = DENALI_MAP11_CMD;
	else if (ctrl & NAND_ALE)
		type = DENALI_MAP11_ADDR;
	else
		return;

	/*
	 * Some commands are followed by chip->dev_ready or chip->waitfunc.
	 * irq_status must be cleared here to catch the R/B# interrupt later.
	 */
	if (ctrl & NAND_CTRL_CHANGE)
		denali_reset_irq(denali);

	denali->host_write(denali, DENALI_BANK(denali) | type, dat);
}
 320
 321static int denali_dev_ready(struct mtd_info *mtd)
 322{
 323        struct denali_nand_info *denali = mtd_to_denali(mtd);
 324
 325        return !!(denali_check_irq(denali) & INTR__INT_ACT);
 326}
 327
/*
 * For each ECC sector flagged uncorrectable in @uncor_ecc_flags, decide
 * whether it is a genuinely bad sector or just an erased (all-0xff) one.
 * Real failures bump ecc_stats.failed; bitflips found in erased sectors
 * are counted as corrected.  Returns the updated max_bitflips, or a
 * negative error from the OOB layout lookup.
 */
static int denali_check_erased_page(struct mtd_info *mtd,
				    struct nand_chip *chip, uint8_t *buf,
				    unsigned long uncor_ecc_flags,
				    unsigned int max_bitflips)
{
	uint8_t *ecc_code = chip->buffers->ecccode;
	int ecc_steps = chip->ecc.steps;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	int i, ret, stat;

	/* pull the raw ECC bytes out of the OOB buffer */
	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	for (i = 0; i < ecc_steps; i++) {
		/* only re-examine sectors the controller gave up on */
		if (!(uncor_ecc_flags & BIT(i)))
			continue;

		stat = nand_check_erased_ecc_chunk(buf, ecc_size,
						  ecc_code, ecc_bytes,
						  NULL, 0,
						  chip->ecc.strength);
		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}

		buf += ecc_size;
		ecc_code += ecc_bytes;
	}

	return max_bitflips;
}
 365
/*
 * ECC fixup path for controllers that correct data in hardware: read the
 * per-bank ECC_COR_INFO register instead of walking an error FIFO.
 * Returns max bitflips per sector on success; on an uncorrectable error,
 * flags every sector in *uncor_ecc_flags and returns 0 so the caller can
 * run the erased-page check.
 */
static int denali_hw_ecc_fixup(struct mtd_info *mtd,
			       struct denali_nand_info *denali,
			       unsigned long *uncor_ecc_flags)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int bank = denali->active_bank;
	uint32_t ecc_cor;
	unsigned int max_bitflips;

	ecc_cor = ioread32(denali->reg + ECC_COR_INFO(bank));
	ecc_cor >>= ECC_COR_INFO__SHIFT(bank);

	if (ecc_cor & ECC_COR_INFO__UNCOR_ERR) {
		/*
		 * This flag is set when uncorrectable error occurs at least in
		 * one ECC sector.  We can not know "how many sectors", or
		 * "which sector(s)".  We need erase-page check for all sectors.
		 */
		*uncor_ecc_flags = GENMASK(chip->ecc.steps - 1, 0);
		return 0;
	}

	max_bitflips = FIELD_GET(ECC_COR_INFO__MAX_ERRORS, ecc_cor);

	/*
	 * The register holds the maximum of per-sector corrected bitflips.
	 * This is suitable for the return value of the ->read_page() callback.
	 * Unfortunately, we can not know the total number of corrected bits in
	 * the page.  Increase the stats by max_bitflips. (compromised solution)
	 */
	mtd->ecc_stats.corrected += max_bitflips;

	return max_bitflips;
}
 400
/*
 * Software ECC fixup: drain the controller's error report (the
 * ECC_ERROR_ADDRESS / ERR_CORRECTION_INFO register pair) and apply each
 * single-byte correction to @buf.  Sectors reported uncorrectable are
 * recorded in *uncor_ecc_flags for a later erased-page check.
 *
 * Returns the maximum number of corrected bitflips seen in any one ECC
 * sector, or -EIO if the closing ECC_TRANSACTION_DONE interrupt is missed.
 */
static int denali_sw_ecc_fixup(struct mtd_info *mtd,
			       struct denali_nand_info *denali,
			       unsigned long *uncor_ecc_flags, uint8_t *buf)
{
	unsigned int ecc_size = denali->nand.ecc.size;
	unsigned int bitflips = 0;
	unsigned int max_bitflips = 0;
	uint32_t err_addr, err_cor_info;
	unsigned int err_byte, err_sector, err_device;
	uint8_t err_cor_value;
	unsigned int prev_sector = 0;
	uint32_t irq_status;

	denali_reset_irq(denali);

	do {
		/* location of the error: ECC sector and byte offset within */
		err_addr = ioread32(denali->reg + ECC_ERROR_ADDRESS);
		err_sector = FIELD_GET(ECC_ERROR_ADDRESS__SECTOR, err_addr);
		err_byte = FIELD_GET(ECC_ERROR_ADDRESS__OFFSET, err_addr);

		/* XOR pattern to apply, and which NAND device it hit */
		err_cor_info = ioread32(denali->reg + ERR_CORRECTION_INFO);
		err_cor_value = FIELD_GET(ERR_CORRECTION_INFO__BYTE,
					  err_cor_info);
		err_device = FIELD_GET(ERR_CORRECTION_INFO__DEVICE,
				       err_cor_info);

		/* reset the bitflip counter when crossing ECC sector */
		if (err_sector != prev_sector)
			bitflips = 0;

		if (err_cor_info & ERR_CORRECTION_INFO__UNCOR) {
			/*
			 * Check later if this is a real ECC error, or
			 * an erased sector.
			 */
			*uncor_ecc_flags |= BIT(err_sector);
		} else if (err_byte < ecc_size) {
			/*
			 * If err_byte is larger than ecc_size, the error hit
			 * the OOB area, so there is nothing to correct in the
			 * data buffer.  err_device identifies which of the
			 * connected NAND devices the error bits belong to
			 * when more than one is attached.
			 */
			int offset;
			unsigned int flips_in_byte;

			offset = (err_sector * ecc_size + err_byte) *
					denali->devs_per_cs + err_device;

			/* correct the ECC error */
			flips_in_byte = hweight8(buf[offset] ^ err_cor_value);
			buf[offset] ^= err_cor_value;
			mtd->ecc_stats.corrected += flips_in_byte;
			bitflips += flips_in_byte;

			max_bitflips = max(max_bitflips, bitflips);
		}

		prev_sector = err_sector;
	} while (!(err_cor_info & ERR_CORRECTION_INFO__LAST_ERR));

	/*
	 * Once handle all ECC errors, controller will trigger an
	 * ECC_TRANSACTION_DONE interrupt.
	 */
	irq_status = denali_wait_for_irq(denali, INTR__ECC_TRANSACTION_DONE);
	if (!(irq_status & INTR__ECC_TRANSACTION_DONE))
		return -EIO;

	return max_bitflips;
}
 473
/*
 * Program a single-page DMA transfer on controllers with a 64-bit DMA
 * engine.  The three MAP10 writes below form a fixed hardware sequence;
 * their order must not change.
 */
static void denali_setup_dma64(struct denali_nand_info *denali,
			       dma_addr_t dma_addr, int page, int write)
{
	uint32_t mode;
	const int page_count = 1;

	mode = DENALI_MAP10 | DENALI_BANK(denali) | page;

	/* DMA is a three step process */

	/*
	 * 1. setup transfer type, interrupt when complete,
	 *    burst len = 64 bytes, the number of pages
	 */
	denali->host_write(denali, mode,
			   0x01002000 | (64 << 16) | (write << 8) | page_count);

	/* 2. set memory low address */
	denali->host_write(denali, mode, lower_32_bits(dma_addr));

	/* 3. set memory high address */
	denali->host_write(denali, mode, upper_32_bits(dma_addr));
}
 497
/*
 * Program a single-page DMA transfer on controllers with a 32-bit DMA
 * engine.  Unlike the 64-bit variant, the address is smeared into the
 * MAP10 command words themselves; the four writes are a fixed sequence.
 */
static void denali_setup_dma32(struct denali_nand_info *denali,
			       dma_addr_t dma_addr, int page, int write)
{
	uint32_t mode;
	const int page_count = 1;

	mode = DENALI_MAP10 | DENALI_BANK(denali);

	/* DMA is a four step process */

	/* 1. setup transfer type and # of pages */
	denali->host_write(denali, mode | page,
			   0x2000 | (write << 8) | page_count);

	/* 2. set memory high address bits 23:8 */
	denali->host_write(denali, mode | ((dma_addr >> 16) << 8), 0x2200);

	/* 3. set memory low address bits 23:8 */
	denali->host_write(denali, mode | ((dma_addr & 0xffff) << 8), 0x2300);

	/* 4. interrupt when complete, burst len = 64 bytes */
	denali->host_write(denali, mode | 0x14000, 0x2400);
}
 521
/*
 * Read one page over PIO (MAP01).  Returns 0 on success, -EIO if the
 * PAGE_XFER_INC interrupt never arrives, -EBADMSG on an ECC error.
 * An ERASED_PAGE report makes the whole buffer read back as 0xff.
 */
static int denali_pio_read(struct denali_nand_info *denali, void *buf,
			   size_t size, int page, int raw)
{
	u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
	uint32_t *buf32 = (uint32_t *)buf;
	uint32_t irq_status, ecc_err_mask;
	int i;

	/* which interrupt bit flags an ECC problem depends on the fixup mode */
	if (denali->caps & DENALI_CAP_HW_ECC_FIXUP)
		ecc_err_mask = INTR__ECC_UNCOR_ERR;
	else
		ecc_err_mask = INTR__ECC_ERR;

	denali_reset_irq(denali);

	for (i = 0; i < size / 4; i++)
		*buf32++ = denali->host_read(denali, addr);

	irq_status = denali_wait_for_irq(denali, INTR__PAGE_XFER_INC);
	if (!(irq_status & INTR__PAGE_XFER_INC))
		return -EIO;

	if (irq_status & INTR__ERASED_PAGE)
		memset(buf, 0xff, size);

	return irq_status & ecc_err_mask ? -EBADMSG : 0;
}
 549
 550static int denali_pio_write(struct denali_nand_info *denali,
 551                            const void *buf, size_t size, int page, int raw)
 552{
 553        u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
 554        const uint32_t *buf32 = (uint32_t *)buf;
 555        uint32_t irq_status;
 556        int i;
 557
 558        denali_reset_irq(denali);
 559
 560        for (i = 0; i < size / 4; i++)
 561                denali->host_write(denali, addr, *buf32++);
 562
 563        irq_status = denali_wait_for_irq(denali,
 564                                INTR__PROGRAM_COMP | INTR__PROGRAM_FAIL);
 565        if (!(irq_status & INTR__PROGRAM_COMP))
 566                return -EIO;
 567
 568        return 0;
 569}
 570
 571static int denali_pio_xfer(struct denali_nand_info *denali, void *buf,
 572                           size_t size, int page, int raw, int write)
 573{
 574        if (write)
 575                return denali_pio_write(denali, buf, size, page, raw);
 576        else
 577                return denali_pio_read(denali, buf, size, page, raw);
 578}
 579
/*
 * Transfer one page by DMA, falling back to PIO if the buffer cannot be
 * mapped.  Returns 0 on success, -EIO if the DMA completion interrupt is
 * missed, -EBADMSG on an ECC error report.
 */
static int denali_dma_xfer(struct denali_nand_info *denali, void *buf,
			   size_t size, int page, int raw, int write)
{
	dma_addr_t dma_addr;
	uint32_t irq_mask, irq_status, ecc_err_mask;
	enum dma_data_direction dir = write ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	int ret = 0;

	dma_addr = dma_map_single(denali->dev, buf, size, dir);
	if (dma_mapping_error(denali->dev, dma_addr)) {
		dev_dbg(denali->dev, "Failed to DMA-map buffer. Trying PIO.\n");
		return denali_pio_xfer(denali, buf, size, page, raw, write);
	}

	if (write) {
		/*
		 * INTR__PROGRAM_COMP is never asserted for the DMA transfer.
		 * We can use INTR__DMA_CMD_COMP instead.  This flag is asserted
		 * when the page program is completed.
		 */
		irq_mask = INTR__DMA_CMD_COMP | INTR__PROGRAM_FAIL;
		ecc_err_mask = 0;
	} else if (denali->caps & DENALI_CAP_HW_ECC_FIXUP) {
		irq_mask = INTR__DMA_CMD_COMP;
		ecc_err_mask = INTR__ECC_UNCOR_ERR;
	} else {
		irq_mask = INTR__DMA_CMD_COMP;
		ecc_err_mask = INTR__ECC_ERR;
	}

	iowrite32(DMA_ENABLE__FLAG, denali->reg + DMA_ENABLE);

	/* arm the IRQ bookkeeping before kicking off the transfer */
	denali_reset_irq(denali);
	denali->setup_dma(denali, dma_addr, page, write);

	irq_status = denali_wait_for_irq(denali, irq_mask);
	if (!(irq_status & INTR__DMA_CMD_COMP))
		ret = -EIO;
	else if (irq_status & ecc_err_mask)
		ret = -EBADMSG;

	iowrite32(0, denali->reg + DMA_ENABLE);

	dma_unmap_single(denali->dev, dma_addr, size, dir);

	/* an erased page reads back as all 0xff regardless of DMA contents */
	if (irq_status & INTR__ERASED_PAGE)
		memset(buf, 0xff, size);

	return ret;
}
 630
 631static int denali_data_xfer(struct denali_nand_info *denali, void *buf,
 632                            size_t size, int page, int raw, int write)
 633{
 634        iowrite32(raw ? 0 : ECC_ENABLE__FLAG, denali->reg + ECC_ENABLE);
 635        iowrite32(raw ? TRANSFER_SPARE_REG__FLAG : 0,
 636                  denali->reg + TRANSFER_SPARE_REG);
 637
 638        if (denali->dma_avail)
 639                return denali_dma_xfer(denali, buf, size, page, raw, write);
 640        else
 641                return denali_pio_xfer(denali, buf, size, page, raw, write);
 642}
 643
/*
 * Transfer the OOB area of one page (read or write) using raw NAND
 * commands, reassembling the controller's on-flash layout: the bad-block
 * marker bytes first, then the ECC bytes interleaved with the payload
 * (skipping oob_skip_bytes past the page boundary), then the free OOB.
 */
static void denali_oob_xfer(struct mtd_info *mtd, struct nand_chip *chip,
			    int page, int write)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	unsigned int start_cmd = write ? NAND_CMD_SEQIN : NAND_CMD_READ0;
	unsigned int rnd_cmd = write ? NAND_CMD_RNDIN : NAND_CMD_RNDOUT;
	int writesize = mtd->writesize;
	int oobsize = mtd->oobsize;
	uint8_t *bufpoi = chip->oob_poi;
	int ecc_steps = chip->ecc.steps;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	int oob_skip = denali->oob_skip_bytes;
	size_t size = writesize + oobsize;
	int i, pos, len;

	/* BBM at the beginning of the OOB area */
	chip->cmdfunc(mtd, start_cmd, writesize, page);
	if (write)
		chip->write_buf(mtd, bufpoi, oob_skip);
	else
		chip->read_buf(mtd, bufpoi, oob_skip);
	bufpoi += oob_skip;

	/* OOB ECC */
	for (i = 0; i < ecc_steps; i++) {
		pos = ecc_size + i * (ecc_size + ecc_bytes);
		len = ecc_bytes;

		/* ECC chunks past the page boundary shift by oob_skip */
		if (pos >= writesize)
			pos += oob_skip;
		else if (pos + len > writesize)
			len = writesize - pos;

		chip->cmdfunc(mtd, rnd_cmd, pos, -1);
		if (write)
			chip->write_buf(mtd, bufpoi, len);
		else
			chip->read_buf(mtd, bufpoi, len);
		bufpoi += len;
		/* a chunk split by the page boundary continues after it */
		if (len < ecc_bytes) {
			len = ecc_bytes - len;
			chip->cmdfunc(mtd, rnd_cmd, writesize + oob_skip, -1);
			if (write)
				chip->write_buf(mtd, bufpoi, len);
			else
				chip->read_buf(mtd, bufpoi, len);
			bufpoi += len;
		}
	}

	/* OOB free */
	len = oobsize - (bufpoi - chip->oob_poi);
	chip->cmdfunc(mtd, rnd_cmd, size - len, -1);
	if (write)
		chip->write_buf(mtd, bufpoi, len);
	else
		chip->read_buf(mtd, bufpoi, len);
}
 703
/*
 * ->read_page_raw() hook: do a raw (ECC off, spare included) transfer of
 * the whole page into denali->buf, then de-interleave the controller's
 * syndrome payload/ECC layout into the caller's @buf and chip->oob_poi.
 */
static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
				uint8_t *buf, int oob_required, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	int writesize = mtd->writesize;
	int oobsize = mtd->oobsize;
	int ecc_steps = chip->ecc.steps;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	void *dma_buf = denali->buf;
	int oob_skip = denali->oob_skip_bytes;
	size_t size = writesize + oobsize;
	int ret, i, pos, len;

	ret = denali_data_xfer(denali, dma_buf, size, page, 1, 0);
	if (ret)
		return ret;

	/* Arrange the buffer for syndrome payload/ecc layout */
	if (buf) {
		for (i = 0; i < ecc_steps; i++) {
			pos = i * (ecc_size + ecc_bytes);
			len = ecc_size;

			/* payload past the page boundary shifts by oob_skip */
			if (pos >= writesize)
				pos += oob_skip;
			else if (pos + len > writesize)
				len = writesize - pos;

			memcpy(buf, dma_buf + pos, len);
			buf += len;
			/* a split chunk continues right after the skip area */
			if (len < ecc_size) {
				len = ecc_size - len;
				memcpy(buf, dma_buf + writesize + oob_skip,
				       len);
				buf += len;
			}
		}
	}

	if (oob_required) {
		uint8_t *oob = chip->oob_poi;

		/* BBM at the beginning of the OOB area */
		memcpy(oob, dma_buf + writesize, oob_skip);
		oob += oob_skip;

		/* OOB ECC */
		for (i = 0; i < ecc_steps; i++) {
			pos = ecc_size + i * (ecc_size + ecc_bytes);
			len = ecc_bytes;

			if (pos >= writesize)
				pos += oob_skip;
			else if (pos + len > writesize)
				len = writesize - pos;

			memcpy(oob, dma_buf + pos, len);
			oob += len;
			if (len < ecc_bytes) {
				len = ecc_bytes - len;
				memcpy(oob, dma_buf + writesize + oob_skip,
				       len);
				oob += len;
			}
		}

		/* OOB free */
		len = oobsize - (oob - chip->oob_poi);
		memcpy(oob, dma_buf + size - len, len);
	}

	return 0;
}
 778
 779static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
 780                           int page)
 781{
 782        denali_oob_xfer(mtd, chip, page, 0);
 783
 784        return 0;
 785}
 786
/*
 * ->write_oob() hook: program only the OOB area of @page.
 * Returns 0 on success, -EIO if the chip reports a program failure.
 */
static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
			    int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	int status;

	/* clear stale events so waitfunc sees only this operation's IRQs */
	denali_reset_irq(denali);

	denali_oob_xfer(mtd, chip, page, 1);

	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
	status = chip->waitfunc(mtd, chip);

	return status & NAND_STATUS_FAIL ? -EIO : 0;
}
 802
/*
 * ->read_page() hook with controller ECC enabled.  After the transfer,
 * run the hardware or software ECC fixup; if any sector was reported
 * uncorrectable, re-read the OOB and distinguish erased pages from real
 * failures.  Returns max bitflips or a negative error.
 */
static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
			    uint8_t *buf, int oob_required, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	unsigned long uncor_ecc_flags = 0;
	int stat = 0;
	int ret;

	ret = denali_data_xfer(denali, buf, mtd->writesize, page, 0, 0);
	/* -EBADMSG is handled below; any other error is fatal */
	if (ret && ret != -EBADMSG)
		return ret;

	if (denali->caps & DENALI_CAP_HW_ECC_FIXUP)
		stat = denali_hw_ecc_fixup(mtd, denali, &uncor_ecc_flags);
	else if (ret == -EBADMSG)
		stat = denali_sw_ecc_fixup(mtd, denali, &uncor_ecc_flags, buf);

	if (stat < 0)
		return stat;

	if (uncor_ecc_flags) {
		/* need the raw OOB to tell erased pages from bad ones */
		ret = denali_read_oob(mtd, chip, page);
		if (ret)
			return ret;

		stat = denali_check_erased_page(mtd, chip, buf,
						uncor_ecc_flags, stat);
	}

	return stat;
}
 834
/*
 * Raw (ECC engine bypassed) full-page write.
 *
 * On flash, the page is stored in syndrome order: each ECC step's payload
 * is immediately followed by its ECC bytes, and the controller skips
 * oob_skip_bytes at the start of the spare area (bad block marker region).
 * This function rearranges the caller's linear main/OOB buffers into that
 * on-flash layout inside denali->buf, then performs one raw transfer.
 */
static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
				 const uint8_t *buf, int oob_required, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	int writesize = mtd->writesize;
	int oobsize = mtd->oobsize;
	int ecc_steps = chip->ecc.steps;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	void *dma_buf = denali->buf;
	int oob_skip = denali->oob_skip_bytes;
	size_t size = writesize + oobsize;
	int i, pos, len;

	/*
	 * Fill the buffer with 0xff first except the full page transfer.
	 * This simplifies the logic.
	 */
	if (!buf || !oob_required)
		memset(dma_buf, 0xff, size);

	/* Arrange the buffer for syndrome payload/ecc layout */
	if (buf) {
		for (i = 0; i < ecc_steps; i++) {
			pos = i * (ecc_size + ecc_bytes);
			len = ecc_size;

			/* A chunk may sit entirely past the main area, or
			 * straddle the main/spare boundary; account for the
			 * skipped BBM bytes in the first case, clamp in the
			 * second. */
			if (pos >= writesize)
				pos += oob_skip;
			else if (pos + len > writesize)
				len = writesize - pos;

			memcpy(dma_buf + pos, buf, len);
			buf += len;
			if (len < ecc_size) {
				/* Remainder of a straddling chunk continues
				 * just past the skipped BBM area. */
				len = ecc_size - len;
				memcpy(dma_buf + writesize + oob_skip, buf,
				       len);
				buf += len;
			}
		}
	}

	if (oob_required) {
		const uint8_t *oob = chip->oob_poi;

		/* BBM at the beginning of the OOB area */
		memcpy(dma_buf + writesize, oob, oob_skip);
		oob += oob_skip;

		/* OOB ECC */
		for (i = 0; i < ecc_steps; i++) {
			pos = ecc_size + i * (ecc_size + ecc_bytes);
			len = ecc_bytes;

			/* Same boundary handling as the payload loop above. */
			if (pos >= writesize)
				pos += oob_skip;
			else if (pos + len > writesize)
				len = writesize - pos;

			memcpy(dma_buf + pos, oob, len);
			oob += len;
			if (len < ecc_bytes) {
				len = ecc_bytes - len;
				memcpy(dma_buf + writesize + oob_skip, oob,
				       len);
				oob += len;
			}
		}

		/* OOB free */
		len = oobsize - (oob - chip->oob_poi);
		memcpy(dma_buf + size - len, oob, len);
	}

	/* Single raw transfer of main + spare area in one go. */
	return denali_data_xfer(denali, dma_buf, size, page, 1, 1);
}
 912
 913static int denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
 914                             const uint8_t *buf, int oob_required, int page)
 915{
 916        struct denali_nand_info *denali = mtd_to_denali(mtd);
 917
 918        return denali_data_xfer(denali, (void *)buf, mtd->writesize,
 919                                page, 0, 1);
 920}
 921
 922static void denali_select_chip(struct mtd_info *mtd, int chip)
 923{
 924        struct denali_nand_info *denali = mtd_to_denali(mtd);
 925
 926        denali->active_bank = chip;
 927}
 928
 929static int denali_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
 930{
 931        struct denali_nand_info *denali = mtd_to_denali(mtd);
 932        uint32_t irq_status;
 933
 934        /* R/B# pin transitioned from low to high? */
 935        irq_status = denali_wait_for_irq(denali, INTR__INT_ACT);
 936
 937        return irq_status & INTR__INT_ACT ? 0 : NAND_STATUS_FAIL;
 938}
 939
 940static int denali_erase(struct mtd_info *mtd, int page)
 941{
 942        struct denali_nand_info *denali = mtd_to_denali(mtd);
 943        uint32_t irq_status;
 944
 945        denali_reset_irq(denali);
 946
 947        denali->host_write(denali, DENALI_MAP10 | DENALI_BANK(denali) | page,
 948                           DENALI_ERASE);
 949
 950        /* wait for erase to complete or failure to occur */
 951        irq_status = denali_wait_for_irq(denali,
 952                                         INTR__ERASE_COMP | INTR__ERASE_FAIL);
 953
 954        return irq_status & INTR__ERASE_COMP ? 0 : NAND_STATUS_FAIL;
 955}
 956
/*
 * Translate the generic SDR timing set into the Denali timing registers.
 * Every register is updated read-modify-write so that unrelated fields
 * keep their (possibly firmware-programmed) values.
 */
static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
				       const struct nand_data_interface *conf)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	const struct nand_sdr_timings *timings;
	unsigned long t_clk;
	int acc_clks, re_2_we, re_2_re, we_2_re, addr_2_data;
	int rdwr_en_lo, rdwr_en_hi, rdwr_en_lo_hi, cs_setup;
	int addr_2_data_mask;
	uint32_t tmp;

	timings = nand_get_sdr_timings(conf);
	if (IS_ERR(timings))
		return PTR_ERR(timings);

	/* clk_x period in picoseconds */
	t_clk = DIV_ROUND_DOWN_ULL(1000000000000ULL, denali->clk_x_rate);
	if (!t_clk)
		return -EINVAL;

	/* Only validate; do not touch the hardware. */
	if (chipnr == NAND_DATA_IFACE_CHECK_ONLY)
		return 0;

	/* tREA -> ACC_CLKS */
	acc_clks = DIV_ROUND_UP(timings->tREA_max, t_clk);
	acc_clks = min_t(int, acc_clks, ACC_CLKS__VALUE);

	tmp = ioread32(denali->reg + ACC_CLKS);
	tmp &= ~ACC_CLKS__VALUE;
	tmp |= FIELD_PREP(ACC_CLKS__VALUE, acc_clks);
	iowrite32(tmp, denali->reg + ACC_CLKS);

	/* tRHW -> RE_2_WE */
	re_2_we = DIV_ROUND_UP(timings->tRHW_min, t_clk);
	re_2_we = min_t(int, re_2_we, RE_2_WE__VALUE);

	tmp = ioread32(denali->reg + RE_2_WE);
	tmp &= ~RE_2_WE__VALUE;
	tmp |= FIELD_PREP(RE_2_WE__VALUE, re_2_we);
	iowrite32(tmp, denali->reg + RE_2_WE);

	/* tRHZ -> RE_2_RE */
	re_2_re = DIV_ROUND_UP(timings->tRHZ_max, t_clk);
	re_2_re = min_t(int, re_2_re, RE_2_RE__VALUE);

	tmp = ioread32(denali->reg + RE_2_RE);
	tmp &= ~RE_2_RE__VALUE;
	tmp |= FIELD_PREP(RE_2_RE__VALUE, re_2_re);
	iowrite32(tmp, denali->reg + RE_2_RE);

	/*
	 * tCCS, tWHR -> WE_2_RE
	 *
	 * With WE_2_RE properly set, the Denali controller automatically takes
	 * care of the delay; the driver need not set NAND_WAIT_TCCS.
	 */
	we_2_re = DIV_ROUND_UP(max(timings->tCCS_min, timings->tWHR_min),
			       t_clk);
	we_2_re = min_t(int, we_2_re, TWHR2_AND_WE_2_RE__WE_2_RE);

	tmp = ioread32(denali->reg + TWHR2_AND_WE_2_RE);
	tmp &= ~TWHR2_AND_WE_2_RE__WE_2_RE;
	tmp |= FIELD_PREP(TWHR2_AND_WE_2_RE__WE_2_RE, we_2_re);
	iowrite32(tmp, denali->reg + TWHR2_AND_WE_2_RE);

	/* tADL -> ADDR_2_DATA */

	/* for older versions, ADDR_2_DATA is only 6 bit wide */
	addr_2_data_mask = TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA;
	if (denali->revision < 0x0501)
		addr_2_data_mask >>= 1;

	addr_2_data = DIV_ROUND_UP(timings->tADL_min, t_clk);
	addr_2_data = min_t(int, addr_2_data, addr_2_data_mask);

	tmp = ioread32(denali->reg + TCWAW_AND_ADDR_2_DATA);
	tmp &= ~TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA;
	tmp |= FIELD_PREP(TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA, addr_2_data);
	iowrite32(tmp, denali->reg + TCWAW_AND_ADDR_2_DATA);

	/* tREH, tWH -> RDWR_EN_HI_CNT */
	rdwr_en_hi = DIV_ROUND_UP(max(timings->tREH_min, timings->tWH_min),
				  t_clk);
	rdwr_en_hi = min_t(int, rdwr_en_hi, RDWR_EN_HI_CNT__VALUE);

	tmp = ioread32(denali->reg + RDWR_EN_HI_CNT);
	tmp &= ~RDWR_EN_HI_CNT__VALUE;
	tmp |= FIELD_PREP(RDWR_EN_HI_CNT__VALUE, rdwr_en_hi);
	iowrite32(tmp, denali->reg + RDWR_EN_HI_CNT);

	/*
	 * tRP, tWP -> RDWR_EN_LO_CNT
	 *
	 * The low phase must also be long enough that low + high covers the
	 * full cycle time (tRC/tWC), hence the rdwr_en_lo_hi adjustment.
	 */
	rdwr_en_lo = DIV_ROUND_UP(max(timings->tRP_min, timings->tWP_min),
				  t_clk);
	rdwr_en_lo_hi = DIV_ROUND_UP(max(timings->tRC_min, timings->tWC_min),
				     t_clk);
	rdwr_en_lo_hi = max(rdwr_en_lo_hi, DENALI_CLK_X_MULT);
	rdwr_en_lo = max(rdwr_en_lo, rdwr_en_lo_hi - rdwr_en_hi);
	rdwr_en_lo = min_t(int, rdwr_en_lo, RDWR_EN_LO_CNT__VALUE);

	tmp = ioread32(denali->reg + RDWR_EN_LO_CNT);
	tmp &= ~RDWR_EN_LO_CNT__VALUE;
	tmp |= FIELD_PREP(RDWR_EN_LO_CNT__VALUE, rdwr_en_lo);
	iowrite32(tmp, denali->reg + RDWR_EN_LO_CNT);

	/* tCS, tCEA -> CS_SETUP_CNT */
	cs_setup = max3((int)DIV_ROUND_UP(timings->tCS_min, t_clk) - rdwr_en_lo,
			(int)DIV_ROUND_UP(timings->tCEA_max, t_clk) - acc_clks,
			0);
	cs_setup = min_t(int, cs_setup, CS_SETUP_CNT__VALUE);

	tmp = ioread32(denali->reg + CS_SETUP_CNT);
	tmp &= ~CS_SETUP_CNT__VALUE;
	tmp |= FIELD_PREP(CS_SETUP_CNT__VALUE, cs_setup);
	iowrite32(tmp, denali->reg + CS_SETUP_CNT);

	return 0;
}
1074
1075static void denali_reset_banks(struct denali_nand_info *denali)
1076{
1077        u32 irq_status;
1078        int i;
1079
1080        for (i = 0; i < denali->max_banks; i++) {
1081                denali->active_bank = i;
1082
1083                denali_reset_irq(denali);
1084
1085                iowrite32(DEVICE_RESET__BANK(i),
1086                          denali->reg + DEVICE_RESET);
1087
1088                irq_status = denali_wait_for_irq(denali,
1089                        INTR__RST_COMP | INTR__INT_ACT | INTR__TIME_OUT);
1090                if (!(irq_status & INTR__INT_ACT))
1091                        break;
1092        }
1093
1094        dev_dbg(denali->dev, "%d chips connected\n", i);
1095        denali->max_banks = i;
1096}
1097
/*
 * One-time controller setup performed at probe, before NAND scanning.
 * Reads back firmware-programmed configuration where available and
 * programs the remaining static registers.
 */
static void denali_hw_init(struct denali_nand_info *denali)
{
	/*
	 * The REVISION register may not be reliable.  Platforms are allowed to
	 * override it.
	 */
	if (!denali->revision)
		denali->revision = swab16(ioread32(denali->reg + REVISION));

	/*
	 * Read back how many bytes the controller skips before writing ECC
	 * code in the OOB area; this register may already have been set by
	 * firmware.  If this value is 0, just let it be.
	 */
	denali->oob_skip_bytes = ioread32(denali->reg + SPARE_AREA_SKIP_BYTES);
	denali_detect_max_banks(denali);
	/* NOTE(review): 0x0F presumably enables R/B# for 4 banks — confirm
	 * against the controller datasheet. */
	iowrite32(0x0F, denali->reg + RB_PIN_ENABLED);
	iowrite32(CHIP_EN_DONT_CARE__FLAG, denali->reg + CHIP_ENABLE_DONT_CARE);

	iowrite32(0xffff, denali->reg + SPARE_AREA_MARKER);
}
1120
int denali_calc_ecc_bytes(int step_size, int strength)
{
	/*
	 * BCH code: each correctable bit costs fls(step_size * 8) parity
	 * bits.  Denali requires ecc.bytes to be a multiple of 2, so round
	 * the total up to whole 16-bit words.
	 */
	int ecc_bits = strength * fls(step_size * 8);

	return DIV_ROUND_UP(ecc_bits, 16) * 2;
}
EXPORT_SYMBOL(denali_calc_ecc_bytes);
1127
1128static int denali_ecc_setup(struct mtd_info *mtd, struct nand_chip *chip,
1129                            struct denali_nand_info *denali)
1130{
1131        int oobavail = mtd->oobsize - denali->oob_skip_bytes;
1132        int ret;
1133
1134        /*
1135         * If .size and .strength are already set (usually by DT),
1136         * check if they are supported by this controller.
1137         */
1138        if (chip->ecc.size && chip->ecc.strength)
1139                return nand_check_ecc_caps(chip, denali->ecc_caps, oobavail);
1140
1141        /*
1142         * We want .size and .strength closest to the chip's requirement
1143         * unless NAND_ECC_MAXIMIZE is requested.
1144         */
1145        if (!(chip->ecc.options & NAND_ECC_MAXIMIZE)) {
1146                ret = nand_match_ecc_req(chip, denali->ecc_caps, oobavail);
1147                if (!ret)
1148                        return 0;
1149        }
1150
1151        /* Max ECC strength is the last thing we can do */
1152        return nand_maximize_ecc(chip, denali->ecc_caps, oobavail);
1153}
1154
1155static int denali_ooblayout_ecc(struct mtd_info *mtd, int section,
1156                                struct mtd_oob_region *oobregion)
1157{
1158        struct denali_nand_info *denali = mtd_to_denali(mtd);
1159        struct nand_chip *chip = mtd_to_nand(mtd);
1160
1161        if (section)
1162                return -ERANGE;
1163
1164        oobregion->offset = denali->oob_skip_bytes;
1165        oobregion->length = chip->ecc.total;
1166
1167        return 0;
1168}
1169
1170static int denali_ooblayout_free(struct mtd_info *mtd, int section,
1171                                 struct mtd_oob_region *oobregion)
1172{
1173        struct denali_nand_info *denali = mtd_to_denali(mtd);
1174        struct nand_chip *chip = mtd_to_nand(mtd);
1175
1176        if (section)
1177                return -ERANGE;
1178
1179        oobregion->offset = chip->ecc.total + denali->oob_skip_bytes;
1180        oobregion->length = mtd->oobsize - oobregion->offset;
1181
1182        return 0;
1183}
1184
/* OOB layout: one ECC region after the BBM skip, remainder free. */
static const struct mtd_ooblayout_ops denali_ooblayout_ops = {
	.ecc = denali_ooblayout_ecc,
	.free = denali_ooblayout_free,
};
1189
/*
 * Detect the "two x8 chips wired in parallel on an x16 bus" configuration
 * and, if present, double the logical geometry reported to the core
 * framework.  Returns 0 on success, -EINVAL for unsupported counts.
 */
static int denali_multidev_fixup(struct denali_nand_info *denali)
{
	struct nand_chip *chip = &denali->nand;
	struct mtd_info *mtd = nand_to_mtd(chip);

	/*
	 * Support for multi device:
	 * When the IP configuration is x16 capable and two x8 chips are
	 * connected in parallel, DEVICES_CONNECTED should be set to 2.
	 * In this case, the core framework knows nothing about this fact,
	 * so we should tell it the _logical_ pagesize and anything necessary.
	 */
	denali->devs_per_cs = ioread32(denali->reg + DEVICES_CONNECTED);

	/*
	 * On some SoCs, DEVICES_CONNECTED is not auto-detected.
	 * For those, DEVICES_CONNECTED is left to 0.  Set 1 if it is the case.
	 */
	if (denali->devs_per_cs == 0) {
		denali->devs_per_cs = 1;
		iowrite32(1, denali->reg + DEVICES_CONNECTED);
	}

	/* Single device: nothing to fix up. */
	if (denali->devs_per_cs == 1)
		return 0;

	/* Only 1 or 2 devices per chip select are supported. */
	if (denali->devs_per_cs != 2) {
		dev_err(denali->dev, "unsupported number of devices %d\n",
			denali->devs_per_cs);
		return -EINVAL;
	}

	/*
	 * 2 chips in parallel: every logical geometry parameter doubles
	 * (sizes and ECC via shifts; the *_shift fields via +1).
	 */
	mtd->size <<= 1;
	mtd->erasesize <<= 1;
	mtd->writesize <<= 1;
	mtd->oobsize <<= 1;
	chip->chipsize <<= 1;
	chip->page_shift += 1;
	chip->phys_erase_shift += 1;
	chip->bbt_erase_shift += 1;
	chip->chip_shift += 1;
	chip->pagemask <<= 1;
	chip->ecc.size <<= 1;
	chip->ecc.bytes <<= 1;
	chip->ecc.strength <<= 1;
	denali->oob_skip_bytes <<= 1;

	return 0;
}
1240
/*
 * Common probe-time initialization shared by the bus-specific front ends.
 * The caller must have filled in denali->reg, ->irq, ->dev, ->caps and
 * ->ecc_caps (and optionally ->revision / ->clk_x_rate) before calling.
 * On failure, everything acquired here is released before returning.
 */
int denali_init(struct denali_nand_info *denali)
{
	struct nand_chip *chip = &denali->nand;
	struct mtd_info *mtd = nand_to_mtd(chip);
	u32 features = ioread32(denali->reg + FEATURES);
	int ret;

	mtd->dev.parent = denali->dev;
	denali_hw_init(denali);

	init_completion(&denali->complete);
	spin_lock_init(&denali->irq_lock);

	/* Drop any interrupt state left behind by the boot firmware. */
	denali_clear_irq_all(denali);

	ret = devm_request_irq(denali->dev, denali->irq, denali_isr,
			       IRQF_SHARED, DENALI_NAND_NAME, denali);
	if (ret) {
		dev_err(denali->dev, "Unable to request IRQ\n");
		return ret;
	}

	denali_enable_irq(denali);
	denali_reset_banks(denali);

	denali->active_bank = DENALI_INVALID_BANK;

	nand_set_flash_node(chip, denali->dev->of_node);
	/* Fallback to the default name if DT did not give "label" property */
	if (!mtd->name)
		mtd->name = "denali-nand";

	chip->select_chip = denali_select_chip;
	chip->read_byte = denali_read_byte;
	chip->write_byte = denali_write_byte;
	chip->read_word = denali_read_word;
	chip->cmd_ctrl = denali_cmd_ctrl;
	chip->dev_ready = denali_dev_ready;
	chip->waitfunc = denali_waitfunc;

	/* The IP supports either indexed or direct host register access. */
	if (features & FEATURES__INDEX_ADDR) {
		denali->host_read = denali_indexed_read;
		denali->host_write = denali_indexed_write;
	} else {
		denali->host_read = denali_direct_read;
		denali->host_write = denali_direct_write;
	}

	/* clk rate info is needed for setup_data_interface */
	if (denali->clk_x_rate)
		chip->setup_data_interface = denali_setup_data_interface;

	ret = nand_scan_ident(mtd, denali->max_banks, NULL);
	if (ret)
		goto disable_irq;

	if (ioread32(denali->reg + FEATURES) & FEATURES__DMA)
		denali->dma_avail = 1;

	/* DMA stays enabled only if the mask can actually be set. */
	if (denali->dma_avail) {
		int dma_bit = denali->caps & DENALI_CAP_DMA_64BIT ? 64 : 32;

		ret = dma_set_mask(denali->dev, DMA_BIT_MASK(dma_bit));
		if (ret) {
			dev_info(denali->dev,
				 "Failed to set DMA mask. Disabling DMA.\n");
			denali->dma_avail = 0;
		}
	}

	if (denali->dma_avail) {
		chip->options |= NAND_USE_BOUNCE_BUFFER;
		chip->buf_align = 16;
		if (denali->caps & DENALI_CAP_DMA_64BIT)
			denali->setup_dma = denali_setup_dma64;
		else
			denali->setup_dma = denali_setup_dma32;
	}

	chip->bbt_options |= NAND_BBT_USE_FLASH;
	chip->bbt_options |= NAND_BBT_NO_OOB;
	chip->ecc.mode = NAND_ECC_HW_SYNDROME;
	chip->options |= NAND_NO_SUBPAGE_WRITE;

	ret = denali_ecc_setup(mtd, chip, denali);
	if (ret) {
		dev_err(denali->dev, "Failed to setup ECC settings.\n");
		goto disable_irq;
	}

	dev_dbg(denali->dev,
		"chosen ECC settings: step=%d, strength=%d, bytes=%d\n",
		chip->ecc.size, chip->ecc.strength, chip->ecc.bytes);

	/* Program the chosen geometry/ECC settings into the controller. */
	iowrite32(FIELD_PREP(ECC_CORRECTION__ERASE_THRESHOLD, 1) |
		  FIELD_PREP(ECC_CORRECTION__VALUE, chip->ecc.strength),
		  denali->reg + ECC_CORRECTION);
	iowrite32(mtd->erasesize / mtd->writesize,
		  denali->reg + PAGES_PER_BLOCK);
	iowrite32(chip->options & NAND_BUSWIDTH_16 ? 1 : 0,
		  denali->reg + DEVICE_WIDTH);
	iowrite32(chip->options & NAND_ROW_ADDR_3 ? 0 : TWO_ROW_ADDR_CYCLES__FLAG,
		  denali->reg + TWO_ROW_ADDR_CYCLES);
	iowrite32(mtd->writesize, denali->reg + DEVICE_MAIN_AREA_SIZE);
	iowrite32(mtd->oobsize, denali->reg + DEVICE_SPARE_AREA_SIZE);

	iowrite32(chip->ecc.size, denali->reg + CFG_DATA_BLOCK_SIZE);
	iowrite32(chip->ecc.size, denali->reg + CFG_LAST_DATA_BLOCK_SIZE);
	/* chip->ecc.steps is set by nand_scan_tail(); not available here */
	iowrite32(mtd->writesize / chip->ecc.size,
		  denali->reg + CFG_NUM_DATA_BLOCKS);

	mtd_set_ooblayout(mtd, &denali_ooblayout_ops);

	if (chip->options & NAND_BUSWIDTH_16) {
		chip->read_buf = denali_read_buf16;
		chip->write_buf = denali_write_buf16;
	} else {
		chip->read_buf = denali_read_buf;
		chip->write_buf = denali_write_buf;
	}
	chip->ecc.options |= NAND_ECC_CUSTOM_PAGE_ACCESS;
	chip->ecc.read_page = denali_read_page;
	chip->ecc.read_page_raw = denali_read_page_raw;
	chip->ecc.write_page = denali_write_page;
	chip->ecc.write_page_raw = denali_write_page_raw;
	chip->ecc.read_oob = denali_read_oob;
	chip->ecc.write_oob = denali_write_oob;
	chip->erase = denali_erase;

	ret = denali_multidev_fixup(denali);
	if (ret)
		goto disable_irq;

	/*
	 * This buffer is DMA-mapped by denali_{read,write}_page_raw.  Do not
	 * use devm_kmalloc() because the memory allocated by devm_ does not
	 * guarantee DMA-safe alignment.
	 */
	denali->buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
	if (!denali->buf) {
		ret = -ENOMEM;
		goto disable_irq;
	}

	ret = nand_scan_tail(mtd);
	if (ret)
		goto free_buf;

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret) {
		dev_err(denali->dev, "Failed to register MTD: %d\n", ret);
		goto free_buf;
	}
	return 0;

free_buf:
	kfree(denali->buf);
disable_irq:
	denali_disable_irq(denali);

	return ret;
}
EXPORT_SYMBOL(denali_init);
1405
/*
 * Tear down in reverse order of denali_init(): unregister the NAND/MTD
 * device, free the raw-transfer buffer, then quiesce controller IRQs.
 */
void denali_remove(struct denali_nand_info *denali)
{
	struct mtd_info *mtd = nand_to_mtd(&denali->nand);

	nand_release(mtd);
	kfree(denali->buf);
	denali_disable_irq(denali);
}
EXPORT_SYMBOL(denali_remove);
1415