linux/drivers/mtd/nand/denali.c
   1/*
   2 * NAND Flash Controller Device Driver
   3 * Copyright © 2009-2010, Intel Corporation and its suppliers.
   4 *
   5 * This program is free software; you can redistribute it and/or modify it
   6 * under the terms and conditions of the GNU General Public License,
   7 * version 2, as published by the Free Software Foundation.
   8 *
   9 * This program is distributed in the hope it will be useful, but WITHOUT
  10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  12 * more details.
  13 *
  14 * You should have received a copy of the GNU General Public License along with
  15 * this program; if not, write to the Free Software Foundation, Inc.,
  16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  17 *
  18 */
  19#include <linux/interrupt.h>
  20#include <linux/delay.h>
  21#include <linux/dma-mapping.h>
  22#include <linux/wait.h>
  23#include <linux/mutex.h>
  24#include <linux/slab.h>
  25#include <linux/mtd/mtd.h>
  26#include <linux/module.h>
  27
  28#include "denali.h"
  29
  30MODULE_LICENSE("GPL");
  31
  32/* We define a module parameter that allows the user to override
  33 * the hardware-detected setting and decide what timing mode should be used.
  34 */
  35#define NAND_DEFAULT_TIMINGS    -1
  36
  37static int onfi_timing_mode = NAND_DEFAULT_TIMINGS;
  38module_param(onfi_timing_mode, int, S_IRUGO);
  39MODULE_PARM_DESC(onfi_timing_mode, "Overrides default ONFI setting."
  40                        " -1 indicates use default timings");
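    /* Usage sketch (illustrative only; it assumes the module is built with
     * the name "denali"): forcing ONFI timing mode 2 at load time would be
     *
     *     modprobe denali onfi_timing_mode=2
     *
     * or, for a built-in driver, denali.onfi_timing_mode=2 on the kernel
     * command line. S_IRUGO makes the parameter visible but not writable
     * via sysfs. */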
  41
  42#define DENALI_NAND_NAME    "denali-nand"
  43
  44/* We define a macro here that combines all interrupts this driver uses into
  45 * a single constant value, for convenience. */
  46#define DENALI_IRQ_ALL  (INTR_STATUS__DMA_CMD_COMP | \
  47                        INTR_STATUS__ECC_TRANSACTION_DONE | \
  48                        INTR_STATUS__ECC_ERR | \
  49                        INTR_STATUS__PROGRAM_FAIL | \
  50                        INTR_STATUS__LOAD_COMP | \
  51                        INTR_STATUS__PROGRAM_COMP | \
  52                        INTR_STATUS__TIME_OUT | \
  53                        INTR_STATUS__ERASE_FAIL | \
  54                        INTR_STATUS__RST_COMP | \
  55                        INTR_STATUS__ERASE_COMP)
  56
  57/* indicates whether the internal value for the flash bank is
  58 * valid */
  59#define CHIP_SELECT_INVALID     -1
  60
  61#define SUPPORT_8BITECC         1
  62
  63/* This macro divides two integers and rounds fractional values up
  64 * to the nearest integer value. */
  65#define CEIL_DIV(X, Y) (((X)%(Y)) ? ((X)/(Y)+1) : ((X)/(Y)))
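    /* Worked example: CEIL_DIV(530, 512) evaluates to 2 and CEIL_DIV(512, 512)
     * evaluates to 1, i.e. for positive operands it behaves like the kernel's
     * DIV_ROUND_UP(). */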
  66
  67/* this macro allows us to convert from an MTD structure to our own
  68 * device context (denali) structure.
  69 */
  70#define mtd_to_denali(m) container_of(m, struct denali_nand_info, mtd)
  71
  72/* These constants are defined by the driver to enable common driver
  73 * configuration options. */
  74#define SPARE_ACCESS            0x41
  75#define MAIN_ACCESS             0x42
  76#define MAIN_SPARE_ACCESS       0x43
  77
  78#define DENALI_READ     0
  79#define DENALI_WRITE    0x100
  80
  81/* types of device accesses. We can issue commands and get status */
  82#define COMMAND_CYCLE   0
  83#define ADDR_CYCLE      1
  84#define STATUS_CYCLE    2
  85
  86/* this is a helper macro that allows us to
  87 * format the bank into the proper bits for the controller */
  88#define BANK(x) ((x) << 24)
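    /* For example, BANK(2) evaluates to 0x02000000; throughout this driver the
     * result is OR'd with a MODE_xx constant and a page/block address to form
     * the index addresses written to denali->flash_mem. */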
  89
  90/* forward declarations */
  91static void clear_interrupts(struct denali_nand_info *denali);
  92static uint32_t wait_for_irq(struct denali_nand_info *denali,
  93                                                        uint32_t irq_mask);
  94static void denali_irq_enable(struct denali_nand_info *denali,
  95                                                        uint32_t int_mask);
  96static uint32_t read_interrupt_status(struct denali_nand_info *denali);
  97
  98/* Certain operations for the denali NAND controller use
  99 * an indexed mode to read/write data. The operation is
 100 * performed by writing the address value of the command
 101 * to the device memory followed by the data. This function
 102 * abstracts this common operation.
 103*/
 104static void index_addr(struct denali_nand_info *denali,
 105                                uint32_t address, uint32_t data)
 106{
 107        iowrite32(address, denali->flash_mem);
 108        iowrite32(data, denali->flash_mem + 0x10);
 109}
 110
 111/* Perform an indexed read of the device */
 112static void index_addr_read_data(struct denali_nand_info *denali,
 113                                 uint32_t address, uint32_t *pdata)
 114{
 115        iowrite32(address, denali->flash_mem);
 116        *pdata = ioread32(denali->flash_mem + 0x10);
 117}
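    /* Usage sketch (illustrative only; it mirrors what find_valid_banks() and
     * denali_cmdfunc() do below, where 0x90 is the NAND Read ID command and
     * the low two bits select the cycle type: 0 = command, 1 = address,
     * 2 = data/status):
     *
     *     uint32_t id;
     *     uint32_t base = MODE_11 | BANK(bank);
     *
     *     index_addr(denali, base | 0, 0x90);
     *     index_addr(denali, base | 1, 0);
     *     index_addr_read_data(denali, base | 2, &id);
     */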
 118
 119/* We need to buffer some data for some of the NAND core routines.
 120 * These helpers manage that buffering. */
 121static void reset_buf(struct denali_nand_info *denali)
 122{
 123        denali->buf.head = denali->buf.tail = 0;
 124}
 125
 126static void write_byte_to_buf(struct denali_nand_info *denali, uint8_t byte)
 127{
 128        denali->buf.buf[denali->buf.tail++] = byte;
 129}
 130
 131/* reads the device status (only the write-protect bit is reported) */
 132static void read_status(struct denali_nand_info *denali)
 133{
 134        uint32_t cmd = 0x0;
 135
 136        /* initialize the data buffer to store status */
 137        reset_buf(denali);
 138
 139        cmd = ioread32(denali->flash_reg + WRITE_PROTECT);
 140        if (cmd)
 141                write_byte_to_buf(denali, NAND_STATUS_WP);
 142        else
 143                write_byte_to_buf(denali, 0);
 144}
 145
 146/* resets a specific device connected to the core */
 147static void reset_bank(struct denali_nand_info *denali)
 148{
 149        uint32_t irq_status = 0;
 150        uint32_t irq_mask = INTR_STATUS__RST_COMP |
 151                            INTR_STATUS__TIME_OUT;
 152
 153        clear_interrupts(denali);
 154
 155        iowrite32(1 << denali->flash_bank, denali->flash_reg + DEVICE_RESET);
 156
 157        irq_status = wait_for_irq(denali, irq_mask);
 158
 159        if (irq_status & INTR_STATUS__TIME_OUT)
 160                dev_err(denali->dev, "reset bank failed.\n");
 161}
 162
 163/* Reset the flash controller */
 164static uint16_t denali_nand_reset(struct denali_nand_info *denali)
 165{
 166        uint32_t i;
 167
 168        dev_dbg(denali->dev, "%s, Line %d, Function: %s\n",
 169                       __FILE__, __LINE__, __func__);
 170
 171        for (i = 0 ; i < denali->max_banks; i++)
 172                iowrite32(INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT,
 173                denali->flash_reg + INTR_STATUS(i));
 174
 175        for (i = 0 ; i < denali->max_banks; i++) {
 176                iowrite32(1 << i, denali->flash_reg + DEVICE_RESET);
 177                while (!(ioread32(denali->flash_reg +
 178                                INTR_STATUS(i)) &
 179                        (INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT)))
 180                        cpu_relax();
 181                if (ioread32(denali->flash_reg + INTR_STATUS(i)) &
 182                        INTR_STATUS__TIME_OUT)
 183                        dev_dbg(denali->dev,
 184                        "NAND Reset operation timed out on bank %d\n", i);
 185        }
 186
 187        for (i = 0; i < denali->max_banks; i++)
 188                iowrite32(INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT,
 189                        denali->flash_reg + INTR_STATUS(i));
 190
 191        return PASS;
 192}
 193
 194/* this routine calculates the ONFI timing values for a given mode and
 195 * programs the clocking register accordingly. The mode is determined by
 196 * the get_onfi_nand_para routine.
 197 */
 198static void nand_onfi_timing_set(struct denali_nand_info *denali,
 199                                                                uint16_t mode)
 200{
 201        uint16_t Trea[6] = {40, 30, 25, 20, 20, 16};
 202        uint16_t Trp[6] = {50, 25, 17, 15, 12, 10};
 203        uint16_t Treh[6] = {30, 15, 15, 10, 10, 7};
 204        uint16_t Trc[6] = {100, 50, 35, 30, 25, 20};
 205        uint16_t Trhoh[6] = {0, 15, 15, 15, 15, 15};
 206        uint16_t Trloh[6] = {0, 0, 0, 0, 5, 5};
 207        uint16_t Tcea[6] = {100, 45, 30, 25, 25, 25};
 208        uint16_t Tadl[6] = {200, 100, 100, 100, 70, 70};
 209        uint16_t Trhw[6] = {200, 100, 100, 100, 100, 100};
 210        uint16_t Trhz[6] = {200, 100, 100, 100, 100, 100};
 211        uint16_t Twhr[6] = {120, 80, 80, 60, 60, 60};
 212        uint16_t Tcs[6] = {70, 35, 25, 25, 20, 15};
 213
 214        uint16_t TclsRising = 1;
 215        uint16_t data_invalid_rhoh, data_invalid_rloh, data_invalid;
 216        uint16_t dv_window = 0;
 217        uint16_t en_lo, en_hi;
 218        uint16_t acc_clks;
 219        uint16_t addr_2_data, re_2_we, re_2_re, we_2_re, cs_cnt;
 220
 221        dev_dbg(denali->dev, "%s, Line %d, Function: %s\n",
 222                       __FILE__, __LINE__, __func__);
 223
 224        en_lo = CEIL_DIV(Trp[mode], CLK_X);
 225        en_hi = CEIL_DIV(Treh[mode], CLK_X);
 226#if ONFI_BLOOM_TIME
 227        if ((en_hi * CLK_X) < (Treh[mode] + 2))
 228                en_hi++;
 229#endif
 230
 231        if ((en_lo + en_hi) * CLK_X < Trc[mode])
 232                en_lo += CEIL_DIV((Trc[mode] - (en_lo + en_hi) * CLK_X), CLK_X);
 233
 234        if ((en_lo + en_hi) < CLK_MULTI)
 235                en_lo += CLK_MULTI - en_lo - en_hi;
 236
 237        while (dv_window < 8) {
 238                data_invalid_rhoh = en_lo * CLK_X + Trhoh[mode];
 239
 240                data_invalid_rloh = (en_lo + en_hi) * CLK_X + Trloh[mode];
 241
 242                data_invalid =
 243                    data_invalid_rhoh <
 244                    data_invalid_rloh ? data_invalid_rhoh : data_invalid_rloh;
 245
 246                dv_window = data_invalid - Trea[mode];
 247
 248                if (dv_window < 8)
 249                        en_lo++;
 250        }
 251
 252        acc_clks = CEIL_DIV(Trea[mode], CLK_X);
 253
 254        while (((acc_clks * CLK_X) - Trea[mode]) < 3)
 255                acc_clks++;
 256
 257        if ((data_invalid - acc_clks * CLK_X) < 2)
 258                dev_warn(denali->dev, "%s, Line %d: Warning!\n",
 259                        __FILE__, __LINE__);
 260
 261        addr_2_data = CEIL_DIV(Tadl[mode], CLK_X);
 262        re_2_we = CEIL_DIV(Trhw[mode], CLK_X);
 263        re_2_re = CEIL_DIV(Trhz[mode], CLK_X);
 264        we_2_re = CEIL_DIV(Twhr[mode], CLK_X);
 265        cs_cnt = CEIL_DIV((Tcs[mode] - Trp[mode]), CLK_X);
 266        if (!TclsRising)
 267                cs_cnt = CEIL_DIV(Tcs[mode], CLK_X);
 268        if (cs_cnt == 0)
 269                cs_cnt = 1;
 270
 271        if (Tcea[mode]) {
 272                while (((cs_cnt * CLK_X) + Trea[mode]) < Tcea[mode])
 273                        cs_cnt++;
 274        }
 275
 276#if MODE5_WORKAROUND
 277        if (mode == 5)
 278                acc_clks = 5;
 279#endif
 280
 281        /* Sighting 3462430: Temporary hack for MT29F128G08CJABAWP:B */
 282        if ((ioread32(denali->flash_reg + MANUFACTURER_ID) == 0) &&
 283                (ioread32(denali->flash_reg + DEVICE_ID) == 0x88))
 284                acc_clks = 6;
 285
 286        iowrite32(acc_clks, denali->flash_reg + ACC_CLKS);
 287        iowrite32(re_2_we, denali->flash_reg + RE_2_WE);
 288        iowrite32(re_2_re, denali->flash_reg + RE_2_RE);
 289        iowrite32(we_2_re, denali->flash_reg + WE_2_RE);
 290        iowrite32(addr_2_data, denali->flash_reg + ADDR_2_DATA);
 291        iowrite32(en_lo, denali->flash_reg + RDWR_EN_LO_CNT);
 292        iowrite32(en_hi, denali->flash_reg + RDWR_EN_HI_CNT);
 293        iowrite32(cs_cnt, denali->flash_reg + CS_SETUP_CNT);
 294}
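    /* Worked example (illustrative only; the real CLK_X value comes from
     * denali.h). If CLK_X were 5 ns and the mode-0 values from the tables
     * above are used (Trp = 50, Treh = 30, Trc = 100, Trea = 40):
     *
     *     en_lo    = CEIL_DIV(50, 5) = 10
     *     en_hi    = CEIL_DIV(30, 5) = 6
     *     (10 + 6) * 5 = 80 < Trc, so en_lo += CEIL_DIV(100 - 80, 5) = 4
     *     acc_clks = CEIL_DIV(40, 5) = 8, then bumped to 9 so that
     *                acc_clks * CLK_X exceeds Trea by at least 3
     *
     * (the CLK_MULTI and chip-select adjustments are omitted here). */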
 295
 296/* queries the NAND device to see what ONFI modes it supports. */
 297static uint16_t get_onfi_nand_para(struct denali_nand_info *denali)
 298{
 299        int i;
 300        /* we need not reset here because the driver has already
 301         * reset all the banks
 302         */
 303        if (!(ioread32(denali->flash_reg + ONFI_TIMING_MODE) &
 304                ONFI_TIMING_MODE__VALUE))
 305                return FAIL;
 306
 307        for (i = 5; i > 0; i--) {
 308                if (ioread32(denali->flash_reg + ONFI_TIMING_MODE) &
 309                        (0x01 << i))
 310                        break;
 311        }
 312
 313        nand_onfi_timing_set(denali, i);
 314
 315        /* By now, all the ONFI devices we know support the page cache */
 316        /* rw feature. So here we enable the pipeline_rw_ahead feature */
 317        /* iowrite32(1, denali->flash_reg + CACHE_WRITE_ENABLE); */
 318        /* iowrite32(1, denali->flash_reg + CACHE_READ_ENABLE);  */
 319
 320        return PASS;
 321}
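    /* For example, if the ONFI_TIMING_MODE register reported support for modes
     * 0-3 (low bits 0x0F set), the loop above would stop at i = 3 and program
     * the mode-3 timings via nand_onfi_timing_set(). */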
 322
 323static void get_samsung_nand_para(struct denali_nand_info *denali,
 324                                                        uint8_t device_id)
 325{
 326        if (device_id == 0xd3) { /* Samsung K9WAG08U1A */
 327                /* Set timing register values according to datasheet */
 328                iowrite32(5, denali->flash_reg + ACC_CLKS);
 329                iowrite32(20, denali->flash_reg + RE_2_WE);
 330                iowrite32(12, denali->flash_reg + WE_2_RE);
 331                iowrite32(14, denali->flash_reg + ADDR_2_DATA);
 332                iowrite32(3, denali->flash_reg + RDWR_EN_LO_CNT);
 333                iowrite32(2, denali->flash_reg + RDWR_EN_HI_CNT);
 334                iowrite32(2, denali->flash_reg + CS_SETUP_CNT);
 335        }
 336}
 337
 338static void get_toshiba_nand_para(struct denali_nand_info *denali)
 339{
 340        uint32_t tmp;
 341
 342        /* Workaround for a controller bug that reports a wrong */
 343        /* spare area size for some kinds of Toshiba NAND devices */
 344        if ((ioread32(denali->flash_reg + DEVICE_MAIN_AREA_SIZE) == 4096) &&
 345                (ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE) == 64)) {
 346                iowrite32(216, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
 347                tmp = ioread32(denali->flash_reg + DEVICES_CONNECTED) *
 348                        ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
 349                iowrite32(tmp,
 350                                denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE);
 351#if SUPPORT_15BITECC
 352                iowrite32(15, denali->flash_reg + ECC_CORRECTION);
 353#elif SUPPORT_8BITECC
 354                iowrite32(8, denali->flash_reg + ECC_CORRECTION);
 355#endif
 356        }
 357}
 358
 359static void get_hynix_nand_para(struct denali_nand_info *denali,
 360                                                        uint8_t device_id)
 361{
 362        uint32_t main_size, spare_size;
 363
 364        switch (device_id) {
 365        case 0xD5: /* Hynix H27UAG8T2A, H27UBG8U5A or H27UCG8VFA */
 366        case 0xD7: /* Hynix H27UDG8VEM, H27UCG8UDM or H27UCG8V5A */
 367                iowrite32(128, denali->flash_reg + PAGES_PER_BLOCK);
 368                iowrite32(4096, denali->flash_reg + DEVICE_MAIN_AREA_SIZE);
 369                iowrite32(224, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
 370                main_size = 4096 *
 371                        ioread32(denali->flash_reg + DEVICES_CONNECTED);
 372                spare_size = 224 *
 373                        ioread32(denali->flash_reg + DEVICES_CONNECTED);
 374                iowrite32(main_size,
 375                                denali->flash_reg + LOGICAL_PAGE_DATA_SIZE);
 376                iowrite32(spare_size,
 377                                denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE);
 378                iowrite32(0, denali->flash_reg + DEVICE_WIDTH);
 379#if SUPPORT_15BITECC
 380                iowrite32(15, denali->flash_reg + ECC_CORRECTION);
 381#elif SUPPORT_8BITECC
 382                iowrite32(8, denali->flash_reg + ECC_CORRECTION);
 383#endif
 384                break;
 385        default:
 386                dev_warn(denali->dev,
 387                        "Spectra: Unknown Hynix NAND (Device ID: 0x%x)."
 388                        "Will use default parameter values instead.\n",
 389                        device_id);
 390        }
 391}
 392
 393/* determines how many NAND chips are connected to the controller. Note for
 394 * Intel CE4100 devices we don't support more than one device.
 395 */
 396static void find_valid_banks(struct denali_nand_info *denali)
 397{
 398        uint32_t id[denali->max_banks];
 399        int i;
 400
 401        denali->total_used_banks = 1;
 402        for (i = 0; i < denali->max_banks; i++) {
 403                index_addr(denali, (uint32_t)(MODE_11 | (i << 24) | 0), 0x90);
 404                index_addr(denali, (uint32_t)(MODE_11 | (i << 24) | 1), 0);
 405                index_addr_read_data(denali,
 406                                (uint32_t)(MODE_11 | (i << 24) | 2), &id[i]);
 407
 408                dev_dbg(denali->dev,
 409                        "Return 1st ID for bank[%d]: %x\n", i, id[i]);
 410
 411                if (i == 0) {
 412                        if (!(id[i] & 0x0ff))
 413                                break; /* no device on the first bank */
 414                } else {
 415                        if ((id[i] & 0x0ff) == (id[0] & 0x0ff))
 416                                denali->total_used_banks++;
 417                        else
 418                                break;
 419                }
 420        }
 421
 422        if (denali->platform == INTEL_CE4100) {
 423                /* Platform limitations of the CE4100 device limit
 424                 * users to a single chip solution for NAND.
 425                 * Multichip support is not enabled.
 426                 */
 427                if (denali->total_used_banks != 1) {
 428                        dev_err(denali->dev,
 429                                        "Sorry, Intel CE4100 only supports "
 430                                        "a single NAND device.\n");
 431                        BUG();
 432                }
 433        }
 434        dev_dbg(denali->dev,
 435                "denali->total_used_banks: %d\n", denali->total_used_banks);
 436}
 437
 438/*
 439 * Use the configuration feature register to determine the maximum number of
 440 * banks that the hardware supports.
 441 */
 442static void detect_max_banks(struct denali_nand_info *denali)
 443{
 444        uint32_t features = ioread32(denali->flash_reg + FEATURES);
 445
 446        denali->max_banks = 2 << (features & FEATURES__N_BANKS);
 447}
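    /* For example, if (features & FEATURES__N_BANKS) read back as 2, the
     * controller would support 2 << 2 = 8 banks. */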
 448
 449static void detect_partition_feature(struct denali_nand_info *denali)
 450{
 451        /* On the MRST platform, denali->fwblks represents the
 452         * number of blocks occupied by firmware.
 453         * The firmware lives in a protected partition that the
 454         * MTD driver may not access, so tell the driver how many
 455         * blocks it can't touch.
 456         */
 457        if (ioread32(denali->flash_reg + FEATURES) & FEATURES__PARTITION) {
 458                if ((ioread32(denali->flash_reg + PERM_SRC_ID(1)) &
 459                        PERM_SRC_ID__SRCID) == SPECTRA_PARTITION_ID) {
 460                        denali->fwblks =
 461                            ((ioread32(denali->flash_reg + MIN_MAX_BANK(1)) &
 462                              MIN_MAX_BANK__MIN_VALUE) *
 463                             denali->blksperchip)
 464                            +
 465                            (ioread32(denali->flash_reg + MIN_BLK_ADDR(1)) &
 466                            MIN_BLK_ADDR__VALUE);
 467                } else
 468                        denali->fwblks = SPECTRA_START_BLOCK;
 469        } else
 470                denali->fwblks = SPECTRA_START_BLOCK;
 471}
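    /* Worked example (illustrative only, with made-up register values): if the
     * MIN_MAX_BANK(1) minimum-bank field read 1, denali->blksperchip were 2048
     * and the MIN_BLK_ADDR(1) value field read 64, then
     *
     *     denali->fwblks = 1 * 2048 + 64 = 2112
     *
     * i.e. the first 2112 blocks belong to the protected firmware partition. */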
 472
 473static uint16_t denali_nand_timing_set(struct denali_nand_info *denali)
 474{
 475        uint16_t status = PASS;
 476        uint32_t id_bytes[5], addr;
 477        uint8_t i, maf_id, device_id;
 478
 479        dev_dbg(denali->dev,
 480                        "%s, Line %d, Function: %s\n",
 481                        __FILE__, __LINE__, __func__);
 482
 483        /* Use the Read ID method to get the device ID and other
 484         * parameters. For some NAND chips, the controller can't
 485         * report the correct device ID by reading from the
 486         * DEVICE_ID register
 487         */
 488        addr = (uint32_t)MODE_11 | BANK(denali->flash_bank);
 489        index_addr(denali, (uint32_t)addr | 0, 0x90);
 490        index_addr(denali, (uint32_t)addr | 1, 0);
 491        for (i = 0; i < 5; i++)
 492                index_addr_read_data(denali, addr | 2, &id_bytes[i]);
 493        maf_id = id_bytes[0];
 494        device_id = id_bytes[1];
 495
 496        if (ioread32(denali->flash_reg + ONFI_DEVICE_NO_OF_LUNS) &
 497                ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE) { /* ONFI 1.0 NAND */
 498                if (FAIL == get_onfi_nand_para(denali))
 499                        return FAIL;
 500        } else if (maf_id == 0xEC) { /* Samsung NAND */
 501                get_samsung_nand_para(denali, device_id);
 502        } else if (maf_id == 0x98) { /* Toshiba NAND */
 503                get_toshiba_nand_para(denali);
 504        } else if (maf_id == 0xAD) { /* Hynix NAND */
 505                get_hynix_nand_para(denali, device_id);
 506        }
 507
 508        dev_info(denali->dev,
 509                        "Dump timing register values: "
 510                        "acc_clks: %d, re_2_we: %d, re_2_re: %d\n"
 511                        "we_2_re: %d, addr_2_data: %d, rdwr_en_lo_cnt: %d\n"
 512                        "rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n",
 513                        ioread32(denali->flash_reg + ACC_CLKS),
 514                        ioread32(denali->flash_reg + RE_2_WE),
 515                        ioread32(denali->flash_reg + RE_2_RE),
 516                        ioread32(denali->flash_reg + WE_2_RE),
 517                        ioread32(denali->flash_reg + ADDR_2_DATA),
 518                        ioread32(denali->flash_reg + RDWR_EN_LO_CNT),
 519                        ioread32(denali->flash_reg + RDWR_EN_HI_CNT),
 520                        ioread32(denali->flash_reg + CS_SETUP_CNT));
 521
 522        find_valid_banks(denali);
 523
 524        detect_partition_feature(denali);
 525
 526        /* If the user specified to override the default timings
 527         * with a specific ONFI mode, we apply those changes here.
 528         */
 529        if (onfi_timing_mode != NAND_DEFAULT_TIMINGS)
 530                nand_onfi_timing_set(denali, onfi_timing_mode);
 531
 532        return status;
 533}
 534
 535static void denali_set_intr_modes(struct denali_nand_info *denali,
 536                                        uint16_t INT_ENABLE)
 537{
 538        dev_dbg(denali->dev, "%s, Line %d, Function: %s\n",
 539                       __FILE__, __LINE__, __func__);
 540
 541        if (INT_ENABLE)
 542                iowrite32(1, denali->flash_reg + GLOBAL_INT_ENABLE);
 543        else
 544                iowrite32(0, denali->flash_reg + GLOBAL_INT_ENABLE);
 545}
 546
 547/* validation function to verify that the controlling software is making
 548 * a valid request
 549 */
 550static inline bool is_flash_bank_valid(int flash_bank)
 551{
 552        return (flash_bank >= 0 && flash_bank < 4);
 553}
 554
 555static void denali_irq_init(struct denali_nand_info *denali)
 556{
 557        uint32_t int_mask = 0;
 558        int i;
 559
 560        /* Disable global interrupts */
 561        denali_set_intr_modes(denali, false);
 562
 563        int_mask = DENALI_IRQ_ALL;
 564
 565        /* Clear all status bits */
 566        for (i = 0; i < denali->max_banks; ++i)
 567                iowrite32(0xFFFF, denali->flash_reg + INTR_STATUS(i));
 568
 569        denali_irq_enable(denali, int_mask);
 570}
 571
 572static void denali_irq_cleanup(int irqnum, struct denali_nand_info *denali)
 573{
 574        denali_set_intr_modes(denali, false);
 575        free_irq(irqnum, denali);
 576}
 577
 578static void denali_irq_enable(struct denali_nand_info *denali,
 579                                                        uint32_t int_mask)
 580{
 581        int i;
 582
 583        for (i = 0; i < denali->max_banks; ++i)
 584                iowrite32(int_mask, denali->flash_reg + INTR_EN(i));
 585}
 586
 587/* This function returns the subset of the interrupt status this driver
 588 * cares about, so spurious events on the shared IRQ line can be ignored
 589 */
 590static inline uint32_t denali_irq_detected(struct denali_nand_info *denali)
 591{
 592        return read_interrupt_status(denali) & DENALI_IRQ_ALL;
 593}
 594
 595/* Interrupts are cleared by writing a 1 to the appropriate status bit */
 596static inline void clear_interrupt(struct denali_nand_info *denali,
 597                                                        uint32_t irq_mask)
 598{
 599        uint32_t intr_status_reg = 0;
 600
 601        intr_status_reg = INTR_STATUS(denali->flash_bank);
 602
 603        iowrite32(irq_mask, denali->flash_reg + intr_status_reg);
 604}
 605
 606static void clear_interrupts(struct denali_nand_info *denali)
 607{
 608        uint32_t status = 0x0;
 609        spin_lock_irq(&denali->irq_lock);
 610
 611        status = read_interrupt_status(denali);
 612        clear_interrupt(denali, status);
 613
 614        denali->irq_status = 0x0;
 615        spin_unlock_irq(&denali->irq_lock);
 616}
 617
 618static uint32_t read_interrupt_status(struct denali_nand_info *denali)
 619{
 620        uint32_t intr_status_reg = 0;
 621
 622        intr_status_reg = INTR_STATUS(denali->flash_bank);
 623
 624        return ioread32(denali->flash_reg + intr_status_reg);
 625}
 626
 627/* This is the interrupt service routine. It handles all interrupts
 628 * sent to this device. Note that on CE4100, this is a shared
 629 * interrupt.
 630 */
 631static irqreturn_t denali_isr(int irq, void *dev_id)
 632{
 633        struct denali_nand_info *denali = dev_id;
 634        uint32_t irq_status = 0x0;
 635        irqreturn_t result = IRQ_NONE;
 636
 637        spin_lock(&denali->irq_lock);
 638
 639        /* check to see if a valid NAND chip has
 640         * been selected.
 641         */
 642        if (is_flash_bank_valid(denali->flash_bank)) {
 643                /* check to see if controller generated
 644                 * the interrupt, since this is a shared interrupt */
 645                irq_status = denali_irq_detected(denali);
 646                if (irq_status != 0) {
 647                        /* handle interrupt */
 648                        /* first acknowledge it */
 649                        clear_interrupt(denali, irq_status);
 650                        /* store the status in the device context for someone
 651                           to read */
 652                        denali->irq_status |= irq_status;
 653                        /* notify anyone who cares that it happened */
 654                        complete(&denali->complete);
 655                        /* tell the OS that we've handled this */
 656                        result = IRQ_HANDLED;
 657                }
 658        }
 659        spin_unlock(&denali->irq_lock);
 660        return result;
 661}
 663
 664static uint32_t wait_for_irq(struct denali_nand_info *denali, uint32_t irq_mask)
 665{
 666        unsigned long comp_res = 0;
 667        uint32_t intr_status = 0;
 668        bool retry = false;
 669        unsigned long timeout = msecs_to_jiffies(1000);
 670
 671        do {
 672                comp_res =
 673                        wait_for_completion_timeout(&denali->complete, timeout);
 674                spin_lock_irq(&denali->irq_lock);
 675                intr_status = denali->irq_status;
 676
 677                if (intr_status & irq_mask) {
 678                        denali->irq_status &= ~irq_mask;
 679                        spin_unlock_irq(&denali->irq_lock);
 680                        /* our interrupt was detected */
 681                        break;
 682                } else {
 683                        /* these are not the interrupts you are looking for -
 684                         * need to wait again */
 685                        spin_unlock_irq(&denali->irq_lock);
 686                        retry = true;
 687                }
 688        } while (comp_res != 0);
 689
 690        if (comp_res == 0) {
 691                /* timeout */
 692                pr_err("timeout occurred, status = 0x%x, mask = 0x%x\n",
 693                                intr_status, irq_mask);
 694
 695                intr_status = 0;
 696        }
 697        return intr_status;
 698}
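    /* Typical usage pattern in this driver (see reset_bank() above and
     * denali_erase() below): clear stale status, start the operation, then
     * block until an expected status bit fires or the 1000 ms timeout expires:
     *
     *     clear_interrupts(denali);
     *     ...issue the command via iowrite32()/index_addr()...
     *     irq_status = wait_for_irq(denali, INTR_STATUS__ERASE_COMP |
     *                                       INTR_STATUS__ERASE_FAIL);
     *
     * A return value of 0 means the wait timed out. */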
 699
 700/* This helper function sets up the registers for ECC and for whether
 701 * the spare area will be transferred. */
 702static void setup_ecc_for_xfer(struct denali_nand_info *denali, bool ecc_en,
 703                                bool transfer_spare)
 704{
 705        int ecc_en_flag = 0, transfer_spare_flag = 0;
 706
 707        /* set ECC, transfer spare bits if needed */
 708        ecc_en_flag = ecc_en ? ECC_ENABLE__FLAG : 0;
 709        transfer_spare_flag = transfer_spare ? TRANSFER_SPARE_REG__FLAG : 0;
 710
 711        /* Enable spare area/ECC per user's request. */
 712        iowrite32(ecc_en_flag, denali->flash_reg + ECC_ENABLE);
 713        iowrite32(transfer_spare_flag,
 714                        denali->flash_reg + TRANSFER_SPARE_REG);
 715}
 716
 717/* sends a pipeline command operation to the controller. See the Denali NAND
 718 * controller's user guide for more information (section 4.2.3.6).
 719 */
 720static int denali_send_pipeline_cmd(struct denali_nand_info *denali,
 721                                                        bool ecc_en,
 722                                                        bool transfer_spare,
 723                                                        int access_type,
 724                                                        int op)
 725{
 726        int status = PASS;
 727        uint32_t addr = 0x0, cmd = 0x0, page_count = 1, irq_status = 0,
 728                 irq_mask = 0;
 729
 730        if (op == DENALI_READ)
 731                irq_mask = INTR_STATUS__LOAD_COMP;
 732        else if (op == DENALI_WRITE)
 733                irq_mask = 0;
 734        else
 735                BUG();
 736
 737        setup_ecc_for_xfer(denali, ecc_en, transfer_spare);
 738
 739        /* clear interrupts */
 740        clear_interrupts(denali);
 741
 742        addr = BANK(denali->flash_bank) | denali->page;
 743
 744        if (op == DENALI_WRITE && access_type != SPARE_ACCESS) {
 745                cmd = MODE_01 | addr;
 746                iowrite32(cmd, denali->flash_mem);
 747        } else if (op == DENALI_WRITE && access_type == SPARE_ACCESS) {
 748                /* spare area access */
 749                cmd = MODE_10 | addr;
 750                index_addr(denali, (uint32_t)cmd, access_type);
 751
 752                cmd = MODE_01 | addr;
 753                iowrite32(cmd, denali->flash_mem);
 754        } else if (op == DENALI_READ) {
 755                /* setup page read request for access type */
 756                cmd = MODE_10 | addr;
 757                index_addr(denali, (uint32_t)cmd, access_type);
 758
 759                /* page 33 of the NAND controller spec indicates we should not
 760                   use the pipeline commands in Spare area only mode. So we
 761                   don't.
 762                 */
 763                if (access_type == SPARE_ACCESS) {
 764                        cmd = MODE_01 | addr;
 765                        iowrite32(cmd, denali->flash_mem);
 766                } else {
 767                        index_addr(denali, (uint32_t)cmd,
 768                                        0x2000 | op | page_count);
 769
 770                        /* wait for the command to be accepted;
 771                         * we can always use the bank-0 status
 772                         * bits as the mask is identical for
 773                         * each bank. */
 774                        irq_status = wait_for_irq(denali, irq_mask);
 775
 776                        if (irq_status == 0) {
 777                                dev_err(denali->dev,
 778                                                "cmd, page, addr on timeout "
 779                                                "(0x%x, 0x%x, 0x%x)\n",
 780                                                cmd, denali->page, addr);
 781                                status = FAIL;
 782                        } else {
 783                                cmd = MODE_01 | addr;
 784                                iowrite32(cmd, denali->flash_mem);
 785                        }
 786                }
 787        }
 788        return status;
 789}
 790
 791/* helper function that simply writes a buffer to the flash */
 792static int write_data_to_flash_mem(struct denali_nand_info *denali,
 793                                                        const uint8_t *buf,
 794                                                        int len)
 795{
 796        uint32_t i = 0, *buf32;
 797
 798        /* verify that the len is a multiple of 4. see comment in
 799         * read_data_from_flash_mem() */
 800        BUG_ON((len % 4) != 0);
 801
 802        /* write the data to the flash memory */
 803        buf32 = (uint32_t *)buf;
 804        for (i = 0; i < len / 4; i++)
 805                iowrite32(*buf32++, denali->flash_mem + 0x10);
 806        return i * 4; /* intent is to return the number of bytes written */
 807}
 808
 809/* helper function that simply reads a buffer from the flash */
 810static int read_data_from_flash_mem(struct denali_nand_info *denali,
 811                                                                uint8_t *buf,
 812                                                                int len)
 813{
 814        uint32_t i = 0, *buf32;
 815
 816        /* we assume that len will be a multiple of 4, if not
 817         * it would be nice to know about it ASAP rather than
 818         * have random failures...
 819         * This assumption is based on the fact that this
 820         * function is designed to be used to read flash pages,
 821         * which are typically multiples of 4...
 822         */
 823
 824        BUG_ON((len % 4) != 0);
 825
 826        /* transfer the data from the flash */
 827        buf32 = (uint32_t *)buf;
 828        for (i = 0; i < len / 4; i++)
 829                *buf32++ = ioread32(denali->flash_mem + 0x10);
 830        return i * 4; /* intent is to return the number of bytes read */
 831}
 832
 833/* writes OOB data to the device */
 834static int write_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
 835{
 836        struct denali_nand_info *denali = mtd_to_denali(mtd);
 837        uint32_t irq_status = 0;
 838        uint32_t irq_mask = INTR_STATUS__PROGRAM_COMP |
 839                                                INTR_STATUS__PROGRAM_FAIL;
 840        int status = 0;
 841
 842        denali->page = page;
 843
 844        if (denali_send_pipeline_cmd(denali, false, false, SPARE_ACCESS,
 845                                                        DENALI_WRITE) == PASS) {
 846                write_data_to_flash_mem(denali, buf, mtd->oobsize);
 847
 848                /* wait for operation to complete */
 849                irq_status = wait_for_irq(denali, irq_mask);
 850
 851                if (irq_status == 0) {
 852                        dev_err(denali->dev, "OOB write failed\n");
 853                        status = -EIO;
 854                }
 855        } else {
 856                dev_err(denali->dev, "unable to send pipeline command\n");
 857                status = -EIO;
 858        }
 859        return status;
 860}
 861
 862/* reads OOB data from the device */
 863static void read_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
 864{
 865        struct denali_nand_info *denali = mtd_to_denali(mtd);
 866        uint32_t irq_mask = INTR_STATUS__LOAD_COMP,
 867                         irq_status = 0, addr = 0x0, cmd = 0x0;
 868
 869        denali->page = page;
 870
 871        if (denali_send_pipeline_cmd(denali, false, true, SPARE_ACCESS,
 872                                                        DENALI_READ) == PASS) {
 873                read_data_from_flash_mem(denali, buf, mtd->oobsize);
 874
 875                /* wait for the command to be accepted;
 876                 * we can always use the bank-0 status bits as the mask is
 877                 * identical for each bank. */
 878                irq_status = wait_for_irq(denali, irq_mask);
 879
 880                if (irq_status == 0)
 881                        dev_err(denali->dev, "page on OOB timeout %d\n",
 882                                        denali->page);
 883
 884                /* We set the device back to MAIN_ACCESS here as I observed
 885                 * instability with the controller if you do a block erase
 886                 * and the last transaction was a SPARE_ACCESS. Block erase
 887                 * is reliable (according to the MTD test infrastructure)
 888                 * if you are in MAIN_ACCESS.
 889                 */
 890                addr = BANK(denali->flash_bank) | denali->page;
 891                cmd = MODE_10 | addr;
 892                index_addr(denali, (uint32_t)cmd, MAIN_ACCESS);
 893        }
 894}
 895
 896/* this function examines a buffer to see if it contains data that
 897 * indicates that the buffer is part of an erased region of flash.
 898 */
 899static bool is_erased(uint8_t *buf, int len)
 900{
 901        int i = 0;
 902        for (i = 0; i < len; i++)
 903                if (buf[i] != 0xFF)
 904                        return false;
 905        return true;
 906}
 907#define ECC_SECTOR_SIZE 512
 908
 909#define ECC_SECTOR(x)   (((x) & ECC_ERROR_ADDRESS__SECTOR_NR) >> 12)
 910#define ECC_BYTE(x)     (((x) & ECC_ERROR_ADDRESS__OFFSET))
 911#define ECC_CORRECTION_VALUE(x) ((x) & ERR_CORRECTION_INFO__BYTEMASK)
 912#define ECC_ERROR_CORRECTABLE(x) (!((x) & ERR_CORRECTION_INFO__ERROR_TYPE))
 913#define ECC_ERR_DEVICE(x)       (((x) & ERR_CORRECTION_INFO__DEVICE_NR) >> 8)
 914#define ECC_LAST_ERR(x)         ((x) & ERR_CORRECTION_INFO__LAST_ERR_INFO)
 915
 916static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf,
 917                       uint32_t irq_status, unsigned int *max_bitflips)
 918{
 919        bool check_erased_page = false;
 920        unsigned int bitflips = 0;
 921
 922        if (irq_status & INTR_STATUS__ECC_ERR) {
 923                /* read the ECC errors; correctable ones are fixed below */
 924                uint32_t err_address = 0, err_correction_info = 0;
 925                uint32_t err_byte = 0, err_sector = 0, err_device = 0;
 926                uint32_t err_correction_value = 0;
 927                denali_set_intr_modes(denali, false);
 928
 929                do {
 930                        err_address = ioread32(denali->flash_reg +
 931                                                ECC_ERROR_ADDRESS);
 932                        err_sector = ECC_SECTOR(err_address);
 933                        err_byte = ECC_BYTE(err_address);
 934
 935                        err_correction_info = ioread32(denali->flash_reg +
 936                                                ERR_CORRECTION_INFO);
 937                        err_correction_value =
 938                                ECC_CORRECTION_VALUE(err_correction_info);
 939                        err_device = ECC_ERR_DEVICE(err_correction_info);
 940
 941                        if (ECC_ERROR_CORRECTABLE(err_correction_info)) {
 942                                /* If err_byte is larger than ECC_SECTOR_SIZE,
 943                                 * the error happened in the OOB area, so we
 944                                 * ignore it; there is no need to correct it.
 945                                 * err_device identifies which NAND device the
 946                                 * error bits occurred on when more than one
 947                                 * NAND is connected.
 948                                 */
 949                                if (err_byte < ECC_SECTOR_SIZE) {
 950                                        int offset;
 951                                        offset = (err_sector *
 952                                                        ECC_SECTOR_SIZE +
 953                                                        err_byte) *
 954                                                        denali->devnum +
 955                                                        err_device;
 956                                        /* correct the ECC error */
 957                                        buf[offset] ^= err_correction_value;
 958                                        denali->mtd.ecc_stats.corrected++;
 959                                        bitflips++;
 960                                }
 961                        } else {
 962                                /* if the error is not correctable, we need to
 963                                 * look at the page to see whether it is an
 964                                 * erased page; if so, it's not a real ECC error
 965                                 */
 966                                check_erased_page = true;
 967                        }
 968                } while (!ECC_LAST_ERR(err_correction_info));
 969                /* Once all ECC errors have been handled, the controller
 970                 * will trigger an ECC_TRANSACTION_DONE interrupt, so just
 971                 * wait here for that interrupt
 972                 */
 973                while (!(read_interrupt_status(denali) &
 974                                INTR_STATUS__ECC_TRANSACTION_DONE))
 975                        cpu_relax();
 976                clear_interrupts(denali);
 977                denali_set_intr_modes(denali, true);
 978        }
 979        *max_bitflips = bitflips;
 980        return check_erased_page;
 981}
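    /* Worked example for the offset computation in handle_ecc() (illustrative
     * only): with err_sector = 1, err_byte = 8, denali->devnum = 1 and
     * err_device = 0,
     *
     *     offset = (1 * ECC_SECTOR_SIZE + 8) * 1 + 0 = 520
     *
     * so the flip is corrected by XOR-ing buf[520] with err_correction_value. */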
 982
 983/* programs the controller to either enable/disable DMA transfers */
 984static void denali_enable_dma(struct denali_nand_info *denali, bool en)
 985{
 986        uint32_t reg_val = 0x0;
 987
 988        if (en)
 989                reg_val = DMA_ENABLE__FLAG;
 990
 991        iowrite32(reg_val, denali->flash_reg + DMA_ENABLE);
 992        ioread32(denali->flash_reg + DMA_ENABLE);
 993}
 994
 995/* sets up the HW to perform the data DMA */
 996static void denali_setup_dma(struct denali_nand_info *denali, int op)
 997{
 998        uint32_t mode = 0x0;
 999        const int page_count = 1;
1000        dma_addr_t addr = denali->buf.dma_buf;
1001
1002        mode = MODE_10 | BANK(denali->flash_bank);
1003
1004        /* DMA is a four step process */
1005
1006        /* 1. setup transfer type and # of pages */
1007        index_addr(denali, mode | denali->page, 0x2000 | op | page_count);
1008
1009        /* 2. set memory high address bits 23:8 */
1010        index_addr(denali, mode | ((uint16_t)(addr >> 16) << 8), 0x2200);
1011
1012        /* 3. set memory low address bits 23:8 */
1013        index_addr(denali, mode | ((uint16_t)addr << 8), 0x2300);
1014
1015        /* 4. interrupt when complete, burst len = 64 bytes */
1016        index_addr(denali, mode | 0x14000, 0x2400);
1017}
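    /* Illustrative trace of the four index_addr() writes above, assuming (for
     * the sake of example) flash_bank 0, page 0x40, op = DENALI_READ and a DMA
     * address of 0x12345678:
     *
     *     index address          data written
     *     mode | 0x40            0x2001    (0x2000 | op | page_count)
     *     mode | (0x1234 << 8)   0x2200    (upper 16 bits of the DMA address)
     *     mode | (0x5678 << 8)   0x2300    (lower 16 bits of the DMA address)
     *     mode | 0x14000         0x2400    (irq on completion, 64-byte burst)
     */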
1018
1019/* writes a page. user specifies type, and this function handles the
1020 * configuration details. */
1021static int write_page(struct mtd_info *mtd, struct nand_chip *chip,
1022                        const uint8_t *buf, bool raw_xfer)
1023{
1024        struct denali_nand_info *denali = mtd_to_denali(mtd);
1025
1026        dma_addr_t addr = denali->buf.dma_buf;
1027        size_t size = denali->mtd.writesize + denali->mtd.oobsize;
1028
1029        uint32_t irq_status = 0;
1030        uint32_t irq_mask = INTR_STATUS__DMA_CMD_COMP |
1031                                                INTR_STATUS__PROGRAM_FAIL;
1032
1033        /* if it is a raw xfer, we want to disable ecc, and send
1034         * the spare area.
1035         * !raw_xfer - enable ecc
1036         * raw_xfer - transfer spare
1037         */
1038        setup_ecc_for_xfer(denali, !raw_xfer, raw_xfer);
1039
1040        /* copy buffer into DMA buffer */
1041        memcpy(denali->buf.buf, buf, mtd->writesize);
1042
1043        if (raw_xfer) {
1044                /* transfer the data to the spare area */
1045                memcpy(denali->buf.buf + mtd->writesize,
1046                        chip->oob_poi,
1047                        mtd->oobsize);
1048        }
1049
1050        dma_sync_single_for_device(denali->dev, addr, size, DMA_TO_DEVICE);
1051
1052        clear_interrupts(denali);
1053        denali_enable_dma(denali, true);
1054
1055        denali_setup_dma(denali, DENALI_WRITE);
1056
1057        /* wait for operation to complete */
1058        irq_status = wait_for_irq(denali, irq_mask);
1059
1060        if (irq_status == 0) {
1061                dev_err(denali->dev,
1062                                "timeout on write_page (type = %d)\n",
1063                                raw_xfer);
1064                /* irq_status is 0 here, so PROGRAM_FAIL can never be set;
1065                 * treat a timed-out program operation as a failure */
1066                denali->status = NAND_STATUS_FAIL;
1067        }
1068
1069        denali_enable_dma(denali, false);
1070        dma_sync_single_for_cpu(denali->dev, addr, size, DMA_TO_DEVICE);
1071
1072        return 0;
1073}
1074
1075/* NAND core entry points */
1076
1077/* this is the callback that the NAND core calls to write a page. Since
1078 * writing a page with ECC or without is similar, all the work is done
1079 * by write_page above.
1080 */
1081static int denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
1082                                const uint8_t *buf, int oob_required)
1083{
1084        /* for regular page writes, we let HW handle all the ECC
1085         * data written to the device. */
1086        return write_page(mtd, chip, buf, false);
1087}
1088
1089/* This is the callback that the NAND core calls to write a page without ECC.
1090 * raw access is similar to ECC page writes, so all the work is done in the
1091 * write_page() function above.
1092 */
1093static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1094                                        const uint8_t *buf, int oob_required)
1095{
1096        /* for raw page writes, we want to disable ECC and simply write
1097           whatever data is in the buffer. */
1098        return write_page(mtd, chip, buf, true);
1099}
1100
1101static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
1102                            int page)
1103{
1104        return write_oob_data(mtd, chip->oob_poi, page);
1105}
1106
1107static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
1108                           int page)
1109{
1110        read_oob_data(mtd, chip->oob_poi, page);
1111
1112        return 0;
1113}
1114
1115static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
1116                            uint8_t *buf, int oob_required, int page)
1117{
1118        unsigned int max_bitflips;
1119        struct denali_nand_info *denali = mtd_to_denali(mtd);
1120
1121        dma_addr_t addr = denali->buf.dma_buf;
1122        size_t size = denali->mtd.writesize + denali->mtd.oobsize;
1123
1124        uint32_t irq_status = 0;
1125        uint32_t irq_mask = INTR_STATUS__ECC_TRANSACTION_DONE |
1126                            INTR_STATUS__ECC_ERR;
1127        bool check_erased_page = false;
1128
1129        if (page != denali->page) {
1130                dev_err(denali->dev, "IN %s: page %d is not"
1131                                " equal to denali->page %d, investigate!!",
1132                                __func__, page, denali->page);
1133                BUG();
1134        }
1135
1136        setup_ecc_for_xfer(denali, true, false);
1137
1138        denali_enable_dma(denali, true);
1139        dma_sync_single_for_device(denali->dev, addr, size, DMA_FROM_DEVICE);
1140
1141        clear_interrupts(denali);
1142        denali_setup_dma(denali, DENALI_READ);
1143
1144        /* wait for operation to complete */
1145        irq_status = wait_for_irq(denali, irq_mask);
1146
1147        dma_sync_single_for_cpu(denali->dev, addr, size, DMA_FROM_DEVICE);
1148
1149        memcpy(buf, denali->buf.buf, mtd->writesize);
1150
1151        check_erased_page = handle_ecc(denali, buf, irq_status, &max_bitflips);
1152        denali_enable_dma(denali, false);
1153
1154        if (check_erased_page) {
1155                read_oob_data(&denali->mtd, chip->oob_poi, denali->page);
1156
1157                /* check ECC failures that may have occurred on erased pages */
1158                if (!is_erased(buf, denali->mtd.writesize))
1159                        denali->mtd.ecc_stats.failed++;
1160                if (!is_erased(buf, denali->mtd.oobsize))
1161                        denali->mtd.ecc_stats.failed++;
1164        }
1165        return max_bitflips;
1166}
1167
1168static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1169                                uint8_t *buf, int oob_required, int page)
1170{
1171        struct denali_nand_info *denali = mtd_to_denali(mtd);
1172
1173        dma_addr_t addr = denali->buf.dma_buf;
1174        size_t size = denali->mtd.writesize + denali->mtd.oobsize;
1175
1176        uint32_t irq_status = 0;
1177        uint32_t irq_mask = INTR_STATUS__DMA_CMD_COMP;
1178
1179        if (page != denali->page) {
1180                dev_err(denali->dev, "IN %s: page %d is not"
1181                                " equal to denali->page %d, investigate!!",
1182                                __func__, page, denali->page);
1183                BUG();
1184        }
1185
1186        setup_ecc_for_xfer(denali, false, true);
1187        denali_enable_dma(denali, true);
1188
1189        dma_sync_single_for_device(denali->dev, addr, size, DMA_FROM_DEVICE);
1190
1191        clear_interrupts(denali);
1192        denali_setup_dma(denali, DENALI_READ);
1193
1194        /* wait for operation to complete */
1195        irq_status = wait_for_irq(denali, irq_mask);
1196
1197        dma_sync_single_for_cpu(denali->dev, addr, size, DMA_FROM_DEVICE);
1198
1199        denali_enable_dma(denali, false);
1200
1201        memcpy(buf, denali->buf.buf, mtd->writesize);
1202        memcpy(chip->oob_poi, denali->buf.buf + mtd->writesize, mtd->oobsize);
1203
1204        return 0;
1205}
1206
1207static uint8_t denali_read_byte(struct mtd_info *mtd)
1208{
1209        struct denali_nand_info *denali = mtd_to_denali(mtd);
1210        uint8_t result = 0xff;
1211
1212        if (denali->buf.head < denali->buf.tail)
1213                result = denali->buf.buf[denali->buf.head++];
1214
1215        return result;
1216}
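    /* Example of how this buffering is consumed (behaviour taken from
     * denali_cmdfunc() below): NAND_CMD_STATUS stores one byte via
     * read_status() and NAND_CMD_READID stores five ID bytes; the NAND core
     * then drains them one at a time through denali_read_byte(), which
     * returns 0xff once head catches up with tail. */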
1217
1218static void denali_select_chip(struct mtd_info *mtd, int chip)
1219{
1220        struct denali_nand_info *denali = mtd_to_denali(mtd);
1221
1222        spin_lock_irq(&denali->irq_lock);
1223        denali->flash_bank = chip;
1224        spin_unlock_irq(&denali->irq_lock);
1225}
1226
1227static int denali_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
1228{
1229        struct denali_nand_info *denali = mtd_to_denali(mtd);
1230        int status = denali->status;
1231        denali->status = 0;
1232
1233        return status;
1234}
1235
1236static void denali_erase(struct mtd_info *mtd, int page)
1237{
1238        struct denali_nand_info *denali = mtd_to_denali(mtd);
1239
1240        uint32_t cmd = 0x0, irq_status = 0;
1241
1242        /* clear interrupts */
1243        clear_interrupts(denali);
1244
1245        /* setup page read request for access type */
1246        cmd = MODE_10 | BANK(denali->flash_bank) | page;
1247        index_addr(denali, (uint32_t)cmd, 0x1);
1248
1249        /* wait for erase to complete or failure to occur */
1250        irq_status = wait_for_irq(denali, INTR_STATUS__ERASE_COMP |
1251                                        INTR_STATUS__ERASE_FAIL);
1252
1253        denali->status = (irq_status & INTR_STATUS__ERASE_FAIL) ?
1254                                                NAND_STATUS_FAIL : PASS;
1255}
1256
1257static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col,
1258                           int page)
1259{
1260        struct denali_nand_info *denali = mtd_to_denali(mtd);
1261        uint32_t addr, id;
1262        int i;
1263
1264        switch (cmd) {
1265        case NAND_CMD_PAGEPROG:
1266                break;
1267        case NAND_CMD_STATUS:
1268                read_status(denali);
1269                break;
1270        case NAND_CMD_READID:
1271        case NAND_CMD_PARAM:
1272                reset_buf(denali);
1273                /* sometimes the manufacturer ID read from the register is
1274                 * not right, e.g. on some Micron MT29F32G08QAA MLC NAND
1275                 * chips, so here we send the READID cmd to the NAND instead
1276                 */
1277                addr = (uint32_t)MODE_11 | BANK(denali->flash_bank);
1278                index_addr(denali, (uint32_t)addr | 0, 0x90);
1279                index_addr(denali, (uint32_t)addr | 1, 0);
1280                for (i = 0; i < 5; i++) {
1281                        index_addr_read_data(denali,
1282                                                (uint32_t)addr | 2,
1283                                                &id);
1284                        write_byte_to_buf(denali, id);
1285                }
1286                break;
1287        case NAND_CMD_READ0:
1288        case NAND_CMD_SEQIN:
1289                denali->page = page;
1290                break;
1291        case NAND_CMD_RESET:
1292                reset_bank(denali);
1293                break;
1294        case NAND_CMD_READOOB:
1295                /* TODO: Read OOB data */
1296                break;
1297        default:
1298                pr_err(": unsupported command received 0x%x\n", cmd);
1299                break;
1300        }
1301}
1302
1303/* stubs for ECC functions not used by the NAND core */
1304static int denali_ecc_calculate(struct mtd_info *mtd, const uint8_t *data,
1305                                uint8_t *ecc_code)
1306{
1307        struct denali_nand_info *denali = mtd_to_denali(mtd);
1308        dev_err(denali->dev,
1309                        "denali_ecc_calculate called unexpectedly\n");
1310        BUG();
1311        return -EIO;
1312}
1313
1314static int denali_ecc_correct(struct mtd_info *mtd, uint8_t *data,
1315                                uint8_t *read_ecc, uint8_t *calc_ecc)
1316{
1317        struct denali_nand_info *denali = mtd_to_denali(mtd);
1318        dev_err(denali->dev,
1319                        "denali_ecc_correct called unexpectedly\n");
1320        BUG();
1321        return -EIO;
1322}
1323
1324static void denali_ecc_hwctl(struct mtd_info *mtd, int mode)
1325{
1326        struct denali_nand_info *denali = mtd_to_denali(mtd);
1327        dev_err(denali->dev,
1328                        "denali_ecc_hwctl called unexpectedly\n");
1329        BUG();
1330}
1331/* end NAND core entry points */
1332
1333/* Initialization code to bring the device up to a known good state */
1334static void denali_hw_init(struct denali_nand_info *denali)
1335{
1336        /* tell the driver how many bytes the controller will skip before
1337         * writing the ECC code in the OOB area; this register may already
1338         * have been set by firmware, so we read this value out.
1339         * if this value is 0, just let it be.
1340         */
1341        denali->bbtskipbytes = ioread32(denali->flash_reg +
1342                                                SPARE_AREA_SKIP_BYTES);
1343        detect_max_banks(denali);
1344        denali_nand_reset(denali);
1345        iowrite32(0x0F, denali->flash_reg + RB_PIN_ENABLED);
1346        iowrite32(CHIP_EN_DONT_CARE__FLAG,
1347                        denali->flash_reg + CHIP_ENABLE_DONT_CARE);
1348
1349        iowrite32(0xffff, denali->flash_reg + SPARE_AREA_MARKER);
1350
1351        /* These registers should be given their values at init time */
1352        iowrite32(0, denali->flash_reg + TWO_ROW_ADDR_CYCLES);
1353        iowrite32(1, denali->flash_reg + ECC_ENABLE);
1354        denali_nand_timing_set(denali);
1355        denali_irq_init(denali);
1356}
1357
1358/* Although the controller spec says SLC ECC is forced to be 4-bit,
1359 * the denali controller in MRST only supports 15-bit and 8-bit ECC
1360 * correction
1361 */
1362#define ECC_8BITS       14
1363static struct nand_ecclayout nand_8bit_oob = {
1364        .eccbytes = 14,
1365};
1366
1367#define ECC_15BITS      26
1368static struct nand_ecclayout nand_15bit_oob = {
1369        .eccbytes = 26,
1370};
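/*
 * Worked example (assumed geometry, for illustration only): with a 2048-byte
 * page, 64-byte OOB, 2 skip bytes and ECC_SECTOR_SIZE assumed to be 512, a
 * page holds 2048 / 512 = 4 ECC sectors, so:
 *
 *   8-bit ECC needs  2 + 4 * 14 =  58 bytes of OOB  -> fits in 64
 *   15-bit ECC needs 2 + 4 * 26 = 106 bytes of OOB  -> does not fit
 *
 * This is the budget that the ECC selection logic in denali_init() checks
 * further below.
 */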
1371
1372static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' };
1373static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' };
1374
1375static struct nand_bbt_descr bbt_main_descr = {
1376        .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
1377                | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
1378        .offs = 8,
1379        .len = 4,
1380        .veroffs = 12,
1381        .maxblocks = 4,
1382        .pattern = bbt_pattern,
1383};
1384
1385static struct nand_bbt_descr bbt_mirror_descr = {
1386        .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
1387                | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
1388        .offs = 8,
1389        .len = 4,
1390        .veroffs = 12,
1391        .maxblocks = 4,
1392        .pattern = mirror_pattern,
1393};
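/*
 * Example (for illustration only): with the descriptors above, the primary
 * bad block table lives in one of the last 4 blocks of each chip and is
 * recognised by the OOB bytes of its first page:
 *
 *   OOB offset:   8    9   10   11   12
 *   contents:    'B'  'b'  't'  '0'  <version>
 *
 * The mirror table uses the reversed pattern "1tbB" at the same offsets.
 */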
1394
1395/* initialize driver data structures */
1396static void denali_drv_init(struct denali_nand_info *denali)
1397{
1398        denali->idx = 0;
1399
1400        /* setup interrupt handler */
1401        /* the completion object will be used to notify
1402         * the waiter that the interrupt is done */
1403        init_completion(&denali->complete);
1404
1405        /* the spinlock will be used to synchronize the ISR
1406         * with any code that might access shared
1407         * data (the interrupt status) */
1408        spin_lock_init(&denali->irq_lock);
1409
1410        /* indicate that MTD has not selected a valid bank yet */
1411        denali->flash_bank = CHIP_SELECT_INVALID;
1412
1413        /* initialize our irq_status variable to indicate no interrupts */
1414        denali->irq_status = 0;
1415}
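/*
 * Simplified sketch (illustration only) of the ISR/waiter handshake that the
 * completion and spinlock initialised above are used for; the real
 * denali_isr() and interrupt-wait helpers earlier in this file are more
 * involved.
 *
 *   ISR side:
 *     spin_lock(&denali->irq_lock);
 *     denali->irq_status |= hw_intr_status;      // bits read from the controller
 *     spin_unlock(&denali->irq_lock);
 *     complete(&denali->complete);
 *
 *   waiter side:
 *     wait_for_completion_timeout(&denali->complete, msecs_to_jiffies(1000));
 *     spin_lock_irq(&denali->irq_lock);
 *     seen = denali->irq_status;                 // consume the reported bits
 *     denali->irq_status &= ~seen;
 *     spin_unlock_irq(&denali->irq_lock);
 *
 * hw_intr_status, seen and the 1000 ms timeout are placeholders, not values
 * taken from this driver.
 */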
1416
1417int denali_init(struct denali_nand_info *denali)
1418{
1419        int ret;
1420
1421        if (denali->platform == INTEL_CE4100) {
1422                /* Due to a silicon limitation, we can only support
1423                 * ONFI timing mode 1 and below.
1424                 */
1425                if (onfi_timing_mode < -1 || onfi_timing_mode > 1) {
1426                        pr_err("Intel CE4100 only supports ONFI timing mode 1 or below\n");
1427                        return -EINVAL;
1428                }
1429        }
1430
1431        /* allocate a temporary buffer for nand_scan_ident() */
1432        denali->buf.buf = devm_kzalloc(denali->dev, PAGE_SIZE,
1433                                        GFP_DMA | GFP_KERNEL);
1434        if (!denali->buf.buf)
1435                return -ENOMEM;
1436
1437        denali->mtd.dev.parent = denali->dev;
1438        denali_hw_init(denali);
1439        denali_drv_init(denali);
1440
1441        /* denali_isr is registered after all the hardware
1442         * initialization is finished */
1443        if (request_irq(denali->irq, denali_isr, IRQF_SHARED,
1444                        DENALI_NAND_NAME, denali)) {
1445                pr_err("Spectra: Unable to allocate IRQ\n");
1446                return -ENODEV;
1447        }
1448
1449        /* now that our ISR is registered, we can enable interrupts */
1450        denali_set_intr_modes(denali, true);
1451        denali->mtd.name = "denali-nand";
1452        denali->mtd.owner = THIS_MODULE;
1453        denali->mtd.priv = &denali->nand;
1454
1455        /* register the driver with the NAND core subsystem */
1456        denali->nand.select_chip = denali_select_chip;
1457        denali->nand.cmdfunc = denali_cmdfunc;
1458        denali->nand.read_byte = denali_read_byte;
1459        denali->nand.waitfunc = denali_waitfunc;
1460
1461        /* scan for NAND devices attached to the controller;
1462         * this is the first stage in a two-step process to register
1463         * with the NAND subsystem */
1464        if (nand_scan_ident(&denali->mtd, denali->max_banks, NULL)) {
1465                ret = -ENXIO;
1466                goto failed_req_irq;
1467        }
1468
1469        /* allocate the right size buffer now */
1470        devm_kfree(denali->dev, denali->buf.buf);
1471        denali->buf.buf = devm_kzalloc(denali->dev,
1472                             denali->mtd.writesize + denali->mtd.oobsize,
1473                             GFP_KERNEL);
1474        if (!denali->buf.buf) {
1475                ret = -ENOMEM;
1476                goto failed_req_irq;
1477        }
1478
1479        /* Is 32-bit DMA supported? */
1480        ret = dma_set_mask(denali->dev, DMA_BIT_MASK(32));
1481        if (ret) {
1482                pr_err("Spectra: no usable DMA configuration\n");
1483                goto failed_req_irq;
1484        }
1485
1486        denali->buf.dma_buf = dma_map_single(denali->dev, denali->buf.buf,
1487                             denali->mtd.writesize + denali->mtd.oobsize,
1488                             DMA_BIDIRECTIONAL);
1489        if (dma_mapping_error(denali->dev, denali->buf.dma_buf)) {
1490                dev_err(denali->dev, "Spectra: failed to map DMA buffer\n");
1491                ret = -EIO;
1492                goto failed_req_irq;
1493        }
1494
1495        /* support for multiple NAND devices
1496         * MTD knows nothing about multiple NAND devices,
1497         * so we must tell it the real page size
1498         * and anything else that is necessary.
1499         */
1500        denali->devnum = ioread32(denali->flash_reg + DEVICES_CONNECTED);
1501        denali->nand.chipsize <<= (denali->devnum - 1);
1502        denali->nand.page_shift += (denali->devnum - 1);
1503        denali->nand.pagemask = (denali->nand.chipsize >>
1504                                                denali->nand.page_shift) - 1;
1505        denali->nand.bbt_erase_shift += (denali->devnum - 1);
1506        denali->nand.phys_erase_shift = denali->nand.bbt_erase_shift;
1507        denali->nand.chip_shift += (denali->devnum - 1);
1508        denali->mtd.writesize <<= (denali->devnum - 1);
1509        denali->mtd.oobsize <<= (denali->devnum - 1);
1510        denali->mtd.erasesize <<= (denali->devnum - 1);
1511        denali->mtd.size = denali->nand.numchips * denali->nand.chipsize;
1512        denali->bbtskipbytes *= denali->devnum;
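        /*
         * Worked example (assumed values, for illustration only): with two
         * identical devices connected in parallel (DEVICES_CONNECTED == 2,
         * so devnum == 2) and a per-device geometry of 2048-byte pages,
         * 64-byte OOB and 128 KiB erase blocks, the aggregate device
         * presented to MTD by the adjustments above becomes:
         *
         *   writesize: 2048 -> 4096      oobsize: 64 -> 128
         *   erasesize: 128 KiB -> 256 KiB
         *   chipsize: doubled; page_shift, chip_shift and bbt_erase_shift
         *             each increase by 1
         *   bbtskipbytes: multiplied by devnum
         */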
1513
1514        /* second stage of the NAND scan
1515         * this stage requires information regarding ECC and
1516         * bad block management. */
1517
1518        /* Bad block management */
1519        denali->nand.bbt_td = &bbt_main_descr;
1520        denali->nand.bbt_md = &bbt_mirror_descr;
1521
1522        /* skip the scan for now until we have OOB read and write support */
1523        denali->nand.bbt_options |= NAND_BBT_USE_FLASH;
1524        denali->nand.options |= NAND_SKIP_BBTSCAN;
1525        denali->nand.ecc.mode = NAND_ECC_HW_SYNDROME;
1526
1527        /* The Denali controller only supports 15-bit and 8-bit ECC in MRST,
1528         * so just let the controller do 15-bit ECC for MLC and 8-bit ECC
1529         * for SLC if possible.
1530         */
1531        if (!nand_is_slc(&denali->nand) &&
1532                        (denali->mtd.oobsize > (denali->bbtskipbytes +
1533                        ECC_15BITS * (denali->mtd.writesize /
1534                        ECC_SECTOR_SIZE)))) {
1535                /* if MLC OOB size is large enough, use 15-bit ECC */
1536                denali->nand.ecc.strength = 15;
1537                denali->nand.ecc.layout = &nand_15bit_oob;
1538                denali->nand.ecc.bytes = ECC_15BITS;
1539                iowrite32(15, denali->flash_reg + ECC_CORRECTION);
1540        } else if (denali->mtd.oobsize < (denali->bbtskipbytes +
1541                        ECC_8BITS * (denali->mtd.writesize /
1542                        ECC_SECTOR_SIZE))) {
1543                pr_err("Your NAND chip OOB is not large enough to contain 8bit ECC correction codes\n");
1544                ret = -EINVAL;
1545                goto failed_req_irq;
1546        } else {
1547                denali->nand.ecc.strength = 8;
1548                denali->nand.ecc.layout = &nand_8bit_oob;
1549                denali->nand.ecc.bytes = ECC_8BITS;
1550                iowrite32(8, denali->flash_reg + ECC_CORRECTION);
1551        }
1552
1553        denali->nand.ecc.bytes *= denali->devnum;
1554        denali->nand.ecc.strength *= denali->devnum;
1555        denali->nand.ecc.layout->eccbytes *=
1556                denali->mtd.writesize / ECC_SECTOR_SIZE;
1557        denali->nand.ecc.layout->oobfree[0].offset =
1558                denali->bbtskipbytes + denali->nand.ecc.layout->eccbytes;
1559        denali->nand.ecc.layout->oobfree[0].length =
1560                denali->mtd.oobsize - denali->nand.ecc.layout->eccbytes -
1561                denali->bbtskipbytes;
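        /*
         * Worked example (assumed geometry, single device): with
         * writesize == 2048, oobsize == 64, bbtskipbytes == 2,
         * ECC_SECTOR_SIZE assumed to be 512 and 8-bit ECC selected above:
         *
         *   layout->eccbytes          = 14 * (2048 / 512) = 56
         *   layout->oobfree[0].offset = 2 + 56            = 58
         *   layout->oobfree[0].length = 64 - 56 - 2       = 6
         */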
1562
1563        /* Let the driver know the total number of blocks and
1564         * how many blocks are contained in each NAND chip.
1565         * blksperchip will help the driver to know how many
1566         * blocks are taken by the FW.
1567         */
1568        denali->totalblks = denali->mtd.size >>
1569                                denali->nand.phys_erase_shift;
1570        denali->blksperchip = denali->totalblks / denali->nand.numchips;
1571
1572        /* These functions are required by the NAND core framework; otherwise,
1573         * the NAND core will assert. However, we don't need them, so we'll stub
1574         * them out. */
1575        denali->nand.ecc.calculate = denali_ecc_calculate;
1576        denali->nand.ecc.correct = denali_ecc_correct;
1577        denali->nand.ecc.hwctl = denali_ecc_hwctl;
1578
1579        /* override the default read operations */
1580        denali->nand.ecc.size = ECC_SECTOR_SIZE * denali->devnum;
1581        denali->nand.ecc.read_page = denali_read_page;
1582        denali->nand.ecc.read_page_raw = denali_read_page_raw;
1583        denali->nand.ecc.write_page = denali_write_page;
1584        denali->nand.ecc.write_page_raw = denali_write_page_raw;
1585        denali->nand.ecc.read_oob = denali_read_oob;
1586        denali->nand.ecc.write_oob = denali_write_oob;
1587        denali->nand.erase_cmd = denali_erase;
1588
1589        if (nand_scan_tail(&denali->mtd)) {
1590                ret = -ENXIO;
1591                goto failed_req_irq;
1592        }
1593
1594        ret = mtd_device_register(&denali->mtd, NULL, 0);
1595        if (ret) {
1596                dev_err(denali->dev, "Spectra: Failed to register MTD: %d\n",
1597                                ret);
1598                goto failed_req_irq;
1599        }
1600        return 0;
1601
1602failed_req_irq:
1603        denali_irq_cleanup(denali->irq, denali);
1604
1605        return ret;
1606}
1607EXPORT_SYMBOL(denali_init);
1608
1609/* driver exit point */
1610void denali_remove(struct denali_nand_info *denali)
1611{
1612        denali_irq_cleanup(denali->irq, denali);
1613        dma_unmap_single(denali->dev, denali->buf.dma_buf,
1614                        denali->mtd.writesize + denali->mtd.oobsize,
1615                        DMA_BIDIRECTIONAL);
1616}
1617EXPORT_SYMBOL(denali_remove);
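/*
 * Usage sketch (illustration only): denali_init() and denali_remove() are
 * meant to be called from bus-specific glue code kept in separate files.
 * A hypothetical probe might do something along these lines; the field
 * assignments shown are assumptions about what the glue provides, not code
 * taken from this driver:
 *
 *   denali->dev       = &pdev->dev;
 *   denali->irq       = platform_irq;            // resolved by the glue
 *   denali->flash_reg = mapped_register_base;    // ioremap()'d controller registers
 *   denali->platform  = INTEL_CE4100;            // or the appropriate platform id
 *   ret = denali_init(denali);
 *   ...
 *   denali_remove(denali);                       // on driver teardown
 */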
1618