linux/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
   1/*
   2 * Freescale GPMI NAND Flash Driver
   3 *
   4 * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
   5 * Copyright (C) 2008 Embedded Alley Solutions, Inc.
   6 *
   7 * This program is free software; you can redistribute it and/or modify
   8 * it under the terms of the GNU General Public License as published by
   9 * the Free Software Foundation; either version 2 of the License, or
  10 * (at your option) any later version.
  11 *
  12 * This program is distributed in the hope that it will be useful,
  13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  15 * GNU General Public License for more details.
  16 *
  17 * You should have received a copy of the GNU General Public License along
  18 * with this program; if not, write to the Free Software Foundation, Inc.,
  19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
  20 */
  21
  22#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  23
  24#include <linux/clk.h>
  25#include <linux/slab.h>
  26#include <linux/interrupt.h>
  27#include <linux/module.h>
  28#include <linux/mtd/partitions.h>
  29#include <linux/pinctrl/consumer.h>
  30#include <linux/of.h>
  31#include <linux/of_device.h>
  32#include <linux/of_mtd.h>
  33#include "gpmi-nand.h"
  34
  35/* Resource names for the GPMI NAND driver. */
  36#define GPMI_NAND_GPMI_REGS_ADDR_RES_NAME  "gpmi-nand"
  37#define GPMI_NAND_BCH_REGS_ADDR_RES_NAME   "bch"
  38#define GPMI_NAND_BCH_INTERRUPT_RES_NAME   "bch"
  39
   40/* Add our own BBT descriptor. */
  41static uint8_t scan_ff_pattern[] = { 0xff };
  42static struct nand_bbt_descr gpmi_bbt_descr = {
  43        .options        = 0,
  44        .offs           = 0,
  45        .len            = 1,
  46        .pattern        = scan_ff_pattern
  47};
  48
   49/* We will use all of the page + OOB area. */
  50static struct nand_ecclayout gpmi_hw_ecclayout = {
  51        .eccbytes = 0,
  52        .eccpos = { 0, },
  53        .oobfree = { {.offset = 0, .length = 0} }
  54};
  55
  56static irqreturn_t bch_irq(int irq, void *cookie)
  57{
  58        struct gpmi_nand_data *this = cookie;
  59
  60        gpmi_clear_bch(this);
  61        complete(&this->bch_done);
  62        return IRQ_HANDLED;
  63}
  64
  65/*
  66 *  Calculate the ECC strength by hand:
  67 *      E : The ECC strength.
   68 *      G : The length of the Galois field.
   69 *      N : The number of ECC chunks per page.
   70 *      O : The oobsize of the NAND chip.
   71 *      M : The metadata size per page.
  72 *
  73 *      The formula is :
  74 *              E * G * N
  75 *            ------------ <= (O - M)
  76 *                  8
  77 *
  78 *      So, we get E by:
  79 *                    (O - M) * 8
  80 *              E <= -------------
  81 *                       G * N
  82 */
  83static inline int get_ecc_strength(struct gpmi_nand_data *this)
  84{
  85        struct bch_geometry *geo = &this->bch_geometry;
  86        struct mtd_info *mtd = &this->mtd;
  87        int ecc_strength;
  88
  89        ecc_strength = ((mtd->oobsize - geo->metadata_size) * 8)
  90                        / (geo->gf_len * geo->ecc_chunk_count);
  91
   92        /* We need the largest even number that does not exceed this value. */
  93        return round_down(ecc_strength, 2);
  94}
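
/*
 * Illustrative example (hypothetical values, not taken from a real chip):
 * for a 2KiB page with a 64-byte OOB and the defaults set in
 * common_nfc_set_geometry() below (M = 10, G = 13, 512-byte chunks, so
 * N = 4), the formula above gives
 *
 *      E <= (64 - 10) * 8 / (13 * 4) = 432 / 52 ~= 8.3
 *
 * which get_ecc_strength() rounds down to the even value 8.
 */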
  95
  96static inline bool gpmi_check_ecc(struct gpmi_nand_data *this)
  97{
  98        struct bch_geometry *geo = &this->bch_geometry;
  99
 100        /* Do the sanity check. */
 101        if (GPMI_IS_MX23(this) || GPMI_IS_MX28(this)) {
  102                /* The mx23/mx28 only support GF(13). */
 103                if (geo->gf_len == 14)
 104                        return false;
 105
 106                if (geo->ecc_strength > MXS_ECC_STRENGTH_MAX)
 107                        return false;
 108        } else if (GPMI_IS_MX6Q(this)) {
 109                if (geo->ecc_strength > MX6_ECC_STRENGTH_MAX)
 110                        return false;
 111        }
 112        return true;
 113}
 114
 115int common_nfc_set_geometry(struct gpmi_nand_data *this)
 116{
 117        struct bch_geometry *geo = &this->bch_geometry;
 118        struct mtd_info *mtd = &this->mtd;
 119        unsigned int metadata_size;
 120        unsigned int status_size;
 121        unsigned int block_mark_bit_offset;
 122
 123        /*
  124         * The size of the metadata can be changed, though we set it to 10
  125         * bytes for now. It can't be too large, because we have to leave
  126         * enough space for the BCH ECC data.
 127         */
 128        geo->metadata_size = 10;
 129
  130        /* The default length of the Galois field. */
 131        geo->gf_len = 13;
 132
 133        /* The default for chunk size. */
 134        geo->ecc_chunk_size = 512;
 135        while (geo->ecc_chunk_size < mtd->oobsize) {
 136                geo->ecc_chunk_size *= 2; /* keep C >= O */
 137                geo->gf_len = 14;
 138        }
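        /*
         * For example, a hypothetical chip with a 1280-byte OOB would leave
         * this loop with a 2048-byte chunk and gf_len = 14.
         */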
 139
 140        geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunk_size;
 141
 142        /* We use the same ECC strength for all chunks. */
 143        geo->ecc_strength = get_ecc_strength(this);
 144        if (!gpmi_check_ecc(this)) {
 145                dev_err(this->dev,
  146                        "We cannot support this NAND chip."
  147                        " Its required ECC strength (%d) is beyond our"
  148                        " capability (%d).\n", geo->ecc_strength,
 149                        (GPMI_IS_MX6Q(this) ? MX6_ECC_STRENGTH_MAX
 150                                        : MXS_ECC_STRENGTH_MAX));
 151                return -EINVAL;
 152        }
 153
 154        geo->page_size = mtd->writesize + mtd->oobsize;
 155        geo->payload_size = mtd->writesize;
 156
 157        /*
 158         * The auxiliary buffer contains the metadata and the ECC status. The
 159         * metadata is padded to the nearest 32-bit boundary. The ECC status
 160         * contains one byte for every ECC chunk, and is also padded to the
 161         * nearest 32-bit boundary.
 162         */
 163        metadata_size = ALIGN(geo->metadata_size, 4);
 164        status_size   = ALIGN(geo->ecc_chunk_count, 4);
 165
 166        geo->auxiliary_size = metadata_size + status_size;
 167        geo->auxiliary_status_offset = metadata_size;
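
        /*
         * With the hypothetical defaults above (10 bytes of metadata and 4
         * ECC chunks), ALIGN() pads these to 12 and 4 bytes, so
         * auxiliary_size is 16 and the status bytes start at offset 12.
         */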
 168
 169        if (!this->swap_block_mark)
 170                return 0;
 171
 172        /*
 173         * We need to compute the byte and bit offsets of
 174         * the physical block mark within the ECC-based view of the page.
 175         *
  176         * A NAND chip with a 2K page is shown below:
 177         *                                             (Block Mark)
 178         *                                                   |      |
 179         *                                                   |  D   |
 180         *                                                   |<---->|
 181         *                                                   V      V
 182         *    +---+----------+-+----------+-+----------+-+----------+-+
 183         *    | M |   data   |E|   data   |E|   data   |E|   data   |E|
 184         *    +---+----------+-+----------+-+----------+-+----------+-+
 185         *
  186         * The position of the block mark moves forward in the ECC-based
  187         * view of the page, and the delta is:
 188         *
 189         *                   E * G * (N - 1)
 190         *             D = (---------------- + M)
 191         *                          8
 192         *
  193         * With the formula for computing the ECC strength, and the condition
  194         *       : C >= O         (C is the ECC chunk size)
 195         *
  196         * It's easy to deduce the following result:
 197         *
 198         *         E * G       (O - M)      C - M         C - M
 199         *      ----------- <= ------- <=  --------  <  ---------
 200         *           8            N           N          (N - 1)
 201         *
 202         *  So, we get:
 203         *
 204         *                   E * G * (N - 1)
 205         *             D = (---------------- + M) < C
 206         *                          8
 207         *
  208         *  The above inequality means the position of the block mark
  209         *  within the ECC-based view of the page is still inside a data
  210         *  chunk, and NOT in the ECC bits of the chunk.
 211         *
 212         *  Use the following to compute the bit position of the
 213         *  physical block mark within the ECC-based view of the page:
  214         *          (writesize - D) * 8
 215         *
 216         *  --Huang Shijie
 217         */
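
        /*
         * A worked example with the same hypothetical 2K-page geometry as
         * the earlier ECC-strength example (writesize = 2048, E = 8, G = 13,
         * N = 4, M = 10):
         *
         *      block_mark_bit_offset = 2048 * 8 - (8 * 13 * 3 + 10 * 8)
         *                            = 16384 - 392 = 15992
         *
         * which splits into a byte offset of 1999 and a bit offset of 0.
         */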
 218        block_mark_bit_offset = mtd->writesize * 8 -
 219                (geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
 220                                + geo->metadata_size * 8);
 221
 222        geo->block_mark_byte_offset = block_mark_bit_offset / 8;
 223        geo->block_mark_bit_offset  = block_mark_bit_offset % 8;
 224        return 0;
 225}
 226
 227struct dma_chan *get_dma_chan(struct gpmi_nand_data *this)
 228{
 229        int chipnr = this->current_chip;
 230
 231        return this->dma_chans[chipnr];
 232}
 233
  234/* Can we use the upper layer's buffer directly for DMA? */
 235void prepare_data_dma(struct gpmi_nand_data *this, enum dma_data_direction dr)
 236{
 237        struct scatterlist *sgl = &this->data_sgl;
 238        int ret;
 239
 240        this->direct_dma_map_ok = true;
 241
 242        /* first try to map the upper buffer directly */
 243        sg_init_one(sgl, this->upper_buf, this->upper_len);
 244        ret = dma_map_sg(this->dev, sgl, 1, dr);
 245        if (ret == 0) {
 246                /* We have to use our own DMA buffer. */
 247                sg_init_one(sgl, this->data_buffer_dma, PAGE_SIZE);
 248
 249                if (dr == DMA_TO_DEVICE)
 250                        memcpy(this->data_buffer_dma, this->upper_buf,
 251                                this->upper_len);
 252
 253                ret = dma_map_sg(this->dev, sgl, 1, dr);
 254                if (ret == 0)
 255                        pr_err("DMA mapping failed.\n");
 256
 257                this->direct_dma_map_ok = false;
 258        }
 259}
 260
 261/* This will be called after the DMA operation is finished. */
 262static void dma_irq_callback(void *param)
 263{
 264        struct gpmi_nand_data *this = param;
 265        struct completion *dma_c = &this->dma_done;
 266
 267        complete(dma_c);
 268
 269        switch (this->dma_type) {
 270        case DMA_FOR_COMMAND:
 271                dma_unmap_sg(this->dev, &this->cmd_sgl, 1, DMA_TO_DEVICE);
 272                break;
 273
 274        case DMA_FOR_READ_DATA:
 275                dma_unmap_sg(this->dev, &this->data_sgl, 1, DMA_FROM_DEVICE);
 276                if (this->direct_dma_map_ok == false)
 277                        memcpy(this->upper_buf, this->data_buffer_dma,
 278                                this->upper_len);
 279                break;
 280
 281        case DMA_FOR_WRITE_DATA:
 282                dma_unmap_sg(this->dev, &this->data_sgl, 1, DMA_TO_DEVICE);
 283                break;
 284
 285        case DMA_FOR_READ_ECC_PAGE:
 286        case DMA_FOR_WRITE_ECC_PAGE:
  287                /* We have to wait for the BCH interrupt to finish. */
 288                break;
 289
 290        default:
 291                pr_err("in wrong DMA operation.\n");
 292        }
 293}
 294
 295int start_dma_without_bch_irq(struct gpmi_nand_data *this,
 296                                struct dma_async_tx_descriptor *desc)
 297{
 298        struct completion *dma_c = &this->dma_done;
 299        int err;
 300
 301        init_completion(dma_c);
 302
 303        desc->callback          = dma_irq_callback;
 304        desc->callback_param    = this;
 305        dmaengine_submit(desc);
 306        dma_async_issue_pending(get_dma_chan(this));
 307
 308        /* Wait for the interrupt from the DMA block. */
 309        err = wait_for_completion_timeout(dma_c, msecs_to_jiffies(1000));
 310        if (!err) {
 311                pr_err("DMA timeout, last DMA :%d\n", this->last_dma_type);
 312                gpmi_dump_info(this);
 313                return -ETIMEDOUT;
 314        }
 315        return 0;
 316}
 317
 318/*
  319 * This function is used for BCH page reads and writes.
  320 * It will wait for the BCH interrupt for as long as ONE second.
  321 * Actually, we must wait for two interrupts:
  322 *      [1] first the DMA interrupt and
  323 *      [2] then the BCH interrupt.
 324 */
 325int start_dma_with_bch_irq(struct gpmi_nand_data *this,
 326                        struct dma_async_tx_descriptor *desc)
 327{
 328        struct completion *bch_c = &this->bch_done;
 329        int err;
 330
 331        /* Prepare to receive an interrupt from the BCH block. */
 332        init_completion(bch_c);
 333
 334        /* start the DMA */
 335        start_dma_without_bch_irq(this, desc);
 336
 337        /* Wait for the interrupt from the BCH block. */
 338        err = wait_for_completion_timeout(bch_c, msecs_to_jiffies(1000));
 339        if (!err) {
 340                pr_err("BCH timeout, last DMA :%d\n", this->last_dma_type);
 341                gpmi_dump_info(this);
 342                return -ETIMEDOUT;
 343        }
 344        return 0;
 345}
 346
 347static int acquire_register_block(struct gpmi_nand_data *this,
 348                                  const char *res_name)
 349{
 350        struct platform_device *pdev = this->pdev;
 351        struct resources *res = &this->resources;
 352        struct resource *r;
 353        void __iomem *p;
 354
 355        r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name);
 356        if (!r) {
 357                pr_err("Can't get resource for %s\n", res_name);
 358                return -ENXIO;
 359        }
 360
 361        p = ioremap(r->start, resource_size(r));
 362        if (!p) {
 363                pr_err("Can't remap %s\n", res_name);
 364                return -ENOMEM;
 365        }
 366
 367        if (!strcmp(res_name, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME))
 368                res->gpmi_regs = p;
 369        else if (!strcmp(res_name, GPMI_NAND_BCH_REGS_ADDR_RES_NAME))
 370                res->bch_regs = p;
 371        else
 372                pr_err("unknown resource name : %s\n", res_name);
 373
 374        return 0;
 375}
 376
 377static void release_register_block(struct gpmi_nand_data *this)
 378{
 379        struct resources *res = &this->resources;
 380        if (res->gpmi_regs)
 381                iounmap(res->gpmi_regs);
 382        if (res->bch_regs)
 383                iounmap(res->bch_regs);
 384        res->gpmi_regs = NULL;
 385        res->bch_regs = NULL;
 386}
 387
 388static int acquire_bch_irq(struct gpmi_nand_data *this, irq_handler_t irq_h)
 389{
 390        struct platform_device *pdev = this->pdev;
 391        struct resources *res = &this->resources;
 392        const char *res_name = GPMI_NAND_BCH_INTERRUPT_RES_NAME;
 393        struct resource *r;
 394        int err;
 395
 396        r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, res_name);
 397        if (!r) {
 398                pr_err("Can't get resource for %s\n", res_name);
 399                return -ENXIO;
 400        }
 401
 402        err = request_irq(r->start, irq_h, 0, res_name, this);
 403        if (err) {
 404                pr_err("Can't own %s\n", res_name);
 405                return err;
 406        }
 407
 408        res->bch_low_interrupt = r->start;
 409        res->bch_high_interrupt = r->end;
 410        return 0;
 411}
 412
 413static void release_bch_irq(struct gpmi_nand_data *this)
 414{
 415        struct resources *res = &this->resources;
 416        int i = res->bch_low_interrupt;
 417
 418        for (; i <= res->bch_high_interrupt; i++)
 419                free_irq(i, this);
 420}
 421
 422static void release_dma_channels(struct gpmi_nand_data *this)
 423{
 424        unsigned int i;
 425        for (i = 0; i < DMA_CHANS; i++)
 426                if (this->dma_chans[i]) {
 427                        dma_release_channel(this->dma_chans[i]);
 428                        this->dma_chans[i] = NULL;
 429                }
 430}
 431
 432static int acquire_dma_channels(struct gpmi_nand_data *this)
 433{
 434        struct platform_device *pdev = this->pdev;
 435        struct dma_chan *dma_chan;
 436
 437        /* request dma channel */
 438        dma_chan = dma_request_slave_channel(&pdev->dev, "rx-tx");
 439        if (!dma_chan) {
 440                pr_err("Failed to request DMA channel.\n");
 441                goto acquire_err;
 442        }
 443
 444        this->dma_chans[0] = dma_chan;
 445        return 0;
 446
 447acquire_err:
 448        release_dma_channels(this);
 449        return -EINVAL;
 450}
 451
 452static void gpmi_put_clks(struct gpmi_nand_data *this)
 453{
 454        struct resources *r = &this->resources;
 455        struct clk *clk;
 456        int i;
 457
 458        for (i = 0; i < GPMI_CLK_MAX; i++) {
 459                clk = r->clock[i];
 460                if (clk) {
 461                        clk_put(clk);
 462                        r->clock[i] = NULL;
 463                }
 464        }
 465}
 466
 467static char *extra_clks_for_mx6q[GPMI_CLK_MAX] = {
 468        "gpmi_apb", "gpmi_bch", "gpmi_bch_apb", "per1_bch",
 469};
 470
 471static int gpmi_get_clks(struct gpmi_nand_data *this)
 472{
 473        struct resources *r = &this->resources;
 474        char **extra_clks = NULL;
 475        struct clk *clk;
 476        int i;
 477
  478        /* The main clock is stored in the first slot. */
 479        r->clock[0] = clk_get(this->dev, "gpmi_io");
 480        if (IS_ERR(r->clock[0]))
 481                goto err_clock;
 482
 483        /* Get extra clocks */
 484        if (GPMI_IS_MX6Q(this))
 485                extra_clks = extra_clks_for_mx6q;
 486        if (!extra_clks)
 487                return 0;
 488
 489        for (i = 1; i < GPMI_CLK_MAX; i++) {
 490                if (extra_clks[i - 1] == NULL)
 491                        break;
 492
 493                clk = clk_get(this->dev, extra_clks[i - 1]);
 494                if (IS_ERR(clk))
 495                        goto err_clock;
 496
 497                r->clock[i] = clk;
 498        }
 499
 500        if (GPMI_IS_MX6Q(this))
 501                /*
 502                 * Set the default value for the gpmi clock in mx6q:
 503                 *
  504                 * If you want to use an ONFI NAND running in synchronous
  505                 * mode, change the clock rate as needed.
 506                 */
 507                clk_set_rate(r->clock[0], 22000000);
 508
 509        return 0;
 510
 511err_clock:
  512        dev_dbg(this->dev, "failed to find the clocks.\n");
 513        gpmi_put_clks(this);
 514        return -ENOMEM;
 515}
 516
 517static int acquire_resources(struct gpmi_nand_data *this)
 518{
 519        struct pinctrl *pinctrl;
 520        int ret;
 521
 522        ret = acquire_register_block(this, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME);
 523        if (ret)
 524                goto exit_regs;
 525
 526        ret = acquire_register_block(this, GPMI_NAND_BCH_REGS_ADDR_RES_NAME);
 527        if (ret)
 528                goto exit_regs;
 529
 530        ret = acquire_bch_irq(this, bch_irq);
 531        if (ret)
 532                goto exit_regs;
 533
 534        ret = acquire_dma_channels(this);
 535        if (ret)
 536                goto exit_dma_channels;
 537
 538        pinctrl = devm_pinctrl_get_select_default(&this->pdev->dev);
 539        if (IS_ERR(pinctrl)) {
 540                ret = PTR_ERR(pinctrl);
 541                goto exit_pin;
 542        }
 543
 544        ret = gpmi_get_clks(this);
 545        if (ret)
 546                goto exit_clock;
 547        return 0;
 548
 549exit_clock:
 550exit_pin:
 551        release_dma_channels(this);
 552exit_dma_channels:
 553        release_bch_irq(this);
 554exit_regs:
 555        release_register_block(this);
 556        return ret;
 557}
 558
 559static void release_resources(struct gpmi_nand_data *this)
 560{
 561        gpmi_put_clks(this);
 562        release_register_block(this);
 563        release_bch_irq(this);
 564        release_dma_channels(this);
 565}
 566
 567static int init_hardware(struct gpmi_nand_data *this)
 568{
 569        int ret;
 570
 571        /*
 572         * This structure contains the "safe" GPMI timing that should succeed
 573         * with any NAND Flash device
 574         * (although, with less-than-optimal performance).
 575         */
 576        struct nand_timing  safe_timing = {
 577                .data_setup_in_ns        = 80,
 578                .data_hold_in_ns         = 60,
 579                .address_setup_in_ns     = 25,
 580                .gpmi_sample_delay_in_ns =  6,
 581                .tREA_in_ns              = -1,
 582                .tRLOH_in_ns             = -1,
 583                .tRHOH_in_ns             = -1,
 584        };
 585
  586        /* Initialize the hardware. */
 587        ret = gpmi_init(this);
 588        if (ret)
 589                return ret;
 590
 591        this->timing = safe_timing;
 592        return 0;
 593}
 594
 595static int read_page_prepare(struct gpmi_nand_data *this,
 596                        void *destination, unsigned length,
 597                        void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
 598                        void **use_virt, dma_addr_t *use_phys)
 599{
 600        struct device *dev = this->dev;
 601
 602        if (virt_addr_valid(destination)) {
 603                dma_addr_t dest_phys;
 604
 605                dest_phys = dma_map_single(dev, destination,
 606                                                length, DMA_FROM_DEVICE);
 607                if (dma_mapping_error(dev, dest_phys)) {
 608                        if (alt_size < length) {
 609                                pr_err("%s, Alternate buffer is too small\n",
 610                                        __func__);
 611                                return -ENOMEM;
 612                        }
 613                        goto map_failed;
 614                }
 615                *use_virt = destination;
 616                *use_phys = dest_phys;
 617                this->direct_dma_map_ok = true;
 618                return 0;
 619        }
 620
 621map_failed:
 622        *use_virt = alt_virt;
 623        *use_phys = alt_phys;
 624        this->direct_dma_map_ok = false;
 625        return 0;
 626}
 627
 628static inline void read_page_end(struct gpmi_nand_data *this,
 629                        void *destination, unsigned length,
 630                        void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
 631                        void *used_virt, dma_addr_t used_phys)
 632{
 633        if (this->direct_dma_map_ok)
 634                dma_unmap_single(this->dev, used_phys, length, DMA_FROM_DEVICE);
 635}
 636
 637static inline void read_page_swap_end(struct gpmi_nand_data *this,
 638                        void *destination, unsigned length,
 639                        void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
 640                        void *used_virt, dma_addr_t used_phys)
 641{
 642        if (!this->direct_dma_map_ok)
 643                memcpy(destination, alt_virt, length);
 644}
 645
 646static int send_page_prepare(struct gpmi_nand_data *this,
 647                        const void *source, unsigned length,
 648                        void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
 649                        const void **use_virt, dma_addr_t *use_phys)
 650{
 651        struct device *dev = this->dev;
 652
 653        if (virt_addr_valid(source)) {
 654                dma_addr_t source_phys;
 655
 656                source_phys = dma_map_single(dev, (void *)source, length,
 657                                                DMA_TO_DEVICE);
 658                if (dma_mapping_error(dev, source_phys)) {
 659                        if (alt_size < length) {
 660                                pr_err("%s, Alternate buffer is too small\n",
 661                                        __func__);
 662                                return -ENOMEM;
 663                        }
 664                        goto map_failed;
 665                }
 666                *use_virt = source;
 667                *use_phys = source_phys;
 668                return 0;
 669        }
 670map_failed:
 671        /*
 672         * Copy the content of the source buffer into the alternate
 673         * buffer and set up the return values accordingly.
 674         */
 675        memcpy(alt_virt, source, length);
 676
 677        *use_virt = alt_virt;
 678        *use_phys = alt_phys;
 679        return 0;
 680}
 681
 682static void send_page_end(struct gpmi_nand_data *this,
 683                        const void *source, unsigned length,
 684                        void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
 685                        const void *used_virt, dma_addr_t used_phys)
 686{
 687        struct device *dev = this->dev;
 688        if (used_virt == source)
 689                dma_unmap_single(dev, used_phys, length, DMA_TO_DEVICE);
 690}
 691
 692static void gpmi_free_dma_buffer(struct gpmi_nand_data *this)
 693{
 694        struct device *dev = this->dev;
 695
 696        if (this->page_buffer_virt && virt_addr_valid(this->page_buffer_virt))
 697                dma_free_coherent(dev, this->page_buffer_size,
 698                                        this->page_buffer_virt,
 699                                        this->page_buffer_phys);
 700        kfree(this->cmd_buffer);
 701        kfree(this->data_buffer_dma);
 702
 703        this->cmd_buffer        = NULL;
 704        this->data_buffer_dma   = NULL;
 705        this->page_buffer_virt  = NULL;
 706        this->page_buffer_size  =  0;
 707}
 708
 709/* Allocate the DMA buffers */
 710static int gpmi_alloc_dma_buffer(struct gpmi_nand_data *this)
 711{
 712        struct bch_geometry *geo = &this->bch_geometry;
 713        struct device *dev = this->dev;
 714
 715        /* [1] Allocate a command buffer. PAGE_SIZE is enough. */
 716        this->cmd_buffer = kzalloc(PAGE_SIZE, GFP_DMA | GFP_KERNEL);
 717        if (this->cmd_buffer == NULL)
 718                goto error_alloc;
 719
 720        /* [2] Allocate a read/write data buffer. PAGE_SIZE is enough. */
 721        this->data_buffer_dma = kzalloc(PAGE_SIZE, GFP_DMA | GFP_KERNEL);
 722        if (this->data_buffer_dma == NULL)
 723                goto error_alloc;
 724
 725        /*
 726         * [3] Allocate the page buffer.
 727         *
 728         * Both the payload buffer and the auxiliary buffer must appear on
 729         * 32-bit boundaries. We presume the size of the payload buffer is a
 730         * power of two and is much larger than four, which guarantees the
 731         * auxiliary buffer will appear on a 32-bit boundary.
 732         */
 733        this->page_buffer_size = geo->payload_size + geo->auxiliary_size;
 734        this->page_buffer_virt = dma_alloc_coherent(dev, this->page_buffer_size,
 735                                        &this->page_buffer_phys, GFP_DMA);
 736        if (!this->page_buffer_virt)
 737                goto error_alloc;
 738
 739
 740        /* Slice up the page buffer. */
 741        this->payload_virt = this->page_buffer_virt;
 742        this->payload_phys = this->page_buffer_phys;
 743        this->auxiliary_virt = this->payload_virt + geo->payload_size;
 744        this->auxiliary_phys = this->payload_phys + geo->payload_size;
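
        /*
         * With the hypothetical 2KiB-page geometry from the earlier examples,
         * the page buffer is 2048 + 16 = 2064 bytes and the auxiliary part
         * starts at offset 2048, which satisfies the 32-bit alignment noted
         * above.
         */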
 745        return 0;
 746
 747error_alloc:
 748        gpmi_free_dma_buffer(this);
 749        pr_err("Error allocating DMA buffers!\n");
 750        return -ENOMEM;
 751}
 752
 753static void gpmi_cmd_ctrl(struct mtd_info *mtd, int data, unsigned int ctrl)
 754{
 755        struct nand_chip *chip = mtd->priv;
 756        struct gpmi_nand_data *this = chip->priv;
 757        int ret;
 758
 759        /*
 760         * Every operation begins with a command byte and a series of zero or
 761         * more address bytes. These are distinguished by either the Address
 762         * Latch Enable (ALE) or Command Latch Enable (CLE) signals being
 763         * asserted. When MTD is ready to execute the command, it will deassert
 764         * both latch enables.
 765         *
 766         * Rather than run a separate DMA operation for every single byte, we
 767         * queue them up and run a single DMA operation for the entire series
 768         * of command and data bytes. NAND_CMD_NONE means the END of the queue.
 769         */
 770        if ((ctrl & (NAND_ALE | NAND_CLE))) {
 771                if (data != NAND_CMD_NONE)
 772                        this->cmd_buffer[this->command_length++] = data;
 773                return;
 774        }
 775
 776        if (!this->command_length)
 777                return;
 778
 779        ret = gpmi_send_command(this);
 780        if (ret)
 781                pr_err("Chip: %u, Error %d\n", this->current_chip, ret);
 782
 783        this->command_length = 0;
 784}
 785
 786static int gpmi_dev_ready(struct mtd_info *mtd)
 787{
 788        struct nand_chip *chip = mtd->priv;
 789        struct gpmi_nand_data *this = chip->priv;
 790
 791        return gpmi_is_ready(this, this->current_chip);
 792}
 793
 794static void gpmi_select_chip(struct mtd_info *mtd, int chipnr)
 795{
 796        struct nand_chip *chip = mtd->priv;
 797        struct gpmi_nand_data *this = chip->priv;
 798
 799        if ((this->current_chip < 0) && (chipnr >= 0))
 800                gpmi_begin(this);
 801        else if ((this->current_chip >= 0) && (chipnr < 0))
 802                gpmi_end(this);
 803
 804        this->current_chip = chipnr;
 805}
 806
 807static void gpmi_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
 808{
 809        struct nand_chip *chip = mtd->priv;
 810        struct gpmi_nand_data *this = chip->priv;
 811
 812        pr_debug("len is %d\n", len);
 813        this->upper_buf = buf;
 814        this->upper_len = len;
 815
 816        gpmi_read_data(this);
 817}
 818
 819static void gpmi_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
 820{
 821        struct nand_chip *chip = mtd->priv;
 822        struct gpmi_nand_data *this = chip->priv;
 823
 824        pr_debug("len is %d\n", len);
 825        this->upper_buf = (uint8_t *)buf;
 826        this->upper_len = len;
 827
 828        gpmi_send_data(this);
 829}
 830
 831static uint8_t gpmi_read_byte(struct mtd_info *mtd)
 832{
 833        struct nand_chip *chip = mtd->priv;
 834        struct gpmi_nand_data *this = chip->priv;
 835        uint8_t *buf = this->data_buffer_dma;
 836
 837        gpmi_read_buf(mtd, buf, 1);
 838        return buf[0];
 839}
 840
 841/*
 842 * Handles block mark swapping.
  843 * It can be called either to swap the block mark or to swap it back,
  844 * because the operations are the same.
 845 */
 846static void block_mark_swapping(struct gpmi_nand_data *this,
 847                                void *payload, void *auxiliary)
 848{
 849        struct bch_geometry *nfc_geo = &this->bch_geometry;
 850        unsigned char *p;
 851        unsigned char *a;
 852        unsigned int  bit;
 853        unsigned char mask;
 854        unsigned char from_data;
 855        unsigned char from_oob;
 856
 857        if (!this->swap_block_mark)
 858                return;
 859
 860        /*
 861         * If control arrives here, we're swapping. Make some convenience
 862         * variables.
 863         */
 864        bit = nfc_geo->block_mark_bit_offset;
 865        p   = payload + nfc_geo->block_mark_byte_offset;
 866        a   = auxiliary;
 867
 868        /*
 869         * Get the byte from the data area that overlays the block mark. Since
 870         * the ECC engine applies its own view to the bits in the page, the
 871         * physical block mark won't (in general) appear on a byte boundary in
 872         * the data.
 873         */
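        /*
         * Illustration (hypothetical bit value): if bit == 2, the line below
         * rebuilds the byte from the upper six bits of p[0] and the lower
         * two bits of p[1].
         */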
 874        from_data = (p[0] >> bit) | (p[1] << (8 - bit));
 875
 876        /* Get the byte from the OOB. */
 877        from_oob = a[0];
 878
 879        /* Swap them. */
 880        a[0] = from_data;
 881
 882        mask = (0x1 << bit) - 1;
 883        p[0] = (p[0] & mask) | (from_oob << bit);
 884
 885        mask = ~0 << bit;
 886        p[1] = (p[1] & mask) | (from_oob >> (8 - bit));
 887}
 888
 889static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
 890                                uint8_t *buf, int oob_required, int page)
 891{
 892        struct gpmi_nand_data *this = chip->priv;
 893        struct bch_geometry *nfc_geo = &this->bch_geometry;
 894        void          *payload_virt;
 895        dma_addr_t    payload_phys;
 896        void          *auxiliary_virt;
 897        dma_addr_t    auxiliary_phys;
 898        unsigned int  i;
 899        unsigned char *status;
 900        unsigned int  max_bitflips = 0;
 901        int           ret;
 902
 903        pr_debug("page number is : %d\n", page);
 904        ret = read_page_prepare(this, buf, mtd->writesize,
 905                                        this->payload_virt, this->payload_phys,
 906                                        nfc_geo->payload_size,
 907                                        &payload_virt, &payload_phys);
 908        if (ret) {
 909                pr_err("Inadequate DMA buffer\n");
 910                ret = -ENOMEM;
 911                return ret;
 912        }
 913        auxiliary_virt = this->auxiliary_virt;
 914        auxiliary_phys = this->auxiliary_phys;
 915
 916        /* go! */
 917        ret = gpmi_read_page(this, payload_phys, auxiliary_phys);
 918        read_page_end(this, buf, mtd->writesize,
 919                        this->payload_virt, this->payload_phys,
 920                        nfc_geo->payload_size,
 921                        payload_virt, payload_phys);
 922        if (ret) {
 923                pr_err("Error in ECC-based read: %d\n", ret);
 924                return ret;
 925        }
 926
 927        /* handle the block mark swapping */
 928        block_mark_swapping(this, payload_virt, auxiliary_virt);
 929
 930        /* Loop over status bytes, accumulating ECC status. */
 931        status = auxiliary_virt + nfc_geo->auxiliary_status_offset;
 932
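        /*
         * As the loop below assumes, each status byte covers one ECC chunk;
         * any value other than the good/erased/uncorrectable markers is the
         * number of bitflips the BCH engine corrected in that chunk.
         */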
 933        for (i = 0; i < nfc_geo->ecc_chunk_count; i++, status++) {
 934                if ((*status == STATUS_GOOD) || (*status == STATUS_ERASED))
 935                        continue;
 936
 937                if (*status == STATUS_UNCORRECTABLE) {
 938                        mtd->ecc_stats.failed++;
 939                        continue;
 940                }
 941                mtd->ecc_stats.corrected += *status;
 942                max_bitflips = max_t(unsigned int, max_bitflips, *status);
 943        }
 944
 945        if (oob_required) {
 946                /*
 947                 * It's time to deliver the OOB bytes. See gpmi_ecc_read_oob()
 948                 * for details about our policy for delivering the OOB.
 949                 *
 950                 * We fill the caller's buffer with set bits, and then copy the
  951                 * block mark to the caller's buffer. Note that, if block mark
 952                 * swapping was necessary, it has already been done, so we can
 953                 * rely on the first byte of the auxiliary buffer to contain
 954                 * the block mark.
 955                 */
 956                memset(chip->oob_poi, ~0, mtd->oobsize);
 957                chip->oob_poi[0] = ((uint8_t *) auxiliary_virt)[0];
 958        }
 959
 960        read_page_swap_end(this, buf, mtd->writesize,
 961                        this->payload_virt, this->payload_phys,
 962                        nfc_geo->payload_size,
 963                        payload_virt, payload_phys);
 964
 965        return max_bitflips;
 966}
 967
 968static int gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
 969                                const uint8_t *buf, int oob_required)
 970{
 971        struct gpmi_nand_data *this = chip->priv;
 972        struct bch_geometry *nfc_geo = &this->bch_geometry;
 973        const void *payload_virt;
 974        dma_addr_t payload_phys;
 975        const void *auxiliary_virt;
 976        dma_addr_t auxiliary_phys;
 977        int        ret;
 978
 979        pr_debug("ecc write page.\n");
 980        if (this->swap_block_mark) {
 981                /*
 982                 * If control arrives here, we're doing block mark swapping.
 983                 * Since we can't modify the caller's buffers, we must copy them
 984                 * into our own.
 985                 */
 986                memcpy(this->payload_virt, buf, mtd->writesize);
 987                payload_virt = this->payload_virt;
 988                payload_phys = this->payload_phys;
 989
 990                memcpy(this->auxiliary_virt, chip->oob_poi,
 991                                nfc_geo->auxiliary_size);
 992                auxiliary_virt = this->auxiliary_virt;
 993                auxiliary_phys = this->auxiliary_phys;
 994
 995                /* Handle block mark swapping. */
 996                block_mark_swapping(this,
 997                                (void *) payload_virt, (void *) auxiliary_virt);
 998        } else {
 999                /*
1000                 * If control arrives here, we're not doing block mark swapping,
 1001                 * so we can try to use the caller's buffers.
1002                 */
1003                ret = send_page_prepare(this,
1004                                buf, mtd->writesize,
1005                                this->payload_virt, this->payload_phys,
1006                                nfc_geo->payload_size,
1007                                &payload_virt, &payload_phys);
1008                if (ret) {
1009                        pr_err("Inadequate payload DMA buffer\n");
1010                        return 0;
1011                }
1012
1013                ret = send_page_prepare(this,
1014                                chip->oob_poi, mtd->oobsize,
1015                                this->auxiliary_virt, this->auxiliary_phys,
1016                                nfc_geo->auxiliary_size,
1017                                &auxiliary_virt, &auxiliary_phys);
1018                if (ret) {
1019                        pr_err("Inadequate auxiliary DMA buffer\n");
1020                        goto exit_auxiliary;
1021                }
1022        }
1023
1024        /* Ask the NFC. */
1025        ret = gpmi_send_page(this, payload_phys, auxiliary_phys);
1026        if (ret)
1027                pr_err("Error in ECC-based write: %d\n", ret);
1028
1029        if (!this->swap_block_mark) {
1030                send_page_end(this, chip->oob_poi, mtd->oobsize,
1031                                this->auxiliary_virt, this->auxiliary_phys,
1032                                nfc_geo->auxiliary_size,
1033                                auxiliary_virt, auxiliary_phys);
1034exit_auxiliary:
1035                send_page_end(this, buf, mtd->writesize,
1036                                this->payload_virt, this->payload_phys,
1037                                nfc_geo->payload_size,
1038                                payload_virt, payload_phys);
1039        }
1040
1041        return 0;
1042}
1043
1044/*
1045 * There are several places in this driver where we have to handle the OOB and
1046 * block marks. This is the function where things are the most complicated, so
1047 * this is where we try to explain it all. All the other places refer back to
1048 * here.
1049 *
1050 * These are the rules, in order of decreasing importance:
1051 *
1052 * 1) Nothing the caller does can be allowed to imperil the block mark.
1053 *
1054 * 2) In read operations, the first byte of the OOB we return must reflect the
1055 *    true state of the block mark, no matter where that block mark appears in
1056 *    the physical page.
1057 *
1058 * 3) ECC-based read operations return an OOB full of set bits (since we never
1059 *    allow ECC-based writes to the OOB, it doesn't matter what ECC-based reads
1060 *    return).
1061 *
1062 * 4) "Raw" read operations return a direct view of the physical bytes in the
1063 *    page, using the conventional definition of which bytes are data and which
1064 *    are OOB. This gives the caller a way to see the actual, physical bytes
1065 *    in the page, without the distortions applied by our ECC engine.
1066 *
1067 *
1068 * What we do for this specific read operation depends on two questions:
1069 *
1070 * 1) Are we doing a "raw" read, or an ECC-based read?
1071 *
1072 * 2) Are we using block mark swapping or transcription?
1073 *
1074 * There are four cases, illustrated by the following Karnaugh map:
1075 *
1076 *                    |           Raw           |         ECC-based       |
1077 *       -------------+-------------------------+-------------------------+
1078 *                    | Read the conventional   |                         |
1079 *                    | OOB at the end of the   |                         |
1080 *       Swapping     | page and return it. It  |                         |
1081 *                    | contains exactly what   |                         |
1082 *                    | we want.                | Read the block mark and |
1083 *       -------------+-------------------------+ return it in a buffer   |
1084 *                    | Read the conventional   | full of set bits.       |
1085 *                    | OOB at the end of the   |                         |
1086 *                    | page and also the block |                         |
1087 *       Transcribing | mark in the metadata.   |                         |
1088 *                    | Copy the block mark     |                         |
1089 *                    | into the first byte of  |                         |
1090 *                    | the OOB.                |                         |
1091 *       -------------+-------------------------+-------------------------+
1092 *
1093 * Note that we break rule #4 in the Transcribing/Raw case because we're not
1094 * giving an accurate view of the actual, physical bytes in the page (we're
1095 * overwriting the block mark). That's OK because it's more important to follow
1096 * rule #2.
1097 *
1098 * It turns out that knowing whether we want an "ECC-based" or "raw" read is not
1099 * easy. When reading a page, for example, the NAND Flash MTD code calls our
1100 * ecc.read_page or ecc.read_page_raw function. Thus, the fact that MTD wants an
1101 * ECC-based or raw view of the page is implicit in which function it calls
1102 * (there is a similar pair of ECC-based/raw functions for writing).
1103 *
1104 * FIXME: The following paragraph is incorrect, now that there exist
1105 * ecc.read_oob_raw and ecc.write_oob_raw functions.
1106 *
1107 * Since MTD assumes the OOB is not covered by ECC, there is no pair of
 1108 * ECC-based/raw functions for reading or writing the OOB. The fact that the
1109 * caller wants an ECC-based or raw view of the page is not propagated down to
1110 * this driver.
1111 */
1112static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
1113                                int page)
1114{
1115        struct gpmi_nand_data *this = chip->priv;
1116
1117        pr_debug("page number is %d\n", page);
1118        /* clear the OOB buffer */
1119        memset(chip->oob_poi, ~0, mtd->oobsize);
1120
1121        /* Read out the conventional OOB. */
1122        chip->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
1123        chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1124
1125        /*
1126         * Now, we want to make sure the block mark is correct. In the
1127         * Swapping/Raw case, we already have it. Otherwise, we need to
1128         * explicitly read it.
1129         */
1130        if (!this->swap_block_mark) {
1131                /* Read the block mark into the first byte of the OOB buffer. */
1132                chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
1133                chip->oob_poi[0] = chip->read_byte(mtd);
1134        }
1135
1136        return 0;
1137}
1138
1139static int
1140gpmi_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *chip, int page)
1141{
 1142         * The BCH will use all of the (page + oob).
 1143         * Our gpmi_hw_ecclayout can only prohibit JFFS2 from writing the oob.
 1144         * But it cannot stop some ioctls such as MEMWRITEOOB, which uses
 1145         * MTD_OPS_PLACE_OOB. So we have to implement this function to prohibit
1146         * MTD_OPS_PLACE_OOB. So We have to implement this function to prohibit
1147         * these ioctls too.
1148         */
1149        return -EPERM;
1150}
1151
1152static int gpmi_block_markbad(struct mtd_info *mtd, loff_t ofs)
1153{
1154        struct nand_chip *chip = mtd->priv;
1155        struct gpmi_nand_data *this = chip->priv;
1156        int block, ret = 0;
1157        uint8_t *block_mark;
1158        int column, page, status, chipnr;
1159
1160        /* Get block number */
1161        block = (int)(ofs >> chip->bbt_erase_shift);
1162        if (chip->bbt)
1163                chip->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1);
1164
1165        /* Do we have a flash based bad block table ? */
1166        if (chip->bbt_options & NAND_BBT_USE_FLASH)
1167                ret = nand_update_bbt(mtd, ofs);
1168        else {
1169                chipnr = (int)(ofs >> chip->chip_shift);
1170                chip->select_chip(mtd, chipnr);
1171
1172                column = this->swap_block_mark ? mtd->writesize : 0;
1173
1174                /* Write the block mark. */
1175                block_mark = this->data_buffer_dma;
1176                block_mark[0] = 0; /* bad block marker */
1177
1178                /* Shift to get page */
1179                page = (int)(ofs >> chip->page_shift);
1180
1181                chip->cmdfunc(mtd, NAND_CMD_SEQIN, column, page);
1182                chip->write_buf(mtd, block_mark, 1);
1183                chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
1184
1185                status = chip->waitfunc(mtd, chip);
1186                if (status & NAND_STATUS_FAIL)
1187                        ret = -EIO;
1188
1189                chip->select_chip(mtd, -1);
1190        }
1191        if (!ret)
1192                mtd->ecc_stats.badblocks++;
1193
1194        return ret;
1195}
1196
1197static int nand_boot_set_geometry(struct gpmi_nand_data *this)
1198{
1199        struct boot_rom_geometry *geometry = &this->rom_geometry;
1200
1201        /*
1202         * Set the boot block stride size.
1203         *
1204         * In principle, we should be reading this from the OTP bits, since
1205         * that's where the ROM is going to get it. In fact, we don't have any
1206         * way to read the OTP bits, so we go with the default and hope for the
1207         * best.
1208         */
1209        geometry->stride_size_in_pages = 64;
1210
1211        /*
1212         * Set the search area stride exponent.
1213         *
1214         * In principle, we should be reading this from the OTP bits, since
1215         * that's where the ROM is going to get it. In fact, we don't have any
1216         * way to read the OTP bits, so we go with the default and hope for the
1217         * best.
1218         */
1219        geometry->search_area_stride_exponent = 2;
1220        return 0;
1221}
1222
1223static const char  *fingerprint = "STMP";
1224static int mx23_check_transcription_stamp(struct gpmi_nand_data *this)
1225{
1226        struct boot_rom_geometry *rom_geo = &this->rom_geometry;
1227        struct device *dev = this->dev;
1228        struct mtd_info *mtd = &this->mtd;
1229        struct nand_chip *chip = &this->nand;
1230        unsigned int search_area_size_in_strides;
1231        unsigned int stride;
1232        unsigned int page;
1233        uint8_t *buffer = chip->buffers->databuf;
1234        int saved_chip_number;
1235        int found_an_ncb_fingerprint = false;
1236
1237        /* Compute the number of strides in a search area. */
1238        search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;
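        /*
         * With the defaults from nand_boot_set_geometry() (64-page strides,
         * stride exponent 2), this is 4 strides, so the scan below reads one
         * page from each of the first 4 strides, i.e. within the first 256
         * pages of chip 0.
         */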
1239
1240        saved_chip_number = this->current_chip;
1241        chip->select_chip(mtd, 0);
1242
1243        /*
1244         * Loop through the first search area, looking for the NCB fingerprint.
1245         */
1246        dev_dbg(dev, "Scanning for an NCB fingerprint...\n");
1247
1248        for (stride = 0; stride < search_area_size_in_strides; stride++) {
1249                /* Compute the page addresses. */
1250                page = stride * rom_geo->stride_size_in_pages;
1251
1252                dev_dbg(dev, "Looking for a fingerprint in page 0x%x\n", page);
1253
1254                /*
1255                 * Read the NCB fingerprint. The fingerprint is four bytes long
1256                 * and starts in the 12th byte of the page.
1257                 */
1258                chip->cmdfunc(mtd, NAND_CMD_READ0, 12, page);
1259                chip->read_buf(mtd, buffer, strlen(fingerprint));
1260
1261                /* Look for the fingerprint. */
1262                if (!memcmp(buffer, fingerprint, strlen(fingerprint))) {
1263                        found_an_ncb_fingerprint = true;
1264                        break;
1265                }
1266
1267        }
1268
1269        chip->select_chip(mtd, saved_chip_number);
1270
1271        if (found_an_ncb_fingerprint)
1272                dev_dbg(dev, "\tFound a fingerprint\n");
1273        else
1274                dev_dbg(dev, "\tNo fingerprint found\n");
1275        return found_an_ncb_fingerprint;
1276}
1277
1278/* Writes a transcription stamp. */
1279static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
1280{
1281        struct device *dev = this->dev;
1282        struct boot_rom_geometry *rom_geo = &this->rom_geometry;
1283        struct mtd_info *mtd = &this->mtd;
1284        struct nand_chip *chip = &this->nand;
1285        unsigned int block_size_in_pages;
1286        unsigned int search_area_size_in_strides;
1287        unsigned int search_area_size_in_pages;
1288        unsigned int search_area_size_in_blocks;
1289        unsigned int block;
1290        unsigned int stride;
1291        unsigned int page;
1292        uint8_t      *buffer = chip->buffers->databuf;
1293        int saved_chip_number;
1294        int status;
1295
1296        /* Compute the search area geometry. */
1297        block_size_in_pages = mtd->erasesize / mtd->writesize;
1298        search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;
1299        search_area_size_in_pages = search_area_size_in_strides *
1300                                        rom_geo->stride_size_in_pages;
1301        search_area_size_in_blocks =
1302                  (search_area_size_in_pages + (block_size_in_pages - 1)) /
1303                                    block_size_in_pages;
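
        /*
         * For a hypothetical chip with 2KiB pages and 128KiB blocks, this
         * works out to 64 pages per block, 4 * 64 = 256 search pages and
         * therefore a 4-block search area.
         */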
1304
1305        dev_dbg(dev, "Search Area Geometry :\n");
1306        dev_dbg(dev, "\tin Blocks : %u\n", search_area_size_in_blocks);
1307        dev_dbg(dev, "\tin Strides: %u\n", search_area_size_in_strides);
1308        dev_dbg(dev, "\tin Pages  : %u\n", search_area_size_in_pages);
1309
1310        /* Select chip 0. */
1311        saved_chip_number = this->current_chip;
1312        chip->select_chip(mtd, 0);
1313
1314        /* Loop over blocks in the first search area, erasing them. */
1315        dev_dbg(dev, "Erasing the search area...\n");
1316
1317        for (block = 0; block < search_area_size_in_blocks; block++) {
1318                /* Compute the page address. */
1319                page = block * block_size_in_pages;
1320
1321                /* Erase this block. */
1322                dev_dbg(dev, "\tErasing block 0x%x\n", block);
1323                chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
1324                chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
1325
1326                /* Wait for the erase to finish. */
1327                status = chip->waitfunc(mtd, chip);
1328                if (status & NAND_STATUS_FAIL)
1329                        dev_err(dev, "[%s] Erase failed.\n", __func__);
1330        }
1331
1332        /* Write the NCB fingerprint into the page buffer. */
1333        memset(buffer, ~0, mtd->writesize);
1334        memset(chip->oob_poi, ~0, mtd->oobsize);
1335        memcpy(buffer + 12, fingerprint, strlen(fingerprint));
1336
1337        /* Loop through the first search area, writing NCB fingerprints. */
1338        dev_dbg(dev, "Writing NCB fingerprints...\n");
1339        for (stride = 0; stride < search_area_size_in_strides; stride++) {
1340                /* Compute the page addresses. */
1341                page = stride * rom_geo->stride_size_in_pages;
1342
1343                /* Write the first page of the current stride. */
1344                dev_dbg(dev, "Writing an NCB fingerprint in page 0x%x\n", page);
1345                chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
1346                chip->ecc.write_page_raw(mtd, chip, buffer, 0);
1347                chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
1348
1349                /* Wait for the write to finish. */
1350                status = chip->waitfunc(mtd, chip);
1351                if (status & NAND_STATUS_FAIL)
1352                        dev_err(dev, "[%s] Write failed.\n", __func__);
1353        }
1354
1355        /* Deselect chip 0. */
1356        chip->select_chip(mtd, saved_chip_number);
1357        return 0;
1358}
1359
1360static int mx23_boot_init(struct gpmi_nand_data  *this)
1361{
1362        struct device *dev = this->dev;
1363        struct nand_chip *chip = &this->nand;
1364        struct mtd_info *mtd = &this->mtd;
1365        unsigned int block_count;
1366        unsigned int block;
1367        int     chipnr;
1368        int     page;
1369        loff_t  byte;
1370        uint8_t block_mark;
1371        int     ret = 0;
1372
1373        /*
1374         * If control arrives here, we can't use block mark swapping, which
1375         * means we're forced to use transcription. First, scan for the
1376         * transcription stamp. If we find it, then we don't have to do
1377         * anything -- the block marks are already transcribed.
1378         */
1379        if (mx23_check_transcription_stamp(this))
1380                return 0;
1381
1382        /*
 1383         * If control arrives here, we couldn't find a transcription stamp,
 1384         * so we presume the block marks are in the conventional location.
1385         */
1386        dev_dbg(dev, "Transcribing bad block marks...\n");
1387
1388        /* Compute the number of blocks in the entire medium. */
1389        block_count = chip->chipsize >> chip->phys_erase_shift;
1390
1391        /*
1392         * Loop over all the blocks in the medium, transcribing block marks as
1393         * we go.
1394         */
1395        for (block = 0; block < block_count; block++) {
1396                /*
1397                 * Compute the chip, page and byte addresses for this block's
1398                 * conventional mark.
1399                 */
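                /*
                 * For instance, with a hypothetical 2KiB-page, 128KiB-block
                 * chip (page_shift = 11, phys_erase_shift = 17), the lines
                 * below give page = block * 64 and byte = block * 131072.
                 */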
1400                chipnr = block >> (chip->chip_shift - chip->phys_erase_shift);
1401                page = block << (chip->phys_erase_shift - chip->page_shift);
1402                byte = block <<  chip->phys_erase_shift;
1403
1404                /* Send the command to read the conventional block mark. */
1405                chip->select_chip(mtd, chipnr);
1406                chip->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
1407                block_mark = chip->read_byte(mtd);
1408                chip->select_chip(mtd, -1);
1409
1410                /*
1411                 * Check if the block is marked bad. If so, we need to mark it
1412                 * again, but this time the result will be a mark in the
1413                 * location where we transcribe block marks.
1414                 */
1415                if (block_mark != 0xff) {
1416                        dev_dbg(dev, "Transcribing mark in block %u\n", block);
1417                        ret = chip->block_markbad(mtd, byte);
1418                        if (ret)
1419                                dev_err(dev, "Failed to mark block bad with "
1420                                                        "ret %d\n", ret);
1421                }
1422        }
1423
1424        /* Write the stamp that indicates we've transcribed the block marks. */
1425        mx23_write_transcription_stamp(this);
1426        return 0;
1427}
1428
1429static int nand_boot_init(struct gpmi_nand_data  *this)
1430{
1431        nand_boot_set_geometry(this);
1432
1433        /* This is ROM arch-specific initialization before the BBT scanning. */
1434        if (GPMI_IS_MX23(this))
1435                return mx23_boot_init(this);
1436        return 0;
1437}
1438
1439static int gpmi_set_geometry(struct gpmi_nand_data *this)
1440{
1441        int ret;
1442
1443        /* Free the temporary DMA memory for reading ID. */
1444        gpmi_free_dma_buffer(this);
1445
1446        /* Set up the NFC geometry which is used by BCH. */
1447        ret = bch_set_geometry(this);
1448        if (ret) {
1449                pr_err("Error setting BCH geometry: %d\n", ret);
1450                return ret;
1451        }
1452
1453        /* Allocate the new DMA buffers according to the pagesize and oobsize. */
1454        return gpmi_alloc_dma_buffer(this);
1455}
1456
1457static int gpmi_pre_bbt_scan(struct gpmi_nand_data  *this)
1458{
1459        int ret;
1460
1461        /* Set up swap_block_mark; it must be set before gpmi_set_geometry(). */
1462        if (GPMI_IS_MX23(this))
1463                this->swap_block_mark = false;
1464        else
1465                this->swap_block_mark = true;
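        /*
         * On the i.MX23 the block marks cannot be swapped, so the factory bad
         * block marks are transcribed to the driver's own mark location
         * instead; see mx23_boot_init() above.
         */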
1466
1467        /* Set up the medium geometry */
1468        ret = gpmi_set_geometry(this);
1469        if (ret)
1470                return ret;
1471
1472        /* Adjust the ECC strength according to the chip. */
1473        this->nand.ecc.strength = this->bch_geometry.ecc_strength;
1474        this->mtd.ecc_strength = this->bch_geometry.ecc_strength;
1475        this->mtd.bitflip_threshold = this->bch_geometry.ecc_strength;
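        /*
         * With bitflip_threshold equal to the ECC strength, the MTD core only
         * reports -EUCLEAN (the hint for upper layers such as UBI to scrub the
         * block) when a read needed the full correction capability.
         */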
1476
1477        /* NAND boot init; it depends on gpmi_set_geometry(). */
1478        return nand_boot_init(this);
1479}
1480
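/*
 * gpmi_scan_bbt() is invoked through chip->scan_bbt by nand_scan_tail(), i.e.
 * after nand_scan() has identified the chip but before the bad block table is
 * built, which is why the final geometry and ECC setup happens here.
 */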
1481static int gpmi_scan_bbt(struct mtd_info *mtd)
1482{
1483        struct nand_chip *chip = mtd->priv;
1484        struct gpmi_nand_data *this = chip->priv;
1485        int ret;
1486
1487        /* Prepare for the BBT scan. */
1488        ret = gpmi_pre_bbt_scan(this);
1489        if (ret)
1490                return ret;
1491
1492        /*
1493         * Try to enable extra features such as EDO or Sync mode.
1494         *
1495         * We do not check the return value here: if enabling the extra
1496         * features fails, we can still run in the normal way.
1497         */
1498        gpmi_extra_init(this);
1499
1500        /* use the default BBT implementation */
1501        return nand_default_bbt(mtd);
1502}
1503
1504static void gpmi_nfc_exit(struct gpmi_nand_data *this)
1505{
1506        nand_release(&this->mtd);
1507        gpmi_free_dma_buffer(this);
1508}
1509
1510static int gpmi_nfc_init(struct gpmi_nand_data *this)
1511{
1512        struct mtd_info  *mtd = &this->mtd;
1513        struct nand_chip *chip = &this->nand;
1514        struct mtd_part_parser_data ppdata = {};
1515        int ret;
1516
1517        /* init current chip */
1518        this->current_chip      = -1;
1519
1520        /* init the MTD data structures */
1521        mtd->priv               = chip;
1522        mtd->name               = "gpmi-nand";
1523        mtd->owner              = THIS_MODULE;
1524
1525        /* init the nand_chip{}; we do not support a 16-bit NAND Flash bus. */
1526        chip->priv              = this;
1527        chip->select_chip       = gpmi_select_chip;
1528        chip->cmd_ctrl          = gpmi_cmd_ctrl;
1529        chip->dev_ready         = gpmi_dev_ready;
1530        chip->read_byte         = gpmi_read_byte;
1531        chip->read_buf          = gpmi_read_buf;
1532        chip->write_buf         = gpmi_write_buf;
1533        chip->ecc.read_page     = gpmi_ecc_read_page;
1534        chip->ecc.write_page    = gpmi_ecc_write_page;
1535        chip->ecc.read_oob      = gpmi_ecc_read_oob;
1536        chip->ecc.write_oob     = gpmi_ecc_write_oob;
1537        chip->scan_bbt          = gpmi_scan_bbt;
1538        chip->badblock_pattern  = &gpmi_bbt_descr;
1539        chip->block_markbad     = gpmi_block_markbad;
1540        chip->options           |= NAND_NO_SUBPAGE_WRITE;
1541        chip->ecc.mode          = NAND_ECC_HW;
1542        chip->ecc.size          = 1;
1543        chip->ecc.strength      = 8;
1544        chip->ecc.layout        = &gpmi_hw_ecclayout;
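        /*
         * The ECC strength set above is only a provisional value;
         * gpmi_pre_bbt_scan() overwrites it with the strength computed from
         * the real chip geometry once nand_scan() has identified the chip.
         */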
1545        if (of_get_nand_on_flash_bbt(this->dev->of_node))
1546                chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
1547
1548        /* Allocate a temporary DMA buffer for reading the ID in nand_scan(). */
1549        this->bch_geometry.payload_size = 1024;
1550        this->bch_geometry.auxiliary_size = 128;
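        /*
         * These sizes are only large enough for nand_scan() to read the ID
         * bytes; gpmi_set_geometry() frees this buffer and reallocates it to
         * match the real pagesize and oobsize afterwards.
         */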
1551        ret = gpmi_alloc_dma_buffer(this);
1552        if (ret)
1553                goto err_out;
1554
1555        ret = nand_scan(mtd, 1);
1556        if (ret) {
1557                pr_err("Chip scan failed\n");
1558                goto err_out;
1559        }
1560
1561        ppdata.of_node = this->pdev->dev.of_node;
1562        ret = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);
1563        if (ret)
1564                goto err_out;
1565        return 0;
1566
1567err_out:
1568        gpmi_nfc_exit(this);
1569        return ret;
1570}
1571
1572static const struct platform_device_id gpmi_ids[] = {
1573        { .name = "imx23-gpmi-nand", .driver_data = IS_MX23, },
1574        { .name = "imx28-gpmi-nand", .driver_data = IS_MX28, },
1575        { .name = "imx6q-gpmi-nand", .driver_data = IS_MX6Q, },
1576        {},
1577};
1578
1579static const struct of_device_id gpmi_nand_id_table[] = {
1580        {
1581                .compatible = "fsl,imx23-gpmi-nand",
1582                .data = (void *)&gpmi_ids[IS_MX23]
1583        }, {
1584                .compatible = "fsl,imx28-gpmi-nand",
1585                .data = (void *)&gpmi_ids[IS_MX28]
1586        }, {
1587                .compatible = "fsl,imx6q-gpmi-nand",
1588                .data = (void *)&gpmi_ids[IS_MX6Q]
1589        }, {}
1590};
1591MODULE_DEVICE_TABLE(of, gpmi_nand_id_table);
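
/*
 * Each OF match entry points back into gpmi_ids[] (indexed by IS_MX23,
 * IS_MX28 and IS_MX6Q), so probe() can set pdev->id_entry from the device
 * tree match and checks such as GPMI_IS_MX23() work the same way for both
 * DT and platform-ID probing.
 */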
1592
1593static int gpmi_nand_probe(struct platform_device *pdev)
1594{
1595        struct gpmi_nand_data *this;
1596        const struct of_device_id *of_id;
1597        int ret;
1598
1599        of_id = of_match_device(gpmi_nand_id_table, &pdev->dev);
1600        if (of_id) {
1601                pdev->id_entry = of_id->data;
1602        } else {
1603                pr_err("Failed to find the right device id.\n");
1604                return -ENODEV;
1605        }
1606
1607        this = kzalloc(sizeof(*this), GFP_KERNEL);
1608        if (!this) {
1609                pr_err("Failed to allocate per-device memory\n");
1610                return -ENOMEM;
1611        }
1612
1613        platform_set_drvdata(pdev, this);
1614        this->pdev  = pdev;
1615        this->dev   = &pdev->dev;
1616
1617        ret = acquire_resources(this);
1618        if (ret)
1619                goto exit_acquire_resources;
1620
1621        ret = init_hardware(this);
1622        if (ret)
1623                goto exit_nfc_init;
1624
1625        ret = gpmi_nfc_init(this);
1626        if (ret)
1627                goto exit_nfc_init;
1628
1629        dev_info(this->dev, "driver registered.\n");
1630
1631        return 0;
1632
1633exit_nfc_init:
1634        release_resources(this);
1635exit_acquire_resources:
1636        platform_set_drvdata(pdev, NULL);
1637        dev_err(this->dev, "driver registration failed: %d\n", ret);
1638        kfree(this);
1639
1640        return ret;
1641}
1642
1643static int gpmi_nand_remove(struct platform_device *pdev)
1644{
1645        struct gpmi_nand_data *this = platform_get_drvdata(pdev);
1646
1647        gpmi_nfc_exit(this);
1648        release_resources(this);
1649        platform_set_drvdata(pdev, NULL);
1650        kfree(this);
1651        return 0;
1652}
1653
1654static struct platform_driver gpmi_nand_driver = {
1655        .driver = {
1656                .name = "gpmi-nand",
1657                .of_match_table = gpmi_nand_id_table,
1658        },
1659        .probe   = gpmi_nand_probe,
1660        .remove  = gpmi_nand_remove,
1661        .id_table = gpmi_ids,
1662};
1663module_platform_driver(gpmi_nand_driver);
1664
1665MODULE_AUTHOR("Freescale Semiconductor, Inc.");
1666MODULE_DESCRIPTION("i.MX GPMI NAND Flash Controller Driver");
1667MODULE_LICENSE("GPL");
1668