linux/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
   1// SPDX-License-Identifier: GPL-2.0+
   2/*
   3 * Freescale GPMI NAND Flash Driver
   4 *
   5 * Copyright (C) 2010-2015 Freescale Semiconductor, Inc.
   6 * Copyright (C) 2008 Embedded Alley Solutions, Inc.
   7 */
   8#include <linux/clk.h>
   9#include <linux/delay.h>
  10#include <linux/slab.h>
  11#include <linux/sched/task_stack.h>
  12#include <linux/interrupt.h>
  13#include <linux/module.h>
  14#include <linux/mtd/partitions.h>
  15#include <linux/of.h>
  16#include <linux/of_device.h>
  17#include <linux/pm_runtime.h>
  18#include <linux/dma/mxs-dma.h>
  19#include "gpmi-nand.h"
  20#include "gpmi-regs.h"
  21#include "bch-regs.h"
  22
  23/* Resource names for the GPMI NAND driver. */
  24#define GPMI_NAND_GPMI_REGS_ADDR_RES_NAME  "gpmi-nand"
  25#define GPMI_NAND_BCH_REGS_ADDR_RES_NAME   "bch"
  26#define GPMI_NAND_BCH_INTERRUPT_RES_NAME   "bch"
  27
  28/* Converts a duration into clock cycles, rounding up. */
  29#define TO_CYCLES(duration, period) DIV_ROUND_UP_ULL(duration, period)
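/*
 * Illustrative example: TO_CYCLES(25000, 10000) = DIV_ROUND_UP(25000, 10000)
 * = 3, i.e. a 25 ns duration at a 10 ns clock period takes 3 cycles.
 */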
  30
  31#define MXS_SET_ADDR            0x4
  32#define MXS_CLR_ADDR            0x8
  33/*
  34 * Clear the bit and poll it cleared.  This is usually called with
  35 * a reset address and mask being either SFTRST(bit 31) or CLKGATE
  36 * (bit 30).
  37 */
  38static int clear_poll_bit(void __iomem *addr, u32 mask)
  39{
  40        int timeout = 0x400;
  41
  42        /* clear the bit */
  43        writel(mask, addr + MXS_CLR_ADDR);
  44
  45        /*
  46         * SFTRST needs 3 GPMI clocks to settle; the reference manual
  47         * recommends waiting 1 us.
  48         */
  49        udelay(1);
  50
  51        /* poll the bit becoming clear */
  52        while ((readl(addr) & mask) && --timeout)
  53                /* nothing */;
  54
  55        return !timeout;
  56}
  57
  58#define MODULE_CLKGATE          (1 << 30)
  59#define MODULE_SFTRST           (1 << 31)
  60/*
  61 * The current mxs_reset_block() will do two things:
  62 *  [1] enable the module.
  63 *  [2] reset the module.
  64 *
  65 * In most cases this is fine.
  66 * But the MX23 has a hardware bug in the BCH block (see erratum #2847):
  67 * if you try to soft reset the BCH block, it becomes unusable until
  68 * the next hard reset. This case occurs in NAND boot mode: when the board
  69 * boots from NAND, the ROM of the chip initializes the BCH block itself,
  70 * so if the driver resets the BCH again, the BCH will no longer work and
  71 * you will see a DMA timeout. The bug has been fixed in later chips,
  72 * such as the MX28.
  73 *
  74 * To avoid this bug, we add a new parameter `just_enable` to
  75 * mxs_reset_block() and reimplement it here.
  76 */
  77static int gpmi_reset_block(void __iomem *reset_addr, bool just_enable)
  78{
  79        int ret;
  80        int timeout = 0x400;
  81
  82        /* clear and poll SFTRST */
  83        ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
  84        if (unlikely(ret))
  85                goto error;
  86
  87        /* clear CLKGATE */
  88        writel(MODULE_CLKGATE, reset_addr + MXS_CLR_ADDR);
  89
  90        if (!just_enable) {
  91                /* set SFTRST to reset the block */
  92                writel(MODULE_SFTRST, reset_addr + MXS_SET_ADDR);
  93                udelay(1);
  94
  95                /* poll CLKGATE becoming set */
  96                while ((!(readl(reset_addr) & MODULE_CLKGATE)) && --timeout)
  97                        /* nothing */;
  98                if (unlikely(!timeout))
  99                        goto error;
 100        }
 101
 102        /* clear and poll SFTRST */
 103        ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
 104        if (unlikely(ret))
 105                goto error;
 106
 107        /* clear and poll CLKGATE */
 108        ret = clear_poll_bit(reset_addr, MODULE_CLKGATE);
 109        if (unlikely(ret))
 110                goto error;
 111
 112        return 0;
 113
 114error:
 115        pr_err("%s(%p): module reset timeout\n", __func__, reset_addr);
 116        return -ETIMEDOUT;
 117}
 118
 119static int __gpmi_enable_clk(struct gpmi_nand_data *this, bool v)
 120{
 121        struct clk *clk;
 122        int ret;
 123        int i;
 124
 125        for (i = 0; i < GPMI_CLK_MAX; i++) {
 126                clk = this->resources.clock[i];
 127                if (!clk)
 128                        break;
 129
 130                if (v) {
 131                        ret = clk_prepare_enable(clk);
 132                        if (ret)
 133                                goto err_clk;
 134                } else {
 135                        clk_disable_unprepare(clk);
 136                }
 137        }
 138        return 0;
 139
 140err_clk:
 141        for (; i > 0; i--)
 142                clk_disable_unprepare(this->resources.clock[i - 1]);
 143        return ret;
 144}
 145
 146static int gpmi_init(struct gpmi_nand_data *this)
 147{
 148        struct resources *r = &this->resources;
 149        int ret;
 150
 151        ret = pm_runtime_get_sync(this->dev);
 152        if (ret < 0) {
 153                pm_runtime_put_noidle(this->dev);
 154                return ret;
 155        }
 156
 157        ret = gpmi_reset_block(r->gpmi_regs, false);
 158        if (ret)
 159                goto err_out;
 160
 161        /*
 162         * Reset BCH here, too. We got failures otherwise :(
 163         * See later BCH reset for explanation of MX23 and MX28 handling
 164         */
 165        ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MXS(this));
 166        if (ret)
 167                goto err_out;
 168
 169        /* Choose NAND mode. */
 170        writel(BM_GPMI_CTRL1_GPMI_MODE, r->gpmi_regs + HW_GPMI_CTRL1_CLR);
 171
 172        /* Set the IRQ polarity. */
 173        writel(BM_GPMI_CTRL1_ATA_IRQRDY_POLARITY,
 174                                r->gpmi_regs + HW_GPMI_CTRL1_SET);
 175
 176        /* Disable Write-Protection. */
 177        writel(BM_GPMI_CTRL1_DEV_RESET, r->gpmi_regs + HW_GPMI_CTRL1_SET);
 178
 179        /* Select BCH ECC. */
 180        writel(BM_GPMI_CTRL1_BCH_MODE, r->gpmi_regs + HW_GPMI_CTRL1_SET);
 181
 182        /*
 183         * Decouple the chip select from the dma channel. We use dma0 for all
 184         * the chips and force all NAND RDY_BUSY inputs to be sourced from
 185         * RDY_BUSY0.
 186         */
 187        writel(BM_GPMI_CTRL1_DECOUPLE_CS | BM_GPMI_CTRL1_GANGED_RDYBUSY,
 188               r->gpmi_regs + HW_GPMI_CTRL1_SET);
 189
 190err_out:
 191        pm_runtime_mark_last_busy(this->dev);
 192        pm_runtime_put_autosuspend(this->dev);
 193        return ret;
 194}
 195
 196/* This function is only called when a bug occurs, to dump the controller state. */
 197static void gpmi_dump_info(struct gpmi_nand_data *this)
 198{
 199        struct resources *r = &this->resources;
 200        struct bch_geometry *geo = &this->bch_geometry;
 201        u32 reg;
 202        int i;
 203
 204        dev_err(this->dev, "Show GPMI registers :\n");
 205        for (i = 0; i <= HW_GPMI_DEBUG / 0x10 + 1; i++) {
 206                reg = readl(r->gpmi_regs + i * 0x10);
 207                dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
 208        }
 209
 210        /* start to print out the BCH info */
 211        dev_err(this->dev, "Show BCH registers :\n");
 212        for (i = 0; i <= HW_BCH_VERSION / 0x10 + 1; i++) {
 213                reg = readl(r->bch_regs + i * 0x10);
 214                dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
 215        }
 216        dev_err(this->dev, "BCH Geometry :\n"
 217                "GF length              : %u\n"
 218                "ECC Strength           : %u\n"
 219                "Page Size in Bytes     : %u\n"
 220                "Metadata Size in Bytes : %u\n"
 221                "ECC0 Chunk Size in Bytes: %u\n"
 222                "ECCn Chunk Size in Bytes: %u\n"
 223                "ECC Chunk Count        : %u\n"
 224                "Payload Size in Bytes  : %u\n"
 225                "Auxiliary Size in Bytes: %u\n"
 226                "Auxiliary Status Offset: %u\n"
 227                "Block Mark Byte Offset : %u\n"
 228                "Block Mark Bit Offset  : %u\n",
 229                geo->gf_len,
 230                geo->ecc_strength,
 231                geo->page_size,
 232                geo->metadata_size,
 233                geo->ecc0_chunk_size,
 234                geo->eccn_chunk_size,
 235                geo->ecc_chunk_count,
 236                geo->payload_size,
 237                geo->auxiliary_size,
 238                geo->auxiliary_status_offset,
 239                geo->block_mark_byte_offset,
 240                geo->block_mark_bit_offset);
 241}
 242
 243static bool gpmi_check_ecc(struct gpmi_nand_data *this)
 244{
 245        struct nand_chip *chip = &this->nand;
 246        struct bch_geometry *geo = &this->bch_geometry;
 247        struct nand_device *nand = &chip->base;
 248        struct nand_ecc_props *conf = &nand->ecc.ctx.conf;
 249
 250        conf->step_size = geo->eccn_chunk_size;
 251        conf->strength = geo->ecc_strength;
 252
 253        /* Do the sanity check. */
 254        if (GPMI_IS_MXS(this)) {
 255                /* The mx23/mx28 only support GF(13). */
 256                if (geo->gf_len == 14)
 257                        return false;
 258        }
 259
 260        if (geo->ecc_strength > this->devdata->bch_max_ecc_strength)
 261                return false;
 262
 263        if (!nand_ecc_is_strong_enough(nand))
 264                return false;
 265
 266        return true;
 267}
 268
 269/* check if the bbm is located in a data chunk rather than an ecc chunk */
 270static bool bbm_in_data_chunk(struct gpmi_nand_data *this,
 271                        unsigned int *chunk_num)
 272{
 273        struct bch_geometry *geo = &this->bch_geometry;
 274        struct nand_chip *chip = &this->nand;
 275        struct mtd_info *mtd = nand_to_mtd(chip);
 276        unsigned int i, j;
 277
 278        if (geo->ecc0_chunk_size != geo->eccn_chunk_size) {
 279                dev_err(this->dev,
 280                        "The size of ecc0_chunk must equal that of eccn_chunk\n");
 281                return false;
 282        }
 283
 284        i = (mtd->writesize * 8 - geo->metadata_size * 8) /
 285                (geo->gf_len * geo->ecc_strength +
 286                        geo->eccn_chunk_size * 8);
 287
 288        j = (mtd->writesize * 8 - geo->metadata_size * 8) -
 289                (geo->gf_len * geo->ecc_strength +
 290                        geo->eccn_chunk_size * 8) * i;
 291
 292        if (j < geo->eccn_chunk_size * 8) {
 293                *chunk_num = i+1;
 294                dev_dbg(this->dev, "Set ecc to %d and bbm in chunk %d\n",
 295                                geo->ecc_strength, *chunk_num);
 296                return true;
 297        }
 298
 299        return false;
 300}
 301
 302/*
 303 * If we can get the ECC information from the nand chip, we do not
 304 * need to calculate it ourselves.
 305 *
 306 * We may have available oob space in this case.
 307 */
 308static int set_geometry_by_ecc_info(struct gpmi_nand_data *this,
 309                                    unsigned int ecc_strength,
 310                                    unsigned int ecc_step)
 311{
 312        struct bch_geometry *geo = &this->bch_geometry;
 313        struct nand_chip *chip = &this->nand;
 314        struct mtd_info *mtd = nand_to_mtd(chip);
 315        unsigned int block_mark_bit_offset;
 316
 317        switch (ecc_step) {
 318        case SZ_512:
 319                geo->gf_len = 13;
 320                break;
 321        case SZ_1K:
 322                geo->gf_len = 14;
 323                break;
 324        default:
 325                dev_err(this->dev,
 326                        "unsupported nand chip. ecc bits : %d, ecc size : %d\n",
 327                        nanddev_get_ecc_requirements(&chip->base)->strength,
 328                        nanddev_get_ecc_requirements(&chip->base)->step_size);
 329                return -EINVAL;
 330        }
 331        geo->ecc0_chunk_size = ecc_step;
 332        geo->eccn_chunk_size = ecc_step;
 333        geo->ecc_strength = round_up(ecc_strength, 2);
 334        if (!gpmi_check_ecc(this))
 335                return -EINVAL;
 336
 337        /* Keep the C >= O */
 338        if (geo->eccn_chunk_size < mtd->oobsize) {
 339                dev_err(this->dev,
 340                        "unsupported nand chip. ecc size: %d, oob size : %d\n",
 341                        ecc_step, mtd->oobsize);
 342                return -EINVAL;
 343        }
 344
 345        /* The default value, see comment in the legacy_set_geometry(). */
 346        geo->metadata_size = 10;
 347
 348        geo->ecc_chunk_count = mtd->writesize / geo->eccn_chunk_size;
 349
 350        /*
 351         * A NAND chip with a 2K page (512-byte data chunks) is shown below:
 352         *
 353         *    |                          P                            |
 354         *    |<----------------------------------------------------->|
 355         *    |                                                       |
 356         *    |                                        (Block Mark)   |
 357         *    |                      P'                      |      | |     |
 358         *    |<-------------------------------------------->|  D   | |  O' |
 359         *    |                                              |<---->| |<--->|
 360         *    V                                              V      V V     V
 361         *    +---+----------+-+----------+-+----------+-+----------+-+-----+
 362         *    | M |   data   |E|   data   |E|   data   |E|   data   |E|     |
 363         *    +---+----------+-+----------+-+----------+-+----------+-+-----+
 364         *                                                   ^              ^
 365         *                                                   |      O       |
 366         *                                                   |<------------>|
 367         *                                                   |              |
 368         *
 369         *      P : the page size for BCH module.
 370         *      E : The ECC strength.
 371         *      G : the length of Galois Field.
 372         *      N : the ECC chunk count per page.
 373         *      M : the metadata size per page.
 374         *      C : the ecc chunk size, aka the "data" above.
 375         *      P': the nand chip's page size.
 376         *      O : the nand chip's oob size.
 377         *      O': the free oob.
 378         *
 379         *      The formula for P is :
 380         *
 381         *                  E * G * N
 382         *             P = ------------ + P' + M
 383         *                      8
 384         *
 385         * The position of block mark moves forward in the ECC-based view
 386         * of page, and the delta is:
 387         *
 388         *                   E * G * (N - 1)
 389         *             D = (---------------- + M)
 390         *                          8
 391         *
 392         * Please see the comment in legacy_set_geometry().
 393         * With the condition C >= O, we still get the same result.
 394         * So the bit position of the physical block mark within the ECC-based
 395         * view of the page is :
 396         *             (P' - D) * 8
 397         */
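        /*
         * Worked example (illustrative numbers, not from any datasheet): for
         * a chip with a 2048-byte page, 64-byte OOB, ecc_step = 512 (so
         * gf_len = 13) and a reported strength of 4, ecc_chunk_count = 4 and
         *
         *     page_size = 2048 + 10 + (13 * 4 * 4) / 8 = 2084 bytes,
         *
         * so metadata plus ECC consume 36 of the 64 OOB bytes.
         */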
 398        geo->page_size = mtd->writesize + geo->metadata_size +
 399                (geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) / 8;
 400
 401        geo->payload_size = mtd->writesize;
 402
 403        geo->auxiliary_status_offset = ALIGN(geo->metadata_size, 4);
 404        geo->auxiliary_size = ALIGN(geo->metadata_size, 4)
 405                                + ALIGN(geo->ecc_chunk_count, 4);
 406
 407        if (!this->swap_block_mark)
 408                return 0;
 409
 410        /* For bit swap. */
 411        block_mark_bit_offset = mtd->writesize * 8 -
 412                (geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
 413                                + geo->metadata_size * 8);
 414
 415        geo->block_mark_byte_offset = block_mark_bit_offset / 8;
 416        geo->block_mark_bit_offset  = block_mark_bit_offset % 8;
 417        return 0;
 418}
 419
 420/*
 421 *  Calculate the ECC strength by hand:
 422 *      E : The ECC strength.
 423 *      G : the length of Galois Field.
 424 *      N : the ECC chunk count per page.
 425 *      O : the oobsize of the NAND chip.
 426 *      M : the metadata size per page.
 427 *
 428 *      The formula is :
 429 *              E * G * N
 430 *            ------------ <= (O - M)
 431 *                  8
 432 *
 433 *      So, we get E by:
 434 *                    (O - M) * 8
 435 *              E <= -------------
 436 *                       G * N
 437 */
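/*
 * Illustrative example (assuming a typical 2KiB page, 64-byte OOB chip):
 * with O = 64, M = 10, G = 13 and N = 4 the division gives
 * (64 - 10) * 8 / (13 * 4) = 8 after integer division, which is already
 * even, so the chunks get an ECC strength of 8 bits.
 */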
 438static inline int get_ecc_strength(struct gpmi_nand_data *this)
 439{
 440        struct bch_geometry *geo = &this->bch_geometry;
 441        struct mtd_info *mtd = nand_to_mtd(&this->nand);
 442        int ecc_strength;
 443
 444        ecc_strength = ((mtd->oobsize - geo->metadata_size) * 8)
 445                        / (geo->gf_len * geo->ecc_chunk_count);
 446
 447        /* We need the next lower even number. */
 448        return round_down(ecc_strength, 2);
 449}
 450
 451static int set_geometry_for_large_oob(struct gpmi_nand_data *this)
 452{
 453        struct bch_geometry *geo = &this->bch_geometry;
 454        struct nand_chip *chip = &this->nand;
 455        struct mtd_info *mtd = nand_to_mtd(chip);
 456        const struct nand_ecc_props *requirements =
 457                nanddev_get_ecc_requirements(&chip->base);
 458        unsigned int block_mark_bit_offset;
 459        unsigned int max_ecc;
 460        unsigned int bbm_chunk;
 461        unsigned int i;
 462
 463        /* sanity check for the minimum ecc the nand chip requires */
 464        if (!(requirements->strength > 0 &&
 465              requirements->step_size > 0))
 466                return -EINVAL;
 467        geo->ecc_strength = requirements->strength;
 468
 469        /* check if platform can support this nand */
 470        if (!gpmi_check_ecc(this)) {
 471                dev_err(this->dev,
 472                        "unsupported NAND chip, minimum ecc required %d\n",
 473                        geo->ecc_strength);
 474                return -EINVAL;
 475        }
 476
 477        /* calculate the maximum ecc the platform can support */
 478        geo->metadata_size = 10;
 479        geo->gf_len = 14;
 480        geo->ecc0_chunk_size = 1024;
 481        geo->eccn_chunk_size = 1024;
 482        geo->ecc_chunk_count = mtd->writesize / geo->eccn_chunk_size;
 483        max_ecc = min(get_ecc_strength(this),
 484                      this->devdata->bch_max_ecc_strength);
 485
 486        /*
 487         * search for a supported ecc strength that puts the bbm
 488         * in a data chunk
 489         */
 490        geo->ecc_strength = max_ecc;
 491        while (!(geo->ecc_strength < requirements->strength)) {
 492                if (bbm_in_data_chunk(this, &bbm_chunk))
 493                        goto geo_setting;
 494                geo->ecc_strength -= 2;
 495        }
 496
 497        /* if none of them works, keep using the minimum ecc the nand
 498         * chip requires, but change the ecc page layout */
 499        geo->ecc_strength = requirements->strength;
 500        /* add an extra ecc chunk to protect the metadata */
 501        geo->ecc0_chunk_size = 0;
 502        geo->ecc_chunk_count = (mtd->writesize / geo->eccn_chunk_size) + 1;
 503        geo->ecc_for_meta = 1;
 504        /* check if oob can afford this extra ecc chunk */
 505        if (mtd->oobsize * 8 < geo->metadata_size * 8 +
 506            geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) {
 507                dev_err(this->dev, "unsupported NAND chip with new layout\n");
 508                return -EINVAL;
 509        }
 510
 511        /* calculate in which chunk the bbm is located */
 512        bbm_chunk = (mtd->writesize * 8 - geo->metadata_size * 8 -
 513                     geo->gf_len * geo->ecc_strength) /
 514                     (geo->gf_len * geo->ecc_strength +
 515                     geo->eccn_chunk_size * 8) + 1;
 516
 517geo_setting:
 518
 519        geo->page_size = mtd->writesize + geo->metadata_size +
 520                (geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) / 8;
 521        geo->payload_size = mtd->writesize;
 522
 523        /*
 524         * The auxiliary buffer contains the metadata and the ECC status. The
 525         * metadata is padded to the nearest 32-bit boundary. The ECC status
 526         * contains one byte for every ECC chunk, and is also padded to the
 527         * nearest 32-bit boundary.
 528         */
 529        geo->auxiliary_status_offset = ALIGN(geo->metadata_size, 4);
 530        geo->auxiliary_size = ALIGN(geo->metadata_size, 4)
 531                                    + ALIGN(geo->ecc_chunk_count, 4);
 532
 533        if (!this->swap_block_mark)
 534                return 0;
 535
 536        /* calculate the number of ecc chunks behind the bbm */
 537        i = (mtd->writesize / geo->eccn_chunk_size) - bbm_chunk + 1;
 538
 539        block_mark_bit_offset = mtd->writesize * 8 -
 540                (geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - i)
 541                + geo->metadata_size * 8);
 542
 543        geo->block_mark_byte_offset = block_mark_bit_offset / 8;
 544        geo->block_mark_bit_offset  = block_mark_bit_offset % 8;
 545
 546        dev_dbg(this->dev, "BCH Geometry :\n"
 547                "GF length              : %u\n"
 548                "ECC Strength           : %u\n"
 549                "Page Size in Bytes     : %u\n"
 550                "Metadata Size in Bytes : %u\n"
 551                "ECC0 Chunk Size in Bytes: %u\n"
 552                "ECCn Chunk Size in Bytes: %u\n"
 553                "ECC Chunk Count        : %u\n"
 554                "Payload Size in Bytes  : %u\n"
 555                "Auxiliary Size in Bytes: %u\n"
 556                "Auxiliary Status Offset: %u\n"
 557                "Block Mark Byte Offset : %u\n"
 558                "Block Mark Bit Offset  : %u\n"
 559                "Block Mark in chunk    : %u\n"
 560                "Ecc for Meta data      : %u\n",
 561                geo->gf_len,
 562                geo->ecc_strength,
 563                geo->page_size,
 564                geo->metadata_size,
 565                geo->ecc0_chunk_size,
 566                geo->eccn_chunk_size,
 567                geo->ecc_chunk_count,
 568                geo->payload_size,
 569                geo->auxiliary_size,
 570                geo->auxiliary_status_offset,
 571                geo->block_mark_byte_offset,
 572                geo->block_mark_bit_offset,
 573                bbm_chunk,
 574                geo->ecc_for_meta);
 575
 576        return 0;
 577}
 578
 579static int legacy_set_geometry(struct gpmi_nand_data *this)
 580{
 581        struct bch_geometry *geo = &this->bch_geometry;
 582        struct mtd_info *mtd = nand_to_mtd(&this->nand);
 583        unsigned int metadata_size;
 584        unsigned int status_size;
 585        unsigned int block_mark_bit_offset;
 586
 587        /*
 588         * The size of the metadata can be changed, though we set it to 10
 589         * bytes now. But it can't be too large, because we have to save
 590         * enough space for BCH.
 591         */
 592        geo->metadata_size = 10;
 593
 594        /* The default for the length of Galois Field. */
 595        geo->gf_len = 13;
 596
 597        /* The default for chunk size. */
 598        geo->ecc0_chunk_size = 512;
 599        geo->eccn_chunk_size = 512;
 600        while (geo->eccn_chunk_size < mtd->oobsize) {
 601                geo->ecc0_chunk_size *= 2; /* keep C >= O */
 602                geo->eccn_chunk_size *= 2; /* keep C >= O */
 603                geo->gf_len = 14;
 604        }
 605
 606        geo->ecc_chunk_count = mtd->writesize / geo->eccn_chunk_size;
 607
 608        /* We use the same ECC strength for all chunks. */
 609        geo->ecc_strength = get_ecc_strength(this);
 610        if (!gpmi_check_ecc(this)) {
 611                dev_err(this->dev,
 612                        "ecc strength: %d cannot be supported by the controller (%d)\n"
 613                        "try to use the minimum ecc strength that the NAND chip requires\n",
 614                        geo->ecc_strength,
 615                        this->devdata->bch_max_ecc_strength);
 616                return -EINVAL;
 617        }
 618
 619        geo->page_size = mtd->writesize + geo->metadata_size +
 620                (geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) / 8;
 621        geo->payload_size = mtd->writesize;
 622
 623        /*
 624         * The auxiliary buffer contains the metadata and the ECC status. The
 625         * metadata is padded to the nearest 32-bit boundary. The ECC status
 626         * contains one byte for every ECC chunk, and is also padded to the
 627         * nearest 32-bit boundary.
 628         */
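        /*
         * Illustrative example: with the default 10-byte metadata and 4 ECC
         * chunks (2KiB page, 512-byte chunks) this is ALIGN(10, 4) = 12 plus
         * ALIGN(4, 4) = 4, i.e. a 16-byte auxiliary buffer with the status
         * bytes starting at offset 12.
         */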
 629        metadata_size = ALIGN(geo->metadata_size, 4);
 630        status_size   = ALIGN(geo->ecc_chunk_count, 4);
 631
 632        geo->auxiliary_size = metadata_size + status_size;
 633        geo->auxiliary_status_offset = metadata_size;
 634
 635        if (!this->swap_block_mark)
 636                return 0;
 637
 638        /*
 639         * We need to compute the byte and bit offsets of
 640         * the physical block mark within the ECC-based view of the page.
 641         *
 642         * A NAND chip with a 2K page is shown below:
 643         *                                             (Block Mark)
 644         *                                                   |      |
 645         *                                                   |  D   |
 646         *                                                   |<---->|
 647         *                                                   V      V
 648         *    +---+----------+-+----------+-+----------+-+----------+-+
 649         *    | M |   data   |E|   data   |E|   data   |E|   data   |E|
 650         *    +---+----------+-+----------+-+----------+-+----------+-+
 651         *
 652         * The position of block mark moves forward in the ECC-based view
 653         * of page, and the delta is:
 654         *
 655         *                   E * G * (N - 1)
 656         *             D = (---------------- + M)
 657         *                          8
 658         *
 659         * With the formula to compute the ECC strength, and the condition
 660         *       : C >= O         (C is the ecc chunk size)
 661         *
 662         * It's easy to deduce the following result:
 663         *
 664         *         E * G       (O - M)      C - M         C - M
 665         *      ----------- <= ------- <=  --------  <  ---------
 666         *           8            N           N          (N - 1)
 667         *
 668         *  So, we get:
 669         *
 670         *                   E * G * (N - 1)
 671         *             D = (---------------- + M) < C
 672         *                          8
 673         *
 674         *  The above inequality means the position of block mark
 675         *  within the ECC-based view of the page is still in the data chunk,
 676         *  and it's NOT in the ECC bits of the chunk.
 677         *
 678         *  Use the following to compute the bit position of the
 679         *  physical block mark within the ECC-based view of the page:
 680         *          (page_size - D) * 8
 681         *
 682         *  --Huang Shijie
 683         */
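        /*
         * Worked example (illustrative 2KiB page with 512-byte chunks,
         * E = 8, G = 13, M = 10): D = (8 * 13 * 3) / 8 + 10 = 49 bytes, so
         * the physical block mark lands at bit offset
         * 2048 * 8 - 49 * 8 = 15992, i.e. byte 1999, bit 0, of the
         * ECC-based view of the page.
         */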
 684        block_mark_bit_offset = mtd->writesize * 8 -
 685                (geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
 686                                + geo->metadata_size * 8);
 687
 688        geo->block_mark_byte_offset = block_mark_bit_offset / 8;
 689        geo->block_mark_bit_offset  = block_mark_bit_offset % 8;
 690        return 0;
 691}
 692
 693static int common_nfc_set_geometry(struct gpmi_nand_data *this)
 694{
 695        struct nand_chip *chip = &this->nand;
 696        struct mtd_info *mtd = nand_to_mtd(&this->nand);
 697        const struct nand_ecc_props *requirements =
 698                nanddev_get_ecc_requirements(&chip->base);
 699        bool use_minimum_ecc;
 700        int err;
 701
 702        use_minimum_ecc = of_property_read_bool(this->dev->of_node,
 703                                                "fsl,use-minimum-ecc");
 704
 705        /* use legacy bch geometry settings by default */
 706        if ((!use_minimum_ecc && mtd->oobsize < 1024) ||
 707            !(requirements->strength > 0 && requirements->step_size > 0)) {
 708                dev_dbg(this->dev, "use legacy bch geometry\n");
 709                err = legacy_set_geometry(this);
 710                if (!err)
 711                        return 0;
 712        }
 713
 714        /* for large oob nand */
 715        if (mtd->oobsize > 1024) {
 716                dev_dbg(this->dev, "use large oob bch geometry\n");
 717                err = set_geometry_for_large_oob(this);
 718                if (!err)
 719                        return 0;
 720        }
 721
 722        /* otherwise use the minimum ecc the nand chip requires */
 723        dev_dbg(this->dev, "use minimum ecc bch geometry\n");
 724        err = set_geometry_by_ecc_info(this, requirements->strength,
 725                                        requirements->step_size);
 726        if (err)
 727                dev_err(this->dev, "none of the bch geometry settings works\n");
 728
 729        return err;
 730}
 731
 732/* Configures the geometry for BCH.  */
 733static int bch_set_geometry(struct gpmi_nand_data *this)
 734{
 735        struct resources *r = &this->resources;
 736        int ret;
 737
 738        ret = common_nfc_set_geometry(this);
 739        if (ret)
 740                return ret;
 741
 742        ret = pm_runtime_get_sync(this->dev);
 743        if (ret < 0) {
 744                pm_runtime_put_autosuspend(this->dev);
 745                return ret;
 746        }
 747
 748        /*
 749         * Due to erratum #2847 of the MX23, the BCH cannot be soft reset
 750         * on that chip, otherwise it will lock up. So we skip resetting
 751         * the BCH on the MX23 and MX28.
 752         */
 753        ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MXS(this));
 754        if (ret)
 755                goto err_out;
 756
 757        /* Set *all* chip selects to use layout 0. */
 758        writel(0, r->bch_regs + HW_BCH_LAYOUTSELECT);
 759
 760        ret = 0;
 761err_out:
 762        pm_runtime_mark_last_busy(this->dev);
 763        pm_runtime_put_autosuspend(this->dev);
 764
 765        return ret;
 766}
 767
 768/*
 769 * <1> First, we should know what the GPMI-clock means.
 770 *     The GPMI-clock is the internal clock of the gpmi nand controller.
 771 *     If you set the gpmi nand controller to 100MHz, the GPMI-clock's
 772 *     period is 10ns. Mark the GPMI-clock's period as GPMI-clock-period.
 773 *
 774 * <2> Second, we should know the frequency on the nand chip pins.
 775 *     The frequency on the nand chip pins is derived from the GPMI-clock.
 776 *     We can get it from the following equation:
 777 *
 778 *         F = G / (DS + DH)
 779 *
 780 *         F  : the frequency on the nand chip pins.
 781 *         G  : the GPMI clock, such as 100MHz.
 782 *         DS : GPMI_HW_GPMI_TIMING0:DATA_SETUP
 783 *         DH : GPMI_HW_GPMI_TIMING0:DATA_HOLD
 784 *
 785 * <3> Third, when the frequency on the nand chip pins is above 33MHz,
 786 *     the nand EDO (Extended Data Out) timing can be applied.
 787 *     The GPMI implements a feedback read strobe to sample the read data.
 788 *     The feedback read strobe can be delayed to support the nand EDO timing,
 789 *     where the read strobe may deassert before the read data is valid and
 790 *     the read data remains valid for some time after the read strobe.
 791 *
 792 *     The following figure illustrates some aspects of a NAND Flash read:
 793 *
 794 *                   |<---tREA---->|
 795 *                   |             |
 796 *                   |         |   |
 797 *                   |<--tRP-->|   |
 798 *                   |         |   |
 799 *                  __          ___|__________________________________
 800 *     RDN            \________/   |
 801 *                                 |
 802 *                                 /---------\
 803 *     Read Data    --------------<           >---------
 804 *                                 \---------/
 805 *                                |     |
 806 *                                |<-D->|
 807 *     FeedbackRDN  ________             ____________
 808 *                          \___________/
 809 *
 810 *          D stands for delay, set in the HW_GPMI_CTRL1:RDN_DELAY.
 811 *
 812 *
 813 * <4> Now, we begin to describe how to compute the right RDN_DELAY.
 814 *
 815 *  4.1) From the aspect of the nand chip pins:
 816 *        Delay = (tREA + C - tRP)               {1}
 817 *
 818 *        tREA : the maximum read access time.
 819 *        C    : a constant to adjust the delay; the default is 4000ps.
 820 *        tRP  : the read pulse width, which is exactly:
 821 *                   tRP = (GPMI-clock-period) * DATA_SETUP
 822 *
 823 *  4.2) From the aspect of the GPMI nand controller:
 824 *         Delay = RDN_DELAY * 0.125 * RP        {2}
 825 *
 826 *         RP   : the DLL reference period.
 827 *            if (GPMI-clock-period > DLL_THRESHOLD)
 828 *                   RP = GPMI-clock-period / 2;
 829 *            else
 830 *                   RP = GPMI-clock-period;
 831 *
 832 *            Set HW_GPMI_CTRL1:HALF_PERIOD if the GPMI-clock-period
 833 *            is greater than DLL_THRESHOLD. On other SoCs the DLL_THRESHOLD
 834 *            is 16000ps, but on mx6q we use 12000ps.
 835 *
 836 *  4.3) since {1} equals {2}, we get:
 837 *
 838 *                     (tREA + 4000 - tRP) * 8
 839 *         RDN_DELAY = -----------------------     {3}
 840 *                           RP
 841 */
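/*
 * Worked example (illustrative only, using rough ONFI EDO mode 5 figures of
 * tREA_max = 16 ns and tDS_min = 10 ns): at a 100 MHz GPMI clock the period
 * is 10000 ps, below the 12000 ps mx6q DLL threshold, so RP = 10000 ps and
 * HALF_PERIOD stays clear. DATA_SETUP rounds up to 1 cycle, so tRP = 10000 ps
 * and RDN_DELAY = (16000 + 4000 - 10000) * 8 / 10000 = 8.
 */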
 842static int gpmi_nfc_compute_timings(struct gpmi_nand_data *this,
 843                                    const struct nand_sdr_timings *sdr)
 844{
 845        struct gpmi_nfc_hardware_timing *hw = &this->hw;
 846        struct resources *r = &this->resources;
 847        unsigned int dll_threshold_ps = this->devdata->max_chain_delay;
 848        unsigned int period_ps, reference_period_ps;
 849        unsigned int data_setup_cycles, data_hold_cycles, addr_setup_cycles;
 850        unsigned int tRP_ps;
 851        bool use_half_period;
 852        int sample_delay_ps, sample_delay_factor;
 853        unsigned int busy_timeout_cycles;
 854        u8 wrn_dly_sel;
 855        unsigned long clk_rate, min_rate;
 856        u64 busy_timeout_ps;
 857
 858        if (sdr->tRC_min >= 30000) {
 859                /* ONFI non-EDO modes [0-3] */
 860                hw->clk_rate = 22000000;
 861                min_rate = 0;
 862                wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_4_TO_8NS;
 863        } else if (sdr->tRC_min >= 25000) {
 864                /* ONFI EDO mode 4 */
 865                hw->clk_rate = 80000000;
 866                min_rate = 22000000;
 867                wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY;
 868        } else {
 869                /* ONFI EDO mode 5 */
 870                hw->clk_rate = 100000000;
 871                min_rate = 80000000;
 872                wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY;
 873        }
 874
 875        clk_rate = clk_round_rate(r->clock[0], hw->clk_rate);
 876        if (clk_rate <= min_rate) {
 877                dev_err(this->dev, "clock setting: expected %ld, got %ld\n",
 878                        hw->clk_rate, clk_rate);
 879                return -ENOTSUPP;
 880        }
 881
 882        hw->clk_rate = clk_rate;
 883        /* SDR core timings are given in picoseconds */
 884        period_ps = div_u64((u64)NSEC_PER_SEC * 1000, hw->clk_rate);
 885
 886        addr_setup_cycles = TO_CYCLES(sdr->tALS_min, period_ps);
 887        data_setup_cycles = TO_CYCLES(sdr->tDS_min, period_ps);
 888        data_hold_cycles = TO_CYCLES(sdr->tDH_min, period_ps);
 889        busy_timeout_ps = max(sdr->tBERS_max, sdr->tPROG_max);
 890        busy_timeout_cycles = TO_CYCLES(busy_timeout_ps, period_ps);
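        /*
         * Illustrative example: a hypothetical tBERS_max of 10 ms at a
         * 100 MHz GPMI clock is 1,000,000 cycles; the BUSY_TIMEOUT field
         * below counts blocks of 4096 cycles, so DIV_ROUND_UP() yields 245.
         */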
 891
 892        hw->timing0 = BF_GPMI_TIMING0_ADDRESS_SETUP(addr_setup_cycles) |
 893                      BF_GPMI_TIMING0_DATA_HOLD(data_hold_cycles) |
 894                      BF_GPMI_TIMING0_DATA_SETUP(data_setup_cycles);
 895        hw->timing1 = BF_GPMI_TIMING1_BUSY_TIMEOUT(DIV_ROUND_UP(busy_timeout_cycles, 4096));
 896
 897        /*
 898         * Derive NFC ideal delay from {3}:
 899         *
 900         *                     (tREA + 4000 - tRP) * 8
 901         *         RDN_DELAY = -----------------------
 902         *                                RP
 903         */
 904        if (period_ps > dll_threshold_ps) {
 905                use_half_period = true;
 906                reference_period_ps = period_ps / 2;
 907        } else {
 908                use_half_period = false;
 909                reference_period_ps = period_ps;
 910        }
 911
 912        tRP_ps = data_setup_cycles * period_ps;
 913        sample_delay_ps = (sdr->tREA_max + 4000 - tRP_ps) * 8;
 914        if (sample_delay_ps > 0)
 915                sample_delay_factor = sample_delay_ps / reference_period_ps;
 916        else
 917                sample_delay_factor = 0;
 918
 919        hw->ctrl1n = BF_GPMI_CTRL1_WRN_DLY_SEL(wrn_dly_sel);
 920        if (sample_delay_factor)
 921                hw->ctrl1n |= BF_GPMI_CTRL1_RDN_DELAY(sample_delay_factor) |
 922                              BM_GPMI_CTRL1_DLL_ENABLE |
 923                              (use_half_period ? BM_GPMI_CTRL1_HALF_PERIOD : 0);
 924        return 0;
 925}
 926
 927static int gpmi_nfc_apply_timings(struct gpmi_nand_data *this)
 928{
 929        struct gpmi_nfc_hardware_timing *hw = &this->hw;
 930        struct resources *r = &this->resources;
 931        void __iomem *gpmi_regs = r->gpmi_regs;
 932        unsigned int dll_wait_time_us;
 933        int ret;
 934
 935        /* Clock dividers do NOT guarantee a clean clock signal on their
 936         * output during a change of the divide factor on i.MX6Q/UL/SX. On
 937         * i.MX7/8, all clock dividers do provide this guarantee.
 938         */
 939        if (GPMI_IS_MX6Q(this) || GPMI_IS_MX6SX(this))
 940                clk_disable_unprepare(r->clock[0]);
 941
 942        ret = clk_set_rate(r->clock[0], hw->clk_rate);
 943        if (ret) {
 944                dev_err(this->dev, "cannot set clock rate to %lu Hz: %d\n", hw->clk_rate, ret);
 945                return ret;
 946        }
 947
 948        if (GPMI_IS_MX6Q(this) || GPMI_IS_MX6SX(this)) {
 949                ret = clk_prepare_enable(r->clock[0]);
 950                if (ret)
 951                        return ret;
 952        }
 953
 954        writel(hw->timing0, gpmi_regs + HW_GPMI_TIMING0);
 955        writel(hw->timing1, gpmi_regs + HW_GPMI_TIMING1);
 956
 957        /*
 958         * Clear several CTRL1 fields; the DLL must be disabled when setting
 959         * RDN_DELAY or HALF_PERIOD.
 960         */
 961        writel(BM_GPMI_CTRL1_CLEAR_MASK, gpmi_regs + HW_GPMI_CTRL1_CLR);
 962        writel(hw->ctrl1n, gpmi_regs + HW_GPMI_CTRL1_SET);
 963
 964        /* Wait 64 clock cycles before using the GPMI after enabling the DLL */
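        /*
         * Note the integer division below: at a hypothetical 100 MHz clock,
         * 1000000 / 100000000 is 0, so the clamp that follows bumps the
         * wait to 1 us.
         */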
 965        dll_wait_time_us = USEC_PER_SEC / hw->clk_rate * 64;
 966        if (!dll_wait_time_us)
 967                dll_wait_time_us = 1;
 968
 969        /* Wait for the DLL to settle. */
 970        udelay(dll_wait_time_us);
 971
 972        return 0;
 973}
 974
 975static int gpmi_setup_interface(struct nand_chip *chip, int chipnr,
 976                                const struct nand_interface_config *conf)
 977{
 978        struct gpmi_nand_data *this = nand_get_controller_data(chip);
 979        const struct nand_sdr_timings *sdr;
 980        int ret;
 981
 982        /* Retrieve required NAND timings */
 983        sdr = nand_get_sdr_timings(conf);
 984        if (IS_ERR(sdr))
 985                return PTR_ERR(sdr);
 986
 987        /* Only MX28/MX6 GPMI controller can reach EDO timings */
 988        if (sdr->tRC_min <= 25000 && !GPMI_IS_MX28(this) && !GPMI_IS_MX6(this))
 989                return -ENOTSUPP;
 990
 991        /* Stop here if this call was just a check */
 992        if (chipnr < 0)
 993                return 0;
 994
 995        /* Do the actual derivation of the controller timings */
 996        ret = gpmi_nfc_compute_timings(this, sdr);
 997        if (ret)
 998                return ret;
 999
1000        this->hw.must_apply_timings = true;
1001
1002        return 0;
1003}
1004
1005/* Clears a BCH interrupt. */
1006static void gpmi_clear_bch(struct gpmi_nand_data *this)
1007{
1008        struct resources *r = &this->resources;
1009        writel(BM_BCH_CTRL_COMPLETE_IRQ, r->bch_regs + HW_BCH_CTRL_CLR);
1010}
1011
1012static struct dma_chan *get_dma_chan(struct gpmi_nand_data *this)
1013{
1014        /* We use DMA channel 0 to access all the nand chips. */
1015        return this->dma_chans[0];
1016}
1017
1018/* This will be called after the DMA operation is finished. */
1019static void dma_irq_callback(void *param)
1020{
1021        struct gpmi_nand_data *this = param;
1022        struct completion *dma_c = &this->dma_done;
1023
1024        complete(dma_c);
1025}
1026
1027static irqreturn_t bch_irq(int irq, void *cookie)
1028{
1029        struct gpmi_nand_data *this = cookie;
1030
1031        gpmi_clear_bch(this);
1032        complete(&this->bch_done);
1033        return IRQ_HANDLED;
1034}
1035
1036static int gpmi_raw_len_to_len(struct gpmi_nand_data *this, int raw_len)
1037{
1038        /*
1039         * raw_len is the length to read/write including the bch data, as
1040         * passed in via exec_op. Calculate the plain data length from it.
1041         */
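        /*
         * Illustrative example: with the legacy 2KiB/ECC8 layout above
         * (page_size = 2110, 512-byte chunks), ALIGN_DOWN(2110, 512)
         * recovers the 2048 bytes of user data.
         */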
1042        if (this->bch)
1043                return ALIGN_DOWN(raw_len, this->bch_geometry.eccn_chunk_size);
1044        else
1045                return raw_len;
1046}
1047
1048/* Can we use the upper layer's buffer directly for DMA? */
1049static bool prepare_data_dma(struct gpmi_nand_data *this, const void *buf,
1050                             int raw_len, struct scatterlist *sgl,
1051                             enum dma_data_direction dr)
1052{
1053        int ret;
1054        int len = gpmi_raw_len_to_len(this, raw_len);
1055
1056        /* first try to map the upper buffer directly */
1057        if (virt_addr_valid(buf) && !object_is_on_stack(buf)) {
1058                sg_init_one(sgl, buf, len);
1059                ret = dma_map_sg(this->dev, sgl, 1, dr);
1060                if (ret == 0)
1061                        goto map_fail;
1062
1063                return true;
1064        }
1065
1066map_fail:
1067        /* We have to use our own DMA buffer. */
1068        sg_init_one(sgl, this->data_buffer_dma, len);
1069
1070        if (dr == DMA_TO_DEVICE && buf != this->data_buffer_dma)
1071                memcpy(this->data_buffer_dma, buf, len);
1072
1073        dma_map_sg(this->dev, sgl, 1, dr);
1074
1075        return false;
1076}
1077
1078/* add our own bbt descriptor */
1079static uint8_t scan_ff_pattern[] = { 0xff };
1080static struct nand_bbt_descr gpmi_bbt_descr = {
1081        .options        = 0,
1082        .offs           = 0,
1083        .len            = 1,
1084        .pattern        = scan_ff_pattern
1085};
1086
1087/*
1088 * We may change the layout if we can get the ECC info from the datasheet,
1089 * else we will use all the (page + OOB).
1090 */
1091static int gpmi_ooblayout_ecc(struct mtd_info *mtd, int section,
1092                              struct mtd_oob_region *oobregion)
1093{
1094        struct nand_chip *chip = mtd_to_nand(mtd);
1095        struct gpmi_nand_data *this = nand_get_controller_data(chip);
1096        struct bch_geometry *geo = &this->bch_geometry;
1097
1098        if (section)
1099                return -ERANGE;
1100
1101        oobregion->offset = 0;
1102        oobregion->length = geo->page_size - mtd->writesize;
1103
1104        return 0;
1105}
1106
1107static int gpmi_ooblayout_free(struct mtd_info *mtd, int section,
1108                               struct mtd_oob_region *oobregion)
1109{
1110        struct nand_chip *chip = mtd_to_nand(mtd);
1111        struct gpmi_nand_data *this = nand_get_controller_data(chip);
1112        struct bch_geometry *geo = &this->bch_geometry;
1113
1114        if (section)
1115                return -ERANGE;
1116
1117        /* The available oob size we have. */
1118        if (geo->page_size < mtd->writesize + mtd->oobsize) {
1119                oobregion->offset = geo->page_size - mtd->writesize;
1120                oobregion->length = mtd->oobsize - oobregion->offset;
1121        }
1122
1123        return 0;
1124}
1125
1126static const char * const gpmi_clks_for_mx2x[] = {
1127        "gpmi_io",
1128};
1129
1130static const struct mtd_ooblayout_ops gpmi_ooblayout_ops = {
1131        .ecc = gpmi_ooblayout_ecc,
1132        .free = gpmi_ooblayout_free,
1133};
1134
1135static const struct gpmi_devdata gpmi_devdata_imx23 = {
1136        .type = IS_MX23,
1137        .bch_max_ecc_strength = 20,
1138        .max_chain_delay = 16000,
1139        .clks = gpmi_clks_for_mx2x,
1140        .clks_count = ARRAY_SIZE(gpmi_clks_for_mx2x),
1141};
1142
1143static const struct gpmi_devdata gpmi_devdata_imx28 = {
1144        .type = IS_MX28,
1145        .bch_max_ecc_strength = 20,
1146        .max_chain_delay = 16000,
1147        .clks = gpmi_clks_for_mx2x,
1148        .clks_count = ARRAY_SIZE(gpmi_clks_for_mx2x),
1149};
1150
1151static const char * const gpmi_clks_for_mx6[] = {
1152        "gpmi_io", "gpmi_apb", "gpmi_bch", "gpmi_bch_apb", "per1_bch",
1153};
1154
1155static const struct gpmi_devdata gpmi_devdata_imx6q = {
1156        .type = IS_MX6Q,
1157        .bch_max_ecc_strength = 40,
1158        .max_chain_delay = 12000,
1159        .clks = gpmi_clks_for_mx6,
1160        .clks_count = ARRAY_SIZE(gpmi_clks_for_mx6),
1161};
1162
1163static const struct gpmi_devdata gpmi_devdata_imx6sx = {
1164        .type = IS_MX6SX,
1165        .bch_max_ecc_strength = 62,
1166        .max_chain_delay = 12000,
1167        .clks = gpmi_clks_for_mx6,
1168        .clks_count = ARRAY_SIZE(gpmi_clks_for_mx6),
1169};
1170
1171static const char * const gpmi_clks_for_mx7d[] = {
1172        "gpmi_io", "gpmi_bch_apb",
1173};
1174
1175static const struct gpmi_devdata gpmi_devdata_imx7d = {
1176        .type = IS_MX7D,
1177        .bch_max_ecc_strength = 62,
1178        .max_chain_delay = 12000,
1179        .clks = gpmi_clks_for_mx7d,
1180        .clks_count = ARRAY_SIZE(gpmi_clks_for_mx7d),
1181};
1182
1183static int acquire_register_block(struct gpmi_nand_data *this,
1184                                  const char *res_name)
1185{
1186        struct platform_device *pdev = this->pdev;
1187        struct resources *res = &this->resources;
1188        void __iomem *p;
1189
1190        p = devm_platform_ioremap_resource_byname(pdev, res_name);
1191        if (IS_ERR(p))
1192                return PTR_ERR(p);
1193
1194        if (!strcmp(res_name, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME))
1195                res->gpmi_regs = p;
1196        else if (!strcmp(res_name, GPMI_NAND_BCH_REGS_ADDR_RES_NAME))
1197                res->bch_regs = p;
1198        else
1199                dev_err(this->dev, "unknown resource name : %s\n", res_name);
1200
1201        return 0;
1202}
1203
1204static int acquire_bch_irq(struct gpmi_nand_data *this, irq_handler_t irq_h)
1205{
1206        struct platform_device *pdev = this->pdev;
1207        const char *res_name = GPMI_NAND_BCH_INTERRUPT_RES_NAME;
1208        int err;
1209
1210        err = platform_get_irq_byname(pdev, res_name);
1211        if (err < 0)
1212                return err;
1213
1214        err = devm_request_irq(this->dev, err, irq_h, 0, res_name, this);
1215        if (err)
1216                dev_err(this->dev, "error requesting BCH IRQ\n");
1217
1218        return err;
1219}
1220
1221static void release_dma_channels(struct gpmi_nand_data *this)
1222{
1223        unsigned int i;
1224        for (i = 0; i < DMA_CHANS; i++)
1225                if (this->dma_chans[i]) {
1226                        dma_release_channel(this->dma_chans[i]);
1227                        this->dma_chans[i] = NULL;
1228                }
1229}
1230
1231static int acquire_dma_channels(struct gpmi_nand_data *this)
1232{
1233        struct platform_device *pdev = this->pdev;
1234        struct dma_chan *dma_chan;
1235        int ret = 0;
1236
1237        /* request dma channel */
1238        dma_chan = dma_request_chan(&pdev->dev, "rx-tx");
1239        if (IS_ERR(dma_chan)) {
1240                ret = dev_err_probe(this->dev, PTR_ERR(dma_chan),
1241                                    "DMA channel request failed\n");
1242                release_dma_channels(this);
1243        } else {
1244                this->dma_chans[0] = dma_chan;
1245        }
1246
1247        return ret;
1248}
1249
1250static int gpmi_get_clks(struct gpmi_nand_data *this)
1251{
1252        struct resources *r = &this->resources;
1253        struct clk *clk;
1254        int err, i;
1255
1256        for (i = 0; i < this->devdata->clks_count; i++) {
1257                clk = devm_clk_get(this->dev, this->devdata->clks[i]);
1258                if (IS_ERR(clk)) {
1259                        err = PTR_ERR(clk);
1260                        goto err_clock;
1261                }
1262
1263                r->clock[i] = clk;
1264        }
1265
1266        return 0;
1267
1268err_clock:
1269        dev_dbg(this->dev, "failed in finding the clocks.\n");
1270        return err;
1271}
1272
1273static int acquire_resources(struct gpmi_nand_data *this)
1274{
1275        int ret;
1276
1277        ret = acquire_register_block(this, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME);
1278        if (ret)
1279                goto exit_regs;
1280
1281        ret = acquire_register_block(this, GPMI_NAND_BCH_REGS_ADDR_RES_NAME);
1282        if (ret)
1283                goto exit_regs;
1284
1285        ret = acquire_bch_irq(this, bch_irq);
1286        if (ret)
1287                goto exit_regs;
1288
1289        ret = acquire_dma_channels(this);
1290        if (ret)
1291                goto exit_regs;
1292
1293        ret = gpmi_get_clks(this);
1294        if (ret)
1295                goto exit_clock;
1296        return 0;
1297
1298exit_clock:
1299        release_dma_channels(this);
1300exit_regs:
1301        return ret;
1302}
1303
1304static void release_resources(struct gpmi_nand_data *this)
1305{
1306        release_dma_channels(this);
1307}
1308
1309static void gpmi_free_dma_buffer(struct gpmi_nand_data *this)
1310{
1311        struct device *dev = this->dev;
1312        struct bch_geometry *geo = &this->bch_geometry;
1313
1314        if (this->auxiliary_virt && virt_addr_valid(this->auxiliary_virt))
1315                dma_free_coherent(dev, geo->auxiliary_size,
1316                                        this->auxiliary_virt,
1317                                        this->auxiliary_phys);
1318        kfree(this->data_buffer_dma);
1319        kfree(this->raw_buffer);
1320
1321        this->data_buffer_dma   = NULL;
1322        this->raw_buffer        = NULL;
1323}
1324
1325/* Allocate the DMA buffers */
1326static int gpmi_alloc_dma_buffer(struct gpmi_nand_data *this)
1327{
1328        struct bch_geometry *geo = &this->bch_geometry;
1329        struct device *dev = this->dev;
1330        struct mtd_info *mtd = nand_to_mtd(&this->nand);
1331
1332        /*
1333         * Allocate a read/write data buffer.
1334         *
1335         *     gpmi_alloc_dma_buffer can be called twice: we allocate a
1336         *     PAGE_SIZE length buffer if it is called before the NAND
1337         *     identification, and a buffer of the real NAND page size
1338         *     when it is called afterwards.
1339         */
1340        this->data_buffer_dma = kzalloc(mtd->writesize ?: PAGE_SIZE,
1341                                        GFP_DMA | GFP_KERNEL);
1342        if (this->data_buffer_dma == NULL)
1343                goto error_alloc;
1344
1345        this->auxiliary_virt = dma_alloc_coherent(dev, geo->auxiliary_size,
1346                                        &this->auxiliary_phys, GFP_DMA);
1347        if (!this->auxiliary_virt)
1348                goto error_alloc;
1349
1350        this->raw_buffer = kzalloc((mtd->writesize ?: PAGE_SIZE) + mtd->oobsize, GFP_KERNEL);
1351        if (!this->raw_buffer)
1352                goto error_alloc;
1353
1354        return 0;
1355
1356error_alloc:
1357        gpmi_free_dma_buffer(this);
1358        return -ENOMEM;
1359}
1360
1361/*
1362 * Handles block mark swapping.
1363 * It can be called when swapping the block mark or when swapping it back,
1364 * because the two operations are the same.
1365 */
1366static void block_mark_swapping(struct gpmi_nand_data *this,
1367                                void *payload, void *auxiliary)
1368{
1369        struct bch_geometry *nfc_geo = &this->bch_geometry;
1370        unsigned char *p;
1371        unsigned char *a;
1372        unsigned int  bit;
1373        unsigned char mask;
1374        unsigned char from_data;
1375        unsigned char from_oob;
1376
1377        if (!this->swap_block_mark)
1378                return;
1379
1380        /*
1381         * If control arrives here, we're swapping. Make some convenience
1382         * variables.
1383         */
1384        bit = nfc_geo->block_mark_bit_offset;
1385        p   = payload + nfc_geo->block_mark_byte_offset;
1386        a   = auxiliary;
1387
1388        /*
1389         * Get the byte from the data area that overlays the block mark. Since
1390         * the ECC engine applies its own view to the bits in the page, the
1391         * physical block mark won't (in general) appear on a byte boundary in
1392         * the data.
1393         */
1394        from_data = (p[0] >> bit) | (p[1] << (8 - bit));
1395
1396        /* Get the byte from the OOB. */
1397        from_oob = a[0];
1398
1399        /* Swap them. */
1400        a[0] = from_data;
1401
1402        mask = (0x1 << bit) - 1;
1403        p[0] = (p[0] & mask) | (from_oob << bit);
1404
1405        mask = ~0 << bit;
1406        p[1] = (p[1] & mask) | (from_oob >> (8 - bit));
1407}
1408
1409static int gpmi_count_bitflips(struct nand_chip *chip, void *buf, int first,
1410                               int last, int meta)
1411{
1412        struct gpmi_nand_data *this = nand_get_controller_data(chip);
1413        struct bch_geometry *nfc_geo = &this->bch_geometry;
1414        struct mtd_info *mtd = nand_to_mtd(chip);
1415        int i;
1416        unsigned char *status;
1417        unsigned int max_bitflips = 0;
1418
1419        /* Loop over status bytes, accumulating ECC status. */
1420        status = this->auxiliary_virt + ALIGN(meta, 4);
1421
1422        for (i = first; i < last; i++, status++) {
1423                if ((*status == STATUS_GOOD) || (*status == STATUS_ERASED))
1424                        continue;
1425
1426                if (*status == STATUS_UNCORRECTABLE) {
1427                        int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
1428                        u8 *eccbuf = this->raw_buffer;
1429                        int offset, bitoffset;
1430                        int eccbytes;
1431                        int flips;
1432
1433                        /* Read ECC bytes into our internal raw_buffer */
1434                        offset = nfc_geo->metadata_size * 8;
1435                        offset += ((8 * nfc_geo->eccn_chunk_size) + eccbits) * (i + 1);
1436                        offset -= eccbits;
1437                        bitoffset = offset % 8;
1438                        eccbytes = DIV_ROUND_UP(offset + eccbits, 8);
1439                        offset /= 8;
1440                        eccbytes -= offset;
1441                        nand_change_read_column_op(chip, offset, eccbuf,
1442                                                   eccbytes, false);
1443
1444                        /*
1445                         * ECC data are not byte aligned and we may have
1446                         * in-band data in the first and last byte of
1447                         * eccbuf. Set non-eccbits to one so that
1448                         * nand_check_erased_ecc_chunk() does not count them
1449                         * as bitflips.
1450                         */
1451                        if (bitoffset)
1452                                eccbuf[0] |= GENMASK(bitoffset - 1, 0);
1453
1454                        bitoffset = (bitoffset + eccbits) % 8;
1455                        if (bitoffset)
1456                                eccbuf[eccbytes - 1] |= GENMASK(7, bitoffset);
1457
1458                        /*
1459                         * The ECC hardware has an uncorrectable ECC status
1460                         * code in case we have bitflips in an erased page. As
1461                         * nothing was written into this subpage, the ECC is
1462                         * obviously wrong and we cannot trust it. We assume
1463                         * at this point that we are reading an erased page and
1464                         * try to correct the bitflips in the buffer up to
1465                         * ecc_strength bitflips. If this is a page with random
1466                         * data, we exceed this number of bitflips and have an
1467                         * ECC failure. Otherwise we use the corrected buffer.
1468                         */
1469                        if (i == 0) {
1470                                /* The first block includes metadata */
1471                                flips = nand_check_erased_ecc_chunk(
1472                                                buf + i * nfc_geo->eccn_chunk_size,
1473                                                nfc_geo->eccn_chunk_size,
1474                                                eccbuf, eccbytes,
1475                                                this->auxiliary_virt,
1476                                                nfc_geo->metadata_size,
1477                                                nfc_geo->ecc_strength);
1478                        } else {
1479                                flips = nand_check_erased_ecc_chunk(
1480                                                buf + i * nfc_geo->eccn_chunk_size,
1481                                                nfc_geo->eccn_chunk_size,
1482                                                eccbuf, eccbytes,
1483                                                NULL, 0,
1484                                                nfc_geo->ecc_strength);
1485                        }
1486
1487                        if (flips > 0) {
1488                                max_bitflips = max_t(unsigned int, max_bitflips,
1489                                                     flips);
1490                                mtd->ecc_stats.corrected += flips;
1491                                continue;
1492                        }
1493
1494                        mtd->ecc_stats.failed++;
1495                        continue;
1496                }
1497
1498                mtd->ecc_stats.corrected += *status;
1499                max_bitflips = max_t(unsigned int, max_bitflips, *status);
1500        }
1501
1502        return max_bitflips;
1503}
1504
1505static void gpmi_bch_layout_std(struct gpmi_nand_data *this)
1506{
1507        struct bch_geometry *geo = &this->bch_geometry;
1508        unsigned int ecc_strength = geo->ecc_strength >> 1;
1509        unsigned int gf_len = geo->gf_len;
1510        unsigned int block0_size = geo->ecc0_chunk_size;
1511        unsigned int blockn_size = geo->eccn_chunk_size;
1512
1513        this->bch_flashlayout0 =
1514                BF_BCH_FLASH0LAYOUT0_NBLOCKS(geo->ecc_chunk_count - 1) |
1515                BF_BCH_FLASH0LAYOUT0_META_SIZE(geo->metadata_size) |
1516                BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this) |
1517                BF_BCH_FLASH0LAYOUT0_GF(gf_len, this) |
1518                BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block0_size, this);
1519
1520        this->bch_flashlayout1 =
1521                BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(geo->page_size) |
1522                BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this) |
1523                BF_BCH_FLASH0LAYOUT1_GF(gf_len, this) |
1524                BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(blockn_size, this);
1525}
1526
1527static int gpmi_ecc_read_page(struct nand_chip *chip, uint8_t *buf,
1528                              int oob_required, int page)
1529{
1530        struct gpmi_nand_data *this = nand_get_controller_data(chip);
1531        struct mtd_info *mtd = nand_to_mtd(chip);
1532        struct bch_geometry *geo = &this->bch_geometry;
1533        unsigned int max_bitflips;
1534        int ret;
1535
1536        gpmi_bch_layout_std(this);
1537        this->bch = true;
1538
1539        ret = nand_read_page_op(chip, page, 0, buf, geo->page_size);
1540        if (ret)
1541                return ret;
1542
1543        max_bitflips = gpmi_count_bitflips(chip, buf, 0,
1544                                           geo->ecc_chunk_count,
1545                                           geo->auxiliary_status_offset);
1546
1547        /* handle the block mark swapping */
1548        block_mark_swapping(this, buf, this->auxiliary_virt);
1549
1550        if (oob_required) {
1551                /*
1552                 * It's time to deliver the OOB bytes. See gpmi_ecc_read_oob()
1553                 * for details about our policy for delivering the OOB.
1554                 *
1555                 * We fill the caller's buffer with set bits, and then copy the
1556                 * block mark to the caller's buffer. Note that, if block mark
1557                 * swapping was necessary, it has already been done, so we can
1558                 * rely on the first byte of the auxiliary buffer to contain
1559                 * the block mark.
1560                 */
1561                memset(chip->oob_poi, ~0, mtd->oobsize);
1562                chip->oob_poi[0] = ((uint8_t *)this->auxiliary_virt)[0];
1563        }
1564
1565        return max_bitflips;
1566}
1567
1568/* Fake a virtual small page for the subpage read */
1569static int gpmi_ecc_read_subpage(struct nand_chip *chip, uint32_t offs,
1570                                 uint32_t len, uint8_t *buf, int page)
1571{
1572        struct gpmi_nand_data *this = nand_get_controller_data(chip);
1573        struct bch_geometry *geo = &this->bch_geometry;
1574        int size = chip->ecc.size; /* ECC chunk size */
1575        int meta, n, page_size;
1576        unsigned int max_bitflips;
1577        unsigned int ecc_strength;
1578        int first, last, marker_pos;
1579        int ecc_parity_size;
1580        int col = 0;
1581        int ret;
1582
1583        /* The size of ECC parity */
1584        ecc_parity_size = geo->gf_len * geo->ecc_strength / 8;
1585
1586        /* Align it with the chunk size */
1587        first = offs / size;
1588        last = (offs + len - 1) / size;
1589
1590        if (this->swap_block_mark) {
1591                /*
1592                 * Find the chunk which contains the Block Marker.
1593                 * If this chunk is in the range of [first, last],
1594                 * we have to read out the whole page.
1595                 * Why? Because the data at the Block Marker position has been
1596                 * swapped into the metadata, which is bound to chunk 0.
1597                 */
1598                marker_pos = geo->block_mark_byte_offset / size;
1599                if (last >= marker_pos && first <= marker_pos) {
1600                        dev_dbg(this->dev,
1601                                "page:%d, first:%d, last:%d, marker at:%d\n",
1602                                page, first, last, marker_pos);
1603                        return gpmi_ecc_read_page(chip, buf, 0, page);
1604                }
1605        }
1606
1607        /*
1608         * If there is a dedicated ECC for the metadata:
1609         * - an extra ECC size must be added when calculating col and
1610         *   page_size, if the meta size is NOT zero.
1611         * - ecc0_chunk_size needs to be set to the same size as the other
1612         *   chunks, if the meta size is zero.
1613         */
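            /*
             * Illustrative arithmetic (hypothetical geometry: 512-byte chunks,
             * gf_len 13, ecc_strength 8, i.e. 13 bytes of parity per chunk):
             * reading from chunk 2 onwards gives
             * col = meta + 2 * (512 + 13) without a dedicated metadata ECC,
             * and col = meta + 13 + 2 * (512 + 13) when ecc_for_meta is set,
             * because one extra parity block follows the metadata.
             */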
1614
1615        meta = geo->metadata_size;
1616        if (first) {
1617                if (geo->ecc_for_meta)
1618                        col = meta + ecc_parity_size
1619                                + (size + ecc_parity_size) * first;
1620                else
1621                        col = meta + (size + ecc_parity_size) * first;
1622
1623                meta = 0;
1624                buf = buf + first * size;
1625        }
1626
1627        ecc_parity_size = geo->gf_len * geo->ecc_strength / 8;
1628        n = last - first + 1;
1629
1630        if (geo->ecc_for_meta && meta)
1631                page_size = meta + ecc_parity_size
1632                            + (size + ecc_parity_size) * n;
1633        else
1634                page_size = meta + (size + ecc_parity_size) * n;
1635
1636        ecc_strength = geo->ecc_strength >> 1;
1637
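            /*
             * Program the BCH layout registers to describe the "virtual small
             * page" about to be read: n data chunks starting at chunk `first`,
             * with the metadata included only when chunk 0 is part of the
             * transfer (meta has been zeroed above otherwise), so the
             * decoder's view matches the col/page_size computed above.
             */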
1638        this->bch_flashlayout0 = BF_BCH_FLASH0LAYOUT0_NBLOCKS(
1639                (geo->ecc_for_meta ? n : n - 1)) |
1640                BF_BCH_FLASH0LAYOUT0_META_SIZE(meta) |
1641                BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this) |
1642                BF_BCH_FLASH0LAYOUT0_GF(geo->gf_len, this) |
1643                BF_BCH_FLASH0LAYOUT0_DATA0_SIZE((geo->ecc_for_meta ?
1644                0 : geo->ecc0_chunk_size), this);
1645
1646        this->bch_flashlayout1 = BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size) |
1647                BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this) |
1648                BF_BCH_FLASH0LAYOUT1_GF(geo->gf_len, this) |
1649                BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(geo->eccn_chunk_size, this);
1650
1651        this->bch = true;
1652
1653        ret = nand_read_page_op(chip, page, col, buf, page_size);
1654        if (ret)
1655                return ret;
1656
1657        dev_dbg(this->dev, "page:%d(%d:%d)%d, chunk:(%d:%d), BCH PG size:%d\n",
1658                page, offs, len, col, first, n, page_size);
1659
1660        max_bitflips = gpmi_count_bitflips(chip, buf, first, last, meta);
1661
1662        return max_bitflips;
1663}
1664
1665static int gpmi_ecc_write_page(struct nand_chip *chip, const uint8_t *buf,
1666                               int oob_required, int page)
1667{
1668        struct mtd_info *mtd = nand_to_mtd(chip);
1669        struct gpmi_nand_data *this = nand_get_controller_data(chip);
1670        struct bch_geometry *nfc_geo = &this->bch_geometry;
1671
1672        dev_dbg(this->dev, "ecc write page.\n");
1673
1674        gpmi_bch_layout_std(this);
1675        this->bch = true;
1676
1677        memcpy(this->auxiliary_virt, chip->oob_poi, nfc_geo->auxiliary_size);
1678
1679        if (this->swap_block_mark) {
1680                /*
1681                 * When doing bad block marker swapping we must always copy the
1682                 * input buffer as we can't modify the const buffer.
1683                 */
1684                memcpy(this->data_buffer_dma, buf, mtd->writesize);
1685                buf = this->data_buffer_dma;
1686                block_mark_swapping(this, this->data_buffer_dma,
1687                                    this->auxiliary_virt);
1688        }
1689
1690        return nand_prog_page_op(chip, page, 0, buf, nfc_geo->page_size);
1691}
1692
1693/*
1694 * There are several places in this driver where we have to handle the OOB and
1695 * block marks. This is the function where things are the most complicated, so
1696 * this is where we try to explain it all. All the other places refer back to
1697 * here.
1698 *
1699 * These are the rules, in order of decreasing importance:
1700 *
1701 * 1) Nothing the caller does can be allowed to imperil the block mark.
1702 *
1703 * 2) In read operations, the first byte of the OOB we return must reflect the
1704 *    true state of the block mark, no matter where that block mark appears in
1705 *    the physical page.
1706 *
1707 * 3) ECC-based read operations return an OOB full of set bits (since we never
1708 *    allow ECC-based writes to the OOB, it doesn't matter what ECC-based reads
1709 *    return).
1710 *
1711 * 4) "Raw" read operations return a direct view of the physical bytes in the
1712 *    page, using the conventional definition of which bytes are data and which
1713 *    are OOB. This gives the caller a way to see the actual, physical bytes
1714 *    in the page, without the distortions applied by our ECC engine.
1715 *
1716 *
1717 * What we do for this specific read operation depends on two questions:
1718 *
1719 * 1) Are we doing a "raw" read, or an ECC-based read?
1720 *
1721 * 2) Are we using block mark swapping or transcription?
1722 *
1723 * There are four cases, illustrated by the following Karnaugh map:
1724 *
1725 *                    |           Raw           |         ECC-based       |
1726 *       -------------+-------------------------+-------------------------+
1727 *                    | Read the conventional   |                         |
1728 *                    | OOB at the end of the   |                         |
1729 *       Swapping     | page and return it. It  |                         |
1730 *                    | contains exactly what   |                         |
1731 *                    | we want.                | Read the block mark and |
1732 *       -------------+-------------------------+ return it in a buffer   |
1733 *                    | Read the conventional   | full of set bits.       |
1734 *                    | OOB at the end of the   |                         |
1735 *                    | page and also the block |                         |
1736 *       Transcribing | mark in the metadata.   |                         |
1737 *                    | Copy the block mark     |                         |
1738 *                    | into the first byte of  |                         |
1739 *                    | the OOB.                |                         |
1740 *       -------------+-------------------------+-------------------------+
1741 *
1742 * Note that we break rule #4 in the Transcribing/Raw case because we're not
1743 * giving an accurate view of the actual, physical bytes in the page (we're
1744 * overwriting the block mark). That's OK because it's more important to follow
1745 * rule #2.
1746 *
1747 * It turns out that knowing whether we want an "ECC-based" or "raw" read is not
1748 * easy. When reading a page, for example, the NAND Flash MTD code calls our
1749 * ecc.read_page or ecc.read_page_raw function. Thus, the fact that MTD wants an
1750 * ECC-based or raw view of the page is implicit in which function it calls
1751 * (there is a similar pair of ECC-based/raw functions for writing).
1752 */
1753static int gpmi_ecc_read_oob(struct nand_chip *chip, int page)
1754{
1755        struct mtd_info *mtd = nand_to_mtd(chip);
1756        struct gpmi_nand_data *this = nand_get_controller_data(chip);
1757        int ret;
1758
1759        /* clear the OOB buffer */
1760        memset(chip->oob_poi, ~0, mtd->oobsize);
1761
1762        /* Read out the conventional OOB. */
1763        ret = nand_read_page_op(chip, page, mtd->writesize, chip->oob_poi,
1764                                mtd->oobsize);
1765        if (ret)
1766                return ret;
1767
1768        /*
1769         * Now, we want to make sure the block mark is correct. In the
1770         * non-transcribing case (!GPMI_IS_MX23()), we already have it.
1771         * Otherwise, we need to explicitly read it.
1772         */
1773        if (GPMI_IS_MX23(this)) {
1774                /* Read the block mark into the first byte of the OOB buffer. */
1775                ret = nand_read_page_op(chip, page, 0, chip->oob_poi, 1);
1776                if (ret)
1777                        return ret;
1778        }
1779
1780        return 0;
1781}
1782
1783static int gpmi_ecc_write_oob(struct nand_chip *chip, int page)
1784{
1785        struct mtd_info *mtd = nand_to_mtd(chip);
1786        struct mtd_oob_region of = { };
1787
1788        /* Do we have available oob area? */
1789        mtd_ooblayout_free(mtd, 0, &of);
1790        if (!of.length)
1791                return -EPERM;
1792
1793        if (!nand_is_slc(chip))
1794                return -EPERM;
1795
1796        return nand_prog_page_op(chip, page, mtd->writesize + of.offset,
1797                                 chip->oob_poi + of.offset, of.length);
1798}
1799
1800/*
1801 * This function reads a NAND page without involving the ECC engine (no HW
1802 * ECC correction).
1803 * The tricky part in the GPMI/BCH controller is that it stores ECC bits
1804 * inline (interleaved with payload DATA), and does not align data chunks
1805 * on byte boundaries.
1806 * We thus need to take care moving the payload data and ECC bits stored in the
1807 * page into the provided buffers, which is why we're using nand_extract_bits().
1808 *
1809 * See set_geometry_by_ecc_info inline comments for a full description
1810 * of the layout used by the GPMI controller.
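 *
 * A hedged numerical sketch (hypothetical geometry, only to show why chunks
 * are not byte aligned): with 10 bytes of metadata, 512-byte chunks and
 * 26 bits of ECC per chunk (gf_len 13, strength 2), chunk 0's data ends at
 * bit 80 + 4096 = 4176, its ECC at bit 4202, so chunk 1 begins at byte 525,
 * bit 2, which is why the bit-level copies below are needed.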
1811 */
1812static int gpmi_ecc_read_page_raw(struct nand_chip *chip, uint8_t *buf,
1813                                  int oob_required, int page)
1814{
1815        struct mtd_info *mtd = nand_to_mtd(chip);
1816        struct gpmi_nand_data *this = nand_get_controller_data(chip);
1817        struct bch_geometry *nfc_geo = &this->bch_geometry;
1818        int eccsize = nfc_geo->eccn_chunk_size;
1819        int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
1820        u8 *tmp_buf = this->raw_buffer;
1821        size_t src_bit_off;
1822        size_t oob_bit_off;
1823        size_t oob_byte_off;
1824        uint8_t *oob = chip->oob_poi;
1825        int step;
1826        int ret;
1827
1828        ret = nand_read_page_op(chip, page, 0, tmp_buf,
1829                                mtd->writesize + mtd->oobsize);
1830        if (ret)
1831                return ret;
1832
1833        /*
1834         * If required, swap the bad block marker and the data stored in the
1835         * metadata section, so that we don't wrongly consider a block as bad.
1836         *
1837         * See the layout description for a detailed explanation on why this
1838         * is needed.
1839         */
1840        if (this->swap_block_mark)
1841                swap(tmp_buf[0], tmp_buf[mtd->writesize]);
1842
1843        /*
1844         * Copy the metadata section into the oob buffer (this section is
1845         * guaranteed to be aligned on a byte boundary).
1846         */
1847        if (oob_required)
1848                memcpy(oob, tmp_buf, nfc_geo->metadata_size);
1849
1850        oob_bit_off = nfc_geo->metadata_size * 8;
1851        src_bit_off = oob_bit_off;
1852
1853        /* Extract interleaved payload data and ECC bits */
1854        for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
1855                if (buf)
1856                        nand_extract_bits(buf, step * eccsize * 8, tmp_buf,
1857                                          src_bit_off, eccsize * 8);
1858                src_bit_off += eccsize * 8;
1859
1860                /* Align the last ECC block to a byte boundary */
1861                if (step == nfc_geo->ecc_chunk_count - 1 &&
1862                    (oob_bit_off + eccbits) % 8)
1863                        eccbits += 8 - ((oob_bit_off + eccbits) % 8);
1864
1865                if (oob_required)
1866                        nand_extract_bits(oob, oob_bit_off, tmp_buf,
1867                                          src_bit_off, eccbits);
1868
1869                src_bit_off += eccbits;
1870                oob_bit_off += eccbits;
1871        }
1872
1873        if (oob_required) {
1874                oob_byte_off = oob_bit_off / 8;
1875
1876                if (oob_byte_off < mtd->oobsize)
1877                        memcpy(oob + oob_byte_off,
1878                               tmp_buf + mtd->writesize + oob_byte_off,
1879                               mtd->oobsize - oob_byte_off);
1880        }
1881
1882        return 0;
1883}
1884
1885/*
1886 * This function writes a NAND page without involving the ECC engine (no HW
1887 * ECC generation).
1888 * The tricky part in the GPMI/BCH controller is that it stores ECC bits
1889 * inline (interleaved with payload DATA), and does not align data chunks
1890 * on byte boundaries.
1891 * We thus need to take care moving the OOB area at the right place in the
1892 * final page, which is why we're using nand_extract_bits().
1893 *
1894 * See set_geometry_by_ecc_info inline comments for a full description
1895 * of the layout used by the GPMI controller.
1896 */
1897static int gpmi_ecc_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
1898                                   int oob_required, int page)
1899{
1900        struct mtd_info *mtd = nand_to_mtd(chip);
1901        struct gpmi_nand_data *this = nand_get_controller_data(chip);
1902        struct bch_geometry *nfc_geo = &this->bch_geometry;
1903        int eccsize = nfc_geo->eccn_chunk_size;
1904        int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
1905        u8 *tmp_buf = this->raw_buffer;
1906        uint8_t *oob = chip->oob_poi;
1907        size_t dst_bit_off;
1908        size_t oob_bit_off;
1909        size_t oob_byte_off;
1910        int step;
1911
1912        /*
1913         * Initialize all bits to 1 in case we don't have a buffer for the
1914         * payload or oob data, so that unspecified bits are left in
1915         * their initial state.
1916         */
1917        if (!buf || !oob_required)
1918                memset(tmp_buf, 0xff, mtd->writesize + mtd->oobsize);
1919
1920        /*
1921         * First copy the metadata section (stored in oob buffer) at the
1922         * beginning of the page, as imposed by the GPMI layout.
1923         */
1924        memcpy(tmp_buf, oob, nfc_geo->metadata_size);
1925        oob_bit_off = nfc_geo->metadata_size * 8;
1926        dst_bit_off = oob_bit_off;
1927
1928        /* Interleave payload data and ECC bits */
1929        for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
1930                if (buf)
1931                        nand_extract_bits(tmp_buf, dst_bit_off, buf,
1932                                          step * eccsize * 8, eccsize * 8);
1933                dst_bit_off += eccsize * 8;
1934
1935                /* Align the last ECC block to a byte boundary */
1936                if (step == nfc_geo->ecc_chunk_count - 1 &&
1937                    (oob_bit_off + eccbits) % 8)
1938                        eccbits += 8 - ((oob_bit_off + eccbits) % 8);
1939
1940                if (oob_required)
1941                        nand_extract_bits(tmp_buf, dst_bit_off, oob,
1942                                          oob_bit_off, eccbits);
1943
1944                dst_bit_off += eccbits;
1945                oob_bit_off += eccbits;
1946        }
1947
1948        oob_byte_off = oob_bit_off / 8;
1949
1950        if (oob_required && oob_byte_off < mtd->oobsize)
1951                memcpy(tmp_buf + mtd->writesize + oob_byte_off,
1952                       oob + oob_byte_off, mtd->oobsize - oob_byte_off);
1953
1954        /*
1955         * If required, swap the bad block marker and the first byte of the
1956         * metadata section, so that we don't modify the bad block marker.
1957         *
1958         * See the layout description for a detailed explanation on why this
1959         * is needed.
1960         */
1961        if (this->swap_block_mark)
1962                swap(tmp_buf[0], tmp_buf[mtd->writesize]);
1963
1964        return nand_prog_page_op(chip, page, 0, tmp_buf,
1965                                 mtd->writesize + mtd->oobsize);
1966}
1967
1968static int gpmi_ecc_read_oob_raw(struct nand_chip *chip, int page)
1969{
1970        return gpmi_ecc_read_page_raw(chip, NULL, 1, page);
1971}
1972
1973static int gpmi_ecc_write_oob_raw(struct nand_chip *chip, int page)
1974{
1975        return gpmi_ecc_write_page_raw(chip, NULL, 1, page);
1976}
1977
1978static int gpmi_block_markbad(struct nand_chip *chip, loff_t ofs)
1979{
1980        struct mtd_info *mtd = nand_to_mtd(chip);
1981        struct gpmi_nand_data *this = nand_get_controller_data(chip);
1982        int ret = 0;
1983        uint8_t *block_mark;
1984        int column, page, chipnr;
1985
1986        chipnr = (int)(ofs >> chip->chip_shift);
1987        nand_select_target(chip, chipnr);
1988
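            /*
             * On i.MX23 the mark is transcribed into the first byte of the
             * page (the metadata area); everywhere else it stays at the
             * conventional location at the start of the OOB.
             */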
1989        column = !GPMI_IS_MX23(this) ? mtd->writesize : 0;
1990
1991        /* Write the block mark. */
1992        block_mark = this->data_buffer_dma;
1993        block_mark[0] = 0; /* bad block marker */
1994
1995        /* Shift to get page */
1996        page = (int)(ofs >> chip->page_shift);
1997
1998        ret = nand_prog_page_op(chip, page, column, block_mark, 1);
1999
2000        nand_deselect_target(chip);
2001
2002        return ret;
2003}
2004
2005static int nand_boot_set_geometry(struct gpmi_nand_data *this)
2006{
2007        struct boot_rom_geometry *geometry = &this->rom_geometry;
2008
2009        /*
2010         * Set the boot block stride size.
2011         *
2012         * In principle, we should be reading this from the OTP bits, since
2013         * that's where the ROM is going to get it. In fact, we don't have any
2014         * way to read the OTP bits, so we go with the default and hope for the
2015         * best.
2016         */
2017        geometry->stride_size_in_pages = 64;
2018
2019        /*
2020         * Set the search area stride exponent.
2021         *
2022         * In principle, we should be reading this from the OTP bits, since
2023         * that's where the ROM is going to get it. In fact, we don't have any
2024         * way to read the OTP bits, so we go with the default and hope for the
2025         * best.
2026         */
2027        geometry->search_area_stride_exponent = 2;
2028        return 0;
2029}
2030
2031static const char  *fingerprint = "STMP";
2032static int mx23_check_transcription_stamp(struct gpmi_nand_data *this)
2033{
2034        struct boot_rom_geometry *rom_geo = &this->rom_geometry;
2035        struct device *dev = this->dev;
2036        struct nand_chip *chip = &this->nand;
2037        unsigned int search_area_size_in_strides;
2038        unsigned int stride;
2039        unsigned int page;
2040        u8 *buffer = nand_get_data_buf(chip);
2041        int found_an_ncb_fingerprint = false;
2042        int ret;
2043
2044        /* Compute the number of strides in a search area. */
2045        search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;
2046
2047        nand_select_target(chip, 0);
2048
2049        /*
2050         * Loop through the first search area, looking for the NCB fingerprint.
2051         */
2052        dev_dbg(dev, "Scanning for an NCB fingerprint...\n");
2053
2054        for (stride = 0; stride < search_area_size_in_strides; stride++) {
2055                /* Compute the page addresses. */
2056                page = stride * rom_geo->stride_size_in_pages;
2057
2058                dev_dbg(dev, "Looking for a fingerprint in page 0x%x\n", page);
2059
2060                /*
2061                 * Read the NCB fingerprint. The fingerprint is four bytes long
2062                 * and starts in the 12th byte of the page.
2063                 */
2064                ret = nand_read_page_op(chip, page, 12, buffer,
2065                                        strlen(fingerprint));
2066                if (ret)
2067                        continue;
2068
2069                /* Look for the fingerprint. */
2070                if (!memcmp(buffer, fingerprint, strlen(fingerprint))) {
2071                        found_an_ncb_fingerprint = true;
2072                        break;
2073                }
2074
2075        }
2076
2077        nand_deselect_target(chip);
2078
2079        if (found_an_ncb_fingerprint)
2080                dev_dbg(dev, "\tFound a fingerprint\n");
2081        else
2082                dev_dbg(dev, "\tNo fingerprint found\n");
2083        return found_an_ncb_fingerprint;
2084}
2085
2086/* Writes a transcription stamp. */
2087static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
2088{
2089        struct device *dev = this->dev;
2090        struct boot_rom_geometry *rom_geo = &this->rom_geometry;
2091        struct nand_chip *chip = &this->nand;
2092        struct mtd_info *mtd = nand_to_mtd(chip);
2093        unsigned int block_size_in_pages;
2094        unsigned int search_area_size_in_strides;
2095        unsigned int search_area_size_in_pages;
2096        unsigned int search_area_size_in_blocks;
2097        unsigned int block;
2098        unsigned int stride;
2099        unsigned int page;
2100        u8 *buffer = nand_get_data_buf(chip);
2101        int status;
2102
2103        /* Compute the search area geometry. */
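            /*
             * For example, with the defaults set in nand_boot_set_geometry()
             * (a 64-page stride and a stride exponent of 2), the search area
             * spans 4 * 64 = 256 pages; with a hypothetical 64 pages per
             * block, that is 4 blocks to erase and stamp below.
             */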
2104        block_size_in_pages = mtd->erasesize / mtd->writesize;
2105        search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;
2106        search_area_size_in_pages = search_area_size_in_strides *
2107                                        rom_geo->stride_size_in_pages;
2108        search_area_size_in_blocks =
2109                  (search_area_size_in_pages + (block_size_in_pages - 1)) /
2110                                    block_size_in_pages;
2111
2112        dev_dbg(dev, "Search Area Geometry :\n");
2113        dev_dbg(dev, "\tin Blocks : %u\n", search_area_size_in_blocks);
2114        dev_dbg(dev, "\tin Strides: %u\n", search_area_size_in_strides);
2115        dev_dbg(dev, "\tin Pages  : %u\n", search_area_size_in_pages);
2116
2117        nand_select_target(chip, 0);
2118
2119        /* Loop over blocks in the first search area, erasing them. */
2120        dev_dbg(dev, "Erasing the search area...\n");
2121
2122        for (block = 0; block < search_area_size_in_blocks; block++) {
2123                /* Erase this block. */
2124                dev_dbg(dev, "\tErasing block 0x%x\n", block);
2125                status = nand_erase_op(chip, block);
2126                if (status)
2127                        dev_err(dev, "[%s] Erase failed.\n", __func__);
2128        }
2129
2130        /* Write the NCB fingerprint into the page buffer. */
2131        memset(buffer, ~0, mtd->writesize);
2132        memcpy(buffer + 12, fingerprint, strlen(fingerprint));
2133
2134        /* Loop through the first search area, writing NCB fingerprints. */
2135        dev_dbg(dev, "Writing NCB fingerprints...\n");
2136        for (stride = 0; stride < search_area_size_in_strides; stride++) {
2137                /* Compute the page addresses. */
2138                page = stride * rom_geo->stride_size_in_pages;
2139
2140                /* Write the first page of the current stride. */
2141                dev_dbg(dev, "Writing an NCB fingerprint in page 0x%x\n", page);
2142
2143                status = chip->ecc.write_page_raw(chip, buffer, 0, page);
2144                if (status)
2145                        dev_err(dev, "[%s] Write failed.\n", __func__);
2146        }
2147
2148        nand_deselect_target(chip);
2149
2150        return 0;
2151}
2152
2153static int mx23_boot_init(struct gpmi_nand_data  *this)
2154{
2155        struct device *dev = this->dev;
2156        struct nand_chip *chip = &this->nand;
2157        struct mtd_info *mtd = nand_to_mtd(chip);
2158        unsigned int block_count;
2159        unsigned int block;
2160        int     chipnr;
2161        int     page;
2162        loff_t  byte;
2163        uint8_t block_mark;
2164        int     ret = 0;
2165
2166        /*
2167         * If control arrives here, we can't use block mark swapping, which
2168         * means we're forced to use transcription. First, scan for the
2169         * transcription stamp. If we find it, then we don't have to do
2170         * anything -- the block marks are already transcribed.
2171         */
2172        if (mx23_check_transcription_stamp(this))
2173                return 0;
2174
2175        /*
2176         * If control arrives here, we couldn't find a transcription stamp,
2177         * so we presume the block marks are in the conventional location.
2178         */
2179        dev_dbg(dev, "Transcribing bad block marks...\n");
2180
2181        /* Compute the number of blocks in the entire medium. */
2182        block_count = nanddev_eraseblocks_per_target(&chip->base);
2183
2184        /*
2185         * Loop over all the blocks in the medium, transcribing block marks as
2186         * we go.
2187         */
2188        for (block = 0; block < block_count; block++) {
2189                /*
2190                 * Compute the chip, page and byte addresses for this block's
2191                 * conventional mark.
2192                 */
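                    /*
                     * The shifts below follow from: blocks per chip =
                     * 1 << (chip_shift - phys_erase_shift), pages per block =
                     * 1 << (phys_erase_shift - page_shift), and the byte
                     * address is simply block * eraseblock size.
                     */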
2193                chipnr = block >> (chip->chip_shift - chip->phys_erase_shift);
2194                page = block << (chip->phys_erase_shift - chip->page_shift);
2195                byte = block <<  chip->phys_erase_shift;
2196
2197                /* Send the command to read the conventional block mark. */
2198                nand_select_target(chip, chipnr);
2199                ret = nand_read_page_op(chip, page, mtd->writesize, &block_mark,
2200                                        1);
2201                nand_deselect_target(chip);
2202
2203                if (ret)
2204                        continue;
2205
2206                /*
2207                 * Check if the block is marked bad. If so, we need to mark it
2208                 * again, but this time the result will be a mark in the
2209                 * location where we transcribe block marks.
2210                 */
2211                if (block_mark != 0xff) {
2212                        dev_dbg(dev, "Transcribing mark in block %u\n", block);
2213                        ret = chip->legacy.block_markbad(chip, byte);
2214                        if (ret)
2215                                dev_err(dev,
2216                                        "Failed to mark block bad with ret %d\n",
2217                                        ret);
2218                }
2219        }
2220
2221        /* Write the stamp that indicates we've transcribed the block marks. */
2222        mx23_write_transcription_stamp(this);
2223        return 0;
2224}
2225
2226static int nand_boot_init(struct gpmi_nand_data  *this)
2227{
2228        nand_boot_set_geometry(this);
2229
2230        /* This is ROM arch-specific initialization before the BBT scanning. */
2231        if (GPMI_IS_MX23(this))
2232                return mx23_boot_init(this);
2233        return 0;
2234}
2235
2236static int gpmi_set_geometry(struct gpmi_nand_data *this)
2237{
2238        int ret;
2239
2240        /* Free the temporary DMA memory for reading ID. */
2241        gpmi_free_dma_buffer(this);
2242
2243        /* Set up the NFC geometry which is used by BCH. */
2244        ret = bch_set_geometry(this);
2245        if (ret) {
2246                dev_err(this->dev, "Error setting BCH geometry : %d\n", ret);
2247                return ret;
2248        }
2249
2250        /* Alloc the new DMA buffers according to the pagesize and oobsize */
2251        return gpmi_alloc_dma_buffer(this);
2252}
2253
2254static int gpmi_init_last(struct gpmi_nand_data *this)
2255{
2256        struct nand_chip *chip = &this->nand;
2257        struct mtd_info *mtd = nand_to_mtd(chip);
2258        struct nand_ecc_ctrl *ecc = &chip->ecc;
2259        struct bch_geometry *bch_geo = &this->bch_geometry;
2260        int ret;
2261
2262        /* Set up the medium geometry */
2263        ret = gpmi_set_geometry(this);
2264        if (ret)
2265                return ret;
2266
2267        /* Init the nand_ecc_ctrl{} */
2268        ecc->read_page  = gpmi_ecc_read_page;
2269        ecc->write_page = gpmi_ecc_write_page;
2270        ecc->read_oob   = gpmi_ecc_read_oob;
2271        ecc->write_oob  = gpmi_ecc_write_oob;
2272        ecc->read_page_raw = gpmi_ecc_read_page_raw;
2273        ecc->write_page_raw = gpmi_ecc_write_page_raw;
2274        ecc->read_oob_raw = gpmi_ecc_read_oob_raw;
2275        ecc->write_oob_raw = gpmi_ecc_write_oob_raw;
2276        ecc->engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
2277        ecc->size       = bch_geo->eccn_chunk_size;
2278        ecc->strength   = bch_geo->ecc_strength;
2279        mtd_set_ooblayout(mtd, &gpmi_ooblayout_ops);
2280
2281        /*
2282         * We only enable the subpage read when:
2283         *  (1) the chip is imx6, and
2284         *  (2) the size of the ECC parity is byte aligned.
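             *
             * For instance (hypothetical geometries), gf_len 14 with strength
             * 4 gives 56 parity bits = 7 bytes, which is byte aligned and
             * qualifies; gf_len 13 with strength 2 gives 26 bits and does not.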
2285         */
2286        if (GPMI_IS_MX6(this) &&
2287                ((bch_geo->gf_len * bch_geo->ecc_strength) % 8) == 0) {
2288                ecc->read_subpage = gpmi_ecc_read_subpage;
2289                chip->options |= NAND_SUBPAGE_READ;
2290        }
2291
2292        return 0;
2293}
2294
2295static int gpmi_nand_attach_chip(struct nand_chip *chip)
2296{
2297        struct gpmi_nand_data *this = nand_get_controller_data(chip);
2298        int ret;
2299
2300        if (chip->bbt_options & NAND_BBT_USE_FLASH) {
2301                chip->bbt_options |= NAND_BBT_NO_OOB;
2302
2303                if (of_property_read_bool(this->dev->of_node,
2304                                          "fsl,no-blockmark-swap"))
2305                        this->swap_block_mark = false;
2306        }
2307        dev_dbg(this->dev, "Blockmark swapping %sabled\n",
2308                this->swap_block_mark ? "en" : "dis");
2309
2310        ret = gpmi_init_last(this);
2311        if (ret)
2312                return ret;
2313
2314        chip->options |= NAND_SKIP_BBTSCAN;
2315
2316        return 0;
2317}
2318
2319static struct gpmi_transfer *get_next_transfer(struct gpmi_nand_data *this)
2320{
2321        struct gpmi_transfer *transfer = &this->transfers[this->ntransfers];
2322
2323        this->ntransfers++;
2324
2325        if (this->ntransfers == GPMI_MAX_TRANSFERS)
2326                return NULL;
2327
2328        return transfer;
2329}
2330
2331static struct dma_async_tx_descriptor *gpmi_chain_command(
2332        struct gpmi_nand_data *this, u8 cmd, const u8 *addr, int naddr)
2333{
2334        struct dma_chan *channel = get_dma_chan(this);
2335        struct dma_async_tx_descriptor *desc;
2336        struct gpmi_transfer *transfer;
2337        int chip = this->nand.cur_cs;
2338        u32 pio[3];
2339
2340        /* [1] send out the PIO words */
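            /*
             * Note (based on how the GPMI address-increment mode is commonly
             * described): with ADDRESS_INCREMENT set and XFER_COUNT equal to
             * naddr + 1, the opcode goes out on the CLE cycle and the address
             * bytes on the following ALE cycles, all from the single command
             * buffer DMA'd below.
             */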
2341        pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE)
2342                | BM_GPMI_CTRL0_WORD_LENGTH
2343                | BF_GPMI_CTRL0_CS(chip, this)
2344                | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
2345                | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_CLE)
2346                | BM_GPMI_CTRL0_ADDRESS_INCREMENT
2347                | BF_GPMI_CTRL0_XFER_COUNT(naddr + 1);
2348        pio[1] = 0;
2349        pio[2] = 0;
2350        desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio),
2351                                      DMA_TRANS_NONE, 0);
2352        if (!desc)
2353                return NULL;
2354
2355        transfer = get_next_transfer(this);
2356        if (!transfer)
2357                return NULL;
2358
2359        transfer->cmdbuf[0] = cmd;
2360        if (naddr)
2361                memcpy(&transfer->cmdbuf[1], addr, naddr);
2362
2363        sg_init_one(&transfer->sgl, transfer->cmdbuf, naddr + 1);
2364        dma_map_sg(this->dev, &transfer->sgl, 1, DMA_TO_DEVICE);
2365
2366        transfer->direction = DMA_TO_DEVICE;
2367
2368        desc = dmaengine_prep_slave_sg(channel, &transfer->sgl, 1, DMA_MEM_TO_DEV,
2369                                       MXS_DMA_CTRL_WAIT4END);
2370        return desc;
2371}
2372
2373static struct dma_async_tx_descriptor *gpmi_chain_wait_ready(
2374        struct gpmi_nand_data *this)
2375{
2376        struct dma_chan *channel = get_dma_chan(this);
2377        u32 pio[2];
2378
2379        pio[0] =  BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY)
2380                | BM_GPMI_CTRL0_WORD_LENGTH
2381                | BF_GPMI_CTRL0_CS(this->nand.cur_cs, this)
2382                | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
2383                | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
2384                | BF_GPMI_CTRL0_XFER_COUNT(0);
2385        pio[1] = 0;
2386
2387        return mxs_dmaengine_prep_pio(channel, pio, 2, DMA_TRANS_NONE,
2388                                MXS_DMA_CTRL_WAIT4END | MXS_DMA_CTRL_WAIT4RDY);
2389}
2390
2391static struct dma_async_tx_descriptor *gpmi_chain_data_read(
2392        struct gpmi_nand_data *this, void *buf, int raw_len, bool *direct)
2393{
2394        struct dma_async_tx_descriptor *desc;
2395        struct dma_chan *channel = get_dma_chan(this);
2396        struct gpmi_transfer *transfer;
2397        u32 pio[6] = {};
2398
2399        transfer = get_next_transfer(this);
2400        if (!transfer)
2401                return NULL;
2402
2403        transfer->direction = DMA_FROM_DEVICE;
2404
2405        *direct = prepare_data_dma(this, buf, raw_len, &transfer->sgl,
2406                                   DMA_FROM_DEVICE);
2407
2408        pio[0] =  BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__READ)
2409                | BM_GPMI_CTRL0_WORD_LENGTH
2410                | BF_GPMI_CTRL0_CS(this->nand.cur_cs, this)
2411                | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
2412                | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
2413                | BF_GPMI_CTRL0_XFER_COUNT(raw_len);
2414
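            /*
             * When BCH decoding is enabled, the extra PIO words below (as used
             * by this driver) carry the ECC control flags, the transfer
             * length, and the DMA addresses of the payload and auxiliary
             * (metadata + status) buffers consumed by the BCH engine.
             */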
2415        if (this->bch) {
2416                pio[2] =  BM_GPMI_ECCCTRL_ENABLE_ECC
2417                        | BF_GPMI_ECCCTRL_ECC_CMD(BV_GPMI_ECCCTRL_ECC_CMD__BCH_DECODE)
2418                        | BF_GPMI_ECCCTRL_BUFFER_MASK(BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE
2419                                | BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY);
2420                pio[3] = raw_len;
2421                pio[4] = transfer->sgl.dma_address;
2422                pio[5] = this->auxiliary_phys;
2423        }
2424
2425        desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio),
2426                                      DMA_TRANS_NONE, 0);
2427        if (!desc)
2428                return NULL;
2429
2430        if (!this->bch)
2431                desc = dmaengine_prep_slave_sg(channel, &transfer->sgl, 1,
2432                                             DMA_DEV_TO_MEM,
2433                                             MXS_DMA_CTRL_WAIT4END);
2434
2435        return desc;
2436}
2437
2438static struct dma_async_tx_descriptor *gpmi_chain_data_write(
2439        struct gpmi_nand_data *this, const void *buf, int raw_len)
2440{
2441        struct dma_chan *channel = get_dma_chan(this);
2442        struct dma_async_tx_descriptor *desc;
2443        struct gpmi_transfer *transfer;
2444        u32 pio[6] = {};
2445
2446        transfer = get_next_transfer(this);
2447        if (!transfer)
2448                return NULL;
2449
2450        transfer->direction = DMA_TO_DEVICE;
2451
2452        prepare_data_dma(this, buf, raw_len, &transfer->sgl, DMA_TO_DEVICE);
2453
2454        pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE)
2455                | BM_GPMI_CTRL0_WORD_LENGTH
2456                | BF_GPMI_CTRL0_CS(this->nand.cur_cs, this)
2457                | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
2458                | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
2459                | BF_GPMI_CTRL0_XFER_COUNT(raw_len);
2460
2461        if (this->bch) {
2462                pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC
2463                        | BF_GPMI_ECCCTRL_ECC_CMD(BV_GPMI_ECCCTRL_ECC_CMD__BCH_ENCODE)
2464                        | BF_GPMI_ECCCTRL_BUFFER_MASK(BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE |
2465                                        BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY);
2466                pio[3] = raw_len;
2467                pio[4] = transfer->sgl.dma_address;
2468                pio[5] = this->auxiliary_phys;
2469        }
2470
2471        desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio),
2472                                      DMA_TRANS_NONE,
2473                                      (this->bch ? MXS_DMA_CTRL_WAIT4END : 0));
2474        if (!desc)
2475                return NULL;
2476
2477        if (!this->bch)
2478                desc = dmaengine_prep_slave_sg(channel, &transfer->sgl, 1,
2479                                               DMA_MEM_TO_DEV,
2480                                               MXS_DMA_CTRL_WAIT4END);
2481
2482        return desc;
2483}
2484
2485static int gpmi_nfc_exec_op(struct nand_chip *chip,
2486                             const struct nand_operation *op,
2487                             bool check_only)
2488{
2489        const struct nand_op_instr *instr;
2490        struct gpmi_nand_data *this = nand_get_controller_data(chip);
2491        struct dma_async_tx_descriptor *desc = NULL;
2492        int i, ret, buf_len = 0, nbufs = 0;
2493        u8 cmd = 0;
2494        void *buf_read = NULL;
2495        const void *buf_write = NULL;
2496        bool direct = false;
2497        struct completion *dma_completion, *bch_completion;
2498        unsigned long to;
2499
2500        if (check_only)
2501                return 0;
2502
2503        this->ntransfers = 0;
2504        for (i = 0; i < GPMI_MAX_TRANSFERS; i++)
2505                this->transfers[i].direction = DMA_NONE;
2506
2507        ret = pm_runtime_get_sync(this->dev);
2508        if (ret < 0) {
2509                pm_runtime_put_noidle(this->dev);
2510                return ret;
2511        }
2512
2513        /*
2514         * This driver currently supports only one NAND chip. Plus, dies share
2515         * the same configuration. So once timings have been applied on the
2516         * controller side, they will not change anymore. When the time
2517         * comes, the check on must_apply_timings will have to be dropped.
2518         */
2519        if (this->hw.must_apply_timings) {
2520                this->hw.must_apply_timings = false;
2521                ret = gpmi_nfc_apply_timings(this);
2522                if (ret)
2523                        goto out_pm;
2524        }
2525
2526        dev_dbg(this->dev, "%s: %d instructions\n", __func__, op->ninstrs);
2527
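            /*
             * Sketch of how a typical page read (CMD 0x00, ADDR cycles,
             * CMD 0x30, WAITRDY, DATA_IN) maps onto the loop below: the first
             * command is merged with its address cycles into one command
             * descriptor, 0x30 becomes a command-only descriptor, WAITRDY a
             * wait-for-ready descriptor, and DATA_IN a data-read descriptor
             * (with the BCH PIO words added when this->bch is set).
             */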
2528        for (i = 0; i < op->ninstrs; i++) {
2529                instr = &op->instrs[i];
2530
2531                nand_op_trace("  ", instr);
2532
2533                switch (instr->type) {
2534                case NAND_OP_WAITRDY_INSTR:
2535                        desc = gpmi_chain_wait_ready(this);
2536                        break;
2537                case NAND_OP_CMD_INSTR:
2538                        cmd = instr->ctx.cmd.opcode;
2539
2540                        /*
2541                         * When this command has an address cycle, chain it
2542                         * together with the address cycle.
2543                         */
2544                        if (i + 1 != op->ninstrs &&
2545                            op->instrs[i + 1].type == NAND_OP_ADDR_INSTR)
2546                                continue;
2547
2548                        desc = gpmi_chain_command(this, cmd, NULL, 0);
2549
2550                        break;
2551                case NAND_OP_ADDR_INSTR:
2552                        desc = gpmi_chain_command(this, cmd, instr->ctx.addr.addrs,
2553                                                  instr->ctx.addr.naddrs);
2554                        break;
2555                case NAND_OP_DATA_OUT_INSTR:
2556                        buf_write = instr->ctx.data.buf.out;
2557                        buf_len = instr->ctx.data.len;
2558                        nbufs++;
2559
2560                        desc = gpmi_chain_data_write(this, buf_write, buf_len);
2561
2562                        break;
2563                case NAND_OP_DATA_IN_INSTR:
2564                        if (!instr->ctx.data.len)
2565                                break;
2566                        buf_read = instr->ctx.data.buf.in;
2567                        buf_len = instr->ctx.data.len;
2568                        nbufs++;
2569
2570                        desc = gpmi_chain_data_read(this, buf_read, buf_len,
2571                                                   &direct);
2572                        break;
2573                }
2574
2575                if (!desc) {
2576                        ret = -ENXIO;
2577                        goto unmap;
2578                }
2579        }
2580
2581        dev_dbg(this->dev, "%s setup done\n", __func__);
2582
2583        if (nbufs > 1) {
2584                dev_err(this->dev, "Multiple data instructions not supported\n");
2585                ret = -EINVAL;
2586                goto unmap;
2587        }
2588
2589        if (this->bch) {
2590                writel(this->bch_flashlayout0,
2591                       this->resources.bch_regs + HW_BCH_FLASH0LAYOUT0);
2592                writel(this->bch_flashlayout1,
2593                       this->resources.bch_regs + HW_BCH_FLASH0LAYOUT1);
2594        }
2595
2596        desc->callback = dma_irq_callback;
2597        desc->callback_param = this;
2598        dma_completion = &this->dma_done;
2599        bch_completion = NULL;
2600
2601        init_completion(dma_completion);
2602
2603        if (this->bch && buf_read) {
2604                writel(BM_BCH_CTRL_COMPLETE_IRQ_EN,
2605                       this->resources.bch_regs + HW_BCH_CTRL_SET);
2606                bch_completion = &this->bch_done;
2607                init_completion(bch_completion);
2608        }
2609
2610        dmaengine_submit(desc);
2611        dma_async_issue_pending(get_dma_chan(this));
2612
2613        to = wait_for_completion_timeout(dma_completion, msecs_to_jiffies(1000));
2614        if (!to) {
2615                dev_err(this->dev, "DMA timeout, last DMA\n");
2616                gpmi_dump_info(this);
2617                ret = -ETIMEDOUT;
2618                goto unmap;
2619        }
2620
2621        if (this->bch && buf_read) {
2622                to = wait_for_completion_timeout(bch_completion, msecs_to_jiffies(1000));
2623                if (!to) {
2624                        dev_err(this->dev, "BCH timeout, last DMA\n");
2625                        gpmi_dump_info(this);
2626                        ret = -ETIMEDOUT;
2627                        goto unmap;
2628                }
2629        }
2630
2631        writel(BM_BCH_CTRL_COMPLETE_IRQ_EN,
2632               this->resources.bch_regs + HW_BCH_CTRL_CLR);
2633        gpmi_clear_bch(this);
2634
2635        ret = 0;
2636
2637unmap:
2638        for (i = 0; i < this->ntransfers; i++) {
2639                struct gpmi_transfer *transfer = &this->transfers[i];
2640
2641                if (transfer->direction != DMA_NONE)
2642                        dma_unmap_sg(this->dev, &transfer->sgl, 1,
2643                                     transfer->direction);
2644        }
2645
2646        if (!ret && buf_read && !direct)
2647                memcpy(buf_read, this->data_buffer_dma,
2648                       gpmi_raw_len_to_len(this, buf_len));
2649
2650        this->bch = false;
2651
2652out_pm:
2653        pm_runtime_mark_last_busy(this->dev);
2654        pm_runtime_put_autosuspend(this->dev);
2655
2656        return ret;
2657}
2658
2659static const struct nand_controller_ops gpmi_nand_controller_ops = {
2660        .attach_chip = gpmi_nand_attach_chip,
2661        .setup_interface = gpmi_setup_interface,
2662        .exec_op = gpmi_nfc_exec_op,
2663};
2664
2665static int gpmi_nand_init(struct gpmi_nand_data *this)
2666{
2667        struct nand_chip *chip = &this->nand;
2668        struct mtd_info  *mtd = nand_to_mtd(chip);
2669        int ret;
2670
2671        /* init the MTD data structures */
2672        mtd->name               = "gpmi-nand";
2673        mtd->dev.parent         = this->dev;
2674
2675        /* init the nand_chip{}, we don't support a 16-bit NAND Flash bus. */
2676        nand_set_controller_data(chip, this);
2677        nand_set_flash_node(chip, this->pdev->dev.of_node);
2678        chip->legacy.block_markbad = gpmi_block_markbad;
2679        chip->badblock_pattern  = &gpmi_bbt_descr;
2680        chip->options           |= NAND_NO_SUBPAGE_WRITE;
2681
2682        /* Set up swap_block_mark; it must be set before gpmi_set_geometry(). */
2683        this->swap_block_mark = !GPMI_IS_MX23(this);
2684
2685        /*
2686         * Allocate a temporary DMA buffer for reading the ID in
2687         * nand_scan_ident().
2688         */
2689        this->bch_geometry.payload_size = 1024;
2690        this->bch_geometry.auxiliary_size = 128;
2691        ret = gpmi_alloc_dma_buffer(this);
2692        if (ret)
2693                return ret;
2694
2695        nand_controller_init(&this->base);
2696        this->base.ops = &gpmi_nand_controller_ops;
2697        chip->controller = &this->base;
2698
2699        ret = nand_scan(chip, GPMI_IS_MX6(this) ? 2 : 1);
2700        if (ret)
2701                goto err_out;
2702
2703        ret = nand_boot_init(this);
2704        if (ret)
2705                goto err_nand_cleanup;
2706        ret = nand_create_bbt(chip);
2707        if (ret)
2708                goto err_nand_cleanup;
2709
2710        ret = mtd_device_register(mtd, NULL, 0);
2711        if (ret)
2712                goto err_nand_cleanup;
2713        return 0;
2714
2715err_nand_cleanup:
2716        nand_cleanup(chip);
2717err_out:
2718        gpmi_free_dma_buffer(this);
2719        return ret;
2720}
2721
2722static const struct of_device_id gpmi_nand_id_table[] = {
2723        { .compatible = "fsl,imx23-gpmi-nand", .data = &gpmi_devdata_imx23, },
2724        { .compatible = "fsl,imx28-gpmi-nand", .data = &gpmi_devdata_imx28, },
2725        { .compatible = "fsl,imx6q-gpmi-nand", .data = &gpmi_devdata_imx6q, },
2726        { .compatible = "fsl,imx6sx-gpmi-nand", .data = &gpmi_devdata_imx6sx, },
2727        { .compatible = "fsl,imx7d-gpmi-nand", .data = &gpmi_devdata_imx7d,},
2728        {}
2729};
2730MODULE_DEVICE_TABLE(of, gpmi_nand_id_table);
2731
2732static int gpmi_nand_probe(struct platform_device *pdev)
2733{
2734        struct gpmi_nand_data *this;
2735        int ret;
2736
2737        this = devm_kzalloc(&pdev->dev, sizeof(*this), GFP_KERNEL);
2738        if (!this)
2739                return -ENOMEM;
2740
2741        this->devdata = of_device_get_match_data(&pdev->dev);
2742        platform_set_drvdata(pdev, this);
2743        this->pdev  = pdev;
2744        this->dev   = &pdev->dev;
2745
2746        ret = acquire_resources(this);
2747        if (ret)
2748                goto exit_acquire_resources;
2749
2750        ret = __gpmi_enable_clk(this, true);
2751        if (ret)
2752                goto exit_acquire_resources;
2753
2754        pm_runtime_set_autosuspend_delay(&pdev->dev, 500);
2755        pm_runtime_use_autosuspend(&pdev->dev);
2756        pm_runtime_set_active(&pdev->dev);
2757        pm_runtime_enable(&pdev->dev);
2758        pm_runtime_get_sync(&pdev->dev);
2759
2760        ret = gpmi_init(this);
2761        if (ret)
2762                goto exit_nfc_init;
2763
2764        ret = gpmi_nand_init(this);
2765        if (ret)
2766                goto exit_nfc_init;
2767
2768        pm_runtime_mark_last_busy(&pdev->dev);
2769        pm_runtime_put_autosuspend(&pdev->dev);
2770
2771        dev_info(this->dev, "driver registered.\n");
2772
2773        return 0;
2774
2775exit_nfc_init:
2776        pm_runtime_put(&pdev->dev);
2777        pm_runtime_disable(&pdev->dev);
2778        release_resources(this);
2779exit_acquire_resources:
2780
2781        return ret;
2782}
2783
2784static int gpmi_nand_remove(struct platform_device *pdev)
2785{
2786        struct gpmi_nand_data *this = platform_get_drvdata(pdev);
2787        struct nand_chip *chip = &this->nand;
2788        int ret;
2789
2790        pm_runtime_put_sync(&pdev->dev);
2791        pm_runtime_disable(&pdev->dev);
2792
2793        ret = mtd_device_unregister(nand_to_mtd(chip));
2794        WARN_ON(ret);
2795        nand_cleanup(chip);
2796        gpmi_free_dma_buffer(this);
2797        release_resources(this);
2798        return 0;
2799}
2800
2801#ifdef CONFIG_PM_SLEEP
2802static int gpmi_pm_suspend(struct device *dev)
2803{
2804        struct gpmi_nand_data *this = dev_get_drvdata(dev);
2805
2806        release_dma_channels(this);
2807        return 0;
2808}
2809
2810static int gpmi_pm_resume(struct device *dev)
2811{
2812        struct gpmi_nand_data *this = dev_get_drvdata(dev);
2813        int ret;
2814
2815        ret = acquire_dma_channels(this);
2816        if (ret < 0)
2817                return ret;
2818
2819        /* re-init the GPMI registers */
2820        ret = gpmi_init(this);
2821        if (ret) {
2822                dev_err(this->dev, "Error setting GPMI : %d\n", ret);
2823                return ret;
2824        }
2825
2826        /* Set flag to get timing setup restored for next exec_op */
2827        if (this->hw.clk_rate)
2828                this->hw.must_apply_timings = true;
2829
2830        /* re-init the BCH registers */
2831        ret = bch_set_geometry(this);
2832        if (ret) {
2833                dev_err(this->dev, "Error setting BCH : %d\n", ret);
2834                return ret;
2835        }
2836
2837        return 0;
2838}
2839#endif /* CONFIG_PM_SLEEP */
2840
2841static int __maybe_unused gpmi_runtime_suspend(struct device *dev)
2842{
2843        struct gpmi_nand_data *this = dev_get_drvdata(dev);
2844
2845        return __gpmi_enable_clk(this, false);
2846}
2847
2848static int __maybe_unused gpmi_runtime_resume(struct device *dev)
2849{
2850        struct gpmi_nand_data *this = dev_get_drvdata(dev);
2851
2852        return __gpmi_enable_clk(this, true);
2853}
2854
2855static const struct dev_pm_ops gpmi_pm_ops = {
2856        SET_SYSTEM_SLEEP_PM_OPS(gpmi_pm_suspend, gpmi_pm_resume)
2857        SET_RUNTIME_PM_OPS(gpmi_runtime_suspend, gpmi_runtime_resume, NULL)
2858};
2859
2860static struct platform_driver gpmi_nand_driver = {
2861        .driver = {
2862                .name = "gpmi-nand",
2863                .pm = &gpmi_pm_ops,
2864                .of_match_table = gpmi_nand_id_table,
2865        },
2866        .probe   = gpmi_nand_probe,
2867        .remove  = gpmi_nand_remove,
2868};
2869module_platform_driver(gpmi_nand_driver);
2870
2871MODULE_AUTHOR("Freescale Semiconductor, Inc.");
2872MODULE_DESCRIPTION("i.MX GPMI NAND Flash Controller Driver");
2873MODULE_LICENSE("GPL");
2874