linux/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
   1// SPDX-License-Identifier: GPL-2.0+
   2/*
   3 * Freescale GPMI NAND Flash Driver
   4 *
   5 * Copyright (C) 2010-2015 Freescale Semiconductor, Inc.
   6 * Copyright (C) 2008 Embedded Alley Solutions, Inc.
   7 */
   8#include <linux/clk.h>
   9#include <linux/delay.h>
  10#include <linux/slab.h>
  11#include <linux/sched/task_stack.h>
  12#include <linux/interrupt.h>
  13#include <linux/module.h>
  14#include <linux/mtd/partitions.h>
  15#include <linux/of.h>
  16#include <linux/of_device.h>
  17#include <linux/pm_runtime.h>
  18#include <linux/dma/mxs-dma.h>
  19#include "gpmi-nand.h"
  20#include "gpmi-regs.h"
  21#include "bch-regs.h"
  22
  23/* Resource names for the GPMI NAND driver. */
  24#define GPMI_NAND_GPMI_REGS_ADDR_RES_NAME  "gpmi-nand"
  25#define GPMI_NAND_BCH_REGS_ADDR_RES_NAME   "bch"
  26#define GPMI_NAND_BCH_INTERRUPT_RES_NAME   "bch"
  27
  28/* Converts time to clock cycles */
  29#define TO_CYCLES(duration, period) DIV_ROUND_UP_ULL(duration, period)
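    /*
     * For example (illustrative numbers only, not from a datasheet): a
     * duration of 25000ps at a 10000ps clock period gives
     * TO_CYCLES(25000, 10000) == 3, since DIV_ROUND_UP_ULL rounds up.
     */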
  30
  31#define MXS_SET_ADDR            0x4
  32#define MXS_CLR_ADDR            0x8
  33/*
  34 * Clear the bit and poll until it reads back as cleared.  This is
  35 * usually called with a reset address and a mask of either SFTRST
  36 * (bit 31) or CLKGATE (bit 30).
  37 */
  38static int clear_poll_bit(void __iomem *addr, u32 mask)
  39{
  40        int timeout = 0x400;
  41
  42        /* clear the bit */
  43        writel(mask, addr + MXS_CLR_ADDR);
  44
  45        /*
  46         * SFTRST needs 3 GPMI clocks to settle; the reference manual
  47         * recommends waiting 1us.
  48         */
  49        udelay(1);
  50
  51        /* poll the bit becoming clear */
  52        while ((readl(addr) & mask) && --timeout)
  53                /* nothing */;
  54
  55        return !timeout;
  56}
  57
  58#define MODULE_CLKGATE          (1 << 30)
  59#define MODULE_SFTRST           (1 << 31)
  60/*
  61 * The current mxs_reset_block() will do two things:
  62 *  [1] enable the module.
  63 *  [2] reset the module.
  64 *
  65 * In most cases this is fine.
  66 * But on the MX23 there is a hardware bug in the BCH block (see erratum #2847):
  67 * if you soft reset the BCH block, it becomes unusable until
  68 * the next hard reset. This case occurs in NAND boot mode. When the board
  69 * boots from NAND, the ROM of the chip initializes the BCH block itself.
  70 * So if the driver resets the BCH again, the BCH will not work anymore
  71 * and you will see a DMA timeout. The bug has been fixed in later
  72 * chips such as the MX28.
  73 *
  74 * To avoid this bug, we add a new parameter `just_enable` to
  75 * mxs_reset_block() and rewrite it here.
  76 */
  77static int gpmi_reset_block(void __iomem *reset_addr, bool just_enable)
  78{
  79        int ret;
  80        int timeout = 0x400;
  81
  82        /* clear and poll SFTRST */
  83        ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
  84        if (unlikely(ret))
  85                goto error;
  86
  87        /* clear CLKGATE */
  88        writel(MODULE_CLKGATE, reset_addr + MXS_CLR_ADDR);
  89
  90        if (!just_enable) {
  91                /* set SFTRST to reset the block */
  92                writel(MODULE_SFTRST, reset_addr + MXS_SET_ADDR);
  93                udelay(1);
  94
  95                /* poll CLKGATE becoming set */
  96                while ((!(readl(reset_addr) & MODULE_CLKGATE)) && --timeout)
  97                        /* nothing */;
  98                if (unlikely(!timeout))
  99                        goto error;
 100        }
 101
 102        /* clear and poll SFTRST */
 103        ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
 104        if (unlikely(ret))
 105                goto error;
 106
 107        /* clear and poll CLKGATE */
 108        ret = clear_poll_bit(reset_addr, MODULE_CLKGATE);
 109        if (unlikely(ret))
 110                goto error;
 111
 112        return 0;
 113
 114error:
 115        pr_err("%s(%p): module reset timeout\n", __func__, reset_addr);
 116        return -ETIMEDOUT;
 117}
 118
 119static int __gpmi_enable_clk(struct gpmi_nand_data *this, bool v)
 120{
 121        struct clk *clk;
 122        int ret;
 123        int i;
 124
 125        for (i = 0; i < GPMI_CLK_MAX; i++) {
 126                clk = this->resources.clock[i];
 127                if (!clk)
 128                        break;
 129
 130                if (v) {
 131                        ret = clk_prepare_enable(clk);
 132                        if (ret)
 133                                goto err_clk;
 134                } else {
 135                        clk_disable_unprepare(clk);
 136                }
 137        }
 138        return 0;
 139
 140err_clk:
 141        for (; i > 0; i--)
 142                clk_disable_unprepare(this->resources.clock[i - 1]);
 143        return ret;
 144}
 145
 146static int gpmi_init(struct gpmi_nand_data *this)
 147{
 148        struct resources *r = &this->resources;
 149        int ret;
 150
 151        ret = gpmi_reset_block(r->gpmi_regs, false);
 152        if (ret)
 153                goto err_out;
 154
 155        /*
 156         * Reset BCH here, too. We got failures otherwise :(
 157         * See the later BCH reset for an explanation of the MX23 and MX28 handling.
 158         */
 159        ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MXS(this));
 160        if (ret)
 161                goto err_out;
 162
 163        /* Choose NAND mode. */
 164        writel(BM_GPMI_CTRL1_GPMI_MODE, r->gpmi_regs + HW_GPMI_CTRL1_CLR);
 165
 166        /* Set the IRQ polarity. */
 167        writel(BM_GPMI_CTRL1_ATA_IRQRDY_POLARITY,
 168                                r->gpmi_regs + HW_GPMI_CTRL1_SET);
 169
 170        /* Disable Write-Protection. */
 171        writel(BM_GPMI_CTRL1_DEV_RESET, r->gpmi_regs + HW_GPMI_CTRL1_SET);
 172
 173        /* Select BCH ECC. */
 174        writel(BM_GPMI_CTRL1_BCH_MODE, r->gpmi_regs + HW_GPMI_CTRL1_SET);
 175
 176        /*
 177         * Decouple the chip select from dma channel. We use dma0 for all
 178         * the chips.
 179         */
 180        writel(BM_GPMI_CTRL1_DECOUPLE_CS, r->gpmi_regs + HW_GPMI_CTRL1_SET);
 181
 182        return 0;
 183err_out:
 184        return ret;
 185}
 186
 187/* This function is useful for debugging; it is called only when a bug occurs. */
 188static void gpmi_dump_info(struct gpmi_nand_data *this)
 189{
 190        struct resources *r = &this->resources;
 191        struct bch_geometry *geo = &this->bch_geometry;
 192        u32 reg;
 193        int i;
 194
 195        dev_err(this->dev, "Show GPMI registers :\n");
 196        for (i = 0; i <= HW_GPMI_DEBUG / 0x10 + 1; i++) {
 197                reg = readl(r->gpmi_regs + i * 0x10);
 198                dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
 199        }
 200
 201        /* start to print out the BCH info */
 202        dev_err(this->dev, "Show BCH registers :\n");
 203        for (i = 0; i <= HW_BCH_VERSION / 0x10 + 1; i++) {
 204                reg = readl(r->bch_regs + i * 0x10);
 205                dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
 206        }
 207        dev_err(this->dev, "BCH Geometry :\n"
 208                "GF length              : %u\n"
 209                "ECC Strength           : %u\n"
 210                "Page Size in Bytes     : %u\n"
 211                "Metadata Size in Bytes : %u\n"
 212                "ECC Chunk Size in Bytes: %u\n"
 213                "ECC Chunk Count        : %u\n"
 214                "Payload Size in Bytes  : %u\n"
 215                "Auxiliary Size in Bytes: %u\n"
 216                "Auxiliary Status Offset: %u\n"
 217                "Block Mark Byte Offset : %u\n"
 218                "Block Mark Bit Offset  : %u\n",
 219                geo->gf_len,
 220                geo->ecc_strength,
 221                geo->page_size,
 222                geo->metadata_size,
 223                geo->ecc_chunk_size,
 224                geo->ecc_chunk_count,
 225                geo->payload_size,
 226                geo->auxiliary_size,
 227                geo->auxiliary_status_offset,
 228                geo->block_mark_byte_offset,
 229                geo->block_mark_bit_offset);
 230}
 231
 232static inline bool gpmi_check_ecc(struct gpmi_nand_data *this)
 233{
 234        struct bch_geometry *geo = &this->bch_geometry;
 235
 236        /* Do the sanity check. */
 237        if (GPMI_IS_MXS(this)) {
 238                /* The mx23/mx28 only support GF13. */
 239                if (geo->gf_len == 14)
 240                        return false;
 241        }
 242        return geo->ecc_strength <= this->devdata->bch_max_ecc_strength;
 243}
 244
 245/*
 246 * If we can get the ECC information from the NAND chip, we do not
 247 * need to calculate it ourselves.
 248 *
 249 * In this case we may end up with some free OOB space.
 250 */
 251static int set_geometry_by_ecc_info(struct gpmi_nand_data *this,
 252                                    unsigned int ecc_strength,
 253                                    unsigned int ecc_step)
 254{
 255        struct bch_geometry *geo = &this->bch_geometry;
 256        struct nand_chip *chip = &this->nand;
 257        struct mtd_info *mtd = nand_to_mtd(chip);
 258        unsigned int block_mark_bit_offset;
 259
 260        switch (ecc_step) {
 261        case SZ_512:
 262                geo->gf_len = 13;
 263                break;
 264        case SZ_1K:
 265                geo->gf_len = 14;
 266                break;
 267        default:
 268                dev_err(this->dev,
 269                        "unsupported nand chip. ecc bits : %d, ecc size : %d\n",
 270                        chip->base.eccreq.strength,
 271                        chip->base.eccreq.step_size);
 272                return -EINVAL;
 273        }
 274        geo->ecc_chunk_size = ecc_step;
 275        geo->ecc_strength = round_up(ecc_strength, 2);
 276        if (!gpmi_check_ecc(this))
 277                return -EINVAL;
 278
 279        /* Keep the C >= O */
 280        if (geo->ecc_chunk_size < mtd->oobsize) {
 281                dev_err(this->dev,
 282                        "unsupported nand chip. ecc size: %d, oob size : %d\n",
 283                        ecc_step, mtd->oobsize);
 284                return -EINVAL;
 285        }
 286
 287        /* The default value; see the comment in legacy_set_geometry(). */
 288        geo->metadata_size = 10;
 289
 290        geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunk_size;
 291
 292        /*
 293         * A NAND chip with a 2K page (512-byte data chunks) is shown below:
 294         *
 295         *    |                          P                            |
 296         *    |<----------------------------------------------------->|
 297         *    |                                                       |
 298         *    |                                        (Block Mark)   |
 299         *    |                      P'                      |      | |     |
 300         *    |<-------------------------------------------->|  D   | |  O' |
 301         *    |                                              |<---->| |<--->|
 302         *    V                                              V      V V     V
 303         *    +---+----------+-+----------+-+----------+-+----------+-+-----+
 304         *    | M |   data   |E|   data   |E|   data   |E|   data   |E|     |
 305         *    +---+----------+-+----------+-+----------+-+----------+-+-----+
 306         *                                                   ^              ^
 307         *                                                   |      O       |
 308         *                                                   |<------------>|
 309         *                                                   |              |
 310         *
 311         *      P : the page size for BCH module.
 312         *      E : The ECC strength.
 313         *      G : the length of Galois Field.
 314         *      N : the number of chunks per page.
 315         *      M : the metadata size per page.
 316         *      C : the ecc chunk size, aka the "data" above.
 317         *      P': the nand chip's page size.
 318         *      O : the nand chip's oob size.
 319         *      O': the free oob.
 320         *
 321         *      The formula for P is :
 322         *
 323         *                  E * G * N
 324         *             P = ------------ + P' + M
 325         *                      8
 326         *
 327         * The position of the block mark moves forward in the ECC-based view
 328         * of the page, and the delta is:
 329         *
 330         *                   E * G * (N - 1)
 331         *             D = (---------------- + M)
 332         *                          8
 333         *
 334         * Please see the comment in legacy_set_geometry().
 335         * With the condition C >= O, we still get the same result.
 336         * So the bit position of the physical block mark within the ECC-based
 337         * view of the page is:
 338         *             (P' - D) * 8
 339         */
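            /*
             * As a worked example (the numbers are assumed for illustration,
             * not taken from a particular chip): let P' = 2048, O = 64,
             * C = 512 (so N = 4), G = 13, E = 8 and M = 10. Then:
             *
             *     P = (8 * 13 * 4) / 8 + 2048 + 10 = 52 + 2048 + 10 = 2110
             *     D = (8 * 13 * 3) / 8 + 10       = 39 + 10         = 49
             *
             * and the block mark bit position is (2048 - 49) * 8 = 15992,
             * i.e. byte offset 1999, bit offset 0, which matches the
             * block_mark_bit_offset computation at the end of this function.
             */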
 340        geo->page_size = mtd->writesize + geo->metadata_size +
 341                (geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) / 8;
 342
 343        geo->payload_size = mtd->writesize;
 344
 345        geo->auxiliary_status_offset = ALIGN(geo->metadata_size, 4);
 346        geo->auxiliary_size = ALIGN(geo->metadata_size, 4)
 347                                + ALIGN(geo->ecc_chunk_count, 4);
 348
 349        if (!this->swap_block_mark)
 350                return 0;
 351
 352        /* For bit swap. */
 353        block_mark_bit_offset = mtd->writesize * 8 -
 354                (geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
 355                                + geo->metadata_size * 8);
 356
 357        geo->block_mark_byte_offset = block_mark_bit_offset / 8;
 358        geo->block_mark_bit_offset  = block_mark_bit_offset % 8;
 359        return 0;
 360}
 361
 362/*
 363 *  Calculate the ECC strength by hand:
 364 *      E : The ECC strength.
 365 *      G : the length of Galois Field.
  366 *      N : the number of chunks per page.
  367 *      O : the OOB size of the NAND chip.
  368 *      M : the metadata size per page.
 369 *
 370 *      The formula is :
 371 *              E * G * N
 372 *            ------------ <= (O - M)
 373 *                  8
 374 *
 375 *      So, we get E by:
 376 *                    (O - M) * 8
 377 *              E <= -------------
 378 *                       G * N
 379 */
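    /*
     * For instance, assuming a typical 2048+64 NAND (O = 64, M = 10, G = 13,
     * N = 4), the bound is E <= (64 - 10) * 8 / (13 * 4) = 432 / 52 ~= 8.3,
     * which the function below rounds down to the even value 8.
     */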
 380static inline int get_ecc_strength(struct gpmi_nand_data *this)
 381{
 382        struct bch_geometry *geo = &this->bch_geometry;
 383        struct mtd_info *mtd = nand_to_mtd(&this->nand);
 384        int ecc_strength;
 385
 386        ecc_strength = ((mtd->oobsize - geo->metadata_size) * 8)
 387                        / (geo->gf_len * geo->ecc_chunk_count);
 388
 389        /* Round down to the nearest even number. */
 390        return round_down(ecc_strength, 2);
 391}
 392
 393static int legacy_set_geometry(struct gpmi_nand_data *this)
 394{
 395        struct bch_geometry *geo = &this->bch_geometry;
 396        struct mtd_info *mtd = nand_to_mtd(&this->nand);
 397        unsigned int metadata_size;
 398        unsigned int status_size;
 399        unsigned int block_mark_bit_offset;
 400
 401        /*
 402         * The size of the metadata can be changed, though we set it to 10
 403         * bytes for now. It cannot be too large, because we have to leave
 404         * enough space for the BCH parity.
 405         */
 406        geo->metadata_size = 10;
 407
 408        /* The default for the length of Galois Field. */
 409        geo->gf_len = 13;
 410
 411        /* The default for chunk size. */
 412        geo->ecc_chunk_size = 512;
 413        while (geo->ecc_chunk_size < mtd->oobsize) {
 414                geo->ecc_chunk_size *= 2; /* keep C >= O */
 415                geo->gf_len = 14;
 416        }
 417
 418        geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunk_size;
 419
 420        /* We use the same ECC strength for all chunks. */
 421        geo->ecc_strength = get_ecc_strength(this);
 422        if (!gpmi_check_ecc(this)) {
 423                dev_err(this->dev,
 424                        "ecc strength: %d cannot be supported by the controller (%d)\n"
 425                        "try to use the minimum ecc strength that the NAND chip requires\n",
 426                        geo->ecc_strength,
 427                        this->devdata->bch_max_ecc_strength);
 428                return -EINVAL;
 429        }
 430
 431        geo->page_size = mtd->writesize + geo->metadata_size +
 432                (geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) / 8;
 433        geo->payload_size = mtd->writesize;
 434
 435        /*
 436         * The auxiliary buffer contains the metadata and the ECC status. The
 437         * metadata is padded to the nearest 32-bit boundary. The ECC status
 438         * contains one byte for every ECC chunk, and is also padded to the
 439         * nearest 32-bit boundary.
 440         */
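            /*
             * For example, with the assumed metadata size of 10 bytes and 4
             * ECC chunks: ALIGN(10, 4) = 12 and ALIGN(4, 4) = 4, so the
             * auxiliary buffer is 16 bytes long and the status bytes start
             * at offset 12.
             */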
 441        metadata_size = ALIGN(geo->metadata_size, 4);
 442        status_size   = ALIGN(geo->ecc_chunk_count, 4);
 443
 444        geo->auxiliary_size = metadata_size + status_size;
 445        geo->auxiliary_status_offset = metadata_size;
 446
 447        if (!this->swap_block_mark)
 448                return 0;
 449
 450        /*
 451         * We need to compute the byte and bit offsets of
 452         * the physical block mark within the ECC-based view of the page.
 453         *
 454         * A NAND chip with a 2K page is shown below:
 455         *                                             (Block Mark)
 456         *                                                   |      |
 457         *                                                   |  D   |
 458         *                                                   |<---->|
 459         *                                                   V      V
 460         *    +---+----------+-+----------+-+----------+-+----------+-+
 461         *    | M |   data   |E|   data   |E|   data   |E|   data   |E|
 462         *    +---+----------+-+----------+-+----------+-+----------+-+
 463         *
 464         * The position of the block mark moves forward in the ECC-based view
 465         * of the page, and the delta is:
 466         *
 467         *                   E * G * (N - 1)
 468         *             D = (---------------- + M)
 469         *                          8
 470         *
 471         * With the formula to compute the ECC strength, and the condition
 472         *       : C >= O         (C is the ecc chunk size)
 473         *
 474         * It is easy to deduce the following result:
 475         *
 476         *         E * G       (O - M)      C - M         C - M
 477         *      ----------- <= ------- <=  --------  <  ---------
 478         *           8            N           N          (N - 1)
 479         *
 480         *  So, we get:
 481         *
 482         *                   E * G * (N - 1)
 483         *             D = (---------------- + M) < C
 484         *                          8
 485         *
 486         *  The above inequality means that the position of the block mark
 487         *  within the ECC-based view of the page still falls in a data chunk,
 488         *  and NOT in the ECC bits of the chunk.
 489         *
 490         *  Use the following to compute the bit position of the
 491         *  physical block mark within the ECC-based view of the page:
 492         *          (page_size - D) * 8
 493         *
 494         *  --Huang Shijie
 495         */
 496        block_mark_bit_offset = mtd->writesize * 8 -
 497                (geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
 498                                + geo->metadata_size * 8);
 499
 500        geo->block_mark_byte_offset = block_mark_bit_offset / 8;
 501        geo->block_mark_bit_offset  = block_mark_bit_offset % 8;
 502        return 0;
 503}
 504
 505static int common_nfc_set_geometry(struct gpmi_nand_data *this)
 506{
 507        struct nand_chip *chip = &this->nand;
 508
 509        if (chip->ecc.strength > 0 && chip->ecc.size > 0)
 510                return set_geometry_by_ecc_info(this, chip->ecc.strength,
 511                                                chip->ecc.size);
 512
 513        if ((of_property_read_bool(this->dev->of_node, "fsl,use-minimum-ecc"))
 514                                || legacy_set_geometry(this)) {
 515                if (!(chip->base.eccreq.strength > 0 &&
 516                      chip->base.eccreq.step_size > 0))
 517                        return -EINVAL;
 518
 519                return set_geometry_by_ecc_info(this,
 520                                                chip->base.eccreq.strength,
 521                                                chip->base.eccreq.step_size);
 522        }
 523
 524        return 0;
 525}
 526
 527/* Configures the geometry for BCH.  */
 528static int bch_set_geometry(struct gpmi_nand_data *this)
 529{
 530        struct resources *r = &this->resources;
 531        int ret;
 532
 533        ret = common_nfc_set_geometry(this);
 534        if (ret)
 535                return ret;
 536
 537        ret = pm_runtime_get_sync(this->dev);
 538        if (ret < 0)
 539                return ret;
 540
 541        /*
 542         * Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this
 543         * chip, otherwise it will lock up. So we skip resetting BCH on the MX23
 544         * and MX28.
 545         */
 546        ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MXS(this));
 547        if (ret)
 548                goto err_out;
 549
 550        /* Set *all* chip selects to use layout 0. */
 551        writel(0, r->bch_regs + HW_BCH_LAYOUTSELECT);
 552
 553        ret = 0;
 554err_out:
 555        pm_runtime_mark_last_busy(this->dev);
 556        pm_runtime_put_autosuspend(this->dev);
 557
 558        return ret;
 559}
 560
 561/*
 562 * <1> First, we should know what the GPMI clock is.
 563 *     The GPMI-clock is the internal clock of the GPMI NAND controller.
 564 *     If you run the GPMI NAND controller at 100MHz, the GPMI-clock's period
 565 *     is 10ns. Call the GPMI-clock's period GPMI-clock-period.
 566 *
 567 * <2> Second, we should know the frequency on the NAND chip pins.
 568 *     The frequency on the nand chip pins is derived from the GPMI-clock.
 569 *     We can get it from the following equation:
 570 *
 571 *         F = G / (DS + DH)
 572 *
 573 *         F  : the frequency on the nand chip pins.
 574 *         G  : the GPMI clock, such as 100MHz.
 575 *         DS : GPMI_HW_GPMI_TIMING0:DATA_SETUP
 576 *         DH : GPMI_HW_GPMI_TIMING0:DATA_HOLD
 577 *
 578 * <3> Third, when the frequency on the NAND chip pins is above 33MHz,
 579 *     the NAND EDO (Extended Data Out) timing can be applied.
 580 *     The GPMI implements a feedback read strobe to sample the read data.
 581 *     The feedback read strobe can be delayed to support the NAND EDO timing,
 582 *     where the read strobe may deassert before the read data is valid, and
 583 *     the read data remains valid for some time after the read strobe.
 584 *
 585 *     The following figure illustrates some aspects of a NAND Flash read:
 586 *
 587 *                   |<---tREA---->|
 588 *                   |             |
 589 *                   |         |   |
 590 *                   |<--tRP-->|   |
 591 *                   |         |   |
 592 *                  __          ___|__________________________________
 593 *     RDN            \________/   |
 594 *                                 |
 595 *                                 /---------\
 596 *     Read Data    --------------<           >---------
 597 *                                 \---------/
 598 *                                |     |
 599 *                                |<-D->|
 600 *     FeedbackRDN  ________             ____________
 601 *                          \___________/
 602 *
 603 *          D stands for delay, set in the HW_GPMI_CTRL1:RDN_DELAY.
 604 *
 605 *
 606 * <4> Now, we begin to describe how to compute the right RDN_DELAY.
 607 *
 608 *  4.1) From the aspect of the nand chip pins:
 609 *        Delay = (tREA + C - tRP)               {1}
 610 *
 611 *        tREA : the maximum read access time.
 612 *        C    : a constant to adjust the delay. The default is 4000ps.
 613 *        tRP  : the read pulse width, which is exactly:
 614 *                   tRP = (GPMI-clock-period) * DATA_SETUP
 615 *
 616 *  4.2) From the aspect of the GPMI nand controller:
 617 *         Delay = RDN_DELAY * 0.125 * RP        {2}
 618 *
 619 *         RP   : the DLL reference period.
 620 *            if (GPMI-clock-period > DLL_THRESHOLD)
 621 *                   RP = GPMI-clock-period / 2;
 622 *            else
 623 *                   RP = GPMI-clock-period;
 624 *
 625 *            Set HW_GPMI_CTRL1:HALF_PERIOD if GPMI-clock-period is greater
 626 *            than DLL_THRESHOLD. On other SoCs the DLL_THRESHOLD is 16000ps,
 627 *            but on mx6q we use 12000ps.
 628 *
 629 *  4.3) since {1} equals {2}, we get:
 630 *
 631 *                     (tREA + 4000 - tRP) * 8
 632 *         RDN_DELAY = -----------------------     {3}
 633 *                           RP
 634 */
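    /*
     * A quick illustration of {3} (the numbers below are assumed, not taken
     * from a datasheet): with a 100MHz GPMI clock the period is 10000ps; if
     * DATA_SETUP is one cycle, then tRP = 10000ps. Since 10000ps does not
     * exceed the mx6q DLL threshold of 12000ps, RP = 10000ps. Assuming
     * tREA = 16000ps:
     *
     *     RDN_DELAY = (16000 + 4000 - 10000) * 8 / 10000 = 8
     */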
 635static void gpmi_nfc_compute_timings(struct gpmi_nand_data *this,
 636                                     const struct nand_sdr_timings *sdr)
 637{
 638        struct gpmi_nfc_hardware_timing *hw = &this->hw;
 639        unsigned int dll_threshold_ps = this->devdata->max_chain_delay;
 640        unsigned int period_ps, reference_period_ps;
 641        unsigned int data_setup_cycles, data_hold_cycles, addr_setup_cycles;
 642        unsigned int tRP_ps;
 643        bool use_half_period;
 644        int sample_delay_ps, sample_delay_factor;
 645        u16 busy_timeout_cycles;
 646        u8 wrn_dly_sel;
 647
 648        if (sdr->tRC_min >= 30000) {
 649                /* ONFI non-EDO modes [0-3] */
 650                hw->clk_rate = 22000000;
 651                wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_4_TO_8NS;
 652        } else if (sdr->tRC_min >= 25000) {
 653                /* ONFI EDO mode 4 */
 654                hw->clk_rate = 80000000;
 655                wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY;
 656        } else {
 657                /* ONFI EDO mode 5 */
 658                hw->clk_rate = 100000000;
 659                wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY;
 660        }
 661
 662        /* SDR core timings are given in picoseconds */
 663        period_ps = div_u64((u64)NSEC_PER_SEC * 1000, hw->clk_rate);
 664
 665        addr_setup_cycles = TO_CYCLES(sdr->tALS_min, period_ps);
 666        data_setup_cycles = TO_CYCLES(sdr->tDS_min, period_ps);
 667        data_hold_cycles = TO_CYCLES(sdr->tDH_min, period_ps);
 668        busy_timeout_cycles = TO_CYCLES(sdr->tWB_max + sdr->tR_max, period_ps);
 669
 670        hw->timing0 = BF_GPMI_TIMING0_ADDRESS_SETUP(addr_setup_cycles) |
 671                      BF_GPMI_TIMING0_DATA_HOLD(data_hold_cycles) |
 672                      BF_GPMI_TIMING0_DATA_SETUP(data_setup_cycles);
 673        hw->timing1 = BF_GPMI_TIMING1_BUSY_TIMEOUT(busy_timeout_cycles * 4096);
 674
 675        /*
 676         * Derive NFC ideal delay from {3}:
 677         *
 678         *                     (tREA + 4000 - tRP) * 8
 679         *         RDN_DELAY = -----------------------
 680         *                                RP
 681         */
 682        if (period_ps > dll_threshold_ps) {
 683                use_half_period = true;
 684                reference_period_ps = period_ps / 2;
 685        } else {
 686                use_half_period = false;
 687                reference_period_ps = period_ps;
 688        }
 689
 690        tRP_ps = data_setup_cycles * period_ps;
 691        sample_delay_ps = (sdr->tREA_max + 4000 - tRP_ps) * 8;
 692        if (sample_delay_ps > 0)
 693                sample_delay_factor = sample_delay_ps / reference_period_ps;
 694        else
 695                sample_delay_factor = 0;
 696
 697        hw->ctrl1n = BF_GPMI_CTRL1_WRN_DLY_SEL(wrn_dly_sel);
 698        if (sample_delay_factor)
 699                hw->ctrl1n |= BF_GPMI_CTRL1_RDN_DELAY(sample_delay_factor) |
 700                              BM_GPMI_CTRL1_DLL_ENABLE |
 701                              (use_half_period ? BM_GPMI_CTRL1_HALF_PERIOD : 0);
 702}
 703
 704static void gpmi_nfc_apply_timings(struct gpmi_nand_data *this)
 705{
 706        struct gpmi_nfc_hardware_timing *hw = &this->hw;
 707        struct resources *r = &this->resources;
 708        void __iomem *gpmi_regs = r->gpmi_regs;
 709        unsigned int dll_wait_time_us;
 710
 711        clk_set_rate(r->clock[0], hw->clk_rate);
 712
 713        writel(hw->timing0, gpmi_regs + HW_GPMI_TIMING0);
 714        writel(hw->timing1, gpmi_regs + HW_GPMI_TIMING1);
 715
 716        /*
 717         * Clear several CTRL1 fields; the DLL must be disabled when setting
 718         * RDN_DELAY or HALF_PERIOD.
 719         */
 720        writel(BM_GPMI_CTRL1_CLEAR_MASK, gpmi_regs + HW_GPMI_CTRL1_CLR);
 721        writel(hw->ctrl1n, gpmi_regs + HW_GPMI_CTRL1_SET);
 722
 723        /* Wait 64 clock cycles before using the GPMI after enabling the DLL */
 724        dll_wait_time_us = USEC_PER_SEC / hw->clk_rate * 64;
 725        if (!dll_wait_time_us)
 726                dll_wait_time_us = 1;
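            /*
             * For example, at an assumed 100MHz GPMI clock, 64 cycles is only
             * 640ns; the integer division above then yields 0, so the 1us
             * minimum applies.
             */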
 727
 728        /* Wait for the DLL to settle. */
 729        udelay(dll_wait_time_us);
 730}
 731
 732static int gpmi_setup_data_interface(struct nand_chip *chip, int chipnr,
 733                                     const struct nand_data_interface *conf)
 734{
 735        struct gpmi_nand_data *this = nand_get_controller_data(chip);
 736        const struct nand_sdr_timings *sdr;
 737
 738        /* Retrieve required NAND timings */
 739        sdr = nand_get_sdr_timings(conf);
 740        if (IS_ERR(sdr))
 741                return PTR_ERR(sdr);
 742
 743        /* Only MX6 GPMI controller can reach EDO timings */
 744        if (sdr->tRC_min <= 25000 && !GPMI_IS_MX6(this))
 745                return -ENOTSUPP;
 746
 747        /* Stop here if this call was just a check */
 748        if (chipnr < 0)
 749                return 0;
 750
 751        /* Do the actual derivation of the controller timings */
 752        gpmi_nfc_compute_timings(this, sdr);
 753
 754        this->hw.must_apply_timings = true;
 755
 756        return 0;
 757}
 758
 759/* Clears a BCH interrupt. */
 760static void gpmi_clear_bch(struct gpmi_nand_data *this)
 761{
 762        struct resources *r = &this->resources;
 763        writel(BM_BCH_CTRL_COMPLETE_IRQ, r->bch_regs + HW_BCH_CTRL_CLR);
 764}
 765
 766static struct dma_chan *get_dma_chan(struct gpmi_nand_data *this)
 767{
 768        /* We use DMA channel 0 to access all the NAND chips. */
 769        return this->dma_chans[0];
 770}
 771
 772/* This will be called after the DMA operation is finished. */
 773static void dma_irq_callback(void *param)
 774{
 775        struct gpmi_nand_data *this = param;
 776        struct completion *dma_c = &this->dma_done;
 777
 778        complete(dma_c);
 779}
 780
 781static irqreturn_t bch_irq(int irq, void *cookie)
 782{
 783        struct gpmi_nand_data *this = cookie;
 784
 785        gpmi_clear_bch(this);
 786        complete(&this->bch_done);
 787        return IRQ_HANDLED;
 788}
 789
 790static int gpmi_raw_len_to_len(struct gpmi_nand_data *this, int raw_len)
 791{
 792        /*
 793         * raw_len is the length to read/write including the BCH data, as
 794         * passed to us in exec_op. Calculate the data length from it.
 795         */
 796        if (this->bch)
 797                return ALIGN_DOWN(raw_len, this->bch_geometry.ecc_chunk_size);
 798        else
 799                return raw_len;
 800}
 801
 802/* Can we use the upper layer's buffer directly for DMA? */
 803static bool prepare_data_dma(struct gpmi_nand_data *this, const void *buf,
 804                             int raw_len, struct scatterlist *sgl,
 805                             enum dma_data_direction dr)
 806{
 807        int ret;
 808        int len = gpmi_raw_len_to_len(this, raw_len);
 809
 810        /* first try to map the upper buffer directly */
 811        if (virt_addr_valid(buf) && !object_is_on_stack(buf)) {
 812                sg_init_one(sgl, buf, len);
 813                ret = dma_map_sg(this->dev, sgl, 1, dr);
 814                if (ret == 0)
 815                        goto map_fail;
 816
 817                return true;
 818        }
 819
 820map_fail:
 821        /* We have to use our own DMA buffer. */
 822        sg_init_one(sgl, this->data_buffer_dma, len);
 823
 824        if (dr == DMA_TO_DEVICE && buf != this->data_buffer_dma)
 825                memcpy(this->data_buffer_dma, buf, len);
 826
 827        dma_map_sg(this->dev, sgl, 1, dr);
 828
 829        return false;
 830}
 831
 832/**
 833 * gpmi_copy_bits - copy bits from one memory region to another
 834 * @dst: destination buffer
 835 * @dst_bit_off: bit offset we're starting to write at
 836 * @src: source buffer
 837 * @src_bit_off: bit offset we're starting to read from
 838 * @nbits: number of bits to copy
 839 *
 840 * This function copies bits from one memory region to another, and is used by
 841 * the GPMI driver to copy ECC sections which are not guaranteed to be byte
 842 * aligned.
 843 *
 844 * src and dst should not overlap.
 845 *
 846 */
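    /*
     * A usage sketch (the parameters are illustrative only): to extract a
     * 104-bit ECC area starting at bit offset 3 of a raw page into a
     * byte-aligned buffer, one could call:
     *
     *     gpmi_copy_bits(eccbuf, 0, raw_page, 3, 104);
     */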
 847static void gpmi_copy_bits(u8 *dst, size_t dst_bit_off, const u8 *src,
 848                           size_t src_bit_off, size_t nbits)
 849{
 850        size_t i;
 851        size_t nbytes;
 852        u32 src_buffer = 0;
 853        size_t bits_in_src_buffer = 0;
 854
 855        if (!nbits)
 856                return;
 857
 858        /*
 859         * Move src and dst pointers to the closest byte boundary and store bit
 860         * offsets within a byte.
 861         */
 862        src += src_bit_off / 8;
 863        src_bit_off %= 8;
 864
 865        dst += dst_bit_off / 8;
 866        dst_bit_off %= 8;
 867
 868        /*
 869         * Initialize the src_buffer value with bits available in the first
 870         * byte of data so that we end up with a byte aligned src pointer.
 871         */
 872        if (src_bit_off) {
 873                src_buffer = src[0] >> src_bit_off;
 874                if (nbits >= (8 - src_bit_off)) {
 875                        bits_in_src_buffer += 8 - src_bit_off;
 876                } else {
 877                        src_buffer &= GENMASK(nbits - 1, 0);
 878                        bits_in_src_buffer += nbits;
 879                }
 880                nbits -= bits_in_src_buffer;
 881                src++;
 882        }
 883
 884        /* Calculate the number of bytes that can be copied from src to dst. */
 885        nbytes = nbits / 8;
 886
 887        /* Try to align dst to a byte boundary. */
 888        if (dst_bit_off) {
 889                if (bits_in_src_buffer < (8 - dst_bit_off) && nbytes) {
 890                        src_buffer |= src[0] << bits_in_src_buffer;
 891                        bits_in_src_buffer += 8;
 892                        src++;
 893                        nbytes--;
 894                }
 895
 896                if (bits_in_src_buffer >= (8 - dst_bit_off)) {
 897                        dst[0] &= GENMASK(dst_bit_off - 1, 0);
 898                        dst[0] |= src_buffer << dst_bit_off;
 899                        src_buffer >>= (8 - dst_bit_off);
 900                        bits_in_src_buffer -= (8 - dst_bit_off);
 901                        dst_bit_off = 0;
 902                        dst++;
 903                        if (bits_in_src_buffer > 7) {
 904                                bits_in_src_buffer -= 8;
 905                                dst[0] = src_buffer;
 906                                dst++;
 907                                src_buffer >>= 8;
 908                        }
 909                }
 910        }
 911
 912        if (!bits_in_src_buffer && !dst_bit_off) {
 913                /*
 914                 * Both src and dst pointers are byte aligned, thus we can
 915                 * just use the optimized memcpy function.
 916                 */
 917                if (nbytes)
 918                        memcpy(dst, src, nbytes);
 919        } else {
 920                /*
 921                 * src buffer is not byte aligned, hence we have to copy each
 922                 * src byte to the src_buffer variable before extracting a byte
 923                 * to store in dst.
 924                 */
 925                for (i = 0; i < nbytes; i++) {
 926                        src_buffer |= src[i] << bits_in_src_buffer;
 927                        dst[i] = src_buffer;
 928                        src_buffer >>= 8;
 929                }
 930        }
 931        /* Update dst and src pointers */
 932        dst += nbytes;
 933        src += nbytes;
 934
 935        /*
 936         * nbits is the number of remaining bits. It should not exceed 8 as
 937         * we've already copied as many bytes as possible.
 938         */
 939        nbits %= 8;
 940
 941        /*
 942         * If there are no more bits to copy to the destination and the src buffer
 943         * was already byte aligned, then we're done.
 944         */
 945        if (!nbits && !bits_in_src_buffer)
 946                return;
 947
 948        /* Copy the remaining bits to src_buffer */
 949        if (nbits)
 950                src_buffer |= (*src & GENMASK(nbits - 1, 0)) <<
 951                              bits_in_src_buffer;
 952        bits_in_src_buffer += nbits;
 953
 954        /*
 955         * In case there were not enough bits to get a byte aligned dst buffer
 956         * prepare the src_buffer variable to match the dst organization (shift
 957         * src_buffer by dst_bit_off and retrieve the least significant bits
 958         * from dst).
 959         */
 960        if (dst_bit_off)
 961                src_buffer = (src_buffer << dst_bit_off) |
 962                             (*dst & GENMASK(dst_bit_off - 1, 0));
 963        bits_in_src_buffer += dst_bit_off;
 964
 965        /*
 966         * Keep most significant bits from dst if we end up with an unaligned
 967         * number of bits.
 968         */
 969        nbytes = bits_in_src_buffer / 8;
 970        if (bits_in_src_buffer % 8) {
 971                src_buffer |= (dst[nbytes] &
 972                               GENMASK(7, bits_in_src_buffer % 8)) <<
 973                              (nbytes * 8);
 974                nbytes++;
 975        }
 976
 977        /* Copy the remaining bytes to dst */
 978        for (i = 0; i < nbytes; i++) {
 979                dst[i] = src_buffer;
 980                src_buffer >>= 8;
 981        }
 982}
 983
 984/* add our own bbt descriptor */
 985static uint8_t scan_ff_pattern[] = { 0xff };
 986static struct nand_bbt_descr gpmi_bbt_descr = {
 987        .options        = 0,
 988        .offs           = 0,
 989        .len            = 1,
 990        .pattern        = scan_ff_pattern
 991};
 992
 993/*
 994 * We may change the layout if we can get the ECC info from the datasheet,
 995 * otherwise we will use the whole (page + OOB).
 996 */
 997static int gpmi_ooblayout_ecc(struct mtd_info *mtd, int section,
 998                              struct mtd_oob_region *oobregion)
 999{
1000        struct nand_chip *chip = mtd_to_nand(mtd);
1001        struct gpmi_nand_data *this = nand_get_controller_data(chip);
1002        struct bch_geometry *geo = &this->bch_geometry;
1003
1004        if (section)
1005                return -ERANGE;
1006
1007        oobregion->offset = 0;
1008        oobregion->length = geo->page_size - mtd->writesize;
1009
1010        return 0;
1011}
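    /*
     * Continuing the illustrative geometry used earlier (an assumed 2048+64
     * page with 8-bit ECC over 512-byte chunks, so page_size = 2110): the
     * ECC region reported above is {offset 0, length 62} and the free region
     * reported by gpmi_ooblayout_free() below is {offset 62, length 2}.
     */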
1012
1013static int gpmi_ooblayout_free(struct mtd_info *mtd, int section,
1014                               struct mtd_oob_region *oobregion)
1015{
1016        struct nand_chip *chip = mtd_to_nand(mtd);
1017        struct gpmi_nand_data *this = nand_get_controller_data(chip);
1018        struct bch_geometry *geo = &this->bch_geometry;
1019
1020        if (section)
1021                return -ERANGE;
1022
1023        /* The OOB space that is left available. */
1024        if (geo->page_size < mtd->writesize + mtd->oobsize) {
1025                oobregion->offset = geo->page_size - mtd->writesize;
1026                oobregion->length = mtd->oobsize - oobregion->offset;
1027        }
1028
1029        return 0;
1030}
1031
1032static const char * const gpmi_clks_for_mx2x[] = {
1033        "gpmi_io",
1034};
1035
1036static const struct mtd_ooblayout_ops gpmi_ooblayout_ops = {
1037        .ecc = gpmi_ooblayout_ecc,
1038        .free = gpmi_ooblayout_free,
1039};
1040
1041static const struct gpmi_devdata gpmi_devdata_imx23 = {
1042        .type = IS_MX23,
1043        .bch_max_ecc_strength = 20,
1044        .max_chain_delay = 16000,
1045        .clks = gpmi_clks_for_mx2x,
1046        .clks_count = ARRAY_SIZE(gpmi_clks_for_mx2x),
1047};
1048
1049static const struct gpmi_devdata gpmi_devdata_imx28 = {
1050        .type = IS_MX28,
1051        .bch_max_ecc_strength = 20,
1052        .max_chain_delay = 16000,
1053        .clks = gpmi_clks_for_mx2x,
1054        .clks_count = ARRAY_SIZE(gpmi_clks_for_mx2x),
1055};
1056
1057static const char * const gpmi_clks_for_mx6[] = {
1058        "gpmi_io", "gpmi_apb", "gpmi_bch", "gpmi_bch_apb", "per1_bch",
1059};
1060
1061static const struct gpmi_devdata gpmi_devdata_imx6q = {
1062        .type = IS_MX6Q,
1063        .bch_max_ecc_strength = 40,
1064        .max_chain_delay = 12000,
1065        .clks = gpmi_clks_for_mx6,
1066        .clks_count = ARRAY_SIZE(gpmi_clks_for_mx6),
1067};
1068
1069static const struct gpmi_devdata gpmi_devdata_imx6sx = {
1070        .type = IS_MX6SX,
1071        .bch_max_ecc_strength = 62,
1072        .max_chain_delay = 12000,
1073        .clks = gpmi_clks_for_mx6,
1074        .clks_count = ARRAY_SIZE(gpmi_clks_for_mx6),
1075};
1076
1077static const char * const gpmi_clks_for_mx7d[] = {
1078        "gpmi_io", "gpmi_bch_apb",
1079};
1080
1081static const struct gpmi_devdata gpmi_devdata_imx7d = {
1082        .type = IS_MX7D,
1083        .bch_max_ecc_strength = 62,
1084        .max_chain_delay = 12000,
1085        .clks = gpmi_clks_for_mx7d,
1086        .clks_count = ARRAY_SIZE(gpmi_clks_for_mx7d),
1087};
1088
1089static int acquire_register_block(struct gpmi_nand_data *this,
1090                                  const char *res_name)
1091{
1092        struct platform_device *pdev = this->pdev;
1093        struct resources *res = &this->resources;
1094        struct resource *r;
1095        void __iomem *p;
1096
1097        r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name);
1098        p = devm_ioremap_resource(&pdev->dev, r);
1099        if (IS_ERR(p))
1100                return PTR_ERR(p);
1101
1102        if (!strcmp(res_name, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME))
1103                res->gpmi_regs = p;
1104        else if (!strcmp(res_name, GPMI_NAND_BCH_REGS_ADDR_RES_NAME))
1105                res->bch_regs = p;
1106        else
1107                dev_err(this->dev, "unknown resource name : %s\n", res_name);
1108
1109        return 0;
1110}
1111
1112static int acquire_bch_irq(struct gpmi_nand_data *this, irq_handler_t irq_h)
1113{
1114        struct platform_device *pdev = this->pdev;
1115        const char *res_name = GPMI_NAND_BCH_INTERRUPT_RES_NAME;
1116        struct resource *r;
1117        int err;
1118
1119        r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, res_name);
1120        if (!r) {
1121                dev_err(this->dev, "Can't get resource for %s\n", res_name);
1122                return -ENODEV;
1123        }
1124
1125        err = devm_request_irq(this->dev, r->start, irq_h, 0, res_name, this);
1126        if (err)
1127                dev_err(this->dev, "error requesting BCH IRQ\n");
1128
1129        return err;
1130}
1131
1132static void release_dma_channels(struct gpmi_nand_data *this)
1133{
1134        unsigned int i;
1135        for (i = 0; i < DMA_CHANS; i++)
1136                if (this->dma_chans[i]) {
1137                        dma_release_channel(this->dma_chans[i]);
1138                        this->dma_chans[i] = NULL;
1139                }
1140}
1141
1142static int acquire_dma_channels(struct gpmi_nand_data *this)
1143{
1144        struct platform_device *pdev = this->pdev;
1145        struct dma_chan *dma_chan;
1146
1147        /* request dma channel */
1148        dma_chan = dma_request_slave_channel(&pdev->dev, "rx-tx");
1149        if (!dma_chan) {
1150                dev_err(this->dev, "Failed to request DMA channel.\n");
1151                goto acquire_err;
1152        }
1153
1154        this->dma_chans[0] = dma_chan;
1155        return 0;
1156
1157acquire_err:
1158        release_dma_channels(this);
1159        return -EINVAL;
1160}
1161
1162static int gpmi_get_clks(struct gpmi_nand_data *this)
1163{
1164        struct resources *r = &this->resources;
1165        struct clk *clk;
1166        int err, i;
1167
1168        for (i = 0; i < this->devdata->clks_count; i++) {
1169                clk = devm_clk_get(this->dev, this->devdata->clks[i]);
1170                if (IS_ERR(clk)) {
1171                        err = PTR_ERR(clk);
1172                        goto err_clock;
1173                }
1174
1175                r->clock[i] = clk;
1176        }
1177
1178        if (GPMI_IS_MX6(this))
1179                /*
1180                 * Set the default value for the gpmi clock.
1181                 *
1182                 * If you want to use an ONFI NAND in synchronous mode,
1183                 * change the clock rate as needed.
1184                 */
1185                clk_set_rate(r->clock[0], 22000000);
1186
1187        return 0;
1188
1189err_clock:
1190        dev_dbg(this->dev, "failed to find the clocks.\n");
1191        return err;
1192}
1193
1194static int acquire_resources(struct gpmi_nand_data *this)
1195{
1196        int ret;
1197
1198        ret = acquire_register_block(this, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME);
1199        if (ret)
1200                goto exit_regs;
1201
1202        ret = acquire_register_block(this, GPMI_NAND_BCH_REGS_ADDR_RES_NAME);
1203        if (ret)
1204                goto exit_regs;
1205
1206        ret = acquire_bch_irq(this, bch_irq);
1207        if (ret)
1208                goto exit_regs;
1209
1210        ret = acquire_dma_channels(this);
1211        if (ret)
1212                goto exit_regs;
1213
1214        ret = gpmi_get_clks(this);
1215        if (ret)
1216                goto exit_clock;
1217        return 0;
1218
1219exit_clock:
1220        release_dma_channels(this);
1221exit_regs:
1222        return ret;
1223}
1224
1225static void release_resources(struct gpmi_nand_data *this)
1226{
1227        release_dma_channels(this);
1228}
1229
1230static void gpmi_free_dma_buffer(struct gpmi_nand_data *this)
1231{
1232        struct device *dev = this->dev;
1233        struct bch_geometry *geo = &this->bch_geometry;
1234
1235        if (this->auxiliary_virt && virt_addr_valid(this->auxiliary_virt))
1236                dma_free_coherent(dev, geo->auxiliary_size,
1237                                        this->auxiliary_virt,
1238                                        this->auxiliary_phys);
1239        kfree(this->data_buffer_dma);
1240        kfree(this->raw_buffer);
1241
1242        this->data_buffer_dma   = NULL;
1243        this->raw_buffer        = NULL;
1244}
1245
1246/* Allocate the DMA buffers */
1247static int gpmi_alloc_dma_buffer(struct gpmi_nand_data *this)
1248{
1249        struct bch_geometry *geo = &this->bch_geometry;
1250        struct device *dev = this->dev;
1251        struct mtd_info *mtd = nand_to_mtd(&this->nand);
1252
1253        /*
1254         * Allocate a read/write data buffer.
1255         *     gpmi_alloc_dma_buffer() can be called twice:
1256         *     we allocate a PAGE_SIZE buffer when it is called before
1257         *     the NAND identification, and a buffer of the real NAND
1258         *     page size when it is called afterwards (i.e. once the
1259         *     page size is known).
1260         */
1261        this->data_buffer_dma = kzalloc(mtd->writesize ?: PAGE_SIZE,
1262                                        GFP_DMA | GFP_KERNEL);
1263        if (this->data_buffer_dma == NULL)
1264                goto error_alloc;
1265
1266        this->auxiliary_virt = dma_alloc_coherent(dev, geo->auxiliary_size,
1267                                        &this->auxiliary_phys, GFP_DMA);
1268        if (!this->auxiliary_virt)
1269                goto error_alloc;
1270
1271        this->raw_buffer = kzalloc((mtd->writesize ?: PAGE_SIZE) + mtd->oobsize, GFP_KERNEL);
1272        if (!this->raw_buffer)
1273                goto error_alloc;
1274
1275        return 0;
1276
1277error_alloc:
1278        gpmi_free_dma_buffer(this);
1279        return -ENOMEM;
1280}
1281
1282/*
1283 * Handles block mark swapping.
1284 * It can be called to swap the block mark in, or to swap it back,
1285 * because the two operations are the same.
1286 */
1287static void block_mark_swapping(struct gpmi_nand_data *this,
1288                                void *payload, void *auxiliary)
1289{
1290        struct bch_geometry *nfc_geo = &this->bch_geometry;
1291        unsigned char *p;
1292        unsigned char *a;
1293        unsigned int  bit;
1294        unsigned char mask;
1295        unsigned char from_data;
1296        unsigned char from_oob;
1297
1298        if (!this->swap_block_mark)
1299                return;
1300
1301        /*
1302         * If control arrives here, we're swapping. Make some convenience
1303         * variables.
1304         */
1305        bit = nfc_geo->block_mark_bit_offset;
1306        p   = payload + nfc_geo->block_mark_byte_offset;
1307        a   = auxiliary;
1308
1309        /*
1310         * Get the byte from the data area that overlays the block mark. Since
1311         * the ECC engine applies its own view to the bits in the page, the
1312         * physical block mark won't (in general) appear on a byte boundary in
1313         * the data.
1314         */
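            /*
             * For example (assumed values): with bit = 4, p[0] = 0xAB and
             * p[1] = 0xCD, (0xAB >> 4) | (0xCD << 4) = 0xCDA, which is
             * truncated to the u8 value 0xDA when assigned to from_data.
             */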
1315        from_data = (p[0] >> bit) | (p[1] << (8 - bit));
1316
1317        /* Get the byte from the OOB. */
1318        from_oob = a[0];
1319
1320        /* Swap them. */
1321        a[0] = from_data;
1322
1323        mask = (0x1 << bit) - 1;
1324        p[0] = (p[0] & mask) | (from_oob << bit);
1325
1326        mask = ~0 << bit;
1327        p[1] = (p[1] & mask) | (from_oob >> (8 - bit));
1328}
1329
1330static int gpmi_count_bitflips(struct nand_chip *chip, void *buf, int first,
1331                               int last, int meta)
1332{
1333        struct gpmi_nand_data *this = nand_get_controller_data(chip);
1334        struct bch_geometry *nfc_geo = &this->bch_geometry;
1335        struct mtd_info *mtd = nand_to_mtd(chip);
1336        int i;
1337        unsigned char *status;
1338        unsigned int max_bitflips = 0;
1339
1340        /* Loop over status bytes, accumulating ECC status. */
1341        status = this->auxiliary_virt + ALIGN(meta, 4);
1342
1343        for (i = first; i < last; i++, status++) {
1344                if ((*status == STATUS_GOOD) || (*status == STATUS_ERASED))
1345                        continue;
1346
1347                if (*status == STATUS_UNCORRECTABLE) {
1348                        int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
1349                        u8 *eccbuf = this->raw_buffer;
1350                        int offset, bitoffset;
1351                        int eccbytes;
1352                        int flips;
1353
1354                        /* Read ECC bytes into our internal raw_buffer */
1355                        offset = nfc_geo->metadata_size * 8;
1356                        offset += ((8 * nfc_geo->ecc_chunk_size) + eccbits) * (i + 1);
1357                        offset -= eccbits;
1358                        bitoffset = offset % 8;
1359                        eccbytes = DIV_ROUND_UP(offset + eccbits, 8);
1360                        offset /= 8;
1361                        eccbytes -= offset;
1362                        nand_change_read_column_op(chip, offset, eccbuf,
1363                                                   eccbytes, false);
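                            /*
                             * To illustrate with assumed numbers (C = 512,
                             * G = 13, E = 8, M = 10, so eccbits = 104): for
                             * chunk i = 0, offset = 80 + 4200 - 104 = 4176
                             * bits, bitoffset = 0, and we read
                             * DIV_ROUND_UP(4280, 8) - 522 = 13 bytes starting
                             * at column 522.
                             */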
1364
1365                        /*
1366                         * ECC data are not byte aligned and we may have
1367                         * in-band data in the first and last byte of
1368                         * eccbuf. Set non-eccbits to one so that
1369                         * nand_check_erased_ecc_chunk() does not count them
1370                         * as bitflips.
1371                         */
1372                        if (bitoffset)
1373                                eccbuf[0] |= GENMASK(bitoffset - 1, 0);
1374
1375                        bitoffset = (bitoffset + eccbits) % 8;
1376                        if (bitoffset)
1377                                eccbuf[eccbytes - 1] |= GENMASK(7, bitoffset);
1378
1379                        /*
1380                         * The ECC hardware has an uncorrectable ECC status
1381                         * code in case we have bitflips in an erased page. As
1382                         * nothing was written into this subpage the ECC is
1383                         * obviously wrong and we cannot trust it. We assume
1384                         * at this point that we are reading an erased page and
1385                         * try to correct the bitflips in the buffer, up to
1386                         * ecc_strength bitflips. If this is a page with random
1387                         * data, we exceed this number of bitflips and have an
1388                         * ECC failure. Otherwise we use the corrected buffer.
1389                         */
1390                        if (i == 0) {
1391                                /* The first block includes metadata */
1392                                flips = nand_check_erased_ecc_chunk(
1393                                                buf + i * nfc_geo->ecc_chunk_size,
1394                                                nfc_geo->ecc_chunk_size,
1395                                                eccbuf, eccbytes,
1396                                                this->auxiliary_virt,
1397                                                nfc_geo->metadata_size,
1398                                                nfc_geo->ecc_strength);
1399                        } else {
1400                                flips = nand_check_erased_ecc_chunk(
1401                                                buf + i * nfc_geo->ecc_chunk_size,
1402                                                nfc_geo->ecc_chunk_size,
1403                                                eccbuf, eccbytes,
1404                                                NULL, 0,
1405                                                nfc_geo->ecc_strength);
1406                        }
1407
1408                        if (flips > 0) {
1409                                max_bitflips = max_t(unsigned int, max_bitflips,
1410                                                     flips);
1411                                mtd->ecc_stats.corrected += flips;
1412                                continue;
1413                        }
1414
1415                        mtd->ecc_stats.failed++;
1416                        continue;
1417                }
1418
1419                mtd->ecc_stats.corrected += *status;
1420                max_bitflips = max_t(unsigned int, max_bitflips, *status);
1421        }
1422
1423        return max_bitflips;
1424}
1425
1426static void gpmi_bch_layout_std(struct gpmi_nand_data *this)
1427{
1428        struct bch_geometry *geo = &this->bch_geometry;
1429        unsigned int ecc_strength = geo->ecc_strength >> 1;
1430        unsigned int gf_len = geo->gf_len;
1431        unsigned int block_size = geo->ecc_chunk_size;
1432
1433        this->bch_flashlayout0 =
1434                BF_BCH_FLASH0LAYOUT0_NBLOCKS(geo->ecc_chunk_count - 1) |
1435                BF_BCH_FLASH0LAYOUT0_META_SIZE(geo->metadata_size) |
1436                BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this) |
1437                BF_BCH_FLASH0LAYOUT0_GF(gf_len, this) |
1438                BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block_size, this);
1439
1440        this->bch_flashlayout1 =
1441                BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(geo->page_size) |
1442                BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this) |
1443                BF_BCH_FLASH0LAYOUT1_GF(gf_len, this) |
1444                BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(block_size, this);
1445}
1446
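    /*
     * ECC-based full page read: program the standard BCH layout, read the
     * whole page through the BCH engine, derive max_bitflips from the
     * per-chunk status bytes, undo the block mark swapping and, if
     * requested, report an OOB buffer of all set bits plus the block mark
     * (see gpmi_ecc_read_oob() for the OOB policy).
     */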
1447static int gpmi_ecc_read_page(struct nand_chip *chip, uint8_t *buf,
1448                              int oob_required, int page)
1449{
1450        struct gpmi_nand_data *this = nand_get_controller_data(chip);
1451        struct mtd_info *mtd = nand_to_mtd(chip);
1452        struct bch_geometry *geo = &this->bch_geometry;
1453        unsigned int max_bitflips;
1454        int ret;
1455
1456        gpmi_bch_layout_std(this);
1457        this->bch = true;
1458
1459        ret = nand_read_page_op(chip, page, 0, buf, geo->page_size);
1460        if (ret)
1461                return ret;
1462
1463        max_bitflips = gpmi_count_bitflips(chip, buf, 0,
1464                                           geo->ecc_chunk_count,
1465                                           geo->auxiliary_status_offset);
1466
1467        /* handle the block mark swapping */
1468        block_mark_swapping(this, buf, this->auxiliary_virt);
1469
1470        if (oob_required) {
1471                /*
1472                 * It's time to deliver the OOB bytes. See gpmi_ecc_read_oob()
1473                 * for details about our policy for delivering the OOB.
1474                 *
1475                 * We fill the caller's buffer with set bits, and then copy the
1476                 * block mark to the caller's buffer. Note that, if block mark
1477                 * swapping was necessary, it has already been done, so we can
1478                 * rely on the first byte of the auxiliary buffer to contain
1479                 * the block mark.
1480                 */
1481                memset(chip->oob_poi, ~0, mtd->oobsize);
1482                chip->oob_poi[0] = ((uint8_t *)this->auxiliary_virt)[0];
1483        }
1484
1485        return max_bitflips;
1486}
1487
1488/* Fake a virtual small page for the subpage read */
1489static int gpmi_ecc_read_subpage(struct nand_chip *chip, uint32_t offs,
1490                                 uint32_t len, uint8_t *buf, int page)
1491{
1492        struct gpmi_nand_data *this = nand_get_controller_data(chip);
1493        struct bch_geometry *geo = &this->bch_geometry;
1494        int size = chip->ecc.size; /* ECC chunk size */
1495        int meta, n, page_size;
1496        unsigned int max_bitflips;
1497        unsigned int ecc_strength;
1498        int first, last, marker_pos;
1499        int ecc_parity_size;
1500        int col = 0;
1501        int ret;
1502
1503        /* The size of ECC parity */
1504        ecc_parity_size = geo->gf_len * geo->ecc_strength / 8;
1505
1506        /* Align it with the chunk size */
1507        first = offs / size;
1508        last = (offs + len - 1) / size;
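            /*
             * Illustrative example (hypothetical numbers): with 512-byte
             * chunks, offs = 1024 and len = 600 give first = 1024 / 512 = 2
             * and last = (1024 + 600 - 1) / 512 = 3, so chunks 2..3 are read.
             */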
1509
1510        if (this->swap_block_mark) {
1511                /*
1512                 * Find the chunk which contains the Block Marker.
1513                 * If this chunk is in the range of [first, last],
1514                 * we have to read out the whole page.
1515                 * Why? Because the data at the block marker position has been
1516                 * swapped with the metadata, which is bound to chunk 0.
1517                 */
1518                marker_pos = geo->block_mark_byte_offset / size;
1519                if (last >= marker_pos && first <= marker_pos) {
1520                        dev_dbg(this->dev,
1521                                "page:%d, first:%d, last:%d, marker at:%d\n",
1522                                page, first, last, marker_pos);
1523                        return gpmi_ecc_read_page(chip, buf, 0, page);
1524                }
1525        }
1526
1527        meta = geo->metadata_size;
1528        if (first) {
1529                col = meta + (size + ecc_parity_size) * first;
1530                meta = 0;
1531                buf = buf + first * size;
1532        }
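            /*
             * Note that treating chunk boundaries as byte offsets (col above)
             * is only valid because subpage reads are enabled solely when the
             * ECC parity size is byte aligned, see gpmi_init_last().
             */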
1533
1534        ecc_parity_size = geo->gf_len * geo->ecc_strength / 8;
1535
1536        n = last - first + 1;
1537        page_size = meta + (size + ecc_parity_size) * n;
1538        ecc_strength = geo->ecc_strength >> 1;
1539
1540        this->bch_flashlayout0 = BF_BCH_FLASH0LAYOUT0_NBLOCKS(n - 1) |
1541                BF_BCH_FLASH0LAYOUT0_META_SIZE(meta) |
1542                BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this) |
1543                BF_BCH_FLASH0LAYOUT0_GF(geo->gf_len, this) |
1544                BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(geo->ecc_chunk_size, this);
1545
1546        this->bch_flashlayout1 = BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size) |
1547                BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this) |
1548                BF_BCH_FLASH0LAYOUT1_GF(geo->gf_len, this) |
1549                BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(geo->ecc_chunk_size, this);
1550
1551        this->bch = true;
1552
1553        ret = nand_read_page_op(chip, page, col, buf, page_size);
1554        if (ret)
1555                return ret;
1556
1557        dev_dbg(this->dev, "page:%d(%d:%d)%d, chunk:(%d:%d), BCH PG size:%d\n",
1558                page, offs, len, col, first, n, page_size);
1559
1560        max_bitflips = gpmi_count_bitflips(chip, buf, first, last, meta);
1561
1562        return max_bitflips;
1563}
1564
1565static int gpmi_ecc_write_page(struct nand_chip *chip, const uint8_t *buf,
1566                               int oob_required, int page)
1567{
1568        struct mtd_info *mtd = nand_to_mtd(chip);
1569        struct gpmi_nand_data *this = nand_get_controller_data(chip);
1570        struct bch_geometry *nfc_geo = &this->bch_geometry;
1571        int ret;
1572
1573        dev_dbg(this->dev, "ecc write page.\n");
1574
1575        gpmi_bch_layout_std(this);
1576        this->bch = true;
1577
1578        memcpy(this->auxiliary_virt, chip->oob_poi, nfc_geo->auxiliary_size);
1579
1580        if (this->swap_block_mark) {
1581                /*
1582                 * When doing bad block marker swapping we must always copy the
1583                 * input buffer as we can't modify the const buffer.
1584                 */
1585                memcpy(this->data_buffer_dma, buf, mtd->writesize);
1586                buf = this->data_buffer_dma;
1587                block_mark_swapping(this, this->data_buffer_dma,
1588                                    this->auxiliary_virt);
1589        }
1590
1591        ret = nand_prog_page_op(chip, page, 0, buf, nfc_geo->page_size);
1592
1593        return ret;
1594}
1595
1596/*
1597 * There are several places in this driver where we have to handle the OOB and
1598 * block marks. This is the function where things are the most complicated, so
1599 * this is where we try to explain it all. All the other places refer back to
1600 * here.
1601 *
1602 * These are the rules, in order of decreasing importance:
1603 *
1604 * 1) Nothing the caller does can be allowed to imperil the block mark.
1605 *
1606 * 2) In read operations, the first byte of the OOB we return must reflect the
1607 *    true state of the block mark, no matter where that block mark appears in
1608 *    the physical page.
1609 *
1610 * 3) ECC-based read operations return an OOB full of set bits (since we never
1611 *    allow ECC-based writes to the OOB, it doesn't matter what ECC-based reads
1612 *    return).
1613 *
1614 * 4) "Raw" read operations return a direct view of the physical bytes in the
1615 *    page, using the conventional definition of which bytes are data and which
1616 *    are OOB. This gives the caller a way to see the actual, physical bytes
1617 *    in the page, without the distortions applied by our ECC engine.
1618 *
1619 *
1620 * What we do for this specific read operation depends on two questions:
1621 *
1622 * 1) Are we doing a "raw" read, or an ECC-based read?
1623 *
1624 * 2) Are we using block mark swapping or transcription?
1625 *
1626 * There are four cases, illustrated by the following Karnaugh map:
1627 *
1628 *                    |           Raw           |         ECC-based       |
1629 *       -------------+-------------------------+-------------------------+
1630 *                    | Read the conventional   |                         |
1631 *                    | OOB at the end of the   |                         |
1632 *       Swapping     | page and return it. It  |                         |
1633 *                    | contains exactly what   |                         |
1634 *                    | we want.                | Read the block mark and |
1635 *       -------------+-------------------------+ return it in a buffer   |
1636 *                    | Read the conventional   | full of set bits.       |
1637 *                    | OOB at the end of the   |                         |
1638 *                    | page and also the block |                         |
1639 *       Transcribing | mark in the metadata.   |                         |
1640 *                    | Copy the block mark     |                         |
1641 *                    | into the first byte of  |                         |
1642 *                    | the OOB.                |                         |
1643 *       -------------+-------------------------+-------------------------+
1644 *
1645 * Note that we break rule #4 in the Transcribing/Raw case because we're not
1646 * giving an accurate view of the actual, physical bytes in the page (we're
1647 * overwriting the block mark). That's OK because it's more important to follow
1648 * rule #2.
1649 *
1650 * It turns out that knowing whether we want an "ECC-based" or "raw" read is not
1651 * easy. When reading a page, for example, the NAND Flash MTD code calls our
1652 * ecc.read_page or ecc.read_page_raw function. Thus, the fact that MTD wants an
1653 * ECC-based or raw view of the page is implicit in which function it calls
1654 * (there is a similar pair of ECC-based/raw functions for writing).
1655 */
1656static int gpmi_ecc_read_oob(struct nand_chip *chip, int page)
1657{
1658        struct mtd_info *mtd = nand_to_mtd(chip);
1659        struct gpmi_nand_data *this = nand_get_controller_data(chip);
1660        int ret;
1661
1662        /* clear the OOB buffer */
1663        memset(chip->oob_poi, ~0, mtd->oobsize);
1664
1665        /* Read out the conventional OOB. */
1666        ret = nand_read_page_op(chip, page, mtd->writesize, chip->oob_poi,
1667                                mtd->oobsize);
1668        if (ret)
1669                return ret;
1670
1671        /*
1672         * Now, we want to make sure the block mark is correct. In the
1673         * non-transcribing case (!GPMI_IS_MX23()), we already have it.
1674         * Otherwise, we need to explicitly read it.
1675         */
1676        if (GPMI_IS_MX23(this)) {
1677                /* Read the block mark into the first byte of the OOB buffer. */
1678                ret = nand_read_page_op(chip, page, 0, chip->oob_poi, 1);
1679                if (ret)
1680                        return ret;
1681        }
1682
1683        return 0;
1684}
1685
1686static int gpmi_ecc_write_oob(struct nand_chip *chip, int page)
1687{
1688        struct mtd_info *mtd = nand_to_mtd(chip);
1689        struct mtd_oob_region of = { };
1690
1691        /* Do we have an available OOB area? */
1692        mtd_ooblayout_free(mtd, 0, &of);
1693        if (!of.length)
1694                return -EPERM;
1695
1696        if (!nand_is_slc(chip))
1697                return -EPERM;
1698
1699        return nand_prog_page_op(chip, page, mtd->writesize + of.offset,
1700                                 chip->oob_poi + of.offset, of.length);
1701}
1702
1703/*
1704 * This function reads a NAND page without involving the ECC engine (no HW
1705 * ECC correction).
1706 * The tricky part in the GPMI/BCH controller is that it stores ECC bits
1707 * inline (interleaved with payload DATA), and does not align data chunks on
1708 * byte boundaries.
1709 * We thus need to take care of moving the payload data and ECC bits stored in the
1710 * page into the provided buffers, which is why we're using gpmi_copy_bits.
1711 *
1712 * See set_geometry_by_ecc_info inline comments for a full description
1713 * of the layout used by the GPMI controller.
1714 */
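    /*
     * Rough sketch of the raw page as seen by the BCH engine (bit level,
     * generally not byte aligned):
     *
     *   | metadata | data chunk 0 | ECC 0 | data chunk 1 | ECC 1 | ... |
     *
     * Each data chunk is ecc_chunk_size * 8 bits and each ECC block is
     * ecc_strength * gf_len bits, hence the bit-wise copies below.
     */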
1715static int gpmi_ecc_read_page_raw(struct nand_chip *chip, uint8_t *buf,
1716                                  int oob_required, int page)
1717{
1718        struct mtd_info *mtd = nand_to_mtd(chip);
1719        struct gpmi_nand_data *this = nand_get_controller_data(chip);
1720        struct bch_geometry *nfc_geo = &this->bch_geometry;
1721        int eccsize = nfc_geo->ecc_chunk_size;
1722        int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
1723        u8 *tmp_buf = this->raw_buffer;
1724        size_t src_bit_off;
1725        size_t oob_bit_off;
1726        size_t oob_byte_off;
1727        uint8_t *oob = chip->oob_poi;
1728        int step;
1729        int ret;
1730
1731        ret = nand_read_page_op(chip, page, 0, tmp_buf,
1732                                mtd->writesize + mtd->oobsize);
1733        if (ret)
1734                return ret;
1735
1736        /*
1737         * If required, swap the bad block marker and the data stored in the
1738         * metadata section, so that we don't wrongly consider a block as bad.
1739         *
1740         * See the layout description for a detailed explanation on why this
1741         * is needed.
1742         */
1743        if (this->swap_block_mark)
1744                swap(tmp_buf[0], tmp_buf[mtd->writesize]);
1745
1746        /*
1747         * Copy the metadata section into the oob buffer (this section is
1748         * guaranteed to be aligned on a byte boundary).
1749         */
1750        if (oob_required)
1751                memcpy(oob, tmp_buf, nfc_geo->metadata_size);
1752
1753        oob_bit_off = nfc_geo->metadata_size * 8;
1754        src_bit_off = oob_bit_off;
1755
1756        /* Extract interleaved payload data and ECC bits */
1757        for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
1758                if (buf)
1759                        gpmi_copy_bits(buf, step * eccsize * 8,
1760                                       tmp_buf, src_bit_off,
1761                                       eccsize * 8);
1762                src_bit_off += eccsize * 8;
1763
1764                /* Align the last ECC block to a byte boundary */
1765                if (step == nfc_geo->ecc_chunk_count - 1 &&
1766                    (oob_bit_off + eccbits) % 8)
1767                        eccbits += 8 - ((oob_bit_off + eccbits) % 8);
1768
1769                if (oob_required)
1770                        gpmi_copy_bits(oob, oob_bit_off,
1771                                       tmp_buf, src_bit_off,
1772                                       eccbits);
1773
1774                src_bit_off += eccbits;
1775                oob_bit_off += eccbits;
1776        }
1777
1778        if (oob_required) {
1779                oob_byte_off = oob_bit_off / 8;
1780
1781                if (oob_byte_off < mtd->oobsize)
1782                        memcpy(oob + oob_byte_off,
1783                               tmp_buf + mtd->writesize + oob_byte_off,
1784                               mtd->oobsize - oob_byte_off);
1785        }
1786
1787        return 0;
1788}
1789
1790/*
1791 * This function writes a NAND page without involving the ECC engine (no HW
1792 * ECC generation).
1793 * The tricky part in the GPMI/BCH controller is that it stores ECC bits
1794 * inline (interleaved with payload DATA), and does not align data chunks on
1795 * byte boundaries.
1796 * We thus need to take care of moving the OOB area to the right place in the
1797 * final page, which is why we're using gpmi_copy_bits.
1798 *
1799 * See set_geometry_by_ecc_info inline comments for a full description
1800 * of the layout used by the GPMI controller.
1801 */
1802static int gpmi_ecc_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
1803                                   int oob_required, int page)
1804{
1805        struct mtd_info *mtd = nand_to_mtd(chip);
1806        struct gpmi_nand_data *this = nand_get_controller_data(chip);
1807        struct bch_geometry *nfc_geo = &this->bch_geometry;
1808        int eccsize = nfc_geo->ecc_chunk_size;
1809        int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
1810        u8 *tmp_buf = this->raw_buffer;
1811        uint8_t *oob = chip->oob_poi;
1812        size_t dst_bit_off;
1813        size_t oob_bit_off;
1814        size_t oob_byte_off;
1815        int step;
1816
1817        /*
1818         * Initialize all bits to 1 in case we don't have a buffer for the
1819         * payload or oob data, so that any unspecified bits are left in
1820         * their initial (erased) state.
1821         */
1822        if (!buf || !oob_required)
1823                memset(tmp_buf, 0xff, mtd->writesize + mtd->oobsize);
1824
1825        /*
1826         * First copy the metadata section (stored in oob buffer) at the
1827         * beginning of the page, as imposed by the GPMI layout.
1828         */
1829        memcpy(tmp_buf, oob, nfc_geo->metadata_size);
1830        oob_bit_off = nfc_geo->metadata_size * 8;
1831        dst_bit_off = oob_bit_off;
1832
1833        /* Interleave payload data and ECC bits */
1834        for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
1835                if (buf)
1836                        gpmi_copy_bits(tmp_buf, dst_bit_off,
1837                                       buf, step * eccsize * 8, eccsize * 8);
1838                dst_bit_off += eccsize * 8;
1839
1840                /* Align the last ECC block to a byte boundary */
1841                if (step == nfc_geo->ecc_chunk_count - 1 &&
1842                    (oob_bit_off + eccbits) % 8)
1843                        eccbits += 8 - ((oob_bit_off + eccbits) % 8);
1844
1845                if (oob_required)
1846                        gpmi_copy_bits(tmp_buf, dst_bit_off,
1847                                       oob, oob_bit_off, eccbits);
1848
1849                dst_bit_off += eccbits;
1850                oob_bit_off += eccbits;
1851        }
1852
1853        oob_byte_off = oob_bit_off / 8;
1854
1855        if (oob_required && oob_byte_off < mtd->oobsize)
1856                memcpy(tmp_buf + mtd->writesize + oob_byte_off,
1857                       oob + oob_byte_off, mtd->oobsize - oob_byte_off);
1858
1859        /*
1860         * If required, swap the bad block marker and the first byte of the
1861         * metadata section, so that we don't modify the bad block marker.
1862         *
1863         * See the layout description for a detailed explanation on why this
1864         * is needed.
1865         */
1866        if (this->swap_block_mark)
1867                swap(tmp_buf[0], tmp_buf[mtd->writesize]);
1868
1869        return nand_prog_page_op(chip, page, 0, tmp_buf,
1870                                 mtd->writesize + mtd->oobsize);
1871}
1872
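    /*
     * The raw OOB accessors reuse the raw page paths with a NULL payload
     * buffer: the read copies only the metadata/OOB bytes into chip->oob_poi,
     * while the write programs the page with an all-ones payload plus the
     * OOB data taken from chip->oob_poi.
     */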
1873static int gpmi_ecc_read_oob_raw(struct nand_chip *chip, int page)
1874{
1875        return gpmi_ecc_read_page_raw(chip, NULL, 1, page);
1876}
1877
1878static int gpmi_ecc_write_oob_raw(struct nand_chip *chip, int page)
1879{
1880        return gpmi_ecc_write_page_raw(chip, NULL, 1, page);
1881}
1882
1883static int gpmi_block_markbad(struct nand_chip *chip, loff_t ofs)
1884{
1885        struct mtd_info *mtd = nand_to_mtd(chip);
1886        struct gpmi_nand_data *this = nand_get_controller_data(chip);
1887        int ret = 0;
1888        uint8_t *block_mark;
1889        int column, page, chipnr;
1890
1891        chipnr = (int)(ofs >> chip->chip_shift);
1892        nand_select_target(chip, chipnr);
1893
1894        column = !GPMI_IS_MX23(this) ? mtd->writesize : 0;
1895
1896        /* Write the block mark. */
1897        block_mark = this->data_buffer_dma;
1898        block_mark[0] = 0; /* bad block marker */
1899
1900        /* Shift to get page */
1901        page = (int)(ofs >> chip->page_shift);
1902
1903        ret = nand_prog_page_op(chip, page, column, block_mark, 1);
1904
1905        nand_deselect_target(chip);
1906
1907        return ret;
1908}
1909
1910static int nand_boot_set_geometry(struct gpmi_nand_data *this)
1911{
1912        struct boot_rom_geometry *geometry = &this->rom_geometry;
1913
1914        /*
1915         * Set the boot block stride size.
1916         *
1917         * In principle, we should be reading this from the OTP bits, since
1918         * that's where the ROM is going to get it. In fact, we don't have any
1919         * way to read the OTP bits, so we go with the default and hope for the
1920         * best.
1921         */
1922        geometry->stride_size_in_pages = 64;
1923
1924        /*
1925         * Set the search area stride exponent.
1926         *
1927         * In principle, we should be reading this from the OTP bits, since
1928         * that's where the ROM is going to get it. In fact, we don't have any
1929         * way to read the OTP bits, so we go with the default and hope for the
1930         * best.
1931         */
1932        geometry->search_area_stride_exponent = 2;
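            /*
             * With the defaults above the boot ROM search area spans
             * 1 << 2 = 4 strides of 64 pages each, i.e. 256 pages.
             */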
1933        return 0;
1934}
1935
1936static const char  *fingerprint = "STMP";
1937static int mx23_check_transcription_stamp(struct gpmi_nand_data *this)
1938{
1939        struct boot_rom_geometry *rom_geo = &this->rom_geometry;
1940        struct device *dev = this->dev;
1941        struct nand_chip *chip = &this->nand;
1942        unsigned int search_area_size_in_strides;
1943        unsigned int stride;
1944        unsigned int page;
1945        u8 *buffer = nand_get_data_buf(chip);
1946        int found_an_ncb_fingerprint = false;
1947        int ret;
1948
1949        /* Compute the number of strides in a search area. */
1950        search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;
1951
1952        nand_select_target(chip, 0);
1953
1954        /*
1955         * Loop through the first search area, looking for the NCB fingerprint.
1956         */
1957        dev_dbg(dev, "Scanning for an NCB fingerprint...\n");
1958
1959        for (stride = 0; stride < search_area_size_in_strides; stride++) {
1960                /* Compute the page addresses. */
1961                page = stride * rom_geo->stride_size_in_pages;
1962
1963                dev_dbg(dev, "Looking for a fingerprint in page 0x%x\n", page);
1964
1965                /*
1966                 * Read the NCB fingerprint. The fingerprint is four bytes long
1967                 * and starts in the 12th byte of the page.
1968                 */
1969                ret = nand_read_page_op(chip, page, 12, buffer,
1970                                        strlen(fingerprint));
1971                if (ret)
1972                        continue;
1973
1974                /* Look for the fingerprint. */
1975                if (!memcmp(buffer, fingerprint, strlen(fingerprint))) {
1976                        found_an_ncb_fingerprint = true;
1977                        break;
1978                }
1979
1980        }
1981
1982        nand_deselect_target(chip);
1983
1984        if (found_an_ncb_fingerprint)
1985                dev_dbg(dev, "\tFound a fingerprint\n");
1986        else
1987                dev_dbg(dev, "\tNo fingerprint found\n");
1988        return found_an_ncb_fingerprint;
1989}
1990
1991/* Writes a transcription stamp. */
1992static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
1993{
1994        struct device *dev = this->dev;
1995        struct boot_rom_geometry *rom_geo = &this->rom_geometry;
1996        struct nand_chip *chip = &this->nand;
1997        struct mtd_info *mtd = nand_to_mtd(chip);
1998        unsigned int block_size_in_pages;
1999        unsigned int search_area_size_in_strides;
2000        unsigned int search_area_size_in_pages;
2001        unsigned int search_area_size_in_blocks;
2002        unsigned int block;
2003        unsigned int stride;
2004        unsigned int page;
2005        u8 *buffer = nand_get_data_buf(chip);
2006        int status;
2007
2008        /* Compute the search area geometry. */
2009        block_size_in_pages = mtd->erasesize / mtd->writesize;
2010        search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;
2011        search_area_size_in_pages = search_area_size_in_strides *
2012                                        rom_geo->stride_size_in_pages;
2013        search_area_size_in_blocks =
2014                  (search_area_size_in_pages + (block_size_in_pages - 1)) /
2015                                    block_size_in_pages;
2016
2017        dev_dbg(dev, "Search Area Geometry :\n");
2018        dev_dbg(dev, "\tin Blocks : %u\n", search_area_size_in_blocks);
2019        dev_dbg(dev, "\tin Strides: %u\n", search_area_size_in_strides);
2020        dev_dbg(dev, "\tin Pages  : %u\n", search_area_size_in_pages);
2021
2022        nand_select_target(chip, 0);
2023
2024        /* Loop over blocks in the first search area, erasing them. */
2025        dev_dbg(dev, "Erasing the search area...\n");
2026
2027        for (block = 0; block < search_area_size_in_blocks; block++) {
2028                /* Erase this block. */
2029                dev_dbg(dev, "\tErasing block 0x%x\n", block);
2030                status = nand_erase_op(chip, block);
2031                if (status)
2032                        dev_err(dev, "[%s] Erase failed.\n", __func__);
2033        }
2034
2035        /* Write the NCB fingerprint into the page buffer. */
2036        memset(buffer, ~0, mtd->writesize);
2037        memcpy(buffer + 12, fingerprint, strlen(fingerprint));
2038
2039        /* Loop through the first search area, writing NCB fingerprints. */
2040        dev_dbg(dev, "Writing NCB fingerprints...\n");
2041        for (stride = 0; stride < search_area_size_in_strides; stride++) {
2042                /* Compute the page addresses. */
2043                page = stride * rom_geo->stride_size_in_pages;
2044
2045                /* Write the first page of the current stride. */
2046                dev_dbg(dev, "Writing an NCB fingerprint in page 0x%x\n", page);
2047
2048                status = chip->ecc.write_page_raw(chip, buffer, 0, page);
2049                if (status)
2050                        dev_err(dev, "[%s] Write failed.\n", __func__);
2051        }
2052
2053        nand_deselect_target(chip);
2054
2055        return 0;
2056}
2057
2058static int mx23_boot_init(struct gpmi_nand_data  *this)
2059{
2060        struct device *dev = this->dev;
2061        struct nand_chip *chip = &this->nand;
2062        struct mtd_info *mtd = nand_to_mtd(chip);
2063        unsigned int block_count;
2064        unsigned int block;
2065        int     chipnr;
2066        int     page;
2067        loff_t  byte;
2068        uint8_t block_mark;
2069        int     ret = 0;
2070
2071        /*
2072         * If control arrives here, we can't use block mark swapping, which
2073         * means we're forced to use transcription. First, scan for the
2074         * transcription stamp. If we find it, then we don't have to do
2075         * anything -- the block marks are already transcribed.
2076         */
2077        if (mx23_check_transcription_stamp(this))
2078                return 0;
2079
2080        /*
2081         * If control arrives here, we couldn't find a transcription stamp,
2082         * so we presume the block marks are in the conventional location.
2083         */
2084        dev_dbg(dev, "Transcribing bad block marks...\n");
2085
2086        /* Compute the number of blocks in the entire medium. */
2087        block_count = nanddev_eraseblocks_per_target(&chip->base);
2088
2089        /*
2090         * Loop over all the blocks in the medium, transcribing block marks as
2091         * we go.
2092         */
2093        for (block = 0; block < block_count; block++) {
2094                /*
2095                 * Compute the chip, page and byte addresses for this block's
2096                 * conventional mark.
2097                 */
2098                chipnr = block >> (chip->chip_shift - chip->phys_erase_shift);
2099                page = block << (chip->phys_erase_shift - chip->page_shift);
2100                byte = block <<  chip->phys_erase_shift;
2101
2102                /* Send the command to read the conventional block mark. */
2103                nand_select_target(chip, chipnr);
2104                ret = nand_read_page_op(chip, page, mtd->writesize, &block_mark,
2105                                        1);
2106                nand_deselect_target(chip);
2107
2108                if (ret)
2109                        continue;
2110
2111                /*
2112                 * Check if the block is marked bad. If so, we need to mark it
2113                 * again, but this time the result will be a mark in the
2114                 * location where we transcribe block marks.
2115                 */
2116                if (block_mark != 0xff) {
2117                        dev_dbg(dev, "Transcribing mark in block %u\n", block);
2118                        ret = chip->legacy.block_markbad(chip, byte);
2119                        if (ret)
2120                                dev_err(dev,
2121                                        "Failed to mark block bad with ret %d\n",
2122                                        ret);
2123                }
2124        }
2125
2126        /* Write the stamp that indicates we've transcribed the block marks. */
2127        mx23_write_transcription_stamp(this);
2128        return 0;
2129}
2130
2131static int nand_boot_init(struct gpmi_nand_data  *this)
2132{
2133        nand_boot_set_geometry(this);
2134
2135        /* This is the ROM arch-specific initialization before the BBT scanning. */
2136        if (GPMI_IS_MX23(this))
2137                return mx23_boot_init(this);
2138        return 0;
2139}
2140
2141static int gpmi_set_geometry(struct gpmi_nand_data *this)
2142{
2143        int ret;
2144
2145        /* Free the temporary DMA memory for reading ID. */
2146        gpmi_free_dma_buffer(this);
2147
2148        /* Set up the NFC geometry which is used by BCH. */
2149        ret = bch_set_geometry(this);
2150        if (ret) {
2151                dev_err(this->dev, "Error setting BCH geometry : %d\n", ret);
2152                return ret;
2153        }
2154
2155        /* Alloc the new DMA buffers according to the pagesize and oobsize */
2156        return gpmi_alloc_dma_buffer(this);
2157}
2158
2159static int gpmi_init_last(struct gpmi_nand_data *this)
2160{
2161        struct nand_chip *chip = &this->nand;
2162        struct mtd_info *mtd = nand_to_mtd(chip);
2163        struct nand_ecc_ctrl *ecc = &chip->ecc;
2164        struct bch_geometry *bch_geo = &this->bch_geometry;
2165        int ret;
2166
2167        /* Set up the medium geometry */
2168        ret = gpmi_set_geometry(this);
2169        if (ret)
2170                return ret;
2171
2172        /* Init the nand_ecc_ctrl{} */
2173        ecc->read_page  = gpmi_ecc_read_page;
2174        ecc->write_page = gpmi_ecc_write_page;
2175        ecc->read_oob   = gpmi_ecc_read_oob;
2176        ecc->write_oob  = gpmi_ecc_write_oob;
2177        ecc->read_page_raw = gpmi_ecc_read_page_raw;
2178        ecc->write_page_raw = gpmi_ecc_write_page_raw;
2179        ecc->read_oob_raw = gpmi_ecc_read_oob_raw;
2180        ecc->write_oob_raw = gpmi_ecc_write_oob_raw;
2181        ecc->mode       = NAND_ECC_HW;
2182        ecc->size       = bch_geo->ecc_chunk_size;
2183        ecc->strength   = bch_geo->ecc_strength;
2184        mtd_set_ooblayout(mtd, &gpmi_ooblayout_ops);
2185
2186        /*
2187         * We only enable the subpage read when:
2188         *  (1) the chip is imx6, and
2189         *  (2) the size of the ECC parity is byte aligned.
2190         */
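            /*
             * For example (hypothetical geometry): gf_len = 13 and
             * ecc_strength = 8 give 104 parity bits, i.e. 13 bytes per
             * chunk, which is byte aligned, so subpage reads are enabled.
             */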
2191        if (GPMI_IS_MX6(this) &&
2192                ((bch_geo->gf_len * bch_geo->ecc_strength) % 8) == 0) {
2193                ecc->read_subpage = gpmi_ecc_read_subpage;
2194                chip->options |= NAND_SUBPAGE_READ;
2195        }
2196
2197        return 0;
2198}
2199
2200static int gpmi_nand_attach_chip(struct nand_chip *chip)
2201{
2202        struct gpmi_nand_data *this = nand_get_controller_data(chip);
2203        int ret;
2204
2205        if (chip->bbt_options & NAND_BBT_USE_FLASH) {
2206                chip->bbt_options |= NAND_BBT_NO_OOB;
2207
2208                if (of_property_read_bool(this->dev->of_node,
2209                                          "fsl,no-blockmark-swap"))
2210                        this->swap_block_mark = false;
2211        }
2212        dev_dbg(this->dev, "Blockmark swapping %sabled\n",
2213                this->swap_block_mark ? "en" : "dis");
2214
2215        ret = gpmi_init_last(this);
2216        if (ret)
2217                return ret;
2218
2219        chip->options |= NAND_SKIP_BBTSCAN;
2220
2221        return 0;
2222}
2223
2224static struct gpmi_transfer *get_next_transfer(struct gpmi_nand_data *this)
2225{
2226        struct gpmi_transfer *transfer = &this->transfers[this->ntransfers];
2227
2228        this->ntransfers++;
2229
2230        if (this->ntransfers == GPMI_MAX_TRANSFERS)
2231                return NULL;
2232
2233        return transfer;
2234}
2235
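    /*
     * Chain a command cycle, optionally followed by its address cycles: the
     * opcode and the address bytes are packed into one cmdbuf and issued as
     * a single PIO word plus DMA transfer (ADDRESS_INCREMENT should make the
     * controller step from CLE for the command byte to ALE for the trailing
     * address bytes).
     */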
2236static struct dma_async_tx_descriptor *gpmi_chain_command(
2237        struct gpmi_nand_data *this, u8 cmd, const u8 *addr, int naddr)
2238{
2239        struct dma_chan *channel = get_dma_chan(this);
2240        struct dma_async_tx_descriptor *desc;
2241        struct gpmi_transfer *transfer;
2242        int chip = this->nand.cur_cs;
2243        u32 pio[3];
2244
2245        /* [1] send out the PIO words */
2246        pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE)
2247                | BM_GPMI_CTRL0_WORD_LENGTH
2248                | BF_GPMI_CTRL0_CS(chip, this)
2249                | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
2250                | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_CLE)
2251                | BM_GPMI_CTRL0_ADDRESS_INCREMENT
2252                | BF_GPMI_CTRL0_XFER_COUNT(naddr + 1);
2253        pio[1] = 0;
2254        pio[2] = 0;
2255        desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio),
2256                                      DMA_TRANS_NONE, 0);
2257        if (!desc)
2258                return NULL;
2259
2260        transfer = get_next_transfer(this);
2261        if (!transfer)
2262                return NULL;
2263
2264        transfer->cmdbuf[0] = cmd;
2265        if (naddr)
2266                memcpy(&transfer->cmdbuf[1], addr, naddr);
2267
2268        sg_init_one(&transfer->sgl, transfer->cmdbuf, naddr + 1);
2269        dma_map_sg(this->dev, &transfer->sgl, 1, DMA_TO_DEVICE);
2270
2271        transfer->direction = DMA_TO_DEVICE;
2272
2273        desc = dmaengine_prep_slave_sg(channel, &transfer->sgl, 1, DMA_MEM_TO_DEV,
2274                                       MXS_DMA_CTRL_WAIT4END);
2275        return desc;
2276}
2277
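    /*
     * Chain a zero-length WAIT_FOR_READY transfer: with MXS_DMA_CTRL_WAIT4RDY
     * the DMA chain stalls here until the NAND ready/busy line reports ready.
     */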
2278static struct dma_async_tx_descriptor *gpmi_chain_wait_ready(
2279        struct gpmi_nand_data *this)
2280{
2281        struct dma_chan *channel = get_dma_chan(this);
2282        u32 pio[2];
2283
2284        pio[0] =  BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY)
2285                | BM_GPMI_CTRL0_WORD_LENGTH
2286                | BF_GPMI_CTRL0_CS(this->nand.cur_cs, this)
2287                | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
2288                | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
2289                | BF_GPMI_CTRL0_XFER_COUNT(0);
2290        pio[1] = 0;
2291
2292        return mxs_dmaengine_prep_pio(channel, pio, 2, DMA_TRANS_NONE,
2293                                MXS_DMA_CTRL_WAIT4END | MXS_DMA_CTRL_WAIT4RDY);
2294}
2295
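    /*
     * Chain a data-in transfer. With BCH enabled, the payload and auxiliary
     * buffers are handed to the BCH engine through the extra PIO words, so
     * no slave_sg descriptor is chained; otherwise a plain DMA_DEV_TO_MEM
     * descriptor moves the data. *direct reports whether the caller's buffer
     * could be DMA-mapped directly or the driver's bounce buffer is used
     * (and copied back in gpmi_nfc_exec_op()).
     */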
2296static struct dma_async_tx_descriptor *gpmi_chain_data_read(
2297        struct gpmi_nand_data *this, void *buf, int raw_len, bool *direct)
2298{
2299        struct dma_async_tx_descriptor *desc;
2300        struct dma_chan *channel = get_dma_chan(this);
2301        struct gpmi_transfer *transfer;
2302        u32 pio[6] = {};
2303
2304        transfer = get_next_transfer(this);
2305        if (!transfer)
2306                return NULL;
2307
2308        transfer->direction = DMA_FROM_DEVICE;
2309
2310        *direct = prepare_data_dma(this, buf, raw_len, &transfer->sgl,
2311                                   DMA_FROM_DEVICE);
2312
2313        pio[0] =  BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__READ)
2314                | BM_GPMI_CTRL0_WORD_LENGTH
2315                | BF_GPMI_CTRL0_CS(this->nand.cur_cs, this)
2316                | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
2317                | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
2318                | BF_GPMI_CTRL0_XFER_COUNT(raw_len);
2319
2320        if (this->bch) {
2321                pio[2] =  BM_GPMI_ECCCTRL_ENABLE_ECC
2322                        | BF_GPMI_ECCCTRL_ECC_CMD(BV_GPMI_ECCCTRL_ECC_CMD__BCH_DECODE)
2323                        | BF_GPMI_ECCCTRL_BUFFER_MASK(BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE
2324                                | BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY);
2325                pio[3] = raw_len;
2326                pio[4] = transfer->sgl.dma_address;
2327                pio[5] = this->auxiliary_phys;
2328        }
2329
2330        desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio),
2331                                      DMA_TRANS_NONE, 0);
2332        if (!desc)
2333                return NULL;
2334
2335        if (!this->bch)
2336                desc = dmaengine_prep_slave_sg(channel, &transfer->sgl, 1,
2337                                             DMA_DEV_TO_MEM,
2338                                             MXS_DMA_CTRL_WAIT4END);
2339
2340        return desc;
2341}
2342
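    /*
     * Chain a data-out transfer, mirroring gpmi_chain_data_read(): with BCH
     * enabled the extra PIO words point the encoder at the payload and
     * auxiliary buffers, otherwise a DMA_MEM_TO_DEV descriptor is chained.
     */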
2343static struct dma_async_tx_descriptor *gpmi_chain_data_write(
2344        struct gpmi_nand_data *this, const void *buf, int raw_len)
2345{
2346        struct dma_chan *channel = get_dma_chan(this);
2347        struct dma_async_tx_descriptor *desc;
2348        struct gpmi_transfer *transfer;
2349        u32 pio[6] = {};
2350
2351        transfer = get_next_transfer(this);
2352        if (!transfer)
2353                return NULL;
2354
2355        transfer->direction = DMA_TO_DEVICE;
2356
2357        prepare_data_dma(this, buf, raw_len, &transfer->sgl, DMA_TO_DEVICE);
2358
2359        pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE)
2360                | BM_GPMI_CTRL0_WORD_LENGTH
2361                | BF_GPMI_CTRL0_CS(this->nand.cur_cs, this)
2362                | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
2363                | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
2364                | BF_GPMI_CTRL0_XFER_COUNT(raw_len);
2365
2366        if (this->bch) {
2367                pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC
2368                        | BF_GPMI_ECCCTRL_ECC_CMD(BV_GPMI_ECCCTRL_ECC_CMD__BCH_ENCODE)
2369                        | BF_GPMI_ECCCTRL_BUFFER_MASK(BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE |
2370                                        BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY);
2371                pio[3] = raw_len;
2372                pio[4] = transfer->sgl.dma_address;
2373                pio[5] = this->auxiliary_phys;
2374        }
2375
2376        desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio),
2377                                      DMA_TRANS_NONE,
2378                                      (this->bch ? MXS_DMA_CTRL_WAIT4END : 0));
2379        if (!desc)
2380                return NULL;
2381
2382        if (!this->bch)
2383                desc = dmaengine_prep_slave_sg(channel, &transfer->sgl, 1,
2384                                               DMA_MEM_TO_DEV,
2385                                               MXS_DMA_CTRL_WAIT4END);
2386
2387        return desc;
2388}
2389
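    /*
     * ->exec_op() implementation: walk the nand_operation instruction list,
     * translate each instruction into a chained PIO/DMA descriptor, program
     * the BCH layout registers if a BCH transfer was prepared, then submit
     * the chain and wait for either the DMA or the BCH completion interrupt.
     */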
2390static int gpmi_nfc_exec_op(struct nand_chip *chip,
2391                             const struct nand_operation *op,
2392                             bool check_only)
2393{
2394        const struct nand_op_instr *instr;
2395        struct gpmi_nand_data *this = nand_get_controller_data(chip);
2396        struct dma_async_tx_descriptor *desc = NULL;
2397        int i, ret, buf_len = 0, nbufs = 0;
2398        u8 cmd = 0;
2399        void *buf_read = NULL;
2400        const void *buf_write = NULL;
2401        bool direct = false;
2402        struct completion *completion;
2403        unsigned long to;
2404
2405        this->ntransfers = 0;
2406        for (i = 0; i < GPMI_MAX_TRANSFERS; i++)
2407                this->transfers[i].direction = DMA_NONE;
2408
2409        ret = pm_runtime_get_sync(this->dev);
2410        if (ret < 0)
2411                return ret;
2412
2413        /*
2414         * This driver currently supports only one NAND chip. Plus, dies share
2415         * the same configuration. So once timings have been applied on the
2416         * controller side, they will not change anymore. When the time
2417         * comes, the check on must_apply_timings will have to be dropped.
2418         */
2419        if (this->hw.must_apply_timings) {
2420                this->hw.must_apply_timings = false;
2421                gpmi_nfc_apply_timings(this);
2422        }
2423
2424        dev_dbg(this->dev, "%s: %d instructions\n", __func__, op->ninstrs);
2425
2426        for (i = 0; i < op->ninstrs; i++) {
2427                instr = &op->instrs[i];
2428
2429                nand_op_trace("  ", instr);
2430
2431                switch (instr->type) {
2432                case NAND_OP_WAITRDY_INSTR:
2433                        desc = gpmi_chain_wait_ready(this);
2434                        break;
2435                case NAND_OP_CMD_INSTR:
2436                        cmd = instr->ctx.cmd.opcode;
2437
2438                        /*
2439                         * When this command has an address cycle, chain it
2440                         * together with the address cycle.
2441                         */
2442                        if (i + 1 != op->ninstrs &&
2443                            op->instrs[i + 1].type == NAND_OP_ADDR_INSTR)
2444                                continue;
2445
2446                        desc = gpmi_chain_command(this, cmd, NULL, 0);
2447
2448                        break;
2449                case NAND_OP_ADDR_INSTR:
2450                        desc = gpmi_chain_command(this, cmd, instr->ctx.addr.addrs,
2451                                                  instr->ctx.addr.naddrs);
2452                        break;
2453                case NAND_OP_DATA_OUT_INSTR:
2454                        buf_write = instr->ctx.data.buf.out;
2455                        buf_len = instr->ctx.data.len;
2456                        nbufs++;
2457
2458                        desc = gpmi_chain_data_write(this, buf_write, buf_len);
2459
2460                        break;
2461                case NAND_OP_DATA_IN_INSTR:
2462                        if (!instr->ctx.data.len)
2463                                break;
2464                        buf_read = instr->ctx.data.buf.in;
2465                        buf_len = instr->ctx.data.len;
2466                        nbufs++;
2467
2468                        desc = gpmi_chain_data_read(this, buf_read, buf_len,
2469                                                   &direct);
2470                        break;
2471                }
2472
2473                if (!desc) {
2474                        ret = -ENXIO;
2475                        goto unmap;
2476                }
2477        }
2478
2479        dev_dbg(this->dev, "%s setup done\n", __func__);
2480
2481        if (nbufs > 1) {
2482                dev_err(this->dev, "Multiple data instructions not supported\n");
2483                ret = -EINVAL;
2484                goto unmap;
2485        }
2486
2487        if (this->bch) {
2488                writel(this->bch_flashlayout0,
2489                       this->resources.bch_regs + HW_BCH_FLASH0LAYOUT0);
2490                writel(this->bch_flashlayout1,
2491                       this->resources.bch_regs + HW_BCH_FLASH0LAYOUT1);
2492        }
2493
2494        if (this->bch && buf_read) {
2495                writel(BM_BCH_CTRL_COMPLETE_IRQ_EN,
2496                       this->resources.bch_regs + HW_BCH_CTRL_SET);
2497                completion = &this->bch_done;
2498        } else {
2499                desc->callback = dma_irq_callback;
2500                desc->callback_param = this;
2501                completion = &this->dma_done;
2502        }
2503
2504        init_completion(completion);
2505
2506        dmaengine_submit(desc);
2507        dma_async_issue_pending(get_dma_chan(this));
2508
2509        to = wait_for_completion_timeout(completion, msecs_to_jiffies(1000));
2510        if (!to) {
2511                dev_err(this->dev, "DMA timeout, last DMA\n");
2512                gpmi_dump_info(this);
2513                ret = -ETIMEDOUT;
2514                goto unmap;
2515        }
2516
2517        writel(BM_BCH_CTRL_COMPLETE_IRQ_EN,
2518               this->resources.bch_regs + HW_BCH_CTRL_CLR);
2519        gpmi_clear_bch(this);
2520
2521        ret = 0;
2522
2523unmap:
2524        for (i = 0; i < this->ntransfers; i++) {
2525                struct gpmi_transfer *transfer = &this->transfers[i];
2526
2527                if (transfer->direction != DMA_NONE)
2528                        dma_unmap_sg(this->dev, &transfer->sgl, 1,
2529                                     transfer->direction);
2530        }
2531
2532        if (!ret && buf_read && !direct)
2533                memcpy(buf_read, this->data_buffer_dma,
2534                       gpmi_raw_len_to_len(this, buf_len));
2535
2536        this->bch = false;
2537
2538        pm_runtime_mark_last_busy(this->dev);
2539        pm_runtime_put_autosuspend(this->dev);
2540
2541        return ret;
2542}
2543
2544static const struct nand_controller_ops gpmi_nand_controller_ops = {
2545        .attach_chip = gpmi_nand_attach_chip,
2546        .setup_data_interface = gpmi_setup_data_interface,
2547        .exec_op = gpmi_nfc_exec_op,
2548};
2549
2550static int gpmi_nand_init(struct gpmi_nand_data *this)
2551{
2552        struct nand_chip *chip = &this->nand;
2553        struct mtd_info  *mtd = nand_to_mtd(chip);
2554        int ret;
2555
2556        /* init the MTD data structures */
2557        mtd->name               = "gpmi-nand";
2558        mtd->dev.parent         = this->dev;
2559
2560        /* init the nand_chip{}; we don't support a 16-bit NAND Flash bus. */
2561        nand_set_controller_data(chip, this);
2562        nand_set_flash_node(chip, this->pdev->dev.of_node);
2563        chip->legacy.block_markbad = gpmi_block_markbad;
2564        chip->badblock_pattern  = &gpmi_bbt_descr;
2565        chip->options           |= NAND_NO_SUBPAGE_WRITE;
2566
2567        /* Set up swap_block_mark; it must be set before gpmi_set_geometry(). */
2568        this->swap_block_mark = !GPMI_IS_MX23(this);
2569
2570        /*
2571         * Allocate a temporary DMA buffer for reading ID in the
2572         * nand_scan_ident().
2573         */
2574        this->bch_geometry.payload_size = 1024;
2575        this->bch_geometry.auxiliary_size = 128;
2576        ret = gpmi_alloc_dma_buffer(this);
2577        if (ret)
2578                goto err_out;
2579
2580        nand_controller_init(&this->base);
2581        this->base.ops = &gpmi_nand_controller_ops;
2582        chip->controller = &this->base;
2583
2584        ret = nand_scan(chip, GPMI_IS_MX6(this) ? 2 : 1);
2585        if (ret)
2586                goto err_out;
2587
2588        ret = nand_boot_init(this);
2589        if (ret)
2590                goto err_nand_cleanup;
2591        ret = nand_create_bbt(chip);
2592        if (ret)
2593                goto err_nand_cleanup;
2594
2595        ret = mtd_device_register(mtd, NULL, 0);
2596        if (ret)
2597                goto err_nand_cleanup;
2598        return 0;
2599
2600err_nand_cleanup:
2601        nand_cleanup(chip);
2602err_out:
2603        gpmi_free_dma_buffer(this);
2604        return ret;
2605}
2606
2607static const struct of_device_id gpmi_nand_id_table[] = {
2608        {
2609                .compatible = "fsl,imx23-gpmi-nand",
2610                .data = &gpmi_devdata_imx23,
2611        }, {
2612                .compatible = "fsl,imx28-gpmi-nand",
2613                .data = &gpmi_devdata_imx28,
2614        }, {
2615                .compatible = "fsl,imx6q-gpmi-nand",
2616                .data = &gpmi_devdata_imx6q,
2617        }, {
2618                .compatible = "fsl,imx6sx-gpmi-nand",
2619                .data = &gpmi_devdata_imx6sx,
2620        }, {
2621                .compatible = "fsl,imx7d-gpmi-nand",
2622                .data = &gpmi_devdata_imx7d,
2623        }, {}
2624};
2625MODULE_DEVICE_TABLE(of, gpmi_nand_id_table);
2626
2627static int gpmi_nand_probe(struct platform_device *pdev)
2628{
2629        struct gpmi_nand_data *this;
2630        const struct of_device_id *of_id;
2631        int ret;
2632
2633        this = devm_kzalloc(&pdev->dev, sizeof(*this), GFP_KERNEL);
2634        if (!this)
2635                return -ENOMEM;
2636
2637        of_id = of_match_device(gpmi_nand_id_table, &pdev->dev);
2638        if (of_id) {
2639                this->devdata = of_id->data;
2640        } else {
2641                dev_err(&pdev->dev, "Failed to find the right device id.\n");
2642                return -ENODEV;
2643        }
2644
2645        platform_set_drvdata(pdev, this);
2646        this->pdev  = pdev;
2647        this->dev   = &pdev->dev;
2648
2649        ret = acquire_resources(this);
2650        if (ret)
2651                goto exit_acquire_resources;
2652
2653        ret = __gpmi_enable_clk(this, true);
2654        if (ret)
2655                goto exit_nfc_init;
2656
2657        pm_runtime_set_autosuspend_delay(&pdev->dev, 500);
2658        pm_runtime_use_autosuspend(&pdev->dev);
2659        pm_runtime_set_active(&pdev->dev);
2660        pm_runtime_enable(&pdev->dev);
2661        pm_runtime_get_sync(&pdev->dev);
2662
2663        ret = gpmi_init(this);
2664        if (ret)
2665                goto exit_nfc_init;
2666
2667        ret = gpmi_nand_init(this);
2668        if (ret)
2669                goto exit_nfc_init;
2670
2671        pm_runtime_mark_last_busy(&pdev->dev);
2672        pm_runtime_put_autosuspend(&pdev->dev);
2673
2674        dev_info(this->dev, "driver registered.\n");
2675
2676        return 0;
2677
2678exit_nfc_init:
2679        pm_runtime_put(&pdev->dev);
2680        pm_runtime_disable(&pdev->dev);
2681        release_resources(this);
2682exit_acquire_resources:
2683
2684        return ret;
2685}
2686
2687static int gpmi_nand_remove(struct platform_device *pdev)
2688{
2689        struct gpmi_nand_data *this = platform_get_drvdata(pdev);
2690
2691        pm_runtime_put_sync(&pdev->dev);
2692        pm_runtime_disable(&pdev->dev);
2693
2694        nand_release(&this->nand);
2695        gpmi_free_dma_buffer(this);
2696        release_resources(this);
2697        return 0;
2698}
2699
2700#ifdef CONFIG_PM_SLEEP
2701static int gpmi_pm_suspend(struct device *dev)
2702{
2703        struct gpmi_nand_data *this = dev_get_drvdata(dev);
2704
2705        release_dma_channels(this);
2706        return 0;
2707}
2708
2709static int gpmi_pm_resume(struct device *dev)
2710{
2711        struct gpmi_nand_data *this = dev_get_drvdata(dev);
2712        int ret;
2713
2714        ret = acquire_dma_channels(this);
2715        if (ret < 0)
2716                return ret;
2717
2718        /* re-init the GPMI registers */
2719        ret = gpmi_init(this);
2720        if (ret) {
2721                dev_err(this->dev, "Error setting GPMI : %d\n", ret);
2722                return ret;
2723        }
2724
2725        /* re-init the BCH registers */
2726        ret = bch_set_geometry(this);
2727        if (ret) {
2728                dev_err(this->dev, "Error setting BCH : %d\n", ret);
2729                return ret;
2730        }
2731
2732        return 0;
2733}
2734#endif /* CONFIG_PM_SLEEP */
2735
2736static int __maybe_unused gpmi_runtime_suspend(struct device *dev)
2737{
2738        struct gpmi_nand_data *this = dev_get_drvdata(dev);
2739
2740        return __gpmi_enable_clk(this, false);
2741}
2742
2743static int __maybe_unused gpmi_runtime_resume(struct device *dev)
2744{
2745        struct gpmi_nand_data *this = dev_get_drvdata(dev);
2746
2747        return __gpmi_enable_clk(this, true);
2748}
2749
2750static const struct dev_pm_ops gpmi_pm_ops = {
2751        SET_SYSTEM_SLEEP_PM_OPS(gpmi_pm_suspend, gpmi_pm_resume)
2752        SET_RUNTIME_PM_OPS(gpmi_runtime_suspend, gpmi_runtime_resume, NULL)
2753};
2754
2755static struct platform_driver gpmi_nand_driver = {
2756        .driver = {
2757                .name = "gpmi-nand",
2758                .pm = &gpmi_pm_ops,
2759                .of_match_table = gpmi_nand_id_table,
2760        },
2761        .probe   = gpmi_nand_probe,
2762        .remove  = gpmi_nand_remove,
2763};
2764module_platform_driver(gpmi_nand_driver);
2765
2766MODULE_AUTHOR("Freescale Semiconductor, Inc.");
2767MODULE_DESCRIPTION("i.MX GPMI NAND Flash Controller Driver");
2768MODULE_LICENSE("GPL");
2769