linux/drivers/mtd/nand/raw/nand_base.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  Overview:
   4 *   This is the generic MTD driver for NAND flash devices. It should be
   5 *   capable of working with almost all NAND chips currently available.
   6 *
   7 *      Additional technical information is available on
   8 *      http://www.linux-mtd.infradead.org/doc/nand.html
   9 *
  10 *  Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
  11 *                2002-2006 Thomas Gleixner (tglx@linutronix.de)
  12 *
  13 *  Credits:
  14 *      David Woodhouse for adding multichip support
  15 *
  16 *      Aleph One Ltd. and Toby Churchill Ltd. for supporting the
  17 *      rework for 2K page size chips
  18 *
  19 *  TODO:
  20 *      Enable cached programming for 2k page size chips
  21 *      Check whether mtd->ecctype should be set to MTD_ECC_HW
  22 *      if we have HW ECC support.
  23 *      The BBT is not serialized; this has to be fixed.
  24 */
  25
  26#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  27
  28#include <linux/module.h>
  29#include <linux/delay.h>
  30#include <linux/errno.h>
  31#include <linux/err.h>
  32#include <linux/sched.h>
  33#include <linux/slab.h>
  34#include <linux/mm.h>
  35#include <linux/types.h>
  36#include <linux/mtd/mtd.h>
  37#include <linux/mtd/nand_ecc.h>
  38#include <linux/mtd/nand_bch.h>
  39#include <linux/interrupt.h>
  40#include <linux/bitops.h>
  41#include <linux/io.h>
  42#include <linux/mtd/partitions.h>
  43#include <linux/of.h>
  44#include <linux/gpio/consumer.h>
  45
  46#include "internals.h"
  47
  48/* Define default oob placement schemes for large and small page devices */
  49static int nand_ooblayout_ecc_sp(struct mtd_info *mtd, int section,
  50                                 struct mtd_oob_region *oobregion)
  51{
  52        struct nand_chip *chip = mtd_to_nand(mtd);
  53        struct nand_ecc_ctrl *ecc = &chip->ecc;
  54
  55        if (section > 1)
  56                return -ERANGE;
  57
  58        if (!section) {
  59                oobregion->offset = 0;
  60                if (mtd->oobsize == 16)
  61                        oobregion->length = 4;
  62                else
  63                        oobregion->length = 3;
  64        } else {
  65                if (mtd->oobsize == 8)
  66                        return -ERANGE;
  67
  68                oobregion->offset = 6;
  69                oobregion->length = ecc->total - 4;
  70        }
  71
  72        return 0;
  73}
  74
  75static int nand_ooblayout_free_sp(struct mtd_info *mtd, int section,
  76                                  struct mtd_oob_region *oobregion)
  77{
  78        if (section > 1)
  79                return -ERANGE;
  80
  81        if (mtd->oobsize == 16) {
  82                if (section)
  83                        return -ERANGE;
  84
  85                oobregion->length = 8;
  86                oobregion->offset = 8;
  87        } else {
  88                oobregion->length = 2;
  89                if (!section)
  90                        oobregion->offset = 3;
  91                else
  92                        oobregion->offset = 6;
  93        }
  94
  95        return 0;
  96}
  97
  98const struct mtd_ooblayout_ops nand_ooblayout_sp_ops = {
  99        .ecc = nand_ooblayout_ecc_sp,
 100        .free = nand_ooblayout_free_sp,
 101};
 102EXPORT_SYMBOL_GPL(nand_ooblayout_sp_ops);
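
/*
 * Illustrative sketch, not part of the original listing: a raw NAND
 * controller driver that relies on the default small-page placement would
 * typically attach nand_ooblayout_sp_ops with mtd_set_ooblayout() and can
 * then walk the ECC regions through the generic mtd_ooblayout_ecc()
 * accessor. The function name below is hypothetical.
 */
static void __maybe_unused example_use_sp_ooblayout(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct mtd_oob_region region;
	int section = 0;

	/* Attach the default small-page OOB layout to the MTD device. */
	mtd_set_ooblayout(mtd, &nand_ooblayout_sp_ops);

	/* Dump every ECC region described by the layout. */
	while (!mtd_ooblayout_ecc(mtd, section++, &region))
		pr_debug("ECC bytes at OOB offset %u, length %u\n",
			 region.offset, region.length);
}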
 103
 104static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section,
 105                                 struct mtd_oob_region *oobregion)
 106{
 107        struct nand_chip *chip = mtd_to_nand(mtd);
 108        struct nand_ecc_ctrl *ecc = &chip->ecc;
 109
 110        if (section || !ecc->total)
 111                return -ERANGE;
 112
 113        oobregion->length = ecc->total;
 114        oobregion->offset = mtd->oobsize - oobregion->length;
 115
 116        return 0;
 117}
 118
 119static int nand_ooblayout_free_lp(struct mtd_info *mtd, int section,
 120                                  struct mtd_oob_region *oobregion)
 121{
 122        struct nand_chip *chip = mtd_to_nand(mtd);
 123        struct nand_ecc_ctrl *ecc = &chip->ecc;
 124
 125        if (section)
 126                return -ERANGE;
 127
 128        oobregion->length = mtd->oobsize - ecc->total - 2;
 129        oobregion->offset = 2;
 130
 131        return 0;
 132}
 133
 134const struct mtd_ooblayout_ops nand_ooblayout_lp_ops = {
 135        .ecc = nand_ooblayout_ecc_lp,
 136        .free = nand_ooblayout_free_lp,
 137};
 138EXPORT_SYMBOL_GPL(nand_ooblayout_lp_ops);
 139
 140/*
 141 * Support the old "large page" layout used for 1-bit Hamming ECC, where the
 142 * ECC bytes are placed at a fixed offset.
 143 */
 144static int nand_ooblayout_ecc_lp_hamming(struct mtd_info *mtd, int section,
 145                                         struct mtd_oob_region *oobregion)
 146{
 147        struct nand_chip *chip = mtd_to_nand(mtd);
 148        struct nand_ecc_ctrl *ecc = &chip->ecc;
 149
 150        if (section)
 151                return -ERANGE;
 152
 153        switch (mtd->oobsize) {
 154        case 64:
 155                oobregion->offset = 40;
 156                break;
 157        case 128:
 158                oobregion->offset = 80;
 159                break;
 160        default:
 161                return -EINVAL;
 162        }
 163
 164        oobregion->length = ecc->total;
 165        if (oobregion->offset + oobregion->length > mtd->oobsize)
 166                return -ERANGE;
 167
 168        return 0;
 169}
 170
 171static int nand_ooblayout_free_lp_hamming(struct mtd_info *mtd, int section,
 172                                          struct mtd_oob_region *oobregion)
 173{
 174        struct nand_chip *chip = mtd_to_nand(mtd);
 175        struct nand_ecc_ctrl *ecc = &chip->ecc;
 176        int ecc_offset = 0;
 177
 178        if (section < 0 || section > 1)
 179                return -ERANGE;
 180
 181        switch (mtd->oobsize) {
 182        case 64:
 183                ecc_offset = 40;
 184                break;
 185        case 128:
 186                ecc_offset = 80;
 187                break;
 188        default:
 189                return -EINVAL;
 190        }
 191
 192        if (section == 0) {
 193                oobregion->offset = 2;
 194                oobregion->length = ecc_offset - 2;
 195        } else {
 196                oobregion->offset = ecc_offset + ecc->total;
 197                oobregion->length = mtd->oobsize - oobregion->offset;
 198        }
 199
 200        return 0;
 201}
 202
 203static const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = {
 204        .ecc = nand_ooblayout_ecc_lp_hamming,
 205        .free = nand_ooblayout_free_lp_hamming,
 206};
 207
 208static int check_offs_len(struct nand_chip *chip, loff_t ofs, uint64_t len)
 209{
 210        int ret = 0;
 211
 212        /* Start address must align on block boundary */
 213        if (ofs & ((1ULL << chip->phys_erase_shift) - 1)) {
 214                pr_debug("%s: unaligned address\n", __func__);
 215                ret = -EINVAL;
 216        }
 217
 218        /* Length must align on block boundary */
 219        if (len & ((1ULL << chip->phys_erase_shift) - 1)) {
 220                pr_debug("%s: length not block aligned\n", __func__);
 221                ret = -EINVAL;
 222        }
 223
 224        return ret;
 225}
 226
 227/**
 228 * nand_select_target() - Select a NAND target (A.K.A. die)
 229 * @chip: NAND chip object
 230 * @cs: the CS line to select. Note that this CS id is always from the chip's
 231 *      point of view, not the controller's
 232 *
 233 * Select a NAND target so that further operations executed on @chip go to the
 234 * selected NAND target.
 235 */
 236void nand_select_target(struct nand_chip *chip, unsigned int cs)
 237{
 238        /*
 239         * cs should always lie between 0 and nanddev_ntargets(); when that's
 240         * not the case it's a bug and the caller should be fixed.
 241         */
 242        if (WARN_ON(cs > nanddev_ntargets(&chip->base)))
 243                return;
 244
 245        chip->cur_cs = cs;
 246
 247        if (chip->legacy.select_chip)
 248                chip->legacy.select_chip(chip, cs);
 249}
 250EXPORT_SYMBOL_GPL(nand_select_target);
 251
 252/**
 253 * nand_deselect_target() - Deselect the currently selected target
 254 * @chip: NAND chip object
 255 *
 256 * Deselect the currently selected NAND target. The result of operations
 257 * executed on @chip after the target has been deselected is undefined.
 258 */
 259void nand_deselect_target(struct nand_chip *chip)
 260{
 261        if (chip->legacy.select_chip)
 262                chip->legacy.select_chip(chip, -1);
 263
 264        chip->cur_cs = -1;
 265}
 266EXPORT_SYMBOL_GPL(nand_deselect_target);
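
/*
 * Illustrative sketch, not part of the original listing: accesses to a given
 * die are bracketed by nand_select_target()/nand_deselect_target(), exactly
 * as the core helpers later in this file do. The function name is
 * hypothetical; nand_readid_op() is declared in <linux/mtd/rawnand.h> and
 * defined further down.
 */
static int __maybe_unused example_read_id_of_die(struct nand_chip *chip,
						 unsigned int die,
						 u8 *id, unsigned int len)
{
	int ret;

	nand_select_target(chip, die);
	ret = nand_readid_op(chip, 0, id, len);
	nand_deselect_target(chip);

	return ret;
}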
 267
 268/**
 269 * nand_release_device - [GENERIC] release chip
 270 * @chip: NAND chip object
 271 *
 272 * Release chip lock and wake up anyone waiting on the device.
 273 */
 274static void nand_release_device(struct nand_chip *chip)
 275{
 276        /* Release the controller and the chip */
 277        mutex_unlock(&chip->controller->lock);
 278        mutex_unlock(&chip->lock);
 279}
 280
 281/**
 282 * nand_bbm_get_next_page - Get the next page for bad block markers
 283 * @chip: NAND chip object
 284 * @page: First page to start checking for bad block marker usage
 285 *
 286 * Returns an integer that corresponds to the page offset within a block, for
 287 * a page that is used to store bad block markers. If no more pages are
 288 * available, -EINVAL is returned.
 289 */
 290int nand_bbm_get_next_page(struct nand_chip *chip, int page)
 291{
 292        struct mtd_info *mtd = nand_to_mtd(chip);
 293        int last_page = ((mtd->erasesize - mtd->writesize) >>
 294                         chip->page_shift) & chip->pagemask;
 295
 296        if (page == 0 && chip->options & NAND_BBM_FIRSTPAGE)
 297                return 0;
 298        else if (page <= 1 && chip->options & NAND_BBM_SECONDPAGE)
 299                return 1;
 300        else if (page <= last_page && chip->options & NAND_BBM_LASTPAGE)
 301                return last_page;
 302
 303        return -EINVAL;
 304}
 305
 306/**
 307 * nand_block_bad - [DEFAULT] Read bad block marker from the chip
 308 * @chip: NAND chip object
 309 * @ofs: offset from device start
 310 *
 311 * Check if the block is bad.
 312 */
 313static int nand_block_bad(struct nand_chip *chip, loff_t ofs)
 314{
 315        int first_page, page_offset;
 316        int res;
 317        u8 bad;
 318
 319        first_page = (int)(ofs >> chip->page_shift) & chip->pagemask;
 320        page_offset = nand_bbm_get_next_page(chip, 0);
 321
 322        while (page_offset >= 0) {
 323                res = chip->ecc.read_oob(chip, first_page + page_offset);
 324                if (res < 0)
 325                        return res;
 326
 327                bad = chip->oob_poi[chip->badblockpos];
 328
 329                if (likely(chip->badblockbits == 8))
 330                        res = bad != 0xFF;
 331                else
 332                        res = hweight8(bad) < chip->badblockbits;
 333                if (res)
 334                        return res;
 335
 336                page_offset = nand_bbm_get_next_page(chip, page_offset + 1);
 337        }
 338
 339        return 0;
 340}
 341
 342static int nand_isbad_bbm(struct nand_chip *chip, loff_t ofs)
 343{
 344        if (chip->legacy.block_bad)
 345                return chip->legacy.block_bad(chip, ofs);
 346
 347        return nand_block_bad(chip, ofs);
 348}
 349
 350/**
 351 * nand_get_device - [GENERIC] Get chip for selected access
 352 * @chip: NAND chip structure
 353 *
 354 * Lock the device and its controller for exclusive access
 355 *
 356 * Return: -EBUSY if the chip has been suspended, 0 otherwise
 357 */
 358static int nand_get_device(struct nand_chip *chip)
 359{
 360        mutex_lock(&chip->lock);
 361        if (chip->suspended) {
 362                mutex_unlock(&chip->lock);
 363                return -EBUSY;
 364        }
 365        mutex_lock(&chip->controller->lock);
 366
 367        return 0;
 368}
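
/*
 * Illustrative sketch, not part of the original listing: the MTD entry
 * points built on top of these helpers follow the pattern
 * "nand_get_device(), select the die, do the work, deselect,
 * nand_release_device()". The function name is hypothetical.
 */
static int __maybe_unused example_isbad_locked(struct nand_chip *chip,
					       loff_t ofs)
{
	int chipnr = (int)(ofs >> chip->chip_shift);
	int ret;

	/* Grab the chip and controller locks; fails if the chip is suspended. */
	ret = nand_get_device(chip);
	if (ret)
		return ret;

	nand_select_target(chip, chipnr);
	ret = nand_isbad_bbm(chip, ofs);
	nand_deselect_target(chip);

	nand_release_device(chip);

	return ret;
}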
 369
 370/**
 371 * nand_check_wp - [GENERIC] check if the chip is write protected
 372 * @chip: NAND chip object
 373 *
 374 * Check if the device is write protected. The function expects that the
 375 * device is already selected.
 376 */
 377static int nand_check_wp(struct nand_chip *chip)
 378{
 379        u8 status;
 380        int ret;
 381
 382        /* Broken xD cards report WP despite being writable */
 383        if (chip->options & NAND_BROKEN_XD)
 384                return 0;
 385
 386        /* Check the WP bit */
 387        ret = nand_status_op(chip, &status);
 388        if (ret)
 389                return ret;
 390
 391        return status & NAND_STATUS_WP ? 0 : 1;
 392}
 393
 394/**
 395 * nand_fill_oob - [INTERN] Transfer client buffer to oob
 396 * @chip: NAND chip object
 397 * @oob: oob data buffer
 398 * @len: oob data write length
 399 * @ops: oob ops structure
 400 */
 401static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, size_t len,
 402                              struct mtd_oob_ops *ops)
 403{
 404        struct mtd_info *mtd = nand_to_mtd(chip);
 405        int ret;
 406
 407        /*
 408         * Initialise to all 0xFF, to avoid the possibility of leftover OOB
 409         * data from a previous OOB read.
 410         */
 411        memset(chip->oob_poi, 0xff, mtd->oobsize);
 412
 413        switch (ops->mode) {
 414
 415        case MTD_OPS_PLACE_OOB:
 416        case MTD_OPS_RAW:
 417                memcpy(chip->oob_poi + ops->ooboffs, oob, len);
 418                return oob + len;
 419
 420        case MTD_OPS_AUTO_OOB:
 421                ret = mtd_ooblayout_set_databytes(mtd, oob, chip->oob_poi,
 422                                                  ops->ooboffs, len);
 423                BUG_ON(ret);
 424                return oob + len;
 425
 426        default:
 427                BUG();
 428        }
 429        return NULL;
 430}
 431
 432/**
 433 * nand_do_write_oob - [MTD Interface] NAND write out-of-band
 434 * @chip: NAND chip object
 435 * @to: offset to write to
 436 * @ops: oob operation description structure
 437 *
 438 * NAND write out-of-band.
 439 */
 440static int nand_do_write_oob(struct nand_chip *chip, loff_t to,
 441                             struct mtd_oob_ops *ops)
 442{
 443        struct mtd_info *mtd = nand_to_mtd(chip);
 444        int chipnr, page, status, len, ret;
 445
 446        pr_debug("%s: to = 0x%08x, len = %i\n",
 447                         __func__, (unsigned int)to, (int)ops->ooblen);
 448
 449        len = mtd_oobavail(mtd, ops);
 450
 451        /* Do not allow write past end of page */
 452        if ((ops->ooboffs + ops->ooblen) > len) {
 453                pr_debug("%s: attempt to write past end of page\n",
 454                                __func__);
 455                return -EINVAL;
 456        }
 457
 458        chipnr = (int)(to >> chip->chip_shift);
 459
 460        /*
 461         * Reset the chip. Some chips (like the Toshiba TC5832DC found in one
 462         * of my DiskOnChip 2000 test units) will clear the whole data page too
 463         * if we don't do this. I have no clue why, but I seem to have 'fixed'
 464         * it in the doc2000 driver in August 1999.  dwmw2.
 465         */
 466        ret = nand_reset(chip, chipnr);
 467        if (ret)
 468                return ret;
 469
 470        nand_select_target(chip, chipnr);
 471
 472        /* Shift to get page */
 473        page = (int)(to >> chip->page_shift);
 474
 475        /* Check if it is write protected */
 476        if (nand_check_wp(chip)) {
 477                nand_deselect_target(chip);
 478                return -EROFS;
 479        }
 480
 481        /* Invalidate the page cache if we write to the cached page */
 482        if (page == chip->pagecache.page)
 483                chip->pagecache.page = -1;
 484
 485        nand_fill_oob(chip, ops->oobbuf, ops->ooblen, ops);
 486
 487        if (ops->mode == MTD_OPS_RAW)
 488                status = chip->ecc.write_oob_raw(chip, page & chip->pagemask);
 489        else
 490                status = chip->ecc.write_oob(chip, page & chip->pagemask);
 491
 492        nand_deselect_target(chip);
 493
 494        if (status)
 495                return status;
 496
 497        ops->oobretlen = ops->ooblen;
 498
 499        return 0;
 500}
 501
 502/**
 503 * nand_default_block_markbad - [DEFAULT] mark a block bad via bad block marker
 504 * @chip: NAND chip object
 505 * @ofs: offset from device start
 506 *
 507 * This is the default implementation, which can be overridden by a hardware
 508 * specific driver. It provides the details for writing a bad block marker to a
 509 * block.
 510 */
 511static int nand_default_block_markbad(struct nand_chip *chip, loff_t ofs)
 512{
 513        struct mtd_info *mtd = nand_to_mtd(chip);
 514        struct mtd_oob_ops ops;
 515        uint8_t buf[2] = { 0, 0 };
 516        int ret = 0, res, page_offset;
 517
 518        memset(&ops, 0, sizeof(ops));
 519        ops.oobbuf = buf;
 520        ops.ooboffs = chip->badblockpos;
 521        if (chip->options & NAND_BUSWIDTH_16) {
 522                ops.ooboffs &= ~0x01;
 523                ops.len = ops.ooblen = 2;
 524        } else {
 525                ops.len = ops.ooblen = 1;
 526        }
 527        ops.mode = MTD_OPS_PLACE_OOB;
 528
 529        page_offset = nand_bbm_get_next_page(chip, 0);
 530
 531        while (page_offset >= 0) {
 532                res = nand_do_write_oob(chip,
 533                                        ofs + (page_offset * mtd->writesize),
 534                                        &ops);
 535
 536                if (!ret)
 537                        ret = res;
 538
 539                page_offset = nand_bbm_get_next_page(chip, page_offset + 1);
 540        }
 541
 542        return ret;
 543}
 544
 545/**
 546 * nand_markbad_bbm - mark a block by updating the BBM
 547 * @chip: NAND chip object
 548 * @ofs: offset of the block to mark bad
 549 */
 550int nand_markbad_bbm(struct nand_chip *chip, loff_t ofs)
 551{
 552        if (chip->legacy.block_markbad)
 553                return chip->legacy.block_markbad(chip, ofs);
 554
 555        return nand_default_block_markbad(chip, ofs);
 556}
 557
 558/**
 559 * nand_block_markbad_lowlevel - mark a block bad
 560 * @chip: NAND chip object
 561 * @ofs: offset from device start
 562 *
 563 * This function performs the generic NAND bad block marking steps (i.e., bad
 564 * block table(s) and/or marker(s)). We only allow the hardware driver to
 565 * specify how to write bad block markers to OOB (chip->legacy.block_markbad).
 566 *
 567 * We try operations in the following order:
 568 *
 569 *  (1) erase the affected block, to allow OOB marker to be written cleanly
 570 *  (2) write bad block marker to OOB area of affected block (unless flag
 571 *      NAND_BBT_NO_OOB_BBM is present)
 572 *  (3) update the BBT
 573 *
 574 * Note that we retain the first error encountered in (2) or (3), finish the
 575 * procedures, and dump the error in the end.
 576 */
 577static int nand_block_markbad_lowlevel(struct nand_chip *chip, loff_t ofs)
 578{
 579        struct mtd_info *mtd = nand_to_mtd(chip);
 580        int res, ret = 0;
 581
 582        if (!(chip->bbt_options & NAND_BBT_NO_OOB_BBM)) {
 583                struct erase_info einfo;
 584
 585                /* Attempt erase before marking OOB */
 586                memset(&einfo, 0, sizeof(einfo));
 587                einfo.addr = ofs;
 588                einfo.len = 1ULL << chip->phys_erase_shift;
 589                nand_erase_nand(chip, &einfo, 0);
 590
 591                /* Write bad block marker to OOB */
 592                ret = nand_get_device(chip);
 593                if (ret)
 594                        return ret;
 595
 596                ret = nand_markbad_bbm(chip, ofs);
 597                nand_release_device(chip);
 598        }
 599
 600        /* Mark block bad in BBT */
 601        if (chip->bbt) {
 602                res = nand_markbad_bbt(chip, ofs);
 603                if (!ret)
 604                        ret = res;
 605        }
 606
 607        if (!ret)
 608                mtd->ecc_stats.badblocks++;
 609
 610        return ret;
 611}
 612
 613/**
 614 * nand_block_isreserved - [GENERIC] Check if a block is marked reserved.
 615 * @mtd: MTD device structure
 616 * @ofs: offset from device start
 617 *
 618 * Check if the block is marked as reserved.
 619 */
 620static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs)
 621{
 622        struct nand_chip *chip = mtd_to_nand(mtd);
 623
 624        if (!chip->bbt)
 625                return 0;
 626        /* Return info from the table */
 627        return nand_isreserved_bbt(chip, ofs);
 628}
 629
 630/**
 631 * nand_block_checkbad - [GENERIC] Check if a block is marked bad
 632 * @chip: NAND chip object
 633 * @ofs: offset from device start
 634 * @allowbbt: 1, if it is allowed to access the BBT area
 635 *
 636 * Check if the block is bad, either by reading the bad block table or by
 637 * calling the scan function.
 638 */
 639static int nand_block_checkbad(struct nand_chip *chip, loff_t ofs, int allowbbt)
 640{
 641        /* Return info from the table */
 642        if (chip->bbt)
 643                return nand_isbad_bbt(chip, ofs, allowbbt);
 644
 645        return nand_isbad_bbm(chip, ofs);
 646}
 647
 648/**
 649 * nand_soft_waitrdy - Poll STATUS reg until RDY bit is set to 1
 650 * @chip: NAND chip structure
 651 * @timeout_ms: Timeout in ms
 652 *
 653 * Poll the STATUS register using ->exec_op() until the RDY bit becomes 1.
 654 * If that does not happen within the specified timeout, -ETIMEDOUT is
 655 * returned.
 656 *
 657 * This helper is intended to be used when the controller does not have access
 658 * to the NAND R/B pin.
 659 *
 660 * Be aware that calling this helper from an ->exec_op() implementation means
 661 * ->exec_op() must be re-entrant.
 662 *
 663 * Return 0 if the NAND chip is ready, a negative error otherwise.
 664 */
 665int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
 666{
 667        const struct nand_sdr_timings *timings;
 668        u8 status = 0;
 669        int ret;
 670
 671        if (!nand_has_exec_op(chip))
 672                return -ENOTSUPP;
 673
 674        /* Wait tWB before polling the STATUS reg. */
 675        timings = nand_get_sdr_timings(&chip->data_interface);
 676        ndelay(PSEC_TO_NSEC(timings->tWB_max));
 677
 678        ret = nand_status_op(chip, NULL);
 679        if (ret)
 680                return ret;
 681
 682        timeout_ms = jiffies + msecs_to_jiffies(timeout_ms);
 683        do {
 684                ret = nand_read_data_op(chip, &status, sizeof(status), true);
 685                if (ret)
 686                        break;
 687
 688                if (status & NAND_STATUS_READY)
 689                        break;
 690
 691                /*
 692                 * Typical lowest execution time for a tR on most NANDs is 10us,
 693                 * use this as polling delay before doing something smarter (ie.
 694                 * deriving a delay from the timeout value, timeout_ms/ratio).
 695                 */
 696                udelay(10);
 697        } while (time_before(jiffies, timeout_ms));
 698
 699        /*
 700         * We have to exit READ_STATUS mode in order to read real data on the
 701         * bus in case the WAITRDY instruction is preceding a DATA_IN
 702         * instruction.
 703         */
 704        nand_exit_status_op(chip);
 705
 706        if (ret)
 707                return ret;
 708
 709        return status & NAND_STATUS_READY ? 0 : -ETIMEDOUT;
 710};
 711EXPORT_SYMBOL_GPL(nand_soft_waitrdy);
 712
 713/**
 714 * nand_gpio_waitrdy - Poll R/B GPIO pin until ready
 715 * @chip: NAND chip structure
 716 * @gpiod: GPIO descriptor of R/B pin
 717 * @timeout_ms: Timeout in ms
 718 *
 719 * Poll the R/B GPIO pin until it becomes ready. If that does not happen
 720 * within the specified timeout, -ETIMEDOUT is returned.
 721 *
 722 * This helper is intended to be used when the controller has access to the
 723 * NAND R/B pin over GPIO.
 724 *
 725 * Return 0 if the R/B pin indicates chip is ready, a negative error otherwise.
 726 */
 727int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod,
 728                      unsigned long timeout_ms)
 729{
 730        /* Wait until R/B pin indicates chip is ready or timeout occurs */
 731        timeout_ms = jiffies + msecs_to_jiffies(timeout_ms);
 732        do {
 733                if (gpiod_get_value_cansleep(gpiod))
 734                        return 0;
 735
 736                cond_resched();
 737        } while (time_before(jiffies, timeout_ms));
 738
 739        return gpiod_get_value_cansleep(gpiod) ? 0 : -ETIMEDOUT;
 740};
 741EXPORT_SYMBOL_GPL(nand_gpio_waitrdy);
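
/*
 * Illustrative sketch, not part of the original listing: inside a
 * controller's ->exec_op() hook, a wait-ready instruction is typically
 * handled with one of the two helpers above, depending on whether the R/B
 * pin is wired to a GPIO. The function and the rb_gpiod parameter are
 * hypothetical; the NAND_OP_WAITRDY_INSTR context layout is assumed from
 * <linux/mtd/rawnand.h>.
 */
static int __maybe_unused example_handle_waitrdy(struct nand_chip *chip,
						 const struct nand_op_instr *instr,
						 struct gpio_desc *rb_gpiod)
{
	unsigned int timeout_ms = instr->ctx.waitrdy.timeout_ms;

	if (rb_gpiod)
		return nand_gpio_waitrdy(chip, rb_gpiod, timeout_ms);

	/* No R/B line available: poll the STATUS register instead. */
	return nand_soft_waitrdy(chip, timeout_ms);
}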
 742
 743/**
 744 * panic_nand_wait - [GENERIC] wait until the command is done
 745 * @chip: NAND chip structure
 746 * @timeo: timeout
 747 *
 748 * Wait for the command to complete. This is a helper function for nand_wait,
 749 * used when we are in interrupt context. This may happen in a panic, when
 750 * trying to write an oops through mtdoops.
 751 */
 752void panic_nand_wait(struct nand_chip *chip, unsigned long timeo)
 753{
 754        int i;
 755        for (i = 0; i < timeo; i++) {
 756                if (chip->legacy.dev_ready) {
 757                        if (chip->legacy.dev_ready(chip))
 758                                break;
 759                } else {
 760                        int ret;
 761                        u8 status;
 762
 763                        ret = nand_read_data_op(chip, &status, sizeof(status),
 764                                                true);
 765                        if (ret)
 766                                return;
 767
 768                        if (status & NAND_STATUS_READY)
 769                                break;
 770                }
 771                mdelay(1);
 772        }
 773}
 774
 775static bool nand_supports_get_features(struct nand_chip *chip, int addr)
 776{
 777        return (chip->parameters.supports_set_get_features &&
 778                test_bit(addr, chip->parameters.get_feature_list));
 779}
 780
 781static bool nand_supports_set_features(struct nand_chip *chip, int addr)
 782{
 783        return (chip->parameters.supports_set_get_features &&
 784                test_bit(addr, chip->parameters.set_feature_list));
 785}
 786
 787/**
 788 * nand_reset_data_interface - Reset data interface and timings
 789 * @chip: The NAND chip
 790 * @chipnr: Internal die id
 791 *
 792 * Reset the data interface and timings to ONFI mode 0.
 793 *
 794 * Returns 0 for success or negative error code otherwise.
 795 */
 796static int nand_reset_data_interface(struct nand_chip *chip, int chipnr)
 797{
 798        int ret;
 799
 800        if (!nand_has_setup_data_iface(chip))
 801                return 0;
 802
 803        /*
 804         * The ONFI specification says:
 805         * "
 806         * To transition from NV-DDR or NV-DDR2 to the SDR data
 807         * interface, the host shall use the Reset (FFh) command
 808         * using SDR timing mode 0. A device in any timing mode is
 809         * required to recognize Reset (FFh) command issued in SDR
 810         * timing mode 0.
 811         * "
 812         *
 813         * Configure the data interface in SDR mode and set the
 814         * timings to timing mode 0.
 815         */
 816
 817        onfi_fill_data_interface(chip, NAND_SDR_IFACE, 0);
 818        ret = chip->controller->ops->setup_data_interface(chip, chipnr,
 819                                                        &chip->data_interface);
 820        if (ret)
 821                pr_err("Failed to configure data interface to SDR timing mode 0\n");
 822
 823        return ret;
 824}
 825
 826/**
 827 * nand_setup_data_interface - Setup the best data interface and timings
 828 * @chip: The NAND chip
 829 * @chipnr: Internal die id
 830 *
 831 * Find and configure the best data interface and NAND timings supported by
 832 * the chip and the driver.
 833 * First tries to retrieve supported timing modes from ONFI information,
 834 * and if the NAND chip does not support ONFI, relies on the
 835 * ->onfi_timing_mode_default specified in the nand_ids table.
 836 *
 837 * Returns 0 for success or negative error code otherwise.
 838 */
 839static int nand_setup_data_interface(struct nand_chip *chip, int chipnr)
 840{
 841        u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = {
 842                chip->onfi_timing_mode_default,
 843        };
 844        int ret;
 845
 846        if (!nand_has_setup_data_iface(chip))
 847                return 0;
 848
 849        /* Change the mode on the chip side (if supported by the NAND chip) */
 850        if (nand_supports_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE)) {
 851                nand_select_target(chip, chipnr);
 852                ret = nand_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
 853                                        tmode_param);
 854                nand_deselect_target(chip);
 855                if (ret)
 856                        return ret;
 857        }
 858
 859        /* Change the mode on the controller side */
 860        ret = chip->controller->ops->setup_data_interface(chip, chipnr,
 861                                                        &chip->data_interface);
 862        if (ret)
 863                return ret;
 864
 865        /* Check the mode has been accepted by the chip, if supported */
 866        if (!nand_supports_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE))
 867                return 0;
 868
 869        memset(tmode_param, 0, ONFI_SUBFEATURE_PARAM_LEN);
 870        nand_select_target(chip, chipnr);
 871        ret = nand_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
 872                                tmode_param);
 873        nand_deselect_target(chip);
 874        if (ret)
 875                goto err_reset_chip;
 876
 877        if (tmode_param[0] != chip->onfi_timing_mode_default) {
 878                pr_warn("timing mode %d not acknowledged by the NAND chip\n",
 879                        chip->onfi_timing_mode_default);
 880                goto err_reset_chip;
 881        }
 882
 883        return 0;
 884
 885err_reset_chip:
 886        /*
 887         * Fall back to mode 0 if the chip explicitly did not ack the chosen
 888         * timing mode.
 889         */
 890        nand_reset_data_interface(chip, chipnr);
 891        nand_select_target(chip, chipnr);
 892        nand_reset_op(chip);
 893        nand_deselect_target(chip);
 894
 895        return ret;
 896}
 897
 898/**
 899 * nand_init_data_interface - find the best data interface and timings
 900 * @chip: The NAND chip
 901 *
 902 * Find the best data interface and NAND timings supported by the chip
 903 * and the driver.
 904 * First tries to retrieve supported timing modes from ONFI information,
 905 * and if the NAND chip does not support ONFI, relies on the
 906 * ->onfi_timing_mode_default specified in the nand_ids table. After this
 907 * function nand_chip->data_interface is initialized with the best timing mode
 908 * available.
 909 *
 910 * Returns 0 for success or negative error code otherwise.
 911 */
 912static int nand_init_data_interface(struct nand_chip *chip)
 913{
 914        int modes, mode, ret;
 915
 916        if (!nand_has_setup_data_iface(chip))
 917                return 0;
 918
 919        /*
 920         * First try to identify the best timings from ONFI parameters and
 921         * if the NAND does not support ONFI, fall back to the default ONFI
 922         * timing mode.
 923         */
 924        if (chip->parameters.onfi) {
 925                modes = chip->parameters.onfi->async_timing_mode;
 926        } else {
 927                if (!chip->onfi_timing_mode_default)
 928                        return 0;
 929
 930                modes = GENMASK(chip->onfi_timing_mode_default, 0);
 931        }
 932
 933        for (mode = fls(modes) - 1; mode >= 0; mode--) {
 934                ret = onfi_fill_data_interface(chip, NAND_SDR_IFACE, mode);
 935                if (ret)
 936                        continue;
 937
 938                /*
 939                 * Pass NAND_DATA_IFACE_CHECK_ONLY to only check if the
 940                 * controller supports the requested timings.
 941                 */
 942                ret = chip->controller->ops->setup_data_interface(chip,
 943                                                 NAND_DATA_IFACE_CHECK_ONLY,
 944                                                 &chip->data_interface);
 945                if (!ret) {
 946                        chip->onfi_timing_mode_default = mode;
 947                        break;
 948                }
 949        }
 950
 951        return 0;
 952}
 953
 954/**
 955 * nand_fill_column_cycles - fill the column cycles of an address
 956 * @chip: The NAND chip
 957 * @addrs: Array of address cycles to fill
 958 * @offset_in_page: The offset in the page
 959 *
 960 * Fills the first or the first two bytes of the @addrs field depending
 961 * on the NAND bus width and the page size.
 962 *
 963 * Returns the number of cycles needed to encode the column, or a negative
 964 * error code in case one of the arguments is invalid.
 965 */
 966static int nand_fill_column_cycles(struct nand_chip *chip, u8 *addrs,
 967                                   unsigned int offset_in_page)
 968{
 969        struct mtd_info *mtd = nand_to_mtd(chip);
 970
 971        /* Make sure the offset is less than the actual page size. */
 972        if (offset_in_page > mtd->writesize + mtd->oobsize)
 973                return -EINVAL;
 974
 975        /*
 976         * On small page NANDs, there's a dedicated command to access the OOB
 977         * area, and the column address is relative to the start of the OOB
 978         * area, not the start of the page. Adjust the address accordingly.
 979         */
 980        if (mtd->writesize <= 512 && offset_in_page >= mtd->writesize)
 981                offset_in_page -= mtd->writesize;
 982
 983        /*
 984         * The offset in the page is expressed in bytes; if the NAND bus is
 985         * 16-bit wide, it must be divided by 2.
 986         */
 987        if (chip->options & NAND_BUSWIDTH_16) {
 988                if (WARN_ON(offset_in_page % 2))
 989                        return -EINVAL;
 990
 991                offset_in_page /= 2;
 992        }
 993
 994        addrs[0] = offset_in_page;
 995
 996        /*
 997         * Small page NANDs use 1 cycle for the columns, while large page NANDs
 998         * need 2.
 999         */
1000        if (mtd->writesize <= 512)
1001                return 1;
1002
1003        addrs[1] = offset_in_page >> 8;
1004
1005        return 2;
1006}
1007
1008static int nand_sp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
1009                                     unsigned int offset_in_page, void *buf,
1010                                     unsigned int len)
1011{
1012        struct mtd_info *mtd = nand_to_mtd(chip);
1013        const struct nand_sdr_timings *sdr =
1014                nand_get_sdr_timings(&chip->data_interface);
1015        u8 addrs[4];
1016        struct nand_op_instr instrs[] = {
1017                NAND_OP_CMD(NAND_CMD_READ0, 0),
1018                NAND_OP_ADDR(3, addrs, PSEC_TO_NSEC(sdr->tWB_max)),
1019                NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
1020                                 PSEC_TO_NSEC(sdr->tRR_min)),
1021                NAND_OP_DATA_IN(len, buf, 0),
1022        };
1023        struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1024        int ret;
1025
1026        /* Drop the DATA_IN instruction if len is set to 0. */
1027        if (!len)
1028                op.ninstrs--;
1029
1030        if (offset_in_page >= mtd->writesize)
1031                instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
1032        else if (offset_in_page >= 256 &&
1033                 !(chip->options & NAND_BUSWIDTH_16))
1034                instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;
1035
1036        ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
1037        if (ret < 0)
1038                return ret;
1039
1040        addrs[1] = page;
1041        addrs[2] = page >> 8;
1042
1043        if (chip->options & NAND_ROW_ADDR_3) {
1044                addrs[3] = page >> 16;
1045                instrs[1].ctx.addr.naddrs++;
1046        }
1047
1048        return nand_exec_op(chip, &op);
1049}
1050
1051static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
1052                                     unsigned int offset_in_page, void *buf,
1053                                     unsigned int len)
1054{
1055        const struct nand_sdr_timings *sdr =
1056                nand_get_sdr_timings(&chip->data_interface);
1057        u8 addrs[5];
1058        struct nand_op_instr instrs[] = {
1059                NAND_OP_CMD(NAND_CMD_READ0, 0),
1060                NAND_OP_ADDR(4, addrs, 0),
1061                NAND_OP_CMD(NAND_CMD_READSTART, PSEC_TO_NSEC(sdr->tWB_max)),
1062                NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
1063                                 PSEC_TO_NSEC(sdr->tRR_min)),
1064                NAND_OP_DATA_IN(len, buf, 0),
1065        };
1066        struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1067        int ret;
1068
1069        /* Drop the DATA_IN instruction if len is set to 0. */
1070        if (!len)
1071                op.ninstrs--;
1072
1073        ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
1074        if (ret < 0)
1075                return ret;
1076
1077        addrs[2] = page;
1078        addrs[3] = page >> 8;
1079
1080        if (chip->options & NAND_ROW_ADDR_3) {
1081                addrs[4] = page >> 16;
1082                instrs[1].ctx.addr.naddrs++;
1083        }
1084
1085        return nand_exec_op(chip, &op);
1086}
1087
1088/**
1089 * nand_read_page_op - Do a READ PAGE operation
1090 * @chip: The NAND chip
1091 * @page: page to read
1092 * @offset_in_page: offset within the page
1093 * @buf: buffer used to store the data
1094 * @len: length of the buffer
1095 *
1096 * This function issues a READ PAGE operation.
1097 * This function does not select/unselect the CS line.
1098 *
1099 * Returns 0 on success, a negative error code otherwise.
1100 */
1101int nand_read_page_op(struct nand_chip *chip, unsigned int page,
1102                      unsigned int offset_in_page, void *buf, unsigned int len)
1103{
1104        struct mtd_info *mtd = nand_to_mtd(chip);
1105
1106        if (len && !buf)
1107                return -EINVAL;
1108
1109        if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1110                return -EINVAL;
1111
1112        if (nand_has_exec_op(chip)) {
1113                if (mtd->writesize > 512)
1114                        return nand_lp_exec_read_page_op(chip, page,
1115                                                         offset_in_page, buf,
1116                                                         len);
1117
1118                return nand_sp_exec_read_page_op(chip, page, offset_in_page,
1119                                                 buf, len);
1120        }
1121
1122        chip->legacy.cmdfunc(chip, NAND_CMD_READ0, offset_in_page, page);
1123        if (len)
1124                chip->legacy.read_buf(chip, buf, len);
1125
1126        return 0;
1127}
1128EXPORT_SYMBOL_GPL(nand_read_page_op);
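
/*
 * Illustrative sketch, not part of the original listing: a raw page read, as
 * an ecc->read_page_raw() implementation would issue it, is a READ PAGE for
 * the data area followed by a plain data-in of the OOB bytes. The helper
 * name is hypothetical; nand_read_data_op() is declared in
 * <linux/mtd/rawnand.h>.
 */
static int __maybe_unused example_read_raw_page(struct nand_chip *chip,
						u8 *buf, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	/* READ PAGE covering the whole data area. */
	ret = nand_read_page_op(chip, page, 0, buf, mtd->writesize);
	if (ret)
		return ret;

	/* Then read the OOB bytes that follow the data area. */
	return nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false);
}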
1129
1130/**
1131 * nand_read_param_page_op - Do a READ PARAMETER PAGE operation
1132 * @chip: The NAND chip
1133 * @page: parameter page to read
1134 * @buf: buffer used to store the data
1135 * @len: length of the buffer
1136 *
1137 * This function issues a READ PARAMETER PAGE operation.
1138 * This function does not select/unselect the CS line.
1139 *
1140 * Returns 0 on success, a negative error code otherwise.
1141 */
1142int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf,
1143                            unsigned int len)
1144{
1145        unsigned int i;
1146        u8 *p = buf;
1147
1148        if (len && !buf)
1149                return -EINVAL;
1150
1151        if (nand_has_exec_op(chip)) {
1152                const struct nand_sdr_timings *sdr =
1153                        nand_get_sdr_timings(&chip->data_interface);
1154                struct nand_op_instr instrs[] = {
1155                        NAND_OP_CMD(NAND_CMD_PARAM, 0),
1156                        NAND_OP_ADDR(1, &page, PSEC_TO_NSEC(sdr->tWB_max)),
1157                        NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
1158                                         PSEC_TO_NSEC(sdr->tRR_min)),
1159                        NAND_OP_8BIT_DATA_IN(len, buf, 0),
1160                };
1161                struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1162
1163                /* Drop the DATA_IN instruction if len is set to 0. */
1164                if (!len)
1165                        op.ninstrs--;
1166
1167                return nand_exec_op(chip, &op);
1168        }
1169
1170        chip->legacy.cmdfunc(chip, NAND_CMD_PARAM, page, -1);
1171        for (i = 0; i < len; i++)
1172                p[i] = chip->legacy.read_byte(chip);
1173
1174        return 0;
1175}
1176
1177/**
1178 * nand_change_read_column_op - Do a CHANGE READ COLUMN operation
1179 * @chip: The NAND chip
1180 * @offset_in_page: offset within the page
1181 * @buf: buffer used to store the data
1182 * @len: length of the buffer
1183 * @force_8bit: force 8-bit bus access
1184 *
1185 * This function issues a CHANGE READ COLUMN operation.
1186 * This function does not select/unselect the CS line.
1187 *
1188 * Returns 0 on success, a negative error code otherwise.
1189 */
1190int nand_change_read_column_op(struct nand_chip *chip,
1191                               unsigned int offset_in_page, void *buf,
1192                               unsigned int len, bool force_8bit)
1193{
1194        struct mtd_info *mtd = nand_to_mtd(chip);
1195
1196        if (len && !buf)
1197                return -EINVAL;
1198
1199        if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1200                return -EINVAL;
1201
1202        /* Small page NANDs do not support column change. */
1203        if (mtd->writesize <= 512)
1204                return -ENOTSUPP;
1205
1206        if (nand_has_exec_op(chip)) {
1207                const struct nand_sdr_timings *sdr =
1208                        nand_get_sdr_timings(&chip->data_interface);
1209                u8 addrs[2] = {};
1210                struct nand_op_instr instrs[] = {
1211                        NAND_OP_CMD(NAND_CMD_RNDOUT, 0),
1212                        NAND_OP_ADDR(2, addrs, 0),
1213                        NAND_OP_CMD(NAND_CMD_RNDOUTSTART,
1214                                    PSEC_TO_NSEC(sdr->tCCS_min)),
1215                        NAND_OP_DATA_IN(len, buf, 0),
1216                };
1217                struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1218                int ret;
1219
1220                ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
1221                if (ret < 0)
1222                        return ret;
1223
1224                /* Drop the DATA_IN instruction if len is set to 0. */
1225                if (!len)
1226                        op.ninstrs--;
1227
1228                instrs[3].ctx.data.force_8bit = force_8bit;
1229
1230                return nand_exec_op(chip, &op);
1231        }
1232
1233        chip->legacy.cmdfunc(chip, NAND_CMD_RNDOUT, offset_in_page, -1);
1234        if (len)
1235                chip->legacy.read_buf(chip, buf, len);
1236
1237        return 0;
1238}
1239EXPORT_SYMBOL_GPL(nand_change_read_column_op);
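
/*
 * Illustrative sketch, not part of the original listing: once a page has
 * been loaded into the NAND's page register with a zero-length
 * nand_read_page_op(), a CHANGE READ COLUMN can be used to fetch only the
 * OOB area, a pattern commonly used by hardware-ECC read paths. The helper
 * name is hypothetical.
 */
static int __maybe_unused example_read_oob_of_page(struct nand_chip *chip,
						   int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	/* Load the page, but do not transfer any data yet. */
	ret = nand_read_page_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/* Move the read column to the OOB area and read it. */
	return nand_change_read_column_op(chip, mtd->writesize, chip->oob_poi,
					  mtd->oobsize, false);
}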
1240
1241/**
1242 * nand_read_oob_op - Do a READ OOB operation
1243 * @chip: The NAND chip
1244 * @page: page to read
1245 * @offset_in_oob: offset within the OOB area
1246 * @buf: buffer used to store the data
1247 * @len: length of the buffer
1248 *
1249 * This function issues a READ OOB operation.
1250 * This function does not select/unselect the CS line.
1251 *
1252 * Returns 0 on success, a negative error code otherwise.
1253 */
1254int nand_read_oob_op(struct nand_chip *chip, unsigned int page,
1255                     unsigned int offset_in_oob, void *buf, unsigned int len)
1256{
1257        struct mtd_info *mtd = nand_to_mtd(chip);
1258
1259        if (len && !buf)
1260                return -EINVAL;
1261
1262        if (offset_in_oob + len > mtd->oobsize)
1263                return -EINVAL;
1264
1265        if (nand_has_exec_op(chip))
1266                return nand_read_page_op(chip, page,
1267                                         mtd->writesize + offset_in_oob,
1268                                         buf, len);
1269
1270        chip->legacy.cmdfunc(chip, NAND_CMD_READOOB, offset_in_oob, page);
1271        if (len)
1272                chip->legacy.read_buf(chip, buf, len);
1273
1274        return 0;
1275}
1276EXPORT_SYMBOL_GPL(nand_read_oob_op);
1277
1278static int nand_exec_prog_page_op(struct nand_chip *chip, unsigned int page,
1279                                  unsigned int offset_in_page, const void *buf,
1280                                  unsigned int len, bool prog)
1281{
1282        struct mtd_info *mtd = nand_to_mtd(chip);
1283        const struct nand_sdr_timings *sdr =
1284                nand_get_sdr_timings(&chip->data_interface);
1285        u8 addrs[5] = {};
1286        struct nand_op_instr instrs[] = {
1287                /*
1288                 * The first instruction will be dropped if we're dealing
1289                 * with a large page NAND and adjusted if we're dealing
1290                 * with a small page NAND and the page offset is > 255.
1291                 */
1292                NAND_OP_CMD(NAND_CMD_READ0, 0),
1293                NAND_OP_CMD(NAND_CMD_SEQIN, 0),
1294                NAND_OP_ADDR(0, addrs, PSEC_TO_NSEC(sdr->tADL_min)),
1295                NAND_OP_DATA_OUT(len, buf, 0),
1296                NAND_OP_CMD(NAND_CMD_PAGEPROG, PSEC_TO_NSEC(sdr->tWB_max)),
1297                NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tPROG_max), 0),
1298        };
1299        struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1300        int naddrs = nand_fill_column_cycles(chip, addrs, offset_in_page);
1301        int ret;
1302        u8 status;
1303
1304        if (naddrs < 0)
1305                return naddrs;
1306
1307        addrs[naddrs++] = page;
1308        addrs[naddrs++] = page >> 8;
1309        if (chip->options & NAND_ROW_ADDR_3)
1310                addrs[naddrs++] = page >> 16;
1311
1312        instrs[2].ctx.addr.naddrs = naddrs;
1313
1314        /* Drop the last two instructions if we're not programming the page. */
1315        if (!prog) {
1316                op.ninstrs -= 2;
1317                /* Also drop the DATA_OUT instruction if empty. */
1318                if (!len)
1319                        op.ninstrs--;
1320        }
1321
1322        if (mtd->writesize <= 512) {
1323                /*
1324                 * Small pages need some more tweaking: we have to adjust the
1325                 * first instruction depending on the page offset we're trying
1326                 * to access.
1327                 */
1328                if (offset_in_page >= mtd->writesize)
1329                        instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
1330                else if (offset_in_page >= 256 &&
1331                         !(chip->options & NAND_BUSWIDTH_16))
1332                        instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;
1333        } else {
1334                /*
1335                 * Drop the first command if we're dealing with a large page
1336                 * NAND.
1337                 */
1338                op.instrs++;
1339                op.ninstrs--;
1340        }
1341
1342        ret = nand_exec_op(chip, &op);
1343        if (!prog || ret)
1344                return ret;
1345
1346        ret = nand_status_op(chip, &status);
1347        if (ret)
1348                return ret;
1349
1350        return status;
1351}
1352
1353/**
1354 * nand_prog_page_begin_op - starts a PROG PAGE operation
1355 * @chip: The NAND chip
1356 * @page: page to write
1357 * @offset_in_page: offset within the page
1358 * @buf: buffer containing the data to write to the page
1359 * @len: length of the buffer
1360 *
1361 * This function issues the first half of a PROG PAGE operation.
1362 * This function does not select/unselect the CS line.
1363 *
1364 * Returns 0 on success, a negative error code otherwise.
1365 */
1366int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page,
1367                            unsigned int offset_in_page, const void *buf,
1368                            unsigned int len)
1369{
1370        struct mtd_info *mtd = nand_to_mtd(chip);
1371
1372        if (len && !buf)
1373                return -EINVAL;
1374
1375        if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1376                return -EINVAL;
1377
1378        if (nand_has_exec_op(chip))
1379                return nand_exec_prog_page_op(chip, page, offset_in_page, buf,
1380                                              len, false);
1381
1382        chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page, page);
1383
1384        if (buf)
1385                chip->legacy.write_buf(chip, buf, len);
1386
1387        return 0;
1388}
1389EXPORT_SYMBOL_GPL(nand_prog_page_begin_op);
1390
1391/**
1392 * nand_prog_page_end_op - ends a PROG PAGE operation
1393 * @chip: The NAND chip
1394 *
1395 * This function issues the second half of a PROG PAGE operation.
1396 * This function does not select/unselect the CS line.
1397 *
1398 * Returns 0 on success, a negative error code otherwise.
1399 */
1400int nand_prog_page_end_op(struct nand_chip *chip)
1401{
1402        int ret;
1403        u8 status;
1404
1405        if (nand_has_exec_op(chip)) {
1406                const struct nand_sdr_timings *sdr =
1407                        nand_get_sdr_timings(&chip->data_interface);
1408                struct nand_op_instr instrs[] = {
1409                        NAND_OP_CMD(NAND_CMD_PAGEPROG,
1410                                    PSEC_TO_NSEC(sdr->tWB_max)),
1411                        NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tPROG_max), 0),
1412                };
1413                struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1414
1415                ret = nand_exec_op(chip, &op);
1416                if (ret)
1417                        return ret;
1418
1419                ret = nand_status_op(chip, &status);
1420                if (ret)
1421                        return ret;
1422        } else {
1423                chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1);
1424                ret = chip->legacy.waitfunc(chip);
1425                if (ret < 0)
1426                        return ret;
1427
1428                status = ret;
1429        }
1430
1431        if (status & NAND_STATUS_FAIL)
1432                return -EIO;
1433
1434        return 0;
1435}
1436EXPORT_SYMBOL_GPL(nand_prog_page_end_op);
1437
1438/**
1439 * nand_prog_page_op - Do a full PROG PAGE operation
1440 * @chip: The NAND chip
1441 * @page: page to write
1442 * @offset_in_page: offset within the page
1443 * @buf: buffer containing the data to write to the page
1444 * @len: length of the buffer
1445 *
1446 * This function issues a full PROG PAGE operation.
1447 * This function does not select/unselect the CS line.
1448 *
1449 * Returns 0 on success, a negative error code otherwise.
1450 */
1451int nand_prog_page_op(struct nand_chip *chip, unsigned int page,
1452                      unsigned int offset_in_page, const void *buf,
1453                      unsigned int len)
1454{
1455        struct mtd_info *mtd = nand_to_mtd(chip);
1456        int status;
1457
1458        if (!len || !buf)
1459                return -EINVAL;
1460
1461        if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1462                return -EINVAL;
1463
1464        if (nand_has_exec_op(chip)) {
1465                status = nand_exec_prog_page_op(chip, page, offset_in_page, buf,
1466                                                len, true);
1467        } else {
1468                chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page,
1469                                     page);
1470                chip->legacy.write_buf(chip, buf, len);
1471                chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1);
1472                status = chip->legacy.waitfunc(chip);
1473        }
1474
1475        if (status & NAND_STATUS_FAIL)
1476                return -EIO;
1477
1478        return 0;
1479}
1480EXPORT_SYMBOL_GPL(nand_prog_page_op);
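
/*
 * Illustrative sketch, not part of the original listing: a full raw page
 * program, as an ecc->write_page_raw() implementation would do, chains the
 * begin/end helpers with a data-out of the OOB bytes in between. The helper
 * name is hypothetical; nand_write_data_op() is declared in
 * <linux/mtd/rawnand.h>.
 */
static int __maybe_unused example_write_raw_page(struct nand_chip *chip,
						 const u8 *buf, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	/* Start the PROG PAGE and send the data area. */
	ret = nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
	if (ret)
		return ret;

	/* Append the OOB bytes. */
	ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
	if (ret)
		return ret;

	/* Issue PAGEPROG and wait for the program to complete. */
	return nand_prog_page_end_op(chip);
}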
1481
1482/**
1483 * nand_change_write_column_op - Do a CHANGE WRITE COLUMN operation
1484 * @chip: The NAND chip
1485 * @offset_in_page: offset within the page
1486 * @buf: buffer containing the data to send to the NAND
1487 * @len: length of the buffer
1488 * @force_8bit: force 8-bit bus access
1489 *
1490 * This function issues a CHANGE WRITE COLUMN operation.
1491 * This function does not select/unselect the CS line.
1492 *
1493 * Returns 0 on success, a negative error code otherwise.
1494 */
1495int nand_change_write_column_op(struct nand_chip *chip,
1496                                unsigned int offset_in_page,
1497                                const void *buf, unsigned int len,
1498                                bool force_8bit)
1499{
1500        struct mtd_info *mtd = nand_to_mtd(chip);
1501
1502        if (len && !buf)
1503                return -EINVAL;
1504
1505        if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1506                return -EINVAL;
1507
1508        /* Small page NANDs do not support column change. */
1509        if (mtd->writesize <= 512)
1510                return -ENOTSUPP;
1511
1512        if (nand_has_exec_op(chip)) {
1513                const struct nand_sdr_timings *sdr =
1514                        nand_get_sdr_timings(&chip->data_interface);
1515                u8 addrs[2];
1516                struct nand_op_instr instrs[] = {
1517                        NAND_OP_CMD(NAND_CMD_RNDIN, 0),
1518                        NAND_OP_ADDR(2, addrs, PSEC_TO_NSEC(sdr->tCCS_min)),
1519                        NAND_OP_DATA_OUT(len, buf, 0),
1520                };
1521                struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1522                int ret;
1523
1524                ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
1525                if (ret < 0)
1526                        return ret;
1527
1528                instrs[2].ctx.data.force_8bit = force_8bit;
1529
1530                /* Drop the DATA_OUT instruction if len is set to 0. */
1531                if (!len)
1532                        op.ninstrs--;
1533
1534                return nand_exec_op(chip, &op);
1535        }
1536
1537        chip->legacy.cmdfunc(chip, NAND_CMD_RNDIN, offset_in_page, -1);
1538        if (len)
1539                chip->legacy.write_buf(chip, buf, len);
1540
1541        return 0;
1542}
1543EXPORT_SYMBOL_GPL(nand_change_write_column_op);
1544
1545/**
1546 * nand_readid_op - Do a READID operation
1547 * @chip: The NAND chip
1548 * @addr: address cycle to pass after the READID command
1549 * @buf: buffer used to store the ID
1550 * @len: length of the buffer
1551 *
1552 * This function sends a READID command and reads back the ID returned by the
1553 * NAND.
1554 * This function does not select/unselect the CS line.
1555 *
1556 * Returns 0 on success, a negative error code otherwise.
1557 */
1558int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
1559                   unsigned int len)
1560{
1561        unsigned int i;
1562        u8 *id = buf;
1563
1564        if (len && !buf)
1565                return -EINVAL;
1566
1567        if (nand_has_exec_op(chip)) {
1568                const struct nand_sdr_timings *sdr =
1569                        nand_get_sdr_timings(&chip->data_interface);
1570                struct nand_op_instr instrs[] = {
1571                        NAND_OP_CMD(NAND_CMD_READID, 0),
1572                        NAND_OP_ADDR(1, &addr, PSEC_TO_NSEC(sdr->tADL_min)),
1573                        NAND_OP_8BIT_DATA_IN(len, buf, 0),
1574                };
1575                struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1576
1577                /* Drop the DATA_IN instruction if len is set to 0. */
1578                if (!len)
1579                        op.ninstrs--;
1580
1581                return nand_exec_op(chip, &op);
1582        }
1583
1584        chip->legacy.cmdfunc(chip, NAND_CMD_READID, addr, -1);
1585
1586        for (i = 0; i < len; i++)
1587                id[i] = chip->legacy.read_byte(chip);
1588
1589        return 0;
1590}
1591EXPORT_SYMBOL_GPL(nand_readid_op);
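
/*
 * Illustrative sketch (editor's addition, not part of the upstream driver):
 * reading the two JEDEC manufacturer/device ID bytes with nand_readid_op().
 * The die must be selected around the access; the function name is
 * hypothetical.
 */
static int __maybe_unused example_read_jedec_id(struct nand_chip *chip,
                                                u8 *maf_id, u8 *dev_id)
{
        u8 id[2];
        int ret;

        nand_select_target(chip, 0);
        /* READID with address cycle 0x00 returns manufacturer then device ID */
        ret = nand_readid_op(chip, 0, id, sizeof(id));
        nand_deselect_target(chip);
        if (ret)
                return ret;

        *maf_id = id[0];
        *dev_id = id[1];

        return 0;
}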
1592
1593/**
1594 * nand_status_op - Do a STATUS operation
1595 * @chip: The NAND chip
1596 * @status: out variable to store the NAND status
1597 *
1598 * This function sends a STATUS command and reads back the status returned by
1599 * the NAND.
1600 * This function does not select/unselect the CS line.
1601 *
1602 * Returns 0 on success, a negative error code otherwise.
1603 */
1604int nand_status_op(struct nand_chip *chip, u8 *status)
1605{
1606        if (nand_has_exec_op(chip)) {
1607                const struct nand_sdr_timings *sdr =
1608                        nand_get_sdr_timings(&chip->data_interface);
1609                struct nand_op_instr instrs[] = {
1610                        NAND_OP_CMD(NAND_CMD_STATUS,
1611                                    PSEC_TO_NSEC(sdr->tADL_min)),
1612                        NAND_OP_8BIT_DATA_IN(1, status, 0),
1613                };
1614                struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1615
1616                if (!status)
1617                        op.ninstrs--;
1618
1619                return nand_exec_op(chip, &op);
1620        }
1621
1622        chip->legacy.cmdfunc(chip, NAND_CMD_STATUS, -1, -1);
1623        if (status)
1624                *status = chip->legacy.read_byte(chip);
1625
1626        return 0;
1627}
1628EXPORT_SYMBOL_GPL(nand_status_op);
1629
1630/**
1631 * nand_exit_status_op - Exit a STATUS operation
1632 * @chip: The NAND chip
1633 *
1634 * This function sends a READ0 command to cancel the effect of the STATUS
1635 * command, which would otherwise keep returning the status on every read.
1636 *
1637 * This function does not select/unselect the CS line.
1638 *
1639 * Returns 0 on success, a negative error code otherwise.
1640 */
1641int nand_exit_status_op(struct nand_chip *chip)
1642{
1643        if (nand_has_exec_op(chip)) {
1644                struct nand_op_instr instrs[] = {
1645                        NAND_OP_CMD(NAND_CMD_READ0, 0),
1646                };
1647                struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1648
1649                return nand_exec_op(chip, &op);
1650        }
1651
1652        chip->legacy.cmdfunc(chip, NAND_CMD_READ0, -1, -1);
1653
1654        return 0;
1655}
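
/*
 * Illustrative sketch (editor's addition, not part of the upstream driver):
 * a caller polling the status register with nand_status_op() must issue
 * nand_exit_status_op() afterwards, otherwise the chip keeps returning the
 * status byte on the data bus. The busy-wait below is deliberately
 * simplified (no timeout); the function name is hypothetical.
 */
static int __maybe_unused example_poll_until_ready(struct nand_chip *chip)
{
        u8 status;
        int ret;

        do {
                ret = nand_status_op(chip, &status);
                if (ret)
                        return ret;
        } while (!(status & NAND_STATUS_READY));

        /* Leave status-read mode before any further data access */
        ret = nand_exit_status_op(chip);
        if (ret)
                return ret;

        return status & NAND_STATUS_FAIL ? -EIO : 0;
}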
1656
1657/**
1658 * nand_erase_op - Do an erase operation
1659 * @chip: The NAND chip
1660 * @eraseblock: block to erase
1661 *
1662 * This function sends an ERASE command and waits for the NAND to be ready
1663 * before returning.
1664 * This function does not select/unselect the CS line.
1665 *
1666 * Returns 0 on success, a negative error code otherwise.
1667 */
1668int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock)
1669{
1670        unsigned int page = eraseblock <<
1671                            (chip->phys_erase_shift - chip->page_shift);
1672        int ret;
1673        u8 status;
1674
1675        if (nand_has_exec_op(chip)) {
1676                const struct nand_sdr_timings *sdr =
1677                        nand_get_sdr_timings(&chip->data_interface);
1678                u8 addrs[3] = { page, page >> 8, page >> 16 };
1679                struct nand_op_instr instrs[] = {
1680                        NAND_OP_CMD(NAND_CMD_ERASE1, 0),
1681                        NAND_OP_ADDR(2, addrs, 0),
1682                        NAND_OP_CMD(NAND_CMD_ERASE2,
1683                                    PSEC_TO_NSEC(sdr->tWB_max)),
1684                        NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tBERS_max), 0),
1685                };
1686                struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1687
1688                if (chip->options & NAND_ROW_ADDR_3)
1689                        instrs[1].ctx.addr.naddrs++;
1690
1691                ret = nand_exec_op(chip, &op);
1692                if (ret)
1693                        return ret;
1694
1695                ret = nand_status_op(chip, &status);
1696                if (ret)
1697                        return ret;
1698        } else {
1699                chip->legacy.cmdfunc(chip, NAND_CMD_ERASE1, -1, page);
1700                chip->legacy.cmdfunc(chip, NAND_CMD_ERASE2, -1, -1);
1701
1702                ret = chip->legacy.waitfunc(chip);
1703                if (ret < 0)
1704                        return ret;
1705
1706                status = ret;
1707        }
1708
1709        if (status & NAND_STATUS_FAIL)
1710                return -EIO;
1711
1712        return 0;
1713}
1714EXPORT_SYMBOL_GPL(nand_erase_op);
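
/*
 * Illustrative sketch (editor's addition, not part of the upstream driver):
 * nand_erase_op() takes an eraseblock index, not a byte offset. A caller
 * holding a byte offset converts it with the shift maintained by the core,
 * as shown here; the function name is hypothetical.
 */
static int __maybe_unused example_erase_at_offset(struct nand_chip *chip,
                                                  loff_t offs)
{
        unsigned int eraseblock = offs >> chip->phys_erase_shift;
        int ret;

        nand_select_target(chip, 0);
        ret = nand_erase_op(chip, eraseblock);
        nand_deselect_target(chip);

        return ret;
}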
1715
1716/**
1717 * nand_set_features_op - Do a SET FEATURES operation
1718 * @chip: The NAND chip
1719 * @feature: feature id
1720 * @data: 4 bytes of data
1721 *
1722 * This function sends a SET FEATURES command and waits for the NAND to be
1723 * ready before returning.
1724 * This function does not select/unselect the CS line.
1725 *
1726 * Returns 0 on success, a negative error code otherwise.
1727 */
1728static int nand_set_features_op(struct nand_chip *chip, u8 feature,
1729                                const void *data)
1730{
1731        const u8 *params = data;
1732        int i, ret;
1733
1734        if (nand_has_exec_op(chip)) {
1735                const struct nand_sdr_timings *sdr =
1736                        nand_get_sdr_timings(&chip->data_interface);
1737                struct nand_op_instr instrs[] = {
1738                        NAND_OP_CMD(NAND_CMD_SET_FEATURES, 0),
1739                        NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tADL_min)),
1740                        NAND_OP_8BIT_DATA_OUT(ONFI_SUBFEATURE_PARAM_LEN, data,
1741                                              PSEC_TO_NSEC(sdr->tWB_max)),
1742                        NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tFEAT_max), 0),
1743                };
1744                struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1745
1746                return nand_exec_op(chip, &op);
1747        }
1748
1749        chip->legacy.cmdfunc(chip, NAND_CMD_SET_FEATURES, feature, -1);
1750        for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
1751                chip->legacy.write_byte(chip, params[i]);
1752
1753        ret = chip->legacy.waitfunc(chip);
1754        if (ret < 0)
1755                return ret;
1756
1757        if (ret & NAND_STATUS_FAIL)
1758                return -EIO;
1759
1760        return 0;
1761}
1762
1763/**
1764 * nand_get_features_op - Do a GET FEATURES operation
1765 * @chip: The NAND chip
1766 * @feature: feature id
1767 * @data: 4 bytes of data
1768 *
1769 * This function sends a GET FEATURES command and waits for the NAND to be
1770 * ready before returning.
1771 * This function does not select/unselect the CS line.
1772 *
1773 * Returns 0 on success, a negative error code otherwise.
1774 */
1775static int nand_get_features_op(struct nand_chip *chip, u8 feature,
1776                                void *data)
1777{
1778        u8 *params = data;
1779        int i;
1780
1781        if (nand_has_exec_op(chip)) {
1782                const struct nand_sdr_timings *sdr =
1783                        nand_get_sdr_timings(&chip->data_interface);
1784                struct nand_op_instr instrs[] = {
1785                        NAND_OP_CMD(NAND_CMD_GET_FEATURES, 0),
1786                        NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tWB_max)),
1787                        NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tFEAT_max),
1788                                         PSEC_TO_NSEC(sdr->tRR_min)),
1789                        NAND_OP_8BIT_DATA_IN(ONFI_SUBFEATURE_PARAM_LEN,
1790                                             data, 0),
1791                };
1792                struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1793
1794                return nand_exec_op(chip, &op);
1795        }
1796
1797        chip->legacy.cmdfunc(chip, NAND_CMD_GET_FEATURES, feature, -1);
1798        for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
1799                params[i] = chip->legacy.read_byte(chip);
1800
1801        return 0;
1802}
1803
1804static int nand_wait_rdy_op(struct nand_chip *chip, unsigned int timeout_ms,
1805                            unsigned int delay_ns)
1806{
1807        if (nand_has_exec_op(chip)) {
1808                struct nand_op_instr instrs[] = {
1809                        NAND_OP_WAIT_RDY(PSEC_TO_MSEC(timeout_ms),
1810                                         PSEC_TO_NSEC(delay_ns)),
1811                };
1812                struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1813
1814                return nand_exec_op(chip, &op);
1815        }
1816
1817        /* Apply delay or wait for ready/busy pin */
1818        if (!chip->legacy.dev_ready)
1819                udelay(chip->legacy.chip_delay);
1820        else
1821                nand_wait_ready(chip);
1822
1823        return 0;
1824}
1825
1826/**
1827 * nand_reset_op - Do a reset operation
1828 * @chip: The NAND chip
1829 *
1830 * This function sends a RESET command and waits for the NAND to be ready
1831 * before returning.
1832 * This function does not select/unselect the CS line.
1833 *
1834 * Returns 0 on success, a negative error code otherwise.
1835 */
1836int nand_reset_op(struct nand_chip *chip)
1837{
1838        if (nand_has_exec_op(chip)) {
1839                const struct nand_sdr_timings *sdr =
1840                        nand_get_sdr_timings(&chip->data_interface);
1841                struct nand_op_instr instrs[] = {
1842                        NAND_OP_CMD(NAND_CMD_RESET, PSEC_TO_NSEC(sdr->tWB_max)),
1843                        NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tRST_max), 0),
1844                };
1845                struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1846
1847                return nand_exec_op(chip, &op);
1848        }
1849
1850        chip->legacy.cmdfunc(chip, NAND_CMD_RESET, -1, -1);
1851
1852        return 0;
1853}
1854EXPORT_SYMBOL_GPL(nand_reset_op);
1855
1856/**
1857 * nand_read_data_op - Read data from the NAND
1858 * @chip: The NAND chip
1859 * @buf: buffer used to store the data
1860 * @len: length of the buffer
1861 * @force_8bit: force 8-bit bus access
1862 *
1863 * This function does a raw data read on the bus. Usually used after launching
1864 * another NAND operation like nand_read_page_op().
1865 * This function does not select/unselect the CS line.
1866 *
1867 * Returns 0 on success, a negative error code otherwise.
1868 */
1869int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
1870                      bool force_8bit)
1871{
1872        if (!len || !buf)
1873                return -EINVAL;
1874
1875        if (nand_has_exec_op(chip)) {
1876                struct nand_op_instr instrs[] = {
1877                        NAND_OP_DATA_IN(len, buf, 0),
1878                };
1879                struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1880
1881                instrs[0].ctx.data.force_8bit = force_8bit;
1882
1883                return nand_exec_op(chip, &op);
1884        }
1885
1886        if (force_8bit) {
1887                u8 *p = buf;
1888                unsigned int i;
1889
1890                for (i = 0; i < len; i++)
1891                        p[i] = chip->legacy.read_byte(chip);
1892        } else {
1893                chip->legacy.read_buf(chip, buf, len);
1894        }
1895
1896        return 0;
1897}
1898EXPORT_SYMBOL_GPL(nand_read_data_op);
1899
1900/**
1901 * nand_write_data_op - Write data to the NAND
1902 * @chip: The NAND chip
1903 * @buf: buffer containing the data to send on the bus
1904 * @len: length of the buffer
1905 * @force_8bit: force 8-bit bus access
1906 *
1907 * This function does a raw data write on the bus. Usually used after launching
1908 * another NAND operation like nand_write_page_begin_op().
1909 * This function does not select/unselect the CS line.
1910 *
1911 * Returns 0 on success, a negative error code otherwise.
1912 */
1913int nand_write_data_op(struct nand_chip *chip, const void *buf,
1914                       unsigned int len, bool force_8bit)
1915{
1916        if (!len || !buf)
1917                return -EINVAL;
1918
1919        if (nand_has_exec_op(chip)) {
1920                struct nand_op_instr instrs[] = {
1921                        NAND_OP_DATA_OUT(len, buf, 0),
1922                };
1923                struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1924
1925                instrs[0].ctx.data.force_8bit = force_8bit;
1926
1927                return nand_exec_op(chip, &op);
1928        }
1929
1930        if (force_8bit) {
1931                const u8 *p = buf;
1932                unsigned int i;
1933
1934                for (i = 0; i < len; i++)
1935                        chip->legacy.write_byte(chip, p[i]);
1936        } else {
1937                chip->legacy.write_buf(chip, buf, len);
1938        }
1939
1940        return 0;
1941}
1942EXPORT_SYMBOL_GPL(nand_write_data_op);
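
/*
 * Illustrative sketch (editor's addition, not part of the upstream driver):
 * nand_write_data_op() is meant to be used inside an already started
 * operation. A raw page write typically pushes the main data with
 * nand_prog_page_begin_op() and then streams the OOB bytes, which follow
 * the data on the bus, with nand_write_data_op(); the function name is
 * hypothetical.
 */
static int __maybe_unused example_write_page_and_oob_raw(struct nand_chip *chip,
                                                         const u8 *buf,
                                                         int page)
{
        struct mtd_info *mtd = nand_to_mtd(chip);
        int ret;

        ret = nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
        if (ret)
                return ret;

        /* The OOB area directly follows the last data column */
        ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
        if (ret)
                return ret;

        return nand_prog_page_end_op(chip);
}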
1943
1944/**
1945 * struct nand_op_parser_ctx - Context used by the parser
1946 * @instrs: array of all the instructions that must be addressed
1947 * @ninstrs: length of the @instrs array
1948 * @subop: Sub-operation to be passed to the NAND controller
1949 *
1950 * This structure is used by the core to split NAND operations into
1951 * sub-operations that can be handled by the NAND controller.
1952 */
1953struct nand_op_parser_ctx {
1954        const struct nand_op_instr *instrs;
1955        unsigned int ninstrs;
1956        struct nand_subop subop;
1957};
1958
1959/**
1960 * nand_op_parser_must_split_instr - Checks if an instruction must be split
1961 * @pat: the parser pattern element that matches @instr
1962 * @instr: pointer to the instruction to check
1963 * @start_offset: this is an in/out parameter. If @instr has already been
1964 *                split, then @start_offset is the offset from which to start
1965 *                (either an address cycle or an offset in the data buffer).
1966 *                Conversely, if the function returns true (ie. instr must be
1967 *                split), this parameter is updated to point to the first
1968 *                data/address cycle that has not been taken care of.
1969 *
1970 * Some NAND controllers are limited and cannot send X address cycles in a
1971 * single operation, or cannot read/write more than Y bytes at a time. In
1972 * this case, the instruction that does not fit in a single controller
1973 * operation is split into two or more chunks.
1974 *
1975 * Returns true if the instruction must be split, false otherwise.
1976 * The @start_offset parameter is also updated to the offset at which the next
1977 * bundle of instructions must start (for address and data instructions).
1978 */
1979static bool
1980nand_op_parser_must_split_instr(const struct nand_op_parser_pattern_elem *pat,
1981                                const struct nand_op_instr *instr,
1982                                unsigned int *start_offset)
1983{
1984        switch (pat->type) {
1985        case NAND_OP_ADDR_INSTR:
1986                if (!pat->ctx.addr.maxcycles)
1987                        break;
1988
1989                if (instr->ctx.addr.naddrs - *start_offset >
1990                    pat->ctx.addr.maxcycles) {
1991                        *start_offset += pat->ctx.addr.maxcycles;
1992                        return true;
1993                }
1994                break;
1995
1996        case NAND_OP_DATA_IN_INSTR:
1997        case NAND_OP_DATA_OUT_INSTR:
1998                if (!pat->ctx.data.maxlen)
1999                        break;
2000
2001                if (instr->ctx.data.len - *start_offset >
2002                    pat->ctx.data.maxlen) {
2003                        *start_offset += pat->ctx.data.maxlen;
2004                        return true;
2005                }
2006                break;
2007
2008        default:
2009                break;
2010        }
2011
2012        return false;
2013}
2014
2015/**
2016 * nand_op_parser_match_pat - Checks if a pattern matches the instructions
2017 *                            remaining in the parser context
2018 * @pat: the pattern to test
2019 * @ctx: the parser context structure to match with the pattern @pat
2020 *
2021 * Check if @pat matches the set or a sub-set of instructions remaining in @ctx.
2022 * Returns true if this is the case, false otherwise. When true is returned,
2023 * @ctx->subop is updated with the set of instructions to be passed to the
2024 * controller driver.
2025 */
2026static bool
2027nand_op_parser_match_pat(const struct nand_op_parser_pattern *pat,
2028                         struct nand_op_parser_ctx *ctx)
2029{
2030        unsigned int instr_offset = ctx->subop.first_instr_start_off;
2031        const struct nand_op_instr *end = ctx->instrs + ctx->ninstrs;
2032        const struct nand_op_instr *instr = ctx->subop.instrs;
2033        unsigned int i, ninstrs;
2034
2035        for (i = 0, ninstrs = 0; i < pat->nelems && instr < end; i++) {
2036                /*
2037                 * The pattern instruction does not match the operation
2038                 * instruction. If the instruction is marked optional in the
2039                 * pattern definition, we skip the pattern element and continue
2040                 * to the next one. If the element is mandatory, there's no
2041                 * match and we can return false directly.
2042                 */
2043                if (instr->type != pat->elems[i].type) {
2044                        if (!pat->elems[i].optional)
2045                                return false;
2046
2047                        continue;
2048                }
2049
2050                /*
2051                 * Now check the pattern element constraints. If the pattern is
2052                 * not able to handle the whole instruction in a single step,
2053                 * we have to split it.
2054                 * The last_instr_end_off value comes back updated to point to
2055                 * the position where we have to split the instruction (the
2056                 * start of the next subop chunk).
2057                 */
2058                if (nand_op_parser_must_split_instr(&pat->elems[i], instr,
2059                                                    &instr_offset)) {
2060                        ninstrs++;
2061                        i++;
2062                        break;
2063                }
2064
2065                instr++;
2066                ninstrs++;
2067                instr_offset = 0;
2068        }
2069
2070        /*
2071         * This can happen if all instructions of a pattern are optional.
2072         * Still, if there's not at least one instruction handled by this
2073         * pattern, this is not a match, and we should try the next one (if
2074         * any).
2075         */
2076        if (!ninstrs)
2077                return false;
2078
2079        /*
2080         * We had a match on the pattern head, but the pattern may be longer
2081         * than the instructions we're asked to execute. We need to make sure
2082         * there are no mandatory elements in the pattern tail.
2083         */
2084        for (; i < pat->nelems; i++) {
2085                if (!pat->elems[i].optional)
2086                        return false;
2087        }
2088
2089        /*
2090         * We have a match: update the subop structure accordingly and return
2091         * true.
2092         */
2093        ctx->subop.ninstrs = ninstrs;
2094        ctx->subop.last_instr_end_off = instr_offset;
2095
2096        return true;
2097}
2098
2099#if IS_ENABLED(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG)
2100static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
2101{
2102        const struct nand_op_instr *instr;
2103        char *prefix = "      ";
2104        unsigned int i;
2105
2106        pr_debug("executing subop:\n");
2107
2108        for (i = 0; i < ctx->ninstrs; i++) {
2109                instr = &ctx->instrs[i];
2110
2111                if (instr == &ctx->subop.instrs[0])
2112                        prefix = "    ->";
2113
2114                nand_op_trace(prefix, instr);
2115
2116                if (instr == &ctx->subop.instrs[ctx->subop.ninstrs - 1])
2117                        prefix = "      ";
2118        }
2119}
2120#else
2121static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
2122{
2123        /* NOP */
2124}
2125#endif
2126
2127static int nand_op_parser_cmp_ctx(const struct nand_op_parser_ctx *a,
2128                                  const struct nand_op_parser_ctx *b)
2129{
2130        if (a->subop.ninstrs < b->subop.ninstrs)
2131                return -1;
2132        else if (a->subop.ninstrs > b->subop.ninstrs)
2133                return 1;
2134
2135        if (a->subop.last_instr_end_off < b->subop.last_instr_end_off)
2136                return -1;
2137        else if (a->subop.last_instr_end_off > b->subop.last_instr_end_off)
2138                return 1;
2139
2140        return 0;
2141}
2142
2143/**
2144 * nand_op_parser_exec_op - exec_op parser
2145 * @chip: the NAND chip
2146 * @parser: patterns description provided by the controller driver
2147 * @op: the NAND operation to address
2148 * @check_only: when true, the function only checks if @op can be handled but
2149 *              does not execute the operation
2150 *
2151 * Helper function designed to ease integration of NAND controller drivers that
2152 * only support a limited set of instruction sequences. The supported sequences
2153 * are described in @parser, and the framework takes care of splitting @op into
2154 * multiple sub-operations (if required) and passes them back to the ->exec()
2155 * callback of the matching pattern if @check_only is set to false.
2156 *
2157 * NAND controller drivers should call this function from their own ->exec_op()
2158 * implementation.
2159 *
2160 * Returns 0 on success, a negative error code otherwise. A failure can be
2161 * caused by an unsupported operation (none of the supported patterns is able
2162 * to handle the requested operation), or an error returned by one of the
2163 * matching patterns' ->exec() hooks.
2164 */
2165int nand_op_parser_exec_op(struct nand_chip *chip,
2166                           const struct nand_op_parser *parser,
2167                           const struct nand_operation *op, bool check_only)
2168{
2169        struct nand_op_parser_ctx ctx = {
2170                .subop.instrs = op->instrs,
2171                .instrs = op->instrs,
2172                .ninstrs = op->ninstrs,
2173        };
2174        unsigned int i;
2175
2176        while (ctx.subop.instrs < op->instrs + op->ninstrs) {
2177                const struct nand_op_parser_pattern *pattern;
2178                struct nand_op_parser_ctx best_ctx;
2179                int ret, best_pattern = -1;
2180
2181                for (i = 0; i < parser->npatterns; i++) {
2182                        struct nand_op_parser_ctx test_ctx = ctx;
2183
2184                        pattern = &parser->patterns[i];
2185                        if (!nand_op_parser_match_pat(pattern, &test_ctx))
2186                                continue;
2187
2188                        if (best_pattern >= 0 &&
2189                            nand_op_parser_cmp_ctx(&test_ctx, &best_ctx) <= 0)
2190                                continue;
2191
2192                        best_pattern = i;
2193                        best_ctx = test_ctx;
2194                }
2195
2196                if (best_pattern < 0) {
2197                        pr_debug("->exec_op() parser: pattern not found!\n");
2198                        return -ENOTSUPP;
2199                }
2200
2201                ctx = best_ctx;
2202                nand_op_parser_trace(&ctx);
2203
2204                if (!check_only) {
2205                        pattern = &parser->patterns[best_pattern];
2206                        ret = pattern->exec(chip, &ctx.subop);
2207                        if (ret)
2208                                return ret;
2209                }
2210
2211                /*
2212                 * Update the context structure by pointing to the start of the
2213                 * next subop.
2214                 */
2215                ctx.subop.instrs = ctx.subop.instrs + ctx.subop.ninstrs;
2216                if (ctx.subop.last_instr_end_off)
2217                        ctx.subop.instrs -= 1;
2218
2219                ctx.subop.first_instr_start_off = ctx.subop.last_instr_end_off;
2220        }
2221
2222        return 0;
2223}
2224EXPORT_SYMBOL_GPL(nand_op_parser_exec_op);
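
/*
 * Illustrative sketch (editor's addition, not part of the upstream driver):
 * a controller limited to "CMD [+ ADDR] [+ DATA] [+ WAITRDY]" sequences can
 * describe its capabilities with the NAND_OP_PARSER*() helpers from
 * <linux/mtd/rawnand.h> and let nand_op_parser_exec_op() do the splitting.
 * The limits (5 address cycles, 512-byte data chunks) and the callback are
 * made up for the example; a real ->exec() hook would program the
 * controller from the sub-operation it receives.
 */
static int example_exec_sub_op(struct nand_chip *chip,
                               const struct nand_subop *subop)
{
        /* Translate the sub-operation into controller register accesses */
        return 0;
}

static const struct nand_op_parser example_op_parser = NAND_OP_PARSER(
        NAND_OP_PARSER_PATTERN(example_exec_sub_op,
                               NAND_OP_PARSER_PAT_CMD_ELEM(false),
                               NAND_OP_PARSER_PAT_ADDR_ELEM(true, 5),
                               NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 512),
                               NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
        NAND_OP_PARSER_PATTERN(example_exec_sub_op,
                               NAND_OP_PARSER_PAT_CMD_ELEM(false),
                               NAND_OP_PARSER_PAT_ADDR_ELEM(true, 5),
                               NAND_OP_PARSER_PAT_DATA_OUT_ELEM(true, 512),
                               NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)));

static int __maybe_unused example_exec_op(struct nand_chip *chip,
                                          const struct nand_operation *op,
                                          bool check_only)
{
        return nand_op_parser_exec_op(chip, &example_op_parser, op,
                                      check_only);
}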
2225
2226static bool nand_instr_is_data(const struct nand_op_instr *instr)
2227{
2228        return instr && (instr->type == NAND_OP_DATA_IN_INSTR ||
2229                         instr->type == NAND_OP_DATA_OUT_INSTR);
2230}
2231
2232static bool nand_subop_instr_is_valid(const struct nand_subop *subop,
2233                                      unsigned int instr_idx)
2234{
2235        return subop && instr_idx < subop->ninstrs;
2236}
2237
2238static unsigned int nand_subop_get_start_off(const struct nand_subop *subop,
2239                                             unsigned int instr_idx)
2240{
2241        if (instr_idx)
2242                return 0;
2243
2244        return subop->first_instr_start_off;
2245}
2246
2247/**
2248 * nand_subop_get_addr_start_off - Get the start offset in an address array
2249 * @subop: The entire sub-operation
2250 * @instr_idx: Index of the instruction inside the sub-operation
2251 *
2252 * During driver development, one could be tempted to directly use the
2253 * ->addr.addrs field of address instructions. This is wrong as address
2254 * instructions might be split.
2255 *
2256 * Given an address instruction, returns the offset of the first cycle to issue.
2257 */
2258unsigned int nand_subop_get_addr_start_off(const struct nand_subop *subop,
2259                                           unsigned int instr_idx)
2260{
2261        if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2262                    subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR))
2263                return 0;
2264
2265        return nand_subop_get_start_off(subop, instr_idx);
2266}
2267EXPORT_SYMBOL_GPL(nand_subop_get_addr_start_off);
2268
2269/**
2270 * nand_subop_get_num_addr_cyc - Get the remaining address cycles to assert
2271 * @subop: The entire sub-operation
2272 * @instr_idx: Index of the instruction inside the sub-operation
2273 *
2274 * During driver development, one could be tempted to directly use the
2275 * ->addr->naddrs field of an address instruction. This is wrong as instructions
2276 * might be split.
2277 *
2278 * Given an address instruction, returns the number of address cycles to issue.
2279 */
2280unsigned int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
2281                                         unsigned int instr_idx)
2282{
2283        int start_off, end_off;
2284
2285        if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2286                    subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR))
2287                return 0;
2288
2289        start_off = nand_subop_get_addr_start_off(subop, instr_idx);
2290
2291        if (instr_idx == subop->ninstrs - 1 &&
2292            subop->last_instr_end_off)
2293                end_off = subop->last_instr_end_off;
2294        else
2295                end_off = subop->instrs[instr_idx].ctx.addr.naddrs;
2296
2297        return end_off - start_off;
2298}
2299EXPORT_SYMBOL_GPL(nand_subop_get_num_addr_cyc);
2300
2301/**
2302 * nand_subop_get_data_start_off - Get the start offset in a data array
2303 * @subop: The entire sub-operation
2304 * @instr_idx: Index of the instruction inside the sub-operation
2305 *
2306 * During driver development, one could be tempted to directly use the
2307 * ->data->buf.{in,out} field of data instructions. This is wrong as data
2308 * instructions might be split.
2309 *
2310 * Given a data instruction, returns the offset to start from.
2311 */
2312unsigned int nand_subop_get_data_start_off(const struct nand_subop *subop,
2313                                           unsigned int instr_idx)
2314{
2315        if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2316                    !nand_instr_is_data(&subop->instrs[instr_idx])))
2317                return 0;
2318
2319        return nand_subop_get_start_off(subop, instr_idx);
2320}
2321EXPORT_SYMBOL_GPL(nand_subop_get_data_start_off);
2322
2323/**
2324 * nand_subop_get_data_len - Get the number of bytes to retrieve
2325 * @subop: The entire sub-operation
2326 * @instr_idx: Index of the instruction inside the sub-operation
2327 *
2328 * During driver development, one could be tempted to directly use the
2329 * ->data->len field of a data instruction. This is wrong as data instructions
2330 * might be split.
2331 *
2332 * Returns the length of the chunk of data to send/receive.
2333 */
2334unsigned int nand_subop_get_data_len(const struct nand_subop *subop,
2335                                     unsigned int instr_idx)
2336{
2337        int start_off = 0, end_off;
2338
2339        if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2340                    !nand_instr_is_data(&subop->instrs[instr_idx])))
2341                return 0;
2342
2343        start_off = nand_subop_get_data_start_off(subop, instr_idx);
2344
2345        if (instr_idx == subop->ninstrs - 1 &&
2346            subop->last_instr_end_off)
2347                end_off = subop->last_instr_end_off;
2348        else
2349                end_off = subop->instrs[instr_idx].ctx.data.len;
2350
2351        return end_off - start_off;
2352}
2353EXPORT_SYMBOL_GPL(nand_subop_get_data_len);
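
/*
 * Illustrative sketch (editor's addition, not part of the upstream driver):
 * a pattern ->exec() callback should always go through the
 * nand_subop_get_*() accessors above instead of dereferencing the address
 * or data instructions directly, since the instruction it receives may only
 * be a chunk of a split instruction. The register accesses are only hinted
 * at in comments; the function name is hypothetical.
 */
static int __maybe_unused example_pattern_exec(struct nand_chip *chip,
                                               const struct nand_subop *subop)
{
        unsigned int i, off, cnt;

        for (i = 0; i < subop->ninstrs; i++) {
                const struct nand_op_instr *instr = &subop->instrs[i];

                switch (instr->type) {
                case NAND_OP_CMD_INSTR:
                        /* Send instr->ctx.cmd.opcode on the command cycle */
                        break;
                case NAND_OP_ADDR_INSTR:
                        off = nand_subop_get_addr_start_off(subop, i);
                        cnt = nand_subop_get_num_addr_cyc(subop, i);
                        /* Issue cnt cycles from &instr->ctx.addr.addrs[off] */
                        pr_debug("ADDR: %u cycles from offset %u\n", cnt, off);
                        break;
                case NAND_OP_DATA_IN_INSTR:
                case NAND_OP_DATA_OUT_INSTR:
                        off = nand_subop_get_data_start_off(subop, i);
                        cnt = nand_subop_get_data_len(subop, i);
                        /* Transfer cnt bytes starting at offset off */
                        pr_debug("DATA: %u bytes from offset %u\n", cnt, off);
                        break;
                case NAND_OP_WAITRDY_INSTR:
                        /* Wait for R/B, at most ctx.waitrdy.timeout_ms */
                        break;
                }
        }

        return 0;
}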
2354
2355/**
2356 * nand_reset - Reset and initialize a NAND device
2357 * @chip: The NAND chip
2358 * @chipnr: Internal die id
2359 *
2360 * Save the timings data structure, then apply SDR timings mode 0 (see
2361 * nand_reset_data_interface for details), perform the reset operation, and
2362 * restore the previous timings.
2363 *
2364 * Returns 0 on success, a negative error code otherwise.
2365 */
2366int nand_reset(struct nand_chip *chip, int chipnr)
2367{
2368        struct nand_data_interface saved_data_intf = chip->data_interface;
2369        int ret;
2370
2371        ret = nand_reset_data_interface(chip, chipnr);
2372        if (ret)
2373                return ret;
2374
2375        /*
2376         * The CS line has to be released before we can apply the new NAND
2377         * interface settings, hence this weird nand_select_target()
2378         * nand_deselect_target() dance.
2379         */
2380        nand_select_target(chip, chipnr);
2381        ret = nand_reset_op(chip);
2382        nand_deselect_target(chip);
2383        if (ret)
2384                return ret;
2385
2386        /*
2387         * nand_reset_data_interface() puts both the NAND chip and the NAND
2388         * controller in timings mode 0. If the default mode for this chip is
2389         * also 0, there is no need to apply it again. Plus, at probe time,
2390         * nand_setup_data_interface() uses ->set/get_features() which would
2391         * fail anyway as the parameter page is not available yet.
2392         */
2393        if (!chip->onfi_timing_mode_default)
2394                return 0;
2395
2396        chip->data_interface = saved_data_intf;
2397        ret = nand_setup_data_interface(chip, chipnr);
2398        if (ret)
2399                return ret;
2400
2401        return 0;
2402}
2403EXPORT_SYMBOL_GPL(nand_reset);
2404
2405/**
2406 * nand_get_features - wrapper to perform a GET_FEATURE
2407 * @chip: NAND chip info structure
2408 * @addr: feature address
2409 * @subfeature_param: the subfeature parameters, a four bytes array
2410 *
2411 * Returns 0 for success, a negative error otherwise. Returns -ENOTSUPP if the
2412 * operation cannot be handled.
2413 */
2414int nand_get_features(struct nand_chip *chip, int addr,
2415                      u8 *subfeature_param)
2416{
2417        if (!nand_supports_get_features(chip, addr))
2418                return -ENOTSUPP;
2419
2420        if (chip->legacy.get_features)
2421                return chip->legacy.get_features(chip, addr, subfeature_param);
2422
2423        return nand_get_features_op(chip, addr, subfeature_param);
2424}
2425
2426/**
2427 * nand_set_features - wrapper to perform a SET_FEATURE
2428 * @chip: NAND chip info structure
2429 * @addr: feature address
2430 * @subfeature_param: the subfeature parameters, a four bytes array
2431 *
2432 * Returns 0 for success, a negative error otherwise. Returns -ENOTSUPP if the
2433 * operation cannot be handled.
2434 */
2435int nand_set_features(struct nand_chip *chip, int addr,
2436                      u8 *subfeature_param)
2437{
2438        if (!nand_supports_set_features(chip, addr))
2439                return -ENOTSUPP;
2440
2441        if (chip->legacy.set_features)
2442                return chip->legacy.set_features(chip, addr, subfeature_param);
2443
2444        return nand_set_features_op(chip, addr, subfeature_param);
2445}
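
/*
 * Illustrative sketch (editor's addition, not part of the upstream driver):
 * the SET/GET FEATURES wrappers always exchange ONFI_SUBFEATURE_PARAM_LEN
 * (4) bytes. Selecting an ONFI timing mode, much like
 * nand_setup_data_interface() does, looks like this; the function name and
 * the fixed die number are made up for the example.
 */
static int __maybe_unused example_set_timing_mode(struct nand_chip *chip,
                                                  unsigned int mode)
{
        u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = { };
        int ret;

        tmode_param[0] = mode;

        nand_select_target(chip, 0);
        ret = nand_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
                                tmode_param);
        nand_deselect_target(chip);
        if (ret)
                return ret;

        /* Read the feature back to check the mode was accepted */
        memset(tmode_param, 0, sizeof(tmode_param));
        nand_select_target(chip, 0);
        ret = nand_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
                                tmode_param);
        nand_deselect_target(chip);
        if (ret)
                return ret;

        return tmode_param[0] == mode ? 0 : -EINVAL;
}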
2446
2447/**
2448 * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data
2449 * @buf: buffer to test
2450 * @len: buffer length
2451 * @bitflips_threshold: maximum number of bitflips
2452 *
2453 * Check if a buffer contains only 0xff, which means the underlying region
2454 * has been erased and is ready to be programmed.
2455 * The bitflips_threshold specifies the maximum number of bitflips before
2456 * considering the region as not erased.
2457 * Note: The logic of this function has been extracted from the memweight
2458 * implementation, except that nand_check_erased_buf() exits before
2459 * testing the whole buffer if the number of bitflips exceeds the
2460 * bitflips_threshold value.
2461 *
2462 * Returns a positive number of bitflips less than or equal to
2463 * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
2464 * threshold.
2465 */
2466static int nand_check_erased_buf(void *buf, int len, int bitflips_threshold)
2467{
2468        const unsigned char *bitmap = buf;
2469        int bitflips = 0;
2470        int weight;
2471
2472        for (; len && ((uintptr_t)bitmap) % sizeof(long);
2473             len--, bitmap++) {
2474                weight = hweight8(*bitmap);
2475                bitflips += BITS_PER_BYTE - weight;
2476                if (unlikely(bitflips > bitflips_threshold))
2477                        return -EBADMSG;
2478        }
2479
2480        for (; len >= sizeof(long);
2481             len -= sizeof(long), bitmap += sizeof(long)) {
2482                unsigned long d = *((unsigned long *)bitmap);
2483                if (d == ~0UL)
2484                        continue;
2485                weight = hweight_long(d);
2486                bitflips += BITS_PER_LONG - weight;
2487                if (unlikely(bitflips > bitflips_threshold))
2488                        return -EBADMSG;
2489        }
2490
2491        for (; len > 0; len--, bitmap++) {
2492                weight = hweight8(*bitmap);
2493                bitflips += BITS_PER_BYTE - weight;
2494                if (unlikely(bitflips > bitflips_threshold))
2495                        return -EBADMSG;
2496        }
2497
2498        return bitflips;
2499}
2500
2501/**
2502 * nand_check_erased_ecc_chunk - check if an ECC chunk contains (almost) only
2503 *                               0xff data
2504 * @data: data buffer to test
2505 * @datalen: data length
2506 * @ecc: ECC buffer
2507 * @ecclen: ECC length
2508 * @extraoob: extra OOB buffer
2509 * @extraooblen: extra OOB length
2510 * @bitflips_threshold: maximum number of bitflips
2511 *
2512 * Check if a data buffer and its associated ECC and OOB data contain only
2513 * the 0xff pattern, which means the underlying region has been erased and is
2514 * ready to be programmed.
2515 * The bitflips_threshold specifies the maximum number of bitflips before
2516 * considering the region as not erased.
2517 *
2518 * Note:
2519 * 1/ ECC algorithms work on pre-defined block sizes which are usually
2520 *    different from the NAND page size. When fixing bitflips, ECC engines
2521 *    report the number of errors per chunk, and the NAND core infrastructure
2522 *    expects you to return the maximum number of bitflips for the whole page.
2523 *    This is why you should always use this function on a single chunk and
2524 *    not on the whole page. After checking each chunk you should update your
2525 *    max_bitflips value accordingly.
2526 * 2/ When checking for bitflips in erased pages you should not only check
2527 *    the payload data but also the associated ECC data, because a user might
2528 *    have programmed all but a few bits to 1. In this case, we shouldn't
2529 *    consider the chunk as erased, and checking the ECC bytes catches
2530 *    this case.
2531 * 3/ The extraoob argument is optional, and should be used if some of your OOB
2532 *    data are protected by the ECC engine.
2533 *    It could also be used if you support subpages and want to attach some
2534 *    extra OOB data to an ECC chunk.
2535 *
2536 * Returns a positive number of bitflips less than or equal to
2537 * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
2538 * threshold. In case of success, the passed buffers are filled with 0xff.
2539 */
2540int nand_check_erased_ecc_chunk(void *data, int datalen,
2541                                void *ecc, int ecclen,
2542                                void *extraoob, int extraooblen,
2543                                int bitflips_threshold)
2544{
2545        int data_bitflips = 0, ecc_bitflips = 0, extraoob_bitflips = 0;
2546
2547        data_bitflips = nand_check_erased_buf(data, datalen,
2548                                              bitflips_threshold);
2549        if (data_bitflips < 0)
2550                return data_bitflips;
2551
2552        bitflips_threshold -= data_bitflips;
2553
2554        ecc_bitflips = nand_check_erased_buf(ecc, ecclen, bitflips_threshold);
2555        if (ecc_bitflips < 0)
2556                return ecc_bitflips;
2557
2558        bitflips_threshold -= ecc_bitflips;
2559
2560        extraoob_bitflips = nand_check_erased_buf(extraoob, extraooblen,
2561                                                  bitflips_threshold);
2562        if (extraoob_bitflips < 0)
2563                return extraoob_bitflips;
2564
2565        if (data_bitflips)
2566                memset(data, 0xff, datalen);
2567
2568        if (ecc_bitflips)
2569                memset(ecc, 0xff, ecclen);
2570
2571        if (extraoob_bitflips)
2572                memset(extraoob, 0xff, extraooblen);
2573
2574        return data_bitflips + ecc_bitflips + extraoob_bitflips;
2575}
2576EXPORT_SYMBOL(nand_check_erased_ecc_chunk);
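
/*
 * Illustrative sketch (editor's addition, not part of the upstream driver):
 * the per-chunk usage described in note 1/ above. On an uncorrectable chunk
 * the erased-page check either rescues the chunk, returning its bitflip
 * count, or confirms the failure. It assumes an ECC engine whose ->correct()
 * hook accepts a NULL calculated-ECC pointer; the buffer layout mirrors the
 * read_page implementations below and the function name is hypothetical.
 */
static int __maybe_unused example_correct_chunks(struct nand_chip *chip,
                                                 u8 *databuf, u8 *ecc_code)
{
        struct mtd_info *mtd = nand_to_mtd(chip);
        unsigned int max_bitflips = 0;
        int step, stat;

        for (step = 0; step < chip->ecc.steps; step++) {
                u8 *data = databuf + step * chip->ecc.size;
                u8 *ecc = ecc_code + step * chip->ecc.bytes;

                stat = chip->ecc.correct(chip, data, ecc, NULL);
                if (stat == -EBADMSG)
                        /* Maybe an erased chunk with a few bitflips? */
                        stat = nand_check_erased_ecc_chunk(data,
                                                           chip->ecc.size,
                                                           ecc,
                                                           chip->ecc.bytes,
                                                           NULL, 0,
                                                           chip->ecc.strength);

                if (stat < 0) {
                        mtd->ecc_stats.failed++;
                } else {
                        mtd->ecc_stats.corrected += stat;
                        max_bitflips = max_t(unsigned int, max_bitflips, stat);
                }
        }

        return max_bitflips;
}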
2577
2578/**
2579 * nand_read_page_raw_notsupp - dummy read raw page function
2580 * @chip: nand chip info structure
2581 * @buf: buffer to store read data
2582 * @oob_required: caller requires OOB data read to chip->oob_poi
2583 * @page: page number to read
2584 *
2585 * Returns -ENOTSUPP unconditionally.
2586 */
2587int nand_read_page_raw_notsupp(struct nand_chip *chip, u8 *buf,
2588                               int oob_required, int page)
2589{
2590        return -ENOTSUPP;
2591}
2592
2593/**
2594 * nand_read_page_raw - [INTERN] read raw page data without ecc
2595 * @chip: nand chip info structure
2596 * @buf: buffer to store read data
2597 * @oob_required: caller requires OOB data read to chip->oob_poi
2598 * @page: page number to read
2599 *
2600 * Not for syndrome calculating ECC controllers, which use a special oob layout.
2601 */
2602int nand_read_page_raw(struct nand_chip *chip, uint8_t *buf, int oob_required,
2603                       int page)
2604{
2605        struct mtd_info *mtd = nand_to_mtd(chip);
2606        int ret;
2607
2608        ret = nand_read_page_op(chip, page, 0, buf, mtd->writesize);
2609        if (ret)
2610                return ret;
2611
2612        if (oob_required) {
2613                ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize,
2614                                        false);
2615                if (ret)
2616                        return ret;
2617        }
2618
2619        return 0;
2620}
2621EXPORT_SYMBOL(nand_read_page_raw);
2622
2623/**
2624 * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc
2625 * @chip: nand chip info structure
2626 * @buf: buffer to store read data
2627 * @oob_required: caller requires OOB data read to chip->oob_poi
2628 * @page: page number to read
2629 *
2630 * We need a special oob layout and handling even when OOB isn't used.
2631 */
2632static int nand_read_page_raw_syndrome(struct nand_chip *chip, uint8_t *buf,
2633                                       int oob_required, int page)
2634{
2635        struct mtd_info *mtd = nand_to_mtd(chip);
2636        int eccsize = chip->ecc.size;
2637        int eccbytes = chip->ecc.bytes;
2638        uint8_t *oob = chip->oob_poi;
2639        int steps, size, ret;
2640
2641        ret = nand_read_page_op(chip, page, 0, NULL, 0);
2642        if (ret)
2643                return ret;
2644
2645        for (steps = chip->ecc.steps; steps > 0; steps--) {
2646                ret = nand_read_data_op(chip, buf, eccsize, false);
2647                if (ret)
2648                        return ret;
2649
2650                buf += eccsize;
2651
2652                if (chip->ecc.prepad) {
2653                        ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
2654                                                false);
2655                        if (ret)
2656                                return ret;
2657
2658                        oob += chip->ecc.prepad;
2659                }
2660
2661                ret = nand_read_data_op(chip, oob, eccbytes, false);
2662                if (ret)
2663                        return ret;
2664
2665                oob += eccbytes;
2666
2667                if (chip->ecc.postpad) {
2668                        ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
2669                                                false);
2670                        if (ret)
2671                                return ret;
2672
2673                        oob += chip->ecc.postpad;
2674                }
2675        }
2676
2677        size = mtd->oobsize - (oob - chip->oob_poi);
2678        if (size) {
2679                ret = nand_read_data_op(chip, oob, size, false);
2680                if (ret)
2681                        return ret;
2682        }
2683
2684        return 0;
2685}
2686
2687/**
2688 * nand_read_page_swecc - [REPLACEABLE] software ECC based page read function
2689 * @chip: nand chip info structure
2690 * @buf: buffer to store read data
2691 * @oob_required: caller requires OOB data read to chip->oob_poi
2692 * @page: page number to read
2693 */
2694static int nand_read_page_swecc(struct nand_chip *chip, uint8_t *buf,
2695                                int oob_required, int page)
2696{
2697        struct mtd_info *mtd = nand_to_mtd(chip);
2698        int i, eccsize = chip->ecc.size, ret;
2699        int eccbytes = chip->ecc.bytes;
2700        int eccsteps = chip->ecc.steps;
2701        uint8_t *p = buf;
2702        uint8_t *ecc_calc = chip->ecc.calc_buf;
2703        uint8_t *ecc_code = chip->ecc.code_buf;
2704        unsigned int max_bitflips = 0;
2705
2706        chip->ecc.read_page_raw(chip, buf, 1, page);
2707
2708        for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
2709                chip->ecc.calculate(chip, p, &ecc_calc[i]);
2710
2711        ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
2712                                         chip->ecc.total);
2713        if (ret)
2714                return ret;
2715
2716        eccsteps = chip->ecc.steps;
2717        p = buf;
2718
2719        for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
2720                int stat;
2721
2722                stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
2723                if (stat < 0) {
2724                        mtd->ecc_stats.failed++;
2725                } else {
2726                        mtd->ecc_stats.corrected += stat;
2727                        max_bitflips = max_t(unsigned int, max_bitflips, stat);
2728                }
2729        }
2730        return max_bitflips;
2731}
2732
2733/**
2734 * nand_read_subpage - [REPLACEABLE] ECC based sub-page read function
2735 * @chip: nand chip info structure
2736 * @data_offs: offset of requested data within the page
2737 * @readlen: data length
2738 * @bufpoi: buffer to store read data
2739 * @page: page number to read
2740 */
2741static int nand_read_subpage(struct nand_chip *chip, uint32_t data_offs,
2742                             uint32_t readlen, uint8_t *bufpoi, int page)
2743{
2744        struct mtd_info *mtd = nand_to_mtd(chip);
2745        int start_step, end_step, num_steps, ret;
2746        uint8_t *p;
2747        int data_col_addr, i, gaps = 0;
2748        int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
2749        int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
2750        int index, section = 0;
2751        unsigned int max_bitflips = 0;
2752        struct mtd_oob_region oobregion = { };
2753
2754        /* Column address within the page aligned to ECC size (e.g. 256 bytes) */
2755        start_step = data_offs / chip->ecc.size;
2756        end_step = (data_offs + readlen - 1) / chip->ecc.size;
2757        num_steps = end_step - start_step + 1;
2758        index = start_step * chip->ecc.bytes;
2759
2760        /* Data size aligned to the ECC step size (ecc.size) */
2761        datafrag_len = num_steps * chip->ecc.size;
2762        eccfrag_len = num_steps * chip->ecc.bytes;
2763
2764        data_col_addr = start_step * chip->ecc.size;
2765        /* The requested data may not be aligned to the start of the page */
2766        p = bufpoi + data_col_addr;
2767        ret = nand_read_page_op(chip, page, data_col_addr, p, datafrag_len);
2768        if (ret)
2769                return ret;
2770
2771        /* Calculate ECC */
2772        for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
2773                chip->ecc.calculate(chip, p, &chip->ecc.calc_buf[i]);
2774
2775        /*
2776         * Performance is better if we position offsets according to
2777         * ecc.pos. Let's make sure that there are no gaps in ECC positions.
2778         */
2779        ret = mtd_ooblayout_find_eccregion(mtd, index, &section, &oobregion);
2780        if (ret)
2781                return ret;
2782
2783        if (oobregion.length < eccfrag_len)
2784                gaps = 1;
2785
2786        if (gaps) {
2787                ret = nand_change_read_column_op(chip, mtd->writesize,
2788                                                 chip->oob_poi, mtd->oobsize,
2789                                                 false);
2790                if (ret)
2791                        return ret;
2792        } else {
2793                /*
2794                 * Send the command to read the particular ECC bytes, taking
2795                 * care of buswidth alignment in read_buf.
2796                 */
2797                aligned_pos = oobregion.offset & ~(busw - 1);
2798                aligned_len = eccfrag_len;
2799                if (oobregion.offset & (busw - 1))
2800                        aligned_len++;
2801                if ((oobregion.offset + (num_steps * chip->ecc.bytes)) &
2802                    (busw - 1))
2803                        aligned_len++;
2804
2805                ret = nand_change_read_column_op(chip,
2806                                                 mtd->writesize + aligned_pos,
2807                                                 &chip->oob_poi[aligned_pos],
2808                                                 aligned_len, false);
2809                if (ret)
2810                        return ret;
2811        }
2812
2813        ret = mtd_ooblayout_get_eccbytes(mtd, chip->ecc.code_buf,
2814                                         chip->oob_poi, index, eccfrag_len);
2815        if (ret)
2816                return ret;
2817
2818        p = bufpoi + data_col_addr;
2819        for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
2820                int stat;
2821
2822                stat = chip->ecc.correct(chip, p, &chip->ecc.code_buf[i],
2823                                         &chip->ecc.calc_buf[i]);
2824                if (stat == -EBADMSG &&
2825                    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
2826                        /* check for empty pages with bitflips */
2827                        stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
2828                                                &chip->ecc.code_buf[i],
2829                                                chip->ecc.bytes,
2830                                                NULL, 0,
2831                                                chip->ecc.strength);
2832                }
2833
2834                if (stat < 0) {
2835                        mtd->ecc_stats.failed++;
2836                } else {
2837                        mtd->ecc_stats.corrected += stat;
2838                        max_bitflips = max_t(unsigned int, max_bitflips, stat);
2839                }
2840        }
2841        return max_bitflips;
2842}
2843
2844/**
2845 * nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function
2846 * @chip: nand chip info structure
2847 * @buf: buffer to store read data
2848 * @oob_required: caller requires OOB data read to chip->oob_poi
2849 * @page: page number to read
2850 *
2851 * Not for syndrome calculating ECC controllers which need a special oob layout.
2852 */
2853static int nand_read_page_hwecc(struct nand_chip *chip, uint8_t *buf,
2854                                int oob_required, int page)
2855{
2856        struct mtd_info *mtd = nand_to_mtd(chip);
2857        int i, eccsize = chip->ecc.size, ret;
2858        int eccbytes = chip->ecc.bytes;
2859        int eccsteps = chip->ecc.steps;
2860        uint8_t *p = buf;
2861        uint8_t *ecc_calc = chip->ecc.calc_buf;
2862        uint8_t *ecc_code = chip->ecc.code_buf;
2863        unsigned int max_bitflips = 0;
2864
2865        ret = nand_read_page_op(chip, page, 0, NULL, 0);
2866        if (ret)
2867                return ret;
2868
2869        for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
2870                chip->ecc.hwctl(chip, NAND_ECC_READ);
2871
2872                ret = nand_read_data_op(chip, p, eccsize, false);
2873                if (ret)
2874                        return ret;
2875
2876                chip->ecc.calculate(chip, p, &ecc_calc[i]);
2877        }
2878
2879        ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false);
2880        if (ret)
2881                return ret;
2882
2883        ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
2884                                         chip->ecc.total);
2885        if (ret)
2886                return ret;
2887
2888        eccsteps = chip->ecc.steps;
2889        p = buf;
2890
2891        for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
2892                int stat;
2893
2894                stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
2895                if (stat == -EBADMSG &&
2896                    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
2897                        /* check for empty pages with bitflips */
2898                        stat = nand_check_erased_ecc_chunk(p, eccsize,
2899                                                &ecc_code[i], eccbytes,
2900                                                NULL, 0,
2901                                                chip->ecc.strength);
2902                }
2903
2904                if (stat < 0) {
2905                        mtd->ecc_stats.failed++;
2906                } else {
2907                        mtd->ecc_stats.corrected += stat;
2908                        max_bitflips = max_t(unsigned int, max_bitflips, stat);
2909                }
2910        }
2911        return max_bitflips;
2912}
2913
2914/**
2915 * nand_read_page_hwecc_oob_first - [REPLACEABLE] hw ecc, read oob first
2916 * @chip: nand chip info structure
2917 * @buf: buffer to store read data
2918 * @oob_required: caller requires OOB data read to chip->oob_poi
2919 * @page: page number to read
2920 *
2921 * Hardware ECC for large page chips that require the OOB to be read first.
2922 * For this ECC mode, the write_page method is re-used from ECC_HW. These
2923 * methods read/write ECC from the OOB area, unlike ECC_HW_SYNDROME, which,
2924 * with multiple ECC steps, follows the "infix ECC" scheme and reads/writes
2925 * ECC from the data area, overwriting the manufacturer bad block markings.
2926 */
2927static int nand_read_page_hwecc_oob_first(struct nand_chip *chip, uint8_t *buf,
2928                                          int oob_required, int page)
2929{
2930        struct mtd_info *mtd = nand_to_mtd(chip);
2931        int i, eccsize = chip->ecc.size, ret;
2932        int eccbytes = chip->ecc.bytes;
2933        int eccsteps = chip->ecc.steps;
2934        uint8_t *p = buf;
2935        uint8_t *ecc_code = chip->ecc.code_buf;
2936        uint8_t *ecc_calc = chip->ecc.calc_buf;
2937        unsigned int max_bitflips = 0;
2938
2939        /* Read the OOB area first */
2940        ret = nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
2941        if (ret)
2942                return ret;
2943
2944        ret = nand_read_page_op(chip, page, 0, NULL, 0);
2945        if (ret)
2946                return ret;
2947
2948        ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
2949                                         chip->ecc.total);
2950        if (ret)
2951                return ret;
2952
2953        for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
2954                int stat;
2955
2956                chip->ecc.hwctl(chip, NAND_ECC_READ);
2957
2958                ret = nand_read_data_op(chip, p, eccsize, false);
2959                if (ret)
2960                        return ret;
2961
2962                chip->ecc.calculate(chip, p, &ecc_calc[i]);
2963
2964                stat = chip->ecc.correct(chip, p, &ecc_code[i], NULL);
2965                if (stat == -EBADMSG &&
2966                    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
2967                        /* check for empty pages with bitflips */
2968                        stat = nand_check_erased_ecc_chunk(p, eccsize,
2969                                                &ecc_code[i], eccbytes,
2970                                                NULL, 0,
2971                                                chip->ecc.strength);
2972                }
2973
2974                if (stat < 0) {
2975                        mtd->ecc_stats.failed++;
2976                } else {
2977                        mtd->ecc_stats.corrected += stat;
2978                        max_bitflips = max_t(unsigned int, max_bitflips, stat);
2979                }
2980        }
2981        return max_bitflips;
2982}
2983
2984/**
2985 * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read
2986 * @chip: nand chip info structure
2987 * @buf: buffer to store read data
2988 * @oob_required: caller requires OOB data read to chip->oob_poi
2989 * @page: page number to read
2990 *
2991 * The hw generator calculates the error syndrome automatically. Therefore we
2992 * need a special oob layout and handling.
2993 */
2994static int nand_read_page_syndrome(struct nand_chip *chip, uint8_t *buf,
2995                                   int oob_required, int page)
2996{
2997        struct mtd_info *mtd = nand_to_mtd(chip);
2998        int ret, i, eccsize = chip->ecc.size;
2999        int eccbytes = chip->ecc.bytes;
3000        int eccsteps = chip->ecc.steps;
3001        int eccpadbytes = eccbytes + chip->ecc.prepad + chip->ecc.postpad;
3002        uint8_t *p = buf;
3003        uint8_t *oob = chip->oob_poi;
3004        unsigned int max_bitflips = 0;
3005
3006        ret = nand_read_page_op(chip, page, 0, NULL, 0);
3007        if (ret)
3008                return ret;
3009
3010        for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
3011                int stat;
3012
3013                chip->ecc.hwctl(chip, NAND_ECC_READ);
3014
3015                ret = nand_read_data_op(chip, p, eccsize, false);
3016                if (ret)
3017                        return ret;
3018
3019                if (chip->ecc.prepad) {
3020                        ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
3021                                                false);
3022                        if (ret)
3023                                return ret;
3024
3025                        oob += chip->ecc.prepad;
3026                }
3027
3028                chip->ecc.hwctl(chip, NAND_ECC_READSYN);
3029
3030                ret = nand_read_data_op(chip, oob, eccbytes, false);
3031                if (ret)
3032                        return ret;
3033
3034                stat = chip->ecc.correct(chip, p, oob, NULL);
3035
3036                oob += eccbytes;
3037
3038                if (chip->ecc.postpad) {
3039                        ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
3040                                                false);
3041                        if (ret)
3042                                return ret;
3043
3044                        oob += chip->ecc.postpad;
3045                }
3046
3047                if (stat == -EBADMSG &&
3048                    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
3049                        /* check for empty pages with bitflips */
3050                        stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
3051                                                           oob - eccpadbytes,
3052                                                           eccpadbytes,
3053                                                           NULL, 0,
3054                                                           chip->ecc.strength);
3055                }
3056
3057                if (stat < 0) {
3058                        mtd->ecc_stats.failed++;
3059                } else {
3060                        mtd->ecc_stats.corrected += stat;
3061                        max_bitflips = max_t(unsigned int, max_bitflips, stat);
3062                }
3063        }
3064
3065        /* Calculate remaining oob bytes */
3066        i = mtd->oobsize - (oob - chip->oob_poi);
3067        if (i) {
3068                ret = nand_read_data_op(chip, oob, i, false);
3069                if (ret)
3070                        return ret;
3071        }
3072
3073        return max_bitflips;
3074}
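    /*
     * Illustrative layout sketch (not part of the driver): the syndrome based
     * read/write paths assume each ECC step is stored contiguously in the
     * flash page, with the ECC bytes interleaved with the data rather than
     * grouped at the end of the OOB area:
     *
     *   | data (ecc.size) | prepad | ECC (ecc.bytes) | postpad |  ...
     *
     * repeated ecc.steps times, followed by the remaining free OOB bytes.
     */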
3075
3076/**
3077 * nand_transfer_oob - [INTERN] Transfer oob to client buffer
3078 * @chip: NAND chip object
3079 * @oob: oob destination address
3080 * @ops: oob ops structure
3081 * @len: size of oob to transfer
3082 */
3083static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
3084                                  struct mtd_oob_ops *ops, size_t len)
3085{
3086        struct mtd_info *mtd = nand_to_mtd(chip);
3087        int ret;
3088
3089        switch (ops->mode) {
3090
3091        case MTD_OPS_PLACE_OOB:
3092        case MTD_OPS_RAW:
3093                memcpy(oob, chip->oob_poi + ops->ooboffs, len);
3094                return oob + len;
3095
3096        case MTD_OPS_AUTO_OOB:
3097                ret = mtd_ooblayout_get_databytes(mtd, oob, chip->oob_poi,
3098                                                  ops->ooboffs, len);
3099                BUG_ON(ret);
3100                return oob + len;
3101
3102        default:
3103                BUG();
3104        }
3105        return NULL;
3106}
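    /*
     * Example (illustrative, hypothetical layout): with MTD_OPS_PLACE_OOB or
     * MTD_OPS_RAW, ops->ooboffs is a byte offset into the raw OOB area. With
     * MTD_OPS_AUTO_OOB it is an offset into the concatenation of the free
     * regions reported by the ooblayout: for free regions {offset 2, len 6}
     * and {offset 16, len 48}, an ooboffs of 8 maps to raw OOB offset 18.
     */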
3107
3108/**
3109 * nand_setup_read_retry - [INTERN] Set the READ RETRY mode
3110 * @chip: NAND chip object
3111 * @retry_mode: the retry mode to use
3112 *
3113 * Some vendors supply a special command to shift the Vt threshold, to be used
3114 * when there are too many bitflips in a page (i.e., ECC error). After setting
3115 * a new threshold, the host should retry reading the page.
3116 */
3117static int nand_setup_read_retry(struct nand_chip *chip, int retry_mode)
3118{
3119        pr_debug("setting READ RETRY mode %d\n", retry_mode);
3120
3121        if (retry_mode >= chip->read_retries)
3122                return -EINVAL;
3123
3124        if (!chip->setup_read_retry)
3125                return -EOPNOTSUPP;
3126
3127        return chip->setup_read_retry(chip, retry_mode);
3128}
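    /*
     * Sketch of the intended calling pattern (see nand_do_read_ops() below):
     * after an uncorrectable read, the caller steps retry_mode from 1 up to
     * chip->read_retries - 1, re-reading the page after each successful
     * nand_setup_read_retry(chip, retry_mode), and finally restores the
     * default threshold with nand_setup_read_retry(chip, 0).
     */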
3129
3130static void nand_wait_readrdy(struct nand_chip *chip)
3131{
3132        const struct nand_sdr_timings *sdr;
3133
3134        if (!(chip->options & NAND_NEED_READRDY))
3135                return;
3136
3137        sdr = nand_get_sdr_timings(&chip->data_interface);
3138        WARN_ON(nand_wait_rdy_op(chip, PSEC_TO_MSEC(sdr->tR_max), 0));
3139}
3140
3141/**
3142 * nand_do_read_ops - [INTERN] Read data with ECC
3143 * @chip: NAND chip object
3144 * @from: offset to read from
3145 * @ops: oob ops structure
3146 *
3147 * Internal function. Called with chip held.
3148 */
3149static int nand_do_read_ops(struct nand_chip *chip, loff_t from,
3150                            struct mtd_oob_ops *ops)
3151{
3152        int chipnr, page, realpage, col, bytes, aligned, oob_required;
3153        struct mtd_info *mtd = nand_to_mtd(chip);
3154        int ret = 0;
3155        uint32_t readlen = ops->len;
3156        uint32_t oobreadlen = ops->ooblen;
3157        uint32_t max_oobsize = mtd_oobavail(mtd, ops);
3158
3159        uint8_t *bufpoi, *oob, *buf;
3160        int use_bufpoi;
3161        unsigned int max_bitflips = 0;
3162        int retry_mode = 0;
3163        bool ecc_fail = false;
3164
3165        chipnr = (int)(from >> chip->chip_shift);
3166        nand_select_target(chip, chipnr);
3167
3168        realpage = (int)(from >> chip->page_shift);
3169        page = realpage & chip->pagemask;
3170
3171        col = (int)(from & (mtd->writesize - 1));
3172
3173        buf = ops->datbuf;
3174        oob = ops->oobbuf;
3175        oob_required = oob ? 1 : 0;
3176
3177        while (1) {
3178                unsigned int ecc_failures = mtd->ecc_stats.failed;
3179
3180                bytes = min(mtd->writesize - col, readlen);
3181                aligned = (bytes == mtd->writesize);
3182
3183                if (!aligned)
3184                        use_bufpoi = 1;
3185                else if (chip->options & NAND_USE_BOUNCE_BUFFER)
3186                        use_bufpoi = !virt_addr_valid(buf) ||
3187                                     !IS_ALIGNED((unsigned long)buf,
3188                                                 chip->buf_align);
3189                else
3190                        use_bufpoi = 0;
3191
3192                /* Is the current page in the buffer? */
3193                if (realpage != chip->pagecache.page || oob) {
3194                        bufpoi = use_bufpoi ? chip->data_buf : buf;
3195
3196                        if (use_bufpoi && aligned)
3197                                pr_debug("%s: using read bounce buffer for buf@%p\n",
3198                                                 __func__, buf);
3199
3200read_retry:
3201                        /*
3202                         * Now read the page into the buffer.  Absent an error,
3203                         * the read methods return max bitflips per ecc step.
3204                         */
3205                        if (unlikely(ops->mode == MTD_OPS_RAW))
3206                                ret = chip->ecc.read_page_raw(chip, bufpoi,
3207                                                              oob_required,
3208                                                              page);
3209                        else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) &&
3210                                 !oob)
3211                                ret = chip->ecc.read_subpage(chip, col, bytes,
3212                                                             bufpoi, page);
3213                        else
3214                                ret = chip->ecc.read_page(chip, bufpoi,
3215                                                          oob_required, page);
3216                        if (ret < 0) {
3217                                if (use_bufpoi)
3218                                        /* Invalidate page cache */
3219                                        chip->pagecache.page = -1;
3220                                break;
3221                        }
3222
3223                        /* Transfer not aligned data */
3224                        if (use_bufpoi) {
3225                                if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
3226                                    !(mtd->ecc_stats.failed - ecc_failures) &&
3227                                    (ops->mode != MTD_OPS_RAW)) {
3228                                        chip->pagecache.page = realpage;
3229                                        chip->pagecache.bitflips = ret;
3230                                } else {
3231                                        /* Invalidate page cache */
3232                                        chip->pagecache.page = -1;
3233                                }
3234                                memcpy(buf, chip->data_buf + col, bytes);
3235                        }
3236
3237                        if (unlikely(oob)) {
3238                                int toread = min(oobreadlen, max_oobsize);
3239
3240                                if (toread) {
3241                                        oob = nand_transfer_oob(chip, oob, ops,
3242                                                                toread);
3243                                        oobreadlen -= toread;
3244                                }
3245                        }
3246
3247                        nand_wait_readrdy(chip);
3248
3249                        if (mtd->ecc_stats.failed - ecc_failures) {
3250                                if (retry_mode + 1 < chip->read_retries) {
3251                                        retry_mode++;
3252                                        ret = nand_setup_read_retry(chip,
3253                                                        retry_mode);
3254                                        if (ret < 0)
3255                                                break;
3256
3257                                        /* Reset failures; retry */
3258                                        mtd->ecc_stats.failed = ecc_failures;
3259                                        goto read_retry;
3260                                } else {
3261                                        /* No more retry modes; real failure */
3262                                        ecc_fail = true;
3263                                }
3264                        }
3265
3266                        buf += bytes;
3267                        max_bitflips = max_t(unsigned int, max_bitflips, ret);
3268                } else {
3269                        memcpy(buf, chip->data_buf + col, bytes);
3270                        buf += bytes;
3271                        max_bitflips = max_t(unsigned int, max_bitflips,
3272                                             chip->pagecache.bitflips);
3273                }
3274
3275                readlen -= bytes;
3276
3277                /* Reset to retry mode 0 */
3278                if (retry_mode) {
3279                        ret = nand_setup_read_retry(chip, 0);
3280                        if (ret < 0)
3281                                break;
3282                        retry_mode = 0;
3283                }
3284
3285                if (!readlen)
3286                        break;
3287
3288                /* For subsequent reads align to page boundary */
3289                col = 0;
3290                /* Increment page address */
3291                realpage++;
3292
3293                page = realpage & chip->pagemask;
3294                /* Check if we cross a chip boundary */
3295                if (!page) {
3296                        chipnr++;
3297                        nand_deselect_target(chip);
3298                        nand_select_target(chip, chipnr);
3299                }
3300        }
3301        nand_deselect_target(chip);
3302
3303        ops->retlen = ops->len - (size_t) readlen;
3304        if (oob)
3305                ops->oobretlen = ops->ooblen - oobreadlen;
3306
3307        if (ret < 0)
3308                return ret;
3309
3310        if (ecc_fail)
3311                return -EBADMSG;
3312
3313        return max_bitflips;
3314}
3315
3316/**
3317 * nand_read_oob_std - [REPLACEABLE] the most common OOB data read function
3318 * @chip: nand chip info structure
3319 * @page: page number to read
3320 */
3321int nand_read_oob_std(struct nand_chip *chip, int page)
3322{
3323        struct mtd_info *mtd = nand_to_mtd(chip);
3324
3325        return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
3326}
3327EXPORT_SYMBOL(nand_read_oob_std);
3328
3329/**
3330 * nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC
3331 *                          with syndromes
3332 * @chip: nand chip info structure
3333 * @page: page number to read
3334 */
3335static int nand_read_oob_syndrome(struct nand_chip *chip, int page)
3336{
3337        struct mtd_info *mtd = nand_to_mtd(chip);
3338        int length = mtd->oobsize;
3339        int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
3340        int eccsize = chip->ecc.size;
3341        uint8_t *bufpoi = chip->oob_poi;
3342        int i, toread, sndrnd = 0, pos, ret;
3343
3344        ret = nand_read_page_op(chip, page, chip->ecc.size, NULL, 0);
3345        if (ret)
3346                return ret;
3347
3348        for (i = 0; i < chip->ecc.steps; i++) {
3349                if (sndrnd) {
3350                        int ret;
3351
3352                        pos = eccsize + i * (eccsize + chunk);
3353                        if (mtd->writesize > 512)
3354                                ret = nand_change_read_column_op(chip, pos,
3355                                                                 NULL, 0,
3356                                                                 false);
3357                        else
3358                                ret = nand_read_page_op(chip, page, pos, NULL,
3359                                                        0);
3360
3361                        if (ret)
3362                                return ret;
3363                } else
3364                        sndrnd = 1;
3365                toread = min_t(int, length, chunk);
3366
3367                ret = nand_read_data_op(chip, bufpoi, toread, false);
3368                if (ret)
3369                        return ret;
3370
3371                bufpoi += toread;
3372                length -= toread;
3373        }
3374        if (length > 0) {
3375                ret = nand_read_data_op(chip, bufpoi, length, false);
3376                if (ret)
3377                        return ret;
3378        }
3379
3380        return 0;
3381}
3382
3383/**
3384 * nand_write_oob_std - [REPLACEABLE] the most common OOB data write function
3385 * @chip: nand chip info structure
3386 * @page: page number to write
3387 */
3388int nand_write_oob_std(struct nand_chip *chip, int page)
3389{
3390        struct mtd_info *mtd = nand_to_mtd(chip);
3391
3392        return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
3393                                 mtd->oobsize);
3394}
3395EXPORT_SYMBOL(nand_write_oob_std);
3396
3397/**
3398 * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC
3399 *                           with syndrome - only for large page flash
3400 * @chip: nand chip info structure
3401 * @page: page number to write
3402 */
3403static int nand_write_oob_syndrome(struct nand_chip *chip, int page)
3404{
3405        struct mtd_info *mtd = nand_to_mtd(chip);
3406        int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
3407        int eccsize = chip->ecc.size, length = mtd->oobsize;
3408        int ret, i, len, pos, sndcmd = 0, steps = chip->ecc.steps;
3409        const uint8_t *bufpoi = chip->oob_poi;
3410
3411        /*
3412         * data-ecc-data-ecc ... ecc-oob
3413         * or
3414         * data-pad-ecc-pad-data-pad .... ecc-pad-oob
3415         */
3416        if (!chip->ecc.prepad && !chip->ecc.postpad) {
3417                pos = steps * (eccsize + chunk);
3418                steps = 0;
3419        } else
3420                pos = eccsize;
3421
3422        ret = nand_prog_page_begin_op(chip, page, pos, NULL, 0);
3423        if (ret)
3424                return ret;
3425
3426        for (i = 0; i < steps; i++) {
3427                if (sndcmd) {
3428                        if (mtd->writesize <= 512) {
3429                                uint32_t fill = 0xFFFFFFFF;
3430
3431                                len = eccsize;
3432                                while (len > 0) {
3433                                        int num = min_t(int, len, 4);
3434
3435                                        ret = nand_write_data_op(chip, &fill,
3436                                                                 num, false);
3437                                        if (ret)
3438                                                return ret;
3439
3440                                        len -= num;
3441                                }
3442                        } else {
3443                                pos = eccsize + i * (eccsize + chunk);
3444                                ret = nand_change_write_column_op(chip, pos,
3445                                                                  NULL, 0,
3446                                                                  false);
3447                                if (ret)
3448                                        return ret;
3449                        }
3450                } else
3451                        sndcmd = 1;
3452                len = min_t(int, length, chunk);
3453
3454                ret = nand_write_data_op(chip, bufpoi, len, false);
3455                if (ret)
3456                        return ret;
3457
3458                bufpoi += len;
3459                length -= len;
3460        }
3461        if (length > 0) {
3462                ret = nand_write_data_op(chip, bufpoi, length, false);
3463                if (ret)
3464                        return ret;
3465        }
3466
3467        return nand_prog_page_end_op(chip);
3468}
3469
3470/**
3471 * nand_do_read_oob - [INTERN] NAND read out-of-band
3472 * @chip: NAND chip object
3473 * @from: offset to read from
3474 * @ops: oob operations description structure
3475 *
3476 * NAND read out-of-band data from the spare area.
3477 */
3478static int nand_do_read_oob(struct nand_chip *chip, loff_t from,
3479                            struct mtd_oob_ops *ops)
3480{
3481        struct mtd_info *mtd = nand_to_mtd(chip);
3482        unsigned int max_bitflips = 0;
3483        int page, realpage, chipnr;
3484        struct mtd_ecc_stats stats;
3485        int readlen = ops->ooblen;
3486        int len;
3487        uint8_t *buf = ops->oobbuf;
3488        int ret = 0;
3489
3490        pr_debug("%s: from = 0x%08Lx, len = %i\n",
3491                        __func__, (unsigned long long)from, readlen);
3492
3493        stats = mtd->ecc_stats;
3494
3495        len = mtd_oobavail(mtd, ops);
3496
3497        chipnr = (int)(from >> chip->chip_shift);
3498        nand_select_target(chip, chipnr);
3499
3500        /* Shift to get page */
3501        realpage = (int)(from >> chip->page_shift);
3502        page = realpage & chip->pagemask;
3503
3504        while (1) {
3505                if (ops->mode == MTD_OPS_RAW)
3506                        ret = chip->ecc.read_oob_raw(chip, page);
3507                else
3508                        ret = chip->ecc.read_oob(chip, page);
3509
3510                if (ret < 0)
3511                        break;
3512
3513                len = min(len, readlen);
3514                buf = nand_transfer_oob(chip, buf, ops, len);
3515
3516                nand_wait_readrdy(chip);
3517
3518                max_bitflips = max_t(unsigned int, max_bitflips, ret);
3519
3520                readlen -= len;
3521                if (!readlen)
3522                        break;
3523
3524                /* Increment page address */
3525                realpage++;
3526
3527                page = realpage & chip->pagemask;
3528                /* Check if we cross a chip boundary */
3529                if (!page) {
3530                        chipnr++;
3531                        nand_deselect_target(chip);
3532                        nand_select_target(chip, chipnr);
3533                }
3534        }
3535        nand_deselect_target(chip);
3536
3537        ops->oobretlen = ops->ooblen - readlen;
3538
3539        if (ret < 0)
3540                return ret;
3541
3542        if (mtd->ecc_stats.failed - stats.failed)
3543                return -EBADMSG;
3544
3545        return max_bitflips;
3546}
3547
3548/**
3549 * nand_read_oob - [MTD Interface] NAND read data and/or out-of-band
3550 * @mtd: MTD device structure
3551 * @from: offset to read from
3552 * @ops: oob operation description structure
3553 *
3554 * NAND read data and/or out-of-band data.
3555 */
3556static int nand_read_oob(struct mtd_info *mtd, loff_t from,
3557                         struct mtd_oob_ops *ops)
3558{
3559        struct nand_chip *chip = mtd_to_nand(mtd);
3560        int ret;
3561
3562        ops->retlen = 0;
3563
3564        if (ops->mode != MTD_OPS_PLACE_OOB &&
3565            ops->mode != MTD_OPS_AUTO_OOB &&
3566            ops->mode != MTD_OPS_RAW)
3567                return -ENOTSUPP;
3568
3569        ret = nand_get_device(chip);
3570        if (ret)
3571                return ret;
3572
3573        if (!ops->datbuf)
3574                ret = nand_do_read_oob(chip, from, ops);
3575        else
3576                ret = nand_do_read_ops(chip, from, ops);
3577
3578        nand_release_device(chip);
3579        return ret;
3580}
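    /*
     * Example MTD-client usage of the read path above (illustrative only;
     * the identifiers are hypothetical caller-provided names):
     *
     *        struct mtd_oob_ops ops = { };
     *
     *        ops.mode = MTD_OPS_AUTO_OOB;
     *        ops.datbuf = buf;
     *        ops.len = part->writesize;
     *        ops.oobbuf = oobbuf;
     *        ops.ooblen = mtd_oobavail(part, &ops);
     *        err = mtd_read_oob(part, from, &ops);
     *
     * The mtd_read_oob() wrapper in mtdcore turns the max_bitflips count
     * returned here into -EUCLEAN once it reaches mtd->bitflip_threshold,
     * while -EBADMSG still signals an uncorrectable ECC error.
     */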
3581
3582/**
3583 * nand_write_page_raw_notsupp - dummy raw page write function
3584 * @chip: nand chip info structure
3585 * @buf: data buffer
3586 * @oob_required: must write chip->oob_poi to OOB
3587 * @page: page number to write
3588 *
3589 * Returns -ENOTSUPP unconditionally.
3590 */
3591int nand_write_page_raw_notsupp(struct nand_chip *chip, const u8 *buf,
3592                                int oob_required, int page)
3593{
3594        return -ENOTSUPP;
3595}
3596
3597/**
3598 * nand_write_page_raw - [INTERN] raw page write function
3599 * @chip: nand chip info structure
3600 * @buf: data buffer
3601 * @oob_required: must write chip->oob_poi to OOB
3602 * @page: page number to write
3603 *
3604 * Not for syndrome calculating ECC controllers, which use a special oob layout.
3605 */
3606int nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
3607                        int oob_required, int page)
3608{
3609        struct mtd_info *mtd = nand_to_mtd(chip);
3610        int ret;
3611
3612        ret = nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
3613        if (ret)
3614                return ret;
3615
3616        if (oob_required) {
3617                ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize,
3618                                         false);
3619                if (ret)
3620                        return ret;
3621        }
3622
3623        return nand_prog_page_end_op(chip);
3624}
3625EXPORT_SYMBOL(nand_write_page_raw);
3626
3627/**
3628 * nand_write_page_raw_syndrome - [INTERN] raw page write function
3629 * @chip: nand chip info structure
3630 * @buf: data buffer
3631 * @oob_required: must write chip->oob_poi to OOB
3632 * @page: page number to write
3633 *
3634 * We need a special oob layout and handling even when ECC isn't checked.
3635 */
3636static int nand_write_page_raw_syndrome(struct nand_chip *chip,
3637                                        const uint8_t *buf, int oob_required,
3638                                        int page)
3639{
3640        struct mtd_info *mtd = nand_to_mtd(chip);
3641        int eccsize = chip->ecc.size;
3642        int eccbytes = chip->ecc.bytes;
3643        uint8_t *oob = chip->oob_poi;
3644        int steps, size, ret;
3645
3646        ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
3647        if (ret)
3648                return ret;
3649
3650        for (steps = chip->ecc.steps; steps > 0; steps--) {
3651                ret = nand_write_data_op(chip, buf, eccsize, false);
3652                if (ret)
3653                        return ret;
3654
3655                buf += eccsize;
3656
3657                if (chip->ecc.prepad) {
3658                        ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
3659                                                 false);
3660                        if (ret)
3661                                return ret;
3662
3663                        oob += chip->ecc.prepad;
3664                }
3665
3666                ret = nand_write_data_op(chip, oob, eccbytes, false);
3667                if (ret)
3668                        return ret;
3669
3670                oob += eccbytes;
3671
3672                if (chip->ecc.postpad) {
3673                        ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
3674                                                 false);
3675                        if (ret)
3676                                return ret;
3677
3678                        oob += chip->ecc.postpad;
3679                }
3680        }
3681
3682        size = mtd->oobsize - (oob - chip->oob_poi);
3683        if (size) {
3684                ret = nand_write_data_op(chip, oob, size, false);
3685                if (ret)
3686                        return ret;
3687        }
3688
3689        return nand_prog_page_end_op(chip);
3690}

3691/**
3692 * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function
3693 * @chip: nand chip info structure
3694 * @buf: data buffer
3695 * @oob_required: must write chip->oob_poi to OOB
3696 * @page: page number to write
3697 */
3698static int nand_write_page_swecc(struct nand_chip *chip, const uint8_t *buf,
3699                                 int oob_required, int page)
3700{
3701        struct mtd_info *mtd = nand_to_mtd(chip);
3702        int i, eccsize = chip->ecc.size, ret;
3703        int eccbytes = chip->ecc.bytes;
3704        int eccsteps = chip->ecc.steps;
3705        uint8_t *ecc_calc = chip->ecc.calc_buf;
3706        const uint8_t *p = buf;
3707
3708        /* Software ECC calculation */
3709        for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
3710                chip->ecc.calculate(chip, p, &ecc_calc[i]);
3711
3712        ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
3713                                         chip->ecc.total);
3714        if (ret)
3715                return ret;
3716
3717        return chip->ecc.write_page_raw(chip, buf, 1, page);
3718}
3719
3720/**
3721 * nand_write_page_hwecc - [REPLACEABLE] hardware ECC based page write function
3722 * @chip: nand chip info structure
3723 * @buf: data buffer
3724 * @oob_required: must write chip->oob_poi to OOB
3725 * @page: page number to write
3726 */
3727static int nand_write_page_hwecc(struct nand_chip *chip, const uint8_t *buf,
3728                                 int oob_required, int page)
3729{
3730        struct mtd_info *mtd = nand_to_mtd(chip);
3731        int i, eccsize = chip->ecc.size, ret;
3732        int eccbytes = chip->ecc.bytes;
3733        int eccsteps = chip->ecc.steps;
3734        uint8_t *ecc_calc = chip->ecc.calc_buf;
3735        const uint8_t *p = buf;
3736
3737        ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
3738        if (ret)
3739                return ret;
3740
3741        for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
3742                chip->ecc.hwctl(chip, NAND_ECC_WRITE);
3743
3744                ret = nand_write_data_op(chip, p, eccsize, false);
3745                if (ret)
3746                        return ret;
3747
3748                chip->ecc.calculate(chip, p, &ecc_calc[i]);
3749        }
3750
3751        ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
3752                                         chip->ecc.total);
3753        if (ret)
3754                return ret;
3755
3756        ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
3757        if (ret)
3758                return ret;
3759
3760        return nand_prog_page_end_op(chip);
3761}
3762
3763
3764/**
3765 * nand_write_subpage_hwecc - [REPLACEABLE] hardware ECC based subpage write
3766 * @chip:       nand chip info structure
3767 * @offset:     column address of subpage within the page
3768 * @data_len:   data length
3769 * @buf:        data buffer
3770 * @oob_required: must write chip->oob_poi to OOB
3771 * @page: page number to write
3772 */
3773static int nand_write_subpage_hwecc(struct nand_chip *chip, uint32_t offset,
3774                                    uint32_t data_len, const uint8_t *buf,
3775                                    int oob_required, int page)
3776{
3777        struct mtd_info *mtd = nand_to_mtd(chip);
3778        uint8_t *oob_buf  = chip->oob_poi;
3779        uint8_t *ecc_calc = chip->ecc.calc_buf;
3780        int ecc_size      = chip->ecc.size;
3781        int ecc_bytes     = chip->ecc.bytes;
3782        int ecc_steps     = chip->ecc.steps;
3783        uint32_t start_step = offset / ecc_size;
3784        uint32_t end_step   = (offset + data_len - 1) / ecc_size;
3785        int oob_bytes       = mtd->oobsize / ecc_steps;
3786        int step, ret;
3787
3788        ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
3789        if (ret)
3790                return ret;
3791
3792        for (step = 0; step < ecc_steps; step++) {
3793                /* configure controller for WRITE access */
3794                chip->ecc.hwctl(chip, NAND_ECC_WRITE);
3795
3796                /* write data (untouched subpages already masked by 0xFF) */
3797                ret = nand_write_data_op(chip, buf, ecc_size, false);
3798                if (ret)
3799                        return ret;
3800
3801                /* mask ECC of un-touched subpages by padding 0xFF */
3802                if ((step < start_step) || (step > end_step))
3803                        memset(ecc_calc, 0xff, ecc_bytes);
3804                else
3805                        chip->ecc.calculate(chip, buf, ecc_calc);
3806
3807                /* mask OOB of un-touched subpages by padding 0xFF */
3808                /* if oob_required, preserve OOB metadata of written subpage */
3809                if (!oob_required || (step < start_step) || (step > end_step))
3810                        memset(oob_buf, 0xff, oob_bytes);
3811
3812                buf += ecc_size;
3813                ecc_calc += ecc_bytes;
3814                oob_buf  += oob_bytes;
3815        }
3816
3817        /* copy the calculated ECC for the whole page to chip->oob_poi */
3818        /* this includes the masked value (0xFF) for unwritten subpages */
3819        ecc_calc = chip->ecc.calc_buf;
3820        ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
3821                                         chip->ecc.total);
3822        if (ret)
3823                return ret;
3824
3825        /* write OOB buffer to NAND device */
3826        ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
3827        if (ret)
3828                return ret;
3829
3830        return nand_prog_page_end_op(chip);
3831}
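    /*
     * Worked example (hypothetical geometry): with ecc.size = 512 on a
     * 2048 byte page (4 ECC steps), a subpage write of data_len = 512 at
     * offset = 1024 yields start_step = end_step = 2, so only step 2 gets a
     * real ECC code; steps 0, 1 and 3 have their ECC and OOB slots padded
     * with 0xff, leaving those subpages untouched on the flash.
     */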
3832
3833
3834/**
3835 * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write
3836 * @chip: nand chip info structure
3837 * @buf: data buffer
3838 * @oob_required: must write chip->oob_poi to OOB
3839 * @page: page number to write
3840 *
3841 * The hw generator calculates the error syndrome automatically. Therefore we
3842 * need a special oob layout and handling.
3843 */
3844static int nand_write_page_syndrome(struct nand_chip *chip, const uint8_t *buf,
3845                                    int oob_required, int page)
3846{
3847        struct mtd_info *mtd = nand_to_mtd(chip);
3848        int i, eccsize = chip->ecc.size;
3849        int eccbytes = chip->ecc.bytes;
3850        int eccsteps = chip->ecc.steps;
3851        const uint8_t *p = buf;
3852        uint8_t *oob = chip->oob_poi;
3853        int ret;
3854
3855        ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
3856        if (ret)
3857                return ret;
3858
3859        for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
3860                chip->ecc.hwctl(chip, NAND_ECC_WRITE);
3861
3862                ret = nand_write_data_op(chip, p, eccsize, false);
3863                if (ret)
3864                        return ret;
3865
3866                if (chip->ecc.prepad) {
3867                        ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
3868                                                 false);
3869                        if (ret)
3870                                return ret;
3871
3872                        oob += chip->ecc.prepad;
3873                }
3874
3875                chip->ecc.calculate(chip, p, oob);
3876
3877                ret = nand_write_data_op(chip, oob, eccbytes, false);
3878                if (ret)
3879                        return ret;
3880
3881                oob += eccbytes;
3882
3883                if (chip->ecc.postpad) {
3884                        ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
3885                                                 false);
3886                        if (ret)
3887                                return ret;
3888
3889                        oob += chip->ecc.postpad;
3890                }
3891        }
3892
3893        /* Calculate remaining oob bytes */
3894        i = mtd->oobsize - (oob - chip->oob_poi);
3895        if (i) {
3896                ret = nand_write_data_op(chip, oob, i, false);
3897                if (ret)
3898                        return ret;
3899        }
3900
3901        return nand_prog_page_end_op(chip);
3902}
3903
3904/**
3905 * nand_write_page - write one page
3906 * @chip: NAND chip descriptor
3907 * @offset: address offset within the page
3908 * @data_len: length of actual data to be written
3909 * @buf: the data to write
3910 * @oob_required: must write chip->oob_poi to OOB
3911 * @page: page number to write
3912 * @raw: use _raw version of write_page
3913 */
3914static int nand_write_page(struct nand_chip *chip, uint32_t offset,
3915                           int data_len, const uint8_t *buf, int oob_required,
3916                           int page, int raw)
3917{
3918        struct mtd_info *mtd = nand_to_mtd(chip);
3919        int status, subpage;
3920
3921        if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
3922                chip->ecc.write_subpage)
3923                subpage = offset || (data_len < mtd->writesize);
3924        else
3925                subpage = 0;
3926
3927        if (unlikely(raw))
3928                status = chip->ecc.write_page_raw(chip, buf, oob_required,
3929                                                  page);
3930        else if (subpage)
3931                status = chip->ecc.write_subpage(chip, offset, data_len, buf,
3932                                                 oob_required, page);
3933        else
3934                status = chip->ecc.write_page(chip, buf, oob_required, page);
3935
3936        if (status < 0)
3937                return status;
3938
3939        return 0;
3940}
3941
3942#define NOTALIGNED(x)   ((x & (chip->subpagesize - 1)) != 0)
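    /*
     * Example: with a hypothetical chip->subpagesize of 512, NOTALIGNED(0x600)
     * evaluates to 0 (0x600 is a multiple of 512) while NOTALIGNED(0x610) is
     * non-zero, so such a request is rejected by nand_do_write_ops() below.
     */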
3943
3944/**
3945 * nand_do_write_ops - [INTERN] NAND write with ECC
3946 * @chip: NAND chip object
3947 * @to: offset to write to
3948 * @ops: oob operations description structure
3949 *
3950 * NAND write with ECC.
3951 */
3952static int nand_do_write_ops(struct nand_chip *chip, loff_t to,
3953                             struct mtd_oob_ops *ops)
3954{
3955        struct mtd_info *mtd = nand_to_mtd(chip);
3956        int chipnr, realpage, page, column;
3957        uint32_t writelen = ops->len;
3958
3959        uint32_t oobwritelen = ops->ooblen;
3960        uint32_t oobmaxlen = mtd_oobavail(mtd, ops);
3961
3962        uint8_t *oob = ops->oobbuf;
3963        uint8_t *buf = ops->datbuf;
3964        int ret;
3965        int oob_required = oob ? 1 : 0;
3966
3967        ops->retlen = 0;
3968        if (!writelen)
3969                return 0;
3970
3971        /* Reject writes that are not page aligned */
3972        if (NOTALIGNED(to) || NOTALIGNED(ops->len)) {
3973                pr_notice("%s: attempt to write non page aligned data\n",
3974                           __func__);
3975                return -EINVAL;
3976        }
3977
3978        column = to & (mtd->writesize - 1);
3979
3980        chipnr = (int)(to >> chip->chip_shift);
3981        nand_select_target(chip, chipnr);
3982
3983        /* Check if it is write protected */
3984        if (nand_check_wp(chip)) {
3985                ret = -EIO;
3986                goto err_out;
3987        }
3988
3989        realpage = (int)(to >> chip->page_shift);
3990        page = realpage & chip->pagemask;
3991
3992        /* Invalidate the page cache when we write to the cached page */
3993        if (to <= ((loff_t)chip->pagecache.page << chip->page_shift) &&
3994            ((loff_t)chip->pagecache.page << chip->page_shift) < (to + ops->len))
3995                chip->pagecache.page = -1;
3996
3997        /* Don't allow multipage oob writes with offset */
3998        if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) {
3999                ret = -EINVAL;
4000                goto err_out;
4001        }
4002
4003        while (1) {
4004                int bytes = mtd->writesize;
4005                uint8_t *wbuf = buf;
4006                int use_bufpoi;
4007                int part_pagewr = (column || writelen < mtd->writesize);
4008
4009                if (part_pagewr)
4010                        use_bufpoi = 1;
4011                else if (chip->options & NAND_USE_BOUNCE_BUFFER)
4012                        use_bufpoi = !virt_addr_valid(buf) ||
4013                                     !IS_ALIGNED((unsigned long)buf,
4014                                                 chip->buf_align);
4015                else
4016                        use_bufpoi = 0;
4017
4018                /* Partial page write, or need to use the bounce buffer? */
4019                if (use_bufpoi) {
4020                        pr_debug("%s: using write bounce buffer for buf@%p\n",
4021                                         __func__, buf);
4022                        if (part_pagewr)
4023                                bytes = min_t(int, bytes - column, writelen);
4024                        wbuf = nand_get_data_buf(chip);
4025                        memset(wbuf, 0xff, mtd->writesize);
4026                        memcpy(&wbuf[column], buf, bytes);
4027                }
4028
4029                if (unlikely(oob)) {
4030                        size_t len = min(oobwritelen, oobmaxlen);
4031                        oob = nand_fill_oob(chip, oob, len, ops);
4032                        oobwritelen -= len;
4033                } else {
4034                        /* We still need to erase leftover OOB data */
4035                        memset(chip->oob_poi, 0xff, mtd->oobsize);
4036                }
4037
4038                ret = nand_write_page(chip, column, bytes, wbuf,
4039                                      oob_required, page,
4040                                      (ops->mode == MTD_OPS_RAW));
4041                if (ret)
4042                        break;
4043
4044                writelen -= bytes;
4045                if (!writelen)
4046                        break;
4047
4048                column = 0;
4049                buf += bytes;
4050                realpage++;
4051
4052                page = realpage & chip->pagemask;
4053                /* Check if we cross a chip boundary */
4054                if (!page) {
4055                        chipnr++;
4056                        nand_deselect_target(chip);
4057                        nand_select_target(chip, chipnr);
4058                }
4059        }
4060
4061        ops->retlen = ops->len - writelen;
4062        if (unlikely(oob))
4063                ops->oobretlen = ops->ooblen;
4064
4065err_out:
4066        nand_deselect_target(chip);
4067        return ret;
4068}
4069
4070/**
4071 * panic_nand_write - [MTD Interface] NAND write with ECC
4072 * @mtd: MTD device structure
4073 * @to: offset to write to
4074 * @len: number of bytes to write
4075 * @retlen: pointer to variable to store the number of written bytes
4076 * @buf: the data to write
4077 *
4078 * NAND write with ECC. Used when performing writes in interrupt context, this
4079 * may for example be called by mtdoops when writing an oops while in panic.
4080 */
4081static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
4082                            size_t *retlen, const uint8_t *buf)
4083{
4084        struct nand_chip *chip = mtd_to_nand(mtd);
4085        int chipnr = (int)(to >> chip->chip_shift);
4086        struct mtd_oob_ops ops;
4087        int ret;
4088
4089        nand_select_target(chip, chipnr);
4090
4091        /* Wait for the device to get ready */
4092        panic_nand_wait(chip, 400);
4093
4094        memset(&ops, 0, sizeof(ops));
4095        ops.len = len;
4096        ops.datbuf = (uint8_t *)buf;
4097        ops.mode = MTD_OPS_PLACE_OOB;
4098
4099        ret = nand_do_write_ops(chip, to, &ops);
4100
4101        *retlen = ops.retlen;
4102        return ret;
4103}
4104
4105/**
4106 * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band
4107 * @mtd: MTD device structure
4108 * @to: offset to write to
4109 * @ops: oob operation description structure
4110 */
4111static int nand_write_oob(struct mtd_info *mtd, loff_t to,
4112                          struct mtd_oob_ops *ops)
4113{
4114        struct nand_chip *chip = mtd_to_nand(mtd);
4115        int ret = -ENOTSUPP;
4116
4117        ops->retlen = 0;
4118
4119        ret = nand_get_device(chip);
4120        if (ret)
4121                return ret;
4122
4123        switch (ops->mode) {
4124        case MTD_OPS_PLACE_OOB:
4125        case MTD_OPS_AUTO_OOB:
4126        case MTD_OPS_RAW:
4127                break;
4128
4129        default:
4130                goto out;
4131        }
4132
4133        if (!ops->datbuf)
4134                ret = nand_do_write_oob(chip, to, ops);
4135        else
4136                ret = nand_do_write_ops(chip, to, ops);
4137
4138out:
4139        nand_release_device(chip);
4140        return ret;
4141}
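    /*
     * Example MTD-client usage of the write path above (illustrative only;
     * the identifiers are hypothetical caller-provided names):
     *
     *        struct mtd_oob_ops ops = { };
     *
     *        ops.mode = MTD_OPS_PLACE_OOB;
     *        ops.datbuf = buf;
     *        ops.len = part->writesize;
     *        err = mtd_write_oob(part, to, &ops);
     *
     * Passing only an oobbuf (ops.datbuf == NULL) writes the OOB area alone
     * via nand_do_write_oob().
     */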
4142
4143/**
4144 * nand_erase - [MTD Interface] erase block(s)
4145 * @mtd: MTD device structure
4146 * @instr: erase instruction
4147 *
4148 * Erase one or more blocks.
4149 */
4150static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
4151{
4152        return nand_erase_nand(mtd_to_nand(mtd), instr, 0);
4153}
4154
4155/**
4156 * nand_erase_nand - [INTERN] erase block(s)
4157 * @chip: NAND chip object
4158 * @instr: erase instruction
4159 * @allowbbt: allow erasing the bbt area
4160 *
4161 * Erase one or more blocks.
4162 */
4163int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr,
4164                    int allowbbt)
4165{
4166        int page, pages_per_block, ret, chipnr;
4167        loff_t len;
4168
4169        pr_debug("%s: start = 0x%012llx, len = %llu\n",
4170                        __func__, (unsigned long long)instr->addr,
4171                        (unsigned long long)instr->len);
4172
4173        if (check_offs_len(chip, instr->addr, instr->len))
4174                return -EINVAL;
4175
4176        /* Grab the lock and see if the device is available */
4177        ret = nand_get_device(chip);
4178        if (ret)
4179                return ret;
4180
4181        /* Shift to get first page */
4182        page = (int)(instr->addr >> chip->page_shift);
4183        chipnr = (int)(instr->addr >> chip->chip_shift);
4184
4185        /* Calculate pages in each block */
4186        pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
4187
4188        /* Select the NAND device */
4189        nand_select_target(chip, chipnr);
4190
4191        /* Check if it is write protected */
4192        if (nand_check_wp(chip)) {
4193                pr_debug("%s: device is write protected!\n",
4194                                __func__);
4195                ret = -EIO;
4196                goto erase_exit;
4197        }
4198
4199        /* Loop through the pages */
4200        len = instr->len;
4201
4202        while (len) {
4203                /* Check if we have a bad block, we do not erase bad blocks! */
4204                if (nand_block_checkbad(chip, ((loff_t) page) <<
4205                                        chip->page_shift, allowbbt)) {
4206                        pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
4207                                    __func__, page);
4208                        ret = -EIO;
4209                        goto erase_exit;
4210                }
4211
4212                /*
4213                 * Invalidate the page cache, if we erase the block which
4214                 * contains the current cached page.
4215                 */
4216                if (page <= chip->pagecache.page && chip->pagecache.page <
4217                    (page + pages_per_block))
4218                        chip->pagecache.page = -1;
4219
4220                ret = nand_erase_op(chip, (page & chip->pagemask) >>
4221                                    (chip->phys_erase_shift - chip->page_shift));
4222                if (ret) {
4223                        pr_debug("%s: failed erase, page 0x%08x\n",
4224                                        __func__, page);
4225                        instr->fail_addr =
4226                                ((loff_t)page << chip->page_shift);
4227                        goto erase_exit;
4228                }
4229
4230                /* Increment page address and decrement length */
4231                len -= (1ULL << chip->phys_erase_shift);
4232                page += pages_per_block;
4233
4234                /* Check if we cross a chip boundary */
4235                if (len && !(page & chip->pagemask)) {
4236                        chipnr++;
4237                        nand_deselect_target(chip);
4238                        nand_select_target(chip, chipnr);
4239                }
4240        }
4241
4242        ret = 0;
4243erase_exit:
4244
4245        /* Deselect and wake up anyone waiting on the device */
4246        nand_deselect_target(chip);
4247        nand_release_device(chip);
4248
4249        /* Return more or less happy */
4250        return ret;
4251}
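    /*
     * Example MTD-client usage (illustrative only; "part" and "block_ofs" are
     * hypothetical): erasing a single eraseblock through the MTD interface,
     * which ends up in nand_erase() / nand_erase_nand() above:
     *
     *        struct erase_info einfo = { };
     *
     *        einfo.addr = block_ofs;
     *        einfo.len = part->erasesize;
     *        err = mtd_erase(part, &einfo);
     *
     * block_ofs must be eraseblock aligned; on failure, einfo.fail_addr holds
     * the offset of the block that could not be erased.
     */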
4252
4253/**
4254 * nand_sync - [MTD Interface] sync
4255 * @mtd: MTD device structure
4256 *
4257 * Sync is actually a wait-for-chip-ready function.
4258 */
4259static void nand_sync(struct mtd_info *mtd)
4260{
4261        struct nand_chip *chip = mtd_to_nand(mtd);
4262
4263        pr_debug("%s: called\n", __func__);
4264
4265        /* Grab the lock and see if the device is available */
4266        WARN_ON(nand_get_device(chip));
4267        /* Release it and go back */
4268        nand_release_device(chip);
4269}
4270
4271/**
4272 * nand_block_isbad - [MTD Interface] Check if block at offset is bad
4273 * @mtd: MTD device structure
4274 * @offs: offset relative to mtd start
4275 */
4276static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
4277{
4278        struct nand_chip *chip = mtd_to_nand(mtd);
4279        int chipnr = (int)(offs >> chip->chip_shift);
4280        int ret;
4281
4282        /* Select the NAND device */
4283        ret = nand_get_device(chip);
4284        if (ret)
4285                return ret;
4286
4287        nand_select_target(chip, chipnr);
4288
4289        ret = nand_block_checkbad(chip, offs, 0);
4290
4291        nand_deselect_target(chip);
4292        nand_release_device(chip);
4293
4294        return ret;
4295}
4296
4297/**
4298 * nand_block_markbad - [MTD Interface] Mark block at the given offset as bad
4299 * @mtd: MTD device structure
4300 * @ofs: offset relative to mtd start
4301 */
4302static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
4303{
4304        int ret;
4305
4306        ret = nand_block_isbad(mtd, ofs);
4307        if (ret) {
4308                /* If it was bad already, return success and do nothing */
4309                if (ret > 0)
4310                        return 0;
4311                return ret;
4312        }
4313
4314        return nand_block_markbad_lowlevel(mtd_to_nand(mtd), ofs);
4315}
4316
4317/**
4318 * nand_suspend - [MTD Interface] Suspend the NAND flash
4319 * @mtd: MTD device structure
4320 */
4321static int nand_suspend(struct mtd_info *mtd)
4322{
4323        struct nand_chip *chip = mtd_to_nand(mtd);
4324
4325        mutex_lock(&chip->lock);
4326        chip->suspended = 1;
4327        mutex_unlock(&chip->lock);
4328
4329        return 0;
4330}
4331
4332/**
4333 * nand_resume - [MTD Interface] Resume the NAND flash
4334 * @mtd: MTD device structure
4335 */
4336static void nand_resume(struct mtd_info *mtd)
4337{
4338        struct nand_chip *chip = mtd_to_nand(mtd);
4339
4340        mutex_lock(&chip->lock);
4341        if (chip->suspended)
4342                chip->suspended = 0;
4343        else
4344                pr_err("%s called for a chip which is not in suspended state\n",
4345                        __func__);
4346        mutex_unlock(&chip->lock);
4347}
4348
4349/**
4350 * nand_shutdown - [MTD Interface] Finish the current NAND operation and
4351 *                 prevent further operations
4352 * @mtd: MTD device structure
4353 */
4354static void nand_shutdown(struct mtd_info *mtd)
4355{
4356        nand_suspend(mtd);
4357}
4358
4359/* Set default functions */
4360static void nand_set_defaults(struct nand_chip *chip)
4361{
4362        /* If no controller is provided, use the dummy, legacy one. */
4363        if (!chip->controller) {
4364                chip->controller = &chip->legacy.dummy_controller;
4365                nand_controller_init(chip->controller);
4366        }
4367
4368        nand_legacy_set_defaults(chip);
4369
4370        if (!chip->buf_align)
4371                chip->buf_align = 1;
4372}
4373
4374/* Sanitize ONFI strings so we can safely print them */
4375void sanitize_string(uint8_t *s, size_t len)
4376{
4377        ssize_t i;
4378
4379        /* Null terminate */
4380        s[len - 1] = 0;
4381
4382        /* Remove non printable chars */
4383        for (i = 0; i < len - 1; i++) {
4384                if (s[i] < ' ' || s[i] > 127)
4385                        s[i] = '?';
4386        }
4387
4388        /* Remove trailing spaces */
4389        strim(s);
4390}
4391
4392/*
4393 * nand_id_has_period - Check if an ID string has a given wraparound period
4394 * @id_data: the ID string
4395 * @arrlen: the length of the @id_data array
4396 * @period: the period of repetition
4397 *
4398 * Check if an ID string is repeated within a given sequence of bytes at
4399 * specific repetition interval period (e.g., {0x20,0x01,0x7F,0x20} has a
4400 * period of 3). This is a helper function for nand_id_len(). Returns non-zero
4401 * if the repetition has a period of @period; otherwise, returns zero.
4402 */
4403static int nand_id_has_period(u8 *id_data, int arrlen, int period)
4404{
4405        int i, j;
4406        for (i = 0; i < period; i++)
4407                for (j = i + period; j < arrlen; j += period)
4408                        if (id_data[i] != id_data[j])
4409                                return 0;
4410        return 1;
4411}
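    /*
     * Worked example (hypothetical ID bytes): for id_data =
     * {0x98, 0xd1, 0x90, 0x15, 0x76, 0x98, 0xd1, 0x90} and arrlen = 8,
     * period 5 succeeds (bytes 5..7 repeat bytes 0..2), so
     * nand_id_has_period(id_data, 8, 5) returns 1, whereas e.g. period 2
     * already fails at id_data[0] != id_data[2].
     */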
4412
4413/*
4414 * nand_id_len - Get the length of an ID string returned by CMD_READID
4415 * @id_data: the ID string
4416 * @arrlen: the length of the @id_data array
4417 *
4418 * Returns the length of the ID string, according to known wraparound/trailing
4419 * zero patterns. If no pattern exists, returns the length of the array.
4420 */
4421static int nand_id_len(u8 *id_data, int arrlen)
4422{
4423        int last_nonzero, period;
4424
4425        /* Find last non-zero byte */
4426        for (last_nonzero = arrlen - 1; last_nonzero >= 0; last_nonzero--)
4427                if (id_data[last_nonzero])
4428                        break;
4429
4430        /* All zeros */
4431        if (last_nonzero < 0)
4432                return 0;
4433
4434        /* Calculate wraparound period */
4435        for (period = 1; period < arrlen; period++)
4436                if (nand_id_has_period(id_data, arrlen, period))
4437                        break;
4438
4439        /* There's a repeated pattern */
4440        if (period < arrlen)
4441                return period;
4442
4443        /* There are trailing zeros */
4444        if (last_nonzero < arrlen - 1)
4445                return last_nonzero + 1;
4446
4447        /* No pattern detected */
4448        return arrlen;
4449}
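    /*
     * Worked example (hypothetical ID bytes): for id_data =
     * {0xec, 0xd1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} no wraparound period
     * shorter than the array is found, but last_nonzero is 1, so nand_id_len()
     * returns 2, i.e. a classic two byte legacy ID followed by trailing zeros.
     * For the 8 byte ID used in the nand_id_has_period() example above, it
     * would return the detected period, i.e. 5.
     */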
4450
4451/* Extract the number of bits per cell from the 3rd byte of the extended ID */
4452static int nand_get_bits_per_cell(u8 cellinfo)
4453{
4454        int bits;
4455
4456        bits = cellinfo & NAND_CI_CELLTYPE_MSK;
4457        bits >>= NAND_CI_CELLTYPE_SHIFT;
4458        return bits + 1;
4459}
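    /*
     * Example: with the usual NAND_CI_CELLTYPE_MSK/NAND_CI_CELLTYPE_SHIFT
     * definitions (bits 3:2 of the 3rd ID byte), cellinfo = 0x00 yields
     * 1 bit per cell (SLC) and cellinfo = 0x04 yields 2 bits per cell (MLC).
     */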
4460
4461/*
4462 * Many newer NAND chips share similar device ID codes, which represent the
4463 * size of the chip. The rest of the parameters must be decoded according to
4464 * generic or manufacturer-specific "extended ID" decoding patterns.
4465 */
4466void nand_decode_ext_id(struct nand_chip *chip)
4467{
4468        struct nand_memory_organization *memorg;
4469        struct mtd_info *mtd = nand_to_mtd(chip);
4470        int extid;
4471        u8 *id_data = chip->id.data;
4472
4473        memorg = nanddev_get_memorg(&chip->base);
4474
4475        /* The 3rd id byte holds MLC / multichip data */
4476        memorg->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
4477        /* The 4th id byte is the important one */
4478        extid = id_data[3];
4479
4480        /* Calc pagesize */
4481        memorg->pagesize = 1024 << (extid & 0x03);
4482        mtd->writesize = memorg->pagesize;
4483        extid >>= 2;
4484        /* Calc oobsize */
4485        memorg->oobsize = (8 << (extid & 0x01)) * (mtd->writesize >> 9);
4486        mtd->oobsize = memorg->oobsize;
4487        extid >>= 2;
4488        /* Calc blocksize. Blocksize is a multiple of 64KiB */
4489        memorg->pages_per_eraseblock = ((64 * 1024) << (extid & 0x03)) /
4490                                       memorg->pagesize;
4491        mtd->erasesize = (64 * 1024) << (extid & 0x03);
4492        extid >>= 2;
4493        /* Get buswidth information */
4494        if (extid & 0x1)
4495                chip->options |= NAND_BUSWIDTH_16;
4496}
4497EXPORT_SYMBOL_GPL(nand_decode_ext_id);
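    /*
     * Worked example (hypothetical 4th ID byte): for id_data[3] = 0x15 the
     * decoding above gives pagesize = 1024 << 1 = 2048, oobsize =
     * (8 << 1) * (2048 >> 9) = 64, erasesize = 64KiB << 1 = 128KiB, and an
     * 8-bit bus width (the buswidth bit is 0).
     */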
4498
4499/*
4500 * Old devices have chip data hardcoded in the device ID table. nand_decode_id
4501 * decodes a matching ID table entry and assigns the MTD size parameters for
4502 * the chip.
4503 */
4504static void nand_decode_id(struct nand_chip *chip, struct nand_flash_dev *type)
4505{
4506        struct mtd_info *mtd = nand_to_mtd(chip);
4507        struct nand_memory_organization *memorg;
4508
4509        memorg = nanddev_get_memorg(&chip->base);
4510
4511        memorg->pages_per_eraseblock = type->erasesize / type->pagesize;
4512        mtd->erasesize = type->erasesize;
4513        memorg->pagesize = type->pagesize;
4514        mtd->writesize = memorg->pagesize;
4515        memorg->oobsize = memorg->pagesize / 32;
4516        mtd->oobsize = memorg->oobsize;
4517
4518        /* All legacy ID NAND are small-page, SLC */
4519        memorg->bits_per_cell = 1;
4520}
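    /*
     * Example (hypothetical table entry): a legacy part with pagesize = 512
     * and erasesize = 16KiB ends up with pages_per_eraseblock = 32 and
     * oobsize = 512 / 32 = 16, matching the classic small-page 512+16 byte
     * layout.
     */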
4521
4522/*
4523 * Set the bad block marker/indicator (BBM/BBI) patterns according to some
4524 * heuristic patterns using various detected parameters (e.g., manufacturer,
4525 * page size, cell-type information).
4526 */
4527static void nand_decode_bbm_options(struct nand_chip *chip)
4528{
4529        struct mtd_info *mtd = nand_to_mtd(chip);
4530
4531        /* Set the bad block position */
4532        if (mtd->writesize > 512 || (chip->options & NAND_BUSWIDTH_16))
4533                chip->badblockpos = NAND_BBM_POS_LARGE;
4534        else
4535                chip->badblockpos = NAND_BBM_POS_SMALL;
4536}
4537
4538static inline bool is_full_id_nand(struct nand_flash_dev *type)
4539{
4540        return type->id_len;
4541}
4542
4543static bool find_full_id_nand(struct nand_chip *chip,
4544                              struct nand_flash_dev *type)
4545{
4546        struct mtd_info *mtd = nand_to_mtd(chip);
4547        struct nand_memory_organization *memorg;
4548        u8 *id_data = chip->id.data;
4549
4550        memorg = nanddev_get_memorg(&chip->base);
4551
4552        if (!strncmp(type->id, id_data, type->id_len)) {
4553                memorg->pagesize = type->pagesize;
4554                mtd->writesize = memorg->pagesize;
4555                memorg->pages_per_eraseblock = type->erasesize /
4556                                               type->pagesize;
4557                mtd->erasesize = type->erasesize;
4558                memorg->oobsize = type->oobsize;
4559                mtd->oobsize = memorg->oobsize;
4560
4561                memorg->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
4562                memorg->eraseblocks_per_lun =
4563                        DIV_ROUND_DOWN_ULL((u64)type->chipsize << 20,
4564                                           memorg->pagesize *
4565                                           memorg->pages_per_eraseblock);
4566                chip->options |= type->options;
4567                chip->base.eccreq.strength = NAND_ECC_STRENGTH(type);
4568                chip->base.eccreq.step_size = NAND_ECC_STEP(type);
4569                chip->onfi_timing_mode_default =
4570                                        type->onfi_timing_mode_default;
4571
4572                chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
4573                if (!chip->parameters.model)
4574                        return false;
4575
4576                return true;
4577        }
4578        return false;
4579}
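
/*
 * The eraseblocks_per_lun computation above is plain arithmetic. Hypothetical
 * example: a full-id entry with chipsize = 512 (MiB), pagesize = 4096 and
 * erasesize = 256 KiB gives:
 *
 *   pages_per_eraseblock = 262144 / 4096 = 64;
 *   eraseblocks_per_lun  = (512 << 20) / (4096 * 64) = 2048;
 *
 * DIV_ROUND_DOWN_ULL() is used because the chip size in bytes does not fit in
 * 32 bits for large chips.
 */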
4580
4581/*
4582 * Manufacturer detection. Only used when the NAND is not ONFI or JEDEC
4583 * compliant and does not have a full-id or legacy-id entry in the nand_ids
4584 * table.
4585 */
4586static void nand_manufacturer_detect(struct nand_chip *chip)
4587{
4588        /*
4589         * Try manufacturer detection if available and use
4590         * nand_decode_ext_id() otherwise.
4591         */
4592        if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
4593            chip->manufacturer.desc->ops->detect) {
4594                struct nand_memory_organization *memorg;
4595
4596                memorg = nanddev_get_memorg(&chip->base);
4597
4598                /* The 3rd id byte holds MLC / multichip data */
4599                memorg->bits_per_cell = nand_get_bits_per_cell(chip->id.data[2]);
4600                chip->manufacturer.desc->ops->detect(chip);
4601        } else {
4602                nand_decode_ext_id(chip);
4603        }
4604}
4605
4606/*
4607 * Manufacturer initialization. This function is called for all NANDs including
4608 * ONFI and JEDEC compliant ones.
4609 * Manufacturer drivers should put all their specific initialization code in
4610 * their ->init() hook.
4611 */
4612static int nand_manufacturer_init(struct nand_chip *chip)
4613{
4614        if (!chip->manufacturer.desc || !chip->manufacturer.desc->ops ||
4615            !chip->manufacturer.desc->ops->init)
4616                return 0;
4617
4618        return chip->manufacturer.desc->ops->init(chip);
4619}
4620
4621/*
4622 * Manufacturer cleanup. This function is called for all NANDs including
4623 * ONFI and JEDEC compliant ones.
4624 * Manufacturer drivers should put all their specific cleanup code in their
4625 * ->cleanup() hook.
4626 */
4627static void nand_manufacturer_cleanup(struct nand_chip *chip)
4628{
4629        /* Release manufacturer private data */
4630        if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
4631            chip->manufacturer.desc->ops->cleanup)
4632                chip->manufacturer.desc->ops->cleanup(chip);
4633}
4634
4635static const char *
4636nand_manufacturer_name(const struct nand_manufacturer *manufacturer)
4637{
4638        return manufacturer ? manufacturer->name : "Unknown";
4639}
4640
4641/*
4642 * Get the flash and manufacturer id and lookup if the type is supported.
4643 */
4644static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
4645{
4646        const struct nand_manufacturer *manufacturer;
4647        struct mtd_info *mtd = nand_to_mtd(chip);
4648        struct nand_memory_organization *memorg;
4649        int busw, ret;
4650        u8 *id_data = chip->id.data;
4651        u8 maf_id, dev_id;
4652        u64 targetsize;
4653
4654        /*
4655         * Let's start by initializing memorg fields that might be left
4656         * unassigned by the ID-based detection logic.
4657         */
4658        memorg = nanddev_get_memorg(&chip->base);
4659        memorg->planes_per_lun = 1;
4660        memorg->luns_per_target = 1;
4661
4662        /*
4663         * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
4664         * after power-up.
4665         */
4666        ret = nand_reset(chip, 0);
4667        if (ret)
4668                return ret;
4669
4670        /* Select the device */
4671        nand_select_target(chip, 0);
4672
4673        /* Send the command for reading device ID */
4674        ret = nand_readid_op(chip, 0, id_data, 2);
4675        if (ret)
4676                return ret;
4677
4678        /* Read manufacturer and device IDs */
4679        maf_id = id_data[0];
4680        dev_id = id_data[1];
4681
4682        /*
4683         * Try again to make sure, as on some systems bus-hold or other
4684         * interface concerns can cause random data that looks like a
4685         * possibly credible NAND flash to appear. If the two results do
4686         * not match, ignore the device completely.
4687         */
4688
4689        /* Read entire ID string */
4690        ret = nand_readid_op(chip, 0, id_data, sizeof(chip->id.data));
4691        if (ret)
4692                return ret;
4693
4694        if (id_data[0] != maf_id || id_data[1] != dev_id) {
4695                pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",
4696                        maf_id, dev_id, id_data[0], id_data[1]);
4697                return -ENODEV;
4698        }
4699
4700        chip->id.len = nand_id_len(id_data, ARRAY_SIZE(chip->id.data));
4701
4702        /* Try to identify manufacturer */
4703        manufacturer = nand_get_manufacturer(maf_id);
4704        chip->manufacturer.desc = manufacturer;
4705
4706        if (!type)
4707                type = nand_flash_ids;
4708
4709        /*
4710         * Save the NAND_BUSWIDTH_16 flag before letting auto-detection logic
4711         * override it.
4712         * This is required to make sure the initial NAND bus width set by the
4713         * NAND controller driver is coherent with the real NAND bus width
4714         * (extracted by auto-detection code).
4715         */
4716        busw = chip->options & NAND_BUSWIDTH_16;
4717
4718        /*
4719         * The flag is only set (never cleared), so reset it to its default value
4720         * before starting auto-detection.
4721         */
4722        chip->options &= ~NAND_BUSWIDTH_16;
4723
4724        for (; type->name != NULL; type++) {
4725                if (is_full_id_nand(type)) {
4726                        if (find_full_id_nand(chip, type))
4727                                goto ident_done;
4728                } else if (dev_id == type->dev_id) {
4729                        break;
4730                }
4731        }
4732
4733        if (!type->name || !type->pagesize) {
4734                /* Check if the chip is ONFI compliant */
4735                ret = nand_onfi_detect(chip);
4736                if (ret < 0)
4737                        return ret;
4738                else if (ret)
4739                        goto ident_done;
4740
4741                /* Check if the chip is JEDEC compliant */
4742                ret = nand_jedec_detect(chip);
4743                if (ret < 0)
4744                        return ret;
4745                else if (ret)
4746                        goto ident_done;
4747        }
4748
4749        if (!type->name)
4750                return -ENODEV;
4751
4752        chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
4753        if (!chip->parameters.model)
4754                return -ENOMEM;
4755
4756        if (!type->pagesize)
4757                nand_manufacturer_detect(chip);
4758        else
4759                nand_decode_id(chip, type);
4760
4761        /* Get chip options */
4762        chip->options |= type->options;
4763
4764        memorg->eraseblocks_per_lun =
4765                        DIV_ROUND_DOWN_ULL((u64)type->chipsize << 20,
4766                                           memorg->pagesize *
4767                                           memorg->pages_per_eraseblock);
4768
4769ident_done:
4770        if (!mtd->name)
4771                mtd->name = chip->parameters.model;
4772
4773        if (chip->options & NAND_BUSWIDTH_AUTO) {
4774                WARN_ON(busw & NAND_BUSWIDTH_16);
4775                nand_set_defaults(chip);
4776        } else if (busw != (chip->options & NAND_BUSWIDTH_16)) {
4777                /*
4778                 * Check if the bus width is correct. Hardware drivers should
4779                 * set it up correctly!
4780                 */
4781                pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
4782                        maf_id, dev_id);
4783                pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
4784                        mtd->name);
4785                pr_warn("bus width %d instead of %d bits\n", busw ? 16 : 8,
4786                        (chip->options & NAND_BUSWIDTH_16) ? 16 : 8);
4787                ret = -EINVAL;
4788
4789                goto free_detect_allocation;
4790        }
4791
4792        nand_decode_bbm_options(chip);
4793
4794        /* Calculate the address shift from the page size */
4795        chip->page_shift = ffs(mtd->writesize) - 1;
4796        /* Convert chipsize to number of pages per chip - 1 */
4797        targetsize = nanddev_target_size(&chip->base);
4798        chip->pagemask = (targetsize >> chip->page_shift) - 1;
4799
4800        chip->bbt_erase_shift = chip->phys_erase_shift =
4801                ffs(mtd->erasesize) - 1;
4802        if (targetsize & 0xffffffff) {
4803                chip->chip_shift = ffs((unsigned)targetsize) - 1;
4804        } else {
4805                chip->chip_shift = ffs((unsigned)(targetsize >> 32));
4806                chip->chip_shift += 32 - 1;
4807        }
4808
4809        if (chip->chip_shift - chip->page_shift > 16)
4810                chip->options |= NAND_ROW_ADDR_3;
4811
4812        chip->badblockbits = 8;
4813
4814        nand_legacy_adjust_cmdfunc(chip);
4815
4816        pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
4817                maf_id, dev_id);
4818        pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
4819                chip->parameters.model);
4820        pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n",
4821                (int)(targetsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
4822                mtd->erasesize >> 10, mtd->writesize, mtd->oobsize);
4823        return 0;
4824
4825free_detect_allocation:
4826        kfree(chip->parameters.model);
4827
4828        return ret;
4829}
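
/*
 * Address-geometry example for the shift calculations above (numbers are
 * illustrative): a single 256 MiB target with 2 KiB pages and 128 KiB blocks
 * ends up with:
 *
 *   page_shift       = ffs(2048) - 1       = 11;
 *   pagemask         = (256 MiB >> 11) - 1 = 0x1ffff;
 *   phys_erase_shift = ffs(131072) - 1     = 17;
 *   chip_shift       = ffs(256 MiB) - 1    = 28;
 *
 * chip_shift - page_shift = 17 > 16, so NAND_ROW_ADDR_3 is set and three row
 * address cycles are used to address this device.
 */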
4830
4831static const char * const nand_ecc_modes[] = {
4832        [NAND_ECC_NONE]         = "none",
4833        [NAND_ECC_SOFT]         = "soft",
4834        [NAND_ECC_HW]           = "hw",
4835        [NAND_ECC_HW_SYNDROME]  = "hw_syndrome",
4836        [NAND_ECC_HW_OOB_FIRST] = "hw_oob_first",
4837        [NAND_ECC_ON_DIE]       = "on-die",
4838};
4839
4840static int of_get_nand_ecc_mode(struct device_node *np)
4841{
4842        const char *pm;
4843        int err, i;
4844
4845        err = of_property_read_string(np, "nand-ecc-mode", &pm);
4846        if (err < 0)
4847                return err;
4848
4849        for (i = 0; i < ARRAY_SIZE(nand_ecc_modes); i++)
4850                if (!strcasecmp(pm, nand_ecc_modes[i]))
4851                        return i;
4852
4853        /*
4854         * For backward compatibility we support a few obsolete values that no
4855         * longer have mappings in nand_ecc_modes_t (they were merged with
4856         * other enums).
4857         */
4858        if (!strcasecmp(pm, "soft_bch"))
4859                return NAND_ECC_SOFT;
4860
4861        return -ENODEV;
4862}
4863
4864static const char * const nand_ecc_algos[] = {
4865        [NAND_ECC_HAMMING]      = "hamming",
4866        [NAND_ECC_BCH]          = "bch",
4867        [NAND_ECC_RS]           = "rs",
4868};
4869
4870static int of_get_nand_ecc_algo(struct device_node *np)
4871{
4872        const char *pm;
4873        int err, i;
4874
4875        err = of_property_read_string(np, "nand-ecc-algo", &pm);
4876        if (!err) {
4877                for (i = NAND_ECC_HAMMING; i < ARRAY_SIZE(nand_ecc_algos); i++)
4878                        if (!strcasecmp(pm, nand_ecc_algos[i]))
4879                                return i;
4880                return -ENODEV;
4881        }
4882
4883        /*
4884         * For backward compatibility we also read "nand-ecc-mode", checking
4885         * for some obsolete values that used to specify the ECC algorithm.
4886         */
4887        err = of_property_read_string(np, "nand-ecc-mode", &pm);
4888        if (err < 0)
4889                return err;
4890
4891        if (!strcasecmp(pm, "soft"))
4892                return NAND_ECC_HAMMING;
4893        else if (!strcasecmp(pm, "soft_bch"))
4894                return NAND_ECC_BCH;
4895
4896        return -ENODEV;
4897}
4898
4899static int of_get_nand_ecc_step_size(struct device_node *np)
4900{
4901        int ret;
4902        u32 val;
4903
4904        ret = of_property_read_u32(np, "nand-ecc-step-size", &val);
4905        return ret ? ret : val;
4906}
4907
4908static int of_get_nand_ecc_strength(struct device_node *np)
4909{
4910        int ret;
4911        u32 val;
4912
4913        ret = of_property_read_u32(np, "nand-ecc-strength", &val);
4914        return ret ? ret : val;
4915}
4916
4917static int of_get_nand_bus_width(struct device_node *np)
4918{
4919        u32 val;
4920
4921        if (of_property_read_u32(np, "nand-bus-width", &val))
4922                return 8;
4923
4924        switch (val) {
4925        case 8:
4926        case 16:
4927                return val;
4928        default:
4929                return -EIO;
4930        }
4931}
4932
4933static bool of_get_nand_on_flash_bbt(struct device_node *np)
4934{
4935        return of_property_read_bool(np, "nand-on-flash-bbt");
4936}
4937
4938static int nand_dt_init(struct nand_chip *chip)
4939{
4940        struct device_node *dn = nand_get_flash_node(chip);
4941        int ecc_mode, ecc_algo, ecc_strength, ecc_step;
4942
4943        if (!dn)
4944                return 0;
4945
4946        if (of_get_nand_bus_width(dn) == 16)
4947                chip->options |= NAND_BUSWIDTH_16;
4948
4949        if (of_property_read_bool(dn, "nand-is-boot-medium"))
4950                chip->options |= NAND_IS_BOOT_MEDIUM;
4951
4952        if (of_get_nand_on_flash_bbt(dn))
4953                chip->bbt_options |= NAND_BBT_USE_FLASH;
4954
4955        ecc_mode = of_get_nand_ecc_mode(dn);
4956        ecc_algo = of_get_nand_ecc_algo(dn);
4957        ecc_strength = of_get_nand_ecc_strength(dn);
4958        ecc_step = of_get_nand_ecc_step_size(dn);
4959
4960        if (ecc_mode >= 0)
4961                chip->ecc.mode = ecc_mode;
4962
4963        if (ecc_algo >= 0)
4964                chip->ecc.algo = ecc_algo;
4965
4966        if (ecc_strength >= 0)
4967                chip->ecc.strength = ecc_strength;
4968
4969        if (ecc_step > 0)
4970                chip->ecc.size = ecc_step;
4971
4972        if (of_property_read_bool(dn, "nand-ecc-maximize"))
4973                chip->ecc.options |= NAND_ECC_MAXIMIZE;
4974
4975        return 0;
4976}
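
/*
 * The properties parsed above come from the NAND chip's device tree node.
 * A minimal, purely illustrative fragment (node names made up) could look
 * like:
 *
 *   nand@0 {
 *           reg = <0>;
 *           nand-bus-width = <8>;
 *           nand-on-flash-bbt;
 *           nand-ecc-mode = "hw";
 *           nand-ecc-algo = "bch";
 *           nand-ecc-step-size = <512>;
 *           nand-ecc-strength = <8>;
 *   };
 *
 * Properties left out simply keep the defaults chosen later by the controller
 * driver or by nand_scan_tail().
 */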
4977
4978/**
4979 * nand_scan_ident - Scan for the NAND device
4980 * @chip: NAND chip object
4981 * @maxchips: number of chips to scan for
4982 * @table: alternative NAND ID table
4983 *
4984 * This is the first phase of the normal nand_scan() function. It reads the
4985 * flash ID and sets up MTD fields accordingly.
4986 *
4987 * This helper used to be called directly from controller drivers that needed
4988 * to tweak some ECC-related parameters before nand_scan_tail(). This separation
4989 * prevented dynamic allocations during this phase, which was inconvenient and
4990 * has been banned for the benefit of the ->init_ecc()/cleanup_ecc() hooks.
4991 */
4992static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips,
4993                           struct nand_flash_dev *table)
4994{
4995        struct mtd_info *mtd = nand_to_mtd(chip);
4996        struct nand_memory_organization *memorg;
4997        int nand_maf_id, nand_dev_id;
4998        unsigned int i;
4999        int ret;
5000
5001        memorg = nanddev_get_memorg(&chip->base);
5002
5003        /* Assume all dies are deselected when we enter nand_scan_ident(). */
5004        chip->cur_cs = -1;
5005
5006        mutex_init(&chip->lock);
5007
5008        /* Enforce the right timings for reset/detection */
5009        onfi_fill_data_interface(chip, NAND_SDR_IFACE, 0);
5010
5011        ret = nand_dt_init(chip);
5012        if (ret)
5013                return ret;
5014
5015        if (!mtd->name && mtd->dev.parent)
5016                mtd->name = dev_name(mtd->dev.parent);
5017
5018        /* Set the default functions */
5019        nand_set_defaults(chip);
5020
5021        ret = nand_legacy_check_hooks(chip);
5022        if (ret)
5023                return ret;
5024
5025        memorg->ntargets = maxchips;
5026
5027        /* Read the flash type */
5028        ret = nand_detect(chip, table);
5029        if (ret) {
5030                if (!(chip->options & NAND_SCAN_SILENT_NODEV))
5031                        pr_warn("No NAND device found\n");
5032                nand_deselect_target(chip);
5033                return ret;
5034        }
5035
5036        nand_maf_id = chip->id.data[0];
5037        nand_dev_id = chip->id.data[1];
5038
5039        nand_deselect_target(chip);
5040
5041        /* Check for a chip array */
5042        for (i = 1; i < maxchips; i++) {
5043                u8 id[2];
5044
5045                /* See comment in nand_detect() for reset */
5046                ret = nand_reset(chip, i);
5047                if (ret)
5048                        break;
5049
5050                nand_select_target(chip, i);
5051                /* Send the command for reading device ID */
5052                ret = nand_readid_op(chip, 0, id, sizeof(id));
5053                if (ret)
5054                        break;
5055                /* Read manufacturer and device IDs */
5056                if (nand_maf_id != id[0] || nand_dev_id != id[1]) {
5057                        nand_deselect_target(chip);
5058                        break;
5059                }
5060                nand_deselect_target(chip);
5061        }
5062        if (i > 1)
5063                pr_info("%d chips detected\n", i);
5064
5065        /* Store the number of chips and calc total size for mtd */
5066        memorg->ntargets = i;
5067        mtd->size = i * nanddev_target_size(&chip->base);
5068
5069        return 0;
5070}
5071
5072static void nand_scan_ident_cleanup(struct nand_chip *chip)
5073{
5074        kfree(chip->parameters.model);
5075        kfree(chip->parameters.onfi);
5076}
5077
5078static int nand_set_ecc_soft_ops(struct nand_chip *chip)
5079{
5080        struct mtd_info *mtd = nand_to_mtd(chip);
5081        struct nand_ecc_ctrl *ecc = &chip->ecc;
5082
5083        if (WARN_ON(ecc->mode != NAND_ECC_SOFT))
5084                return -EINVAL;
5085
5086        switch (ecc->algo) {
5087        case NAND_ECC_HAMMING:
5088                ecc->calculate = nand_calculate_ecc;
5089                ecc->correct = nand_correct_data;
5090                ecc->read_page = nand_read_page_swecc;
5091                ecc->read_subpage = nand_read_subpage;
5092                ecc->write_page = nand_write_page_swecc;
5093                ecc->read_page_raw = nand_read_page_raw;
5094                ecc->write_page_raw = nand_write_page_raw;
5095                ecc->read_oob = nand_read_oob_std;
5096                ecc->write_oob = nand_write_oob_std;
5097                if (!ecc->size)
5098                        ecc->size = 256;
5099                ecc->bytes = 3;
5100                ecc->strength = 1;
5101
5102                if (IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC))
5103                        ecc->options |= NAND_ECC_SOFT_HAMMING_SM_ORDER;
5104
5105                return 0;
5106        case NAND_ECC_BCH:
5107                if (!mtd_nand_has_bch()) {
5108                        WARN(1, "CONFIG_MTD_NAND_ECC_SW_BCH not enabled\n");
5109                        return -EINVAL;
5110                }
5111                ecc->calculate = nand_bch_calculate_ecc;
5112                ecc->correct = nand_bch_correct_data;
5113                ecc->read_page = nand_read_page_swecc;
5114                ecc->read_subpage = nand_read_subpage;
5115                ecc->write_page = nand_write_page_swecc;
5116                ecc->read_page_raw = nand_read_page_raw;
5117                ecc->write_page_raw = nand_write_page_raw;
5118                ecc->read_oob = nand_read_oob_std;
5119                ecc->write_oob = nand_write_oob_std;
5120
5121                /*
5122                 * Board driver should supply ecc.size and ecc.strength
5123                 * values to select how many bits are correctable.
5124                 * Otherwise, default to 4 bits for large page devices.
5125                 */
5126                if (!ecc->size && (mtd->oobsize >= 64)) {
5127                        ecc->size = 512;
5128                        ecc->strength = 4;
5129                }
5130
5131                /*
5132                 * If no ECC placement scheme was provided, pick up the default
5133                 * large page one.
5134                 */
5135                if (!mtd->ooblayout) {
5136                        /* handle large page devices only */
5137                        if (mtd->oobsize < 64) {
5138                                WARN(1, "OOB layout is required when using software BCH on small pages\n");
5139                                return -EINVAL;
5140                        }
5141
5142                        mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
5143
5144                }
5145
5146                /*
5147                 * We can only maximize ECC config when the default layout is
5148                 * used, otherwise we don't know how many bytes can really be
5149                 * used.
5150                 */
5151                if (mtd->ooblayout == &nand_ooblayout_lp_ops &&
5152                    ecc->options & NAND_ECC_MAXIMIZE) {
5153                        int steps, bytes;
5154
5155                        /* Always prefer 1k blocks over 512-byte ones */
5156                        ecc->size = 1024;
5157                        steps = mtd->writesize / ecc->size;
5158
5159                        /* Reserve 2 bytes for the BBM */
5160                        bytes = (mtd->oobsize - 2) / steps;
5161                        ecc->strength = bytes * 8 / fls(8 * ecc->size);
5162                }
5163
5164                /* See nand_bch_init() for details. */
5165                ecc->bytes = 0;
5166                ecc->priv = nand_bch_init(mtd);
5167                if (!ecc->priv) {
5168                        WARN(1, "BCH ECC initialization failed!\n");
5169                        return -EINVAL;
5170                }
5171                return 0;
5172        default:
5173                WARN(1, "Unsupported ECC algorithm!\n");
5174                return -EINVAL;
5175        }
5176}
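
/*
 * Worked example for the NAND_ECC_MAXIMIZE branch above (illustrative
 * numbers): with writesize = 2048 and oobsize = 64, forcing ecc->size = 1024
 * gives:
 *
 *   steps    = 2048 / 1024            = 2;
 *   bytes    = (64 - 2) / 2           = 31;  // 2 bytes kept for the BBM
 *   strength = 31 * 8 / fls(8 * 1024) = 248 / 14 = 17;
 *
 * i.e. the strongest BCH configuration (17 bits per 1024 bytes) that still
 * fits in the remaining OOB bytes.
 */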
5177
5178/**
5179 * nand_check_ecc_caps - check the sanity of preset ECC settings
5180 * @chip: nand chip info structure
5181 * @caps: ECC caps info structure
5182 * @oobavail: OOB size that the ECC engine can use
5183 *
5184 * When ECC step size and strength are already set, check if they are supported
5185 * by the controller and the calculated ECC bytes fit within the chip's OOB.
5186 * On success, the calculated number of ECC bytes is set.
5187 */
5188static int
5189nand_check_ecc_caps(struct nand_chip *chip,
5190                    const struct nand_ecc_caps *caps, int oobavail)
5191{
5192        struct mtd_info *mtd = nand_to_mtd(chip);
5193        const struct nand_ecc_step_info *stepinfo;
5194        int preset_step = chip->ecc.size;
5195        int preset_strength = chip->ecc.strength;
5196        int ecc_bytes, nsteps = mtd->writesize / preset_step;
5197        int i, j;
5198
5199        for (i = 0; i < caps->nstepinfos; i++) {
5200                stepinfo = &caps->stepinfos[i];
5201
5202                if (stepinfo->stepsize != preset_step)
5203                        continue;
5204
5205                for (j = 0; j < stepinfo->nstrengths; j++) {
5206                        if (stepinfo->strengths[j] != preset_strength)
5207                                continue;
5208
5209                        ecc_bytes = caps->calc_ecc_bytes(preset_step,
5210                                                         preset_strength);
5211                        if (WARN_ON_ONCE(ecc_bytes < 0))
5212                                return ecc_bytes;
5213
5214                        if (ecc_bytes * nsteps > oobavail) {
5215                                pr_err("ECC (step, strength) = (%d, %d) does not fit in OOB\n",
5216                                       preset_step, preset_strength);
5217                                return -ENOSPC;
5218                        }
5219
5220                        chip->ecc.bytes = ecc_bytes;
5221
5222                        return 0;
5223                }
5224        }
5225
5226        pr_err("ECC (step, strength) = (%d, %d) not supported on this controller\n",
5227               preset_step, preset_strength);
5228
5229        return -ENOTSUPP;
5230}
5231
5232/**
5233 * nand_match_ecc_req - meet the chip's requirement with least ECC bytes
5234 * @chip: nand chip info structure
5235 * @caps: ECC engine caps info structure
5236 * @oobavail: OOB size that the ECC engine can use
5237 *
5238 * If a chip's ECC requirement is provided, try to meet it with the least
5239 * number of ECC bytes (i.e. with the largest number of OOB-free bytes).
5240 * On success, the chosen ECC settings are set.
5241 */
5242static int
5243nand_match_ecc_req(struct nand_chip *chip,
5244                   const struct nand_ecc_caps *caps, int oobavail)
5245{
5246        struct mtd_info *mtd = nand_to_mtd(chip);
5247        const struct nand_ecc_step_info *stepinfo;
5248        int req_step = chip->base.eccreq.step_size;
5249        int req_strength = chip->base.eccreq.strength;
5250        int req_corr, step_size, strength, nsteps, ecc_bytes, ecc_bytes_total;
5251        int best_step, best_strength, best_ecc_bytes;
5252        int best_ecc_bytes_total = INT_MAX;
5253        int i, j;
5254
5255        /* No information provided by the NAND chip */
5256        if (!req_step || !req_strength)
5257                return -ENOTSUPP;
5258
5259        /* number of correctable bits the chip requires in a page */
5260        req_corr = mtd->writesize / req_step * req_strength;
5261
5262        for (i = 0; i < caps->nstepinfos; i++) {
5263                stepinfo = &caps->stepinfos[i];
5264                step_size = stepinfo->stepsize;
5265
5266                for (j = 0; j < stepinfo->nstrengths; j++) {
5267                        strength = stepinfo->strengths[j];
5268
5269                        /*
5270                         * If both step size and strength are smaller than the
5271                         * chip's requirement, it is not easy to compare the
5272                         * resulting reliability.
5273                         */
5274                        if (step_size < req_step && strength < req_strength)
5275                                continue;
5276
5277                        if (mtd->writesize % step_size)
5278                                continue;
5279
5280                        nsteps = mtd->writesize / step_size;
5281
5282                        ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
5283                        if (WARN_ON_ONCE(ecc_bytes < 0))
5284                                continue;
5285                        ecc_bytes_total = ecc_bytes * nsteps;
5286
5287                        if (ecc_bytes_total > oobavail ||
5288                            strength * nsteps < req_corr)
5289                                continue;
5290
5291                        /*
5292                         * We assume the best is to meet the chip's requirement
5293                         * with the least number of ECC bytes.
5294                         */
5295                        if (ecc_bytes_total < best_ecc_bytes_total) {
5296                                best_ecc_bytes_total = ecc_bytes_total;
5297                                best_step = step_size;
5298                                best_strength = strength;
5299                                best_ecc_bytes = ecc_bytes;
5300                        }
5301                }
5302        }
5303
5304        if (best_ecc_bytes_total == INT_MAX)
5305                return -ENOTSUPP;
5306
5307        chip->ecc.size = best_step;
5308        chip->ecc.strength = best_strength;
5309        chip->ecc.bytes = best_ecc_bytes;
5310
5311        return 0;
5312}
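
/*
 * Illustrative run of nand_match_ecc_req(): assume a chip requiring 8 bits
 * per 512 bytes on a 4096 byte page with oobavail = 224, and a controller
 * whose (hypothetical) calc_ecc_bytes() charges the usual BCH cost of
 * DIV_ROUND_UP(strength * fls(8 * step_size), 8) bytes per step:
 *
 *   step 512,  strength 8  -> 13 bytes/step, 8 steps, 104 total, 64 bits/page
 *   step 512,  strength 16 -> 26 bytes/step, 8 steps, 208 total, 128 bits/page
 *   step 1024, strength 24 -> 42 bytes/step, 4 steps, 168 total, 96 bits/page
 *
 * All three meet the requirement and fit in 224 bytes, but the first one does
 * so with the fewest ECC bytes, so (size, strength, bytes) = (512, 8, 13) is
 * selected.
 */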
5313
5314/**
5315 * nand_maximize_ecc - choose the max ECC strength available
5316 * @chip: nand chip info structure
5317 * @caps: ECC engine caps info structure
5318 * @oobavail: OOB size that the ECC engine can use
5319 *
5320 * Choose the max ECC strength that is supported on the controller, and can fit
5321 * within the chip's OOB.  On success, the chosen ECC settings are set.
5322 */
5323static int
5324nand_maximize_ecc(struct nand_chip *chip,
5325                  const struct nand_ecc_caps *caps, int oobavail)
5326{
5327        struct mtd_info *mtd = nand_to_mtd(chip);
5328        const struct nand_ecc_step_info *stepinfo;
5329        int step_size, strength, nsteps, ecc_bytes, corr;
5330        int best_corr = 0;
5331        int best_step = 0;
5332        int best_strength, best_ecc_bytes;
5333        int i, j;
5334
5335        for (i = 0; i < caps->nstepinfos; i++) {
5336                stepinfo = &caps->stepinfos[i];
5337                step_size = stepinfo->stepsize;
5338
5339                /* If chip->ecc.size is already set, respect it */
5340                if (chip->ecc.size && step_size != chip->ecc.size)
5341                        continue;
5342
5343                for (j = 0; j < stepinfo->nstrengths; j++) {
5344                        strength = stepinfo->strengths[j];
5345
5346                        if (mtd->writesize % step_size)
5347                                continue;
5348
5349                        nsteps = mtd->writesize / step_size;
5350
5351                        ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
5352                        if (WARN_ON_ONCE(ecc_bytes < 0))
5353                                continue;
5354
5355                        if (ecc_bytes * nsteps > oobavail)
5356                                continue;
5357
5358                        corr = strength * nsteps;
5359
5360                        /*
5361                         * If the number of correctable bits is the same,
5362                         * bigger step_size has more reliability.
5363                         */
5364                        if (corr > best_corr ||
5365                            (corr == best_corr && step_size > best_step)) {
5366                                best_corr = corr;
5367                                best_step = step_size;
5368                                best_strength = strength;
5369                                best_ecc_bytes = ecc_bytes;
5370                        }
5371                }
5372        }
5373
5374        if (!best_corr)
5375                return -ENOTSUPP;
5376
5377        chip->ecc.size = best_step;
5378        chip->ecc.strength = best_strength;
5379        chip->ecc.bytes = best_ecc_bytes;
5380
5381        return 0;
5382}
5383
5384/**
5385 * nand_ecc_choose_conf - Set the ECC strength and ECC step size
5386 * @chip: nand chip info structure
5387 * @caps: ECC engine caps info structure
5388 * @oobavail: OOB size that the ECC engine can use
5389 *
5390 * Choose the ECC configuration according to following logic
5391 *
5392 * 1. If both ECC step size and ECC strength are already set (usually by DT)
5393 *    then check if it is supported by this controller.
5394 * 2. If NAND_ECC_MAXIMIZE is set, then select maximum ECC strength.
5395 * 3. Otherwise, try to match the ECC step size and ECC strength closest
5396 *    to the chip's requirement. If available OOB size can't fit the chip
5397 *    requirement then fallback to the maximum ECC step size and ECC strength.
5398 *
5399 * On success, the chosen ECC settings are set.
5400 */
5401int nand_ecc_choose_conf(struct nand_chip *chip,
5402                         const struct nand_ecc_caps *caps, int oobavail)
5403{
5404        struct mtd_info *mtd = nand_to_mtd(chip);
5405
5406        if (WARN_ON(oobavail < 0 || oobavail > mtd->oobsize))
5407                return -EINVAL;
5408
5409        if (chip->ecc.size && chip->ecc.strength)
5410                return nand_check_ecc_caps(chip, caps, oobavail);
5411
5412        if (chip->ecc.options & NAND_ECC_MAXIMIZE)
5413                return nand_maximize_ecc(chip, caps, oobavail);
5414
5415        if (!nand_match_ecc_req(chip, caps, oobavail))
5416                return 0;
5417
5418        return nand_maximize_ecc(chip, caps, oobavail);
5419}
5420EXPORT_SYMBOL_GPL(nand_ecc_choose_conf);
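
/*
 * Sketch of a typical use from a controller driver's ->attach_chip() hook;
 * foo_calc_ecc_bytes() and the 2-byte BBM reservation are assumptions, not
 * requirements:
 *
 *   NAND_ECC_CAPS_SINGLE(foo_ecc_caps, foo_calc_ecc_bytes, 512, 8, 16);
 *
 *   static int foo_attach_chip(struct nand_chip *chip)
 *   {
 *           struct mtd_info *mtd = nand_to_mtd(chip);
 *
 *           return nand_ecc_choose_conf(chip, &foo_ecc_caps,
 *                                       mtd->oobsize - 2);
 *   }
 *
 * On success chip->ecc.size, chip->ecc.strength and chip->ecc.bytes hold the
 * chosen configuration.
 */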
5421
5422/*
5423 * Check if the chip configuration meets the datasheet requirements.
5424 *
5425 * If our configuration corrects A bits per B bytes and the minimum
5426 * required correction level is X bits per Y bytes, then we must ensure
5427 * both of the following are true:
5428 *
5429 * (1) A / B >= X / Y
5430 * (2) A >= X
5431 *
5432 * Requirement (1) ensures we can correct for the required bitflip density.
5433 * Requirement (2) ensures we can correct even when all bitflips are clumped
5434 * in the same sector.
5435 */
5436static bool nand_ecc_strength_good(struct nand_chip *chip)
5437{
5438        struct mtd_info *mtd = nand_to_mtd(chip);
5439        struct nand_ecc_ctrl *ecc = &chip->ecc;
5440        int corr, ds_corr;
5441
5442        if (ecc->size == 0 || chip->base.eccreq.step_size == 0)
5443                /* Not enough information */
5444                return true;
5445
5446        /*
5447         * We get the number of corrected bits per page to compare
5448         * the correction density.
5449         */
5450        corr = (mtd->writesize * ecc->strength) / ecc->size;
5451        ds_corr = (mtd->writesize * chip->base.eccreq.strength) /
5452                  chip->base.eccreq.step_size;
5453
5454        return corr >= ds_corr && ecc->strength >= chip->base.eccreq.strength;
5455}
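
/*
 * Example of the check above (illustrative numbers): a chip requiring 4 bits
 * per 512 bytes on a 2048 byte page needs ds_corr = 2048 * 4 / 512 = 16
 * correctable bits per page. An engine correcting 8 bits per 1024 bytes gives
 * corr = 2048 * 8 / 1024 = 16 with strength 8 >= 4, so it passes, whereas a
 * 1-bit/256-byte Hamming setup (corr = 8, strength = 1) would trigger the
 * "too weak" warning in nand_scan_tail().
 */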
5456
5457static int rawnand_erase(struct nand_device *nand, const struct nand_pos *pos)
5458{
5459        struct nand_chip *chip = container_of(nand, struct nand_chip,
5460                                              base);
5461        unsigned int eb = nanddev_pos_to_row(nand, pos);
5462        int ret;
5463
5464        eb >>= nand->rowconv.eraseblock_addr_shift;
5465
5466        nand_select_target(chip, pos->target);
5467        ret = nand_erase_op(chip, eb);
5468        nand_deselect_target(chip);
5469
5470        return ret;
5471}
5472
5473static int rawnand_markbad(struct nand_device *nand,
5474                           const struct nand_pos *pos)
5475{
5476        struct nand_chip *chip = container_of(nand, struct nand_chip,
5477                                              base);
5478
5479        return nand_markbad_bbm(chip, nanddev_pos_to_offs(nand, pos));
5480}
5481
5482static bool rawnand_isbad(struct nand_device *nand, const struct nand_pos *pos)
5483{
5484        struct nand_chip *chip = container_of(nand, struct nand_chip,
5485                                              base);
5486        int ret;
5487
5488        nand_select_target(chip, pos->target);
5489        ret = nand_isbad_bbm(chip, nanddev_pos_to_offs(nand, pos));
5490        nand_deselect_target(chip);
5491
5492        return ret;
5493}
5494
5495static const struct nand_ops rawnand_ops = {
5496        .erase = rawnand_erase,
5497        .markbad = rawnand_markbad,
5498        .isbad = rawnand_isbad,
5499};
5500
5501/**
5502 * nand_scan_tail - Scan for the NAND device
5503 * @chip: NAND chip object
5504 *
5505 * This is the second phase of the normal nand_scan() function. It fills out
5506 * all the uninitialized function pointers with the defaults and scans for a
5507 * bad block table if appropriate.
5508 */
5509static int nand_scan_tail(struct nand_chip *chip)
5510{
5511        struct mtd_info *mtd = nand_to_mtd(chip);
5512        struct nand_ecc_ctrl *ecc = &chip->ecc;
5513        int ret, i;
5514
5515        /* New bad blocks should be marked in OOB, flash-based BBT, or both */
5516        if (WARN_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
5517                   !(chip->bbt_options & NAND_BBT_USE_FLASH))) {
5518                return -EINVAL;
5519        }
5520
5521        chip->data_buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
5522        if (!chip->data_buf)
5523                return -ENOMEM;
5524
5525        /*
5526         * FIXME: some NAND manufacturer drivers expect the first die to be
5527         * selected when manufacturer->init() is called. They should be fixed
5528         * to explicitly select the relevant die when interacting with the NAND
5529         * chip.
5530         */
5531        nand_select_target(chip, 0);
5532        ret = nand_manufacturer_init(chip);
5533        nand_deselect_target(chip);
5534        if (ret)
5535                goto err_free_buf;
5536
5537        /* Set the internal oob buffer location, just after the page data */
5538        chip->oob_poi = chip->data_buf + mtd->writesize;
5539
5540        /*
5541         * If no default placement scheme is given, select an appropriate one.
5542         */
5543        if (!mtd->ooblayout &&
5544            !(ecc->mode == NAND_ECC_SOFT && ecc->algo == NAND_ECC_BCH)) {
5545                switch (mtd->oobsize) {
5546                case 8:
5547                case 16:
5548                        mtd_set_ooblayout(mtd, &nand_ooblayout_sp_ops);
5549                        break;
5550                case 64:
5551                case 128:
5552                        mtd_set_ooblayout(mtd, &nand_ooblayout_lp_hamming_ops);
5553                        break;
5554                default:
5555                        /*
5556                         * Expose the whole OOB area to users if ECC_NONE
5557                         * is passed. We could do that for all kinds of
5558                         * ->oobsize, but we must keep the old large/small
5559                         * page with ECC layout when ->oobsize <= 128 for
5560                         * compatibility reasons.
5561                         */
5562                        if (ecc->mode == NAND_ECC_NONE) {
5563                                mtd_set_ooblayout(mtd,
5564                                                &nand_ooblayout_lp_ops);
5565                                break;
5566                        }
5567
5568                        WARN(1, "No oob scheme defined for oobsize %d\n",
5569                                mtd->oobsize);
5570                        ret = -EINVAL;
5571                        goto err_nand_manuf_cleanup;
5572                }
5573        }
5574
5575        /*
5576         * Check the ECC mode. If 3-byte/512-byte hardware ECC is selected and
5577         * we have a 256-byte page size, fall back to software ECC.
5578         */
5579
5580        switch (ecc->mode) {
5581        case NAND_ECC_HW_OOB_FIRST:
5582                /* Similar to NAND_ECC_HW, but a separate read_page handle */
5583                if (!ecc->calculate || !ecc->correct || !ecc->hwctl) {
5584                        WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
5585                        ret = -EINVAL;
5586                        goto err_nand_manuf_cleanup;
5587                }
5588                if (!ecc->read_page)
5589                        ecc->read_page = nand_read_page_hwecc_oob_first;
5590                /* fall through */
5591
5592        case NAND_ECC_HW:
5593                /* Use standard hwecc read page function? */
5594                if (!ecc->read_page)
5595                        ecc->read_page = nand_read_page_hwecc;
5596                if (!ecc->write_page)
5597                        ecc->write_page = nand_write_page_hwecc;
5598                if (!ecc->read_page_raw)
5599                        ecc->read_page_raw = nand_read_page_raw;
5600                if (!ecc->write_page_raw)
5601                        ecc->write_page_raw = nand_write_page_raw;
5602                if (!ecc->read_oob)
5603                        ecc->read_oob = nand_read_oob_std;
5604                if (!ecc->write_oob)
5605                        ecc->write_oob = nand_write_oob_std;
5606                if (!ecc->read_subpage)
5607                        ecc->read_subpage = nand_read_subpage;
5608                if (!ecc->write_subpage && ecc->hwctl && ecc->calculate)
5609                        ecc->write_subpage = nand_write_subpage_hwecc;
5610                /* fall through */
5611
5612        case NAND_ECC_HW_SYNDROME:
5613                if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) &&
5614                    (!ecc->read_page ||
5615                     ecc->read_page == nand_read_page_hwecc ||
5616                     !ecc->write_page ||
5617                     ecc->write_page == nand_write_page_hwecc)) {
5618                        WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
5619                        ret = -EINVAL;
5620                        goto err_nand_manuf_cleanup;
5621                }
5622                /* Use standard syndrome read/write page function? */
5623                if (!ecc->read_page)
5624                        ecc->read_page = nand_read_page_syndrome;
5625                if (!ecc->write_page)
5626                        ecc->write_page = nand_write_page_syndrome;
5627                if (!ecc->read_page_raw)
5628                        ecc->read_page_raw = nand_read_page_raw_syndrome;
5629                if (!ecc->write_page_raw)
5630                        ecc->write_page_raw = nand_write_page_raw_syndrome;
5631                if (!ecc->read_oob)
5632                        ecc->read_oob = nand_read_oob_syndrome;
5633                if (!ecc->write_oob)
5634                        ecc->write_oob = nand_write_oob_syndrome;
5635
5636                if (mtd->writesize >= ecc->size) {
5637                        if (!ecc->strength) {
5638                                WARN(1, "Driver must set ecc.strength when using hardware ECC\n");
5639                                ret = -EINVAL;
5640                                goto err_nand_manuf_cleanup;
5641                        }
5642                        break;
5643                }
5644                pr_warn("%d byte HW ECC not possible on %d byte page size, fallback to SW ECC\n",
5645                        ecc->size, mtd->writesize);
5646                ecc->mode = NAND_ECC_SOFT;
5647                ecc->algo = NAND_ECC_HAMMING;
5648                /* fall through */
5649
5650        case NAND_ECC_SOFT:
5651                ret = nand_set_ecc_soft_ops(chip);
5652                if (ret) {
5653                        ret = -EINVAL;
5654                        goto err_nand_manuf_cleanup;
5655                }
5656                break;
5657
5658        case NAND_ECC_ON_DIE:
5659                if (!ecc->read_page || !ecc->write_page) {
5660                        WARN(1, "No ECC functions supplied; on-die ECC not possible\n");
5661                        ret = -EINVAL;
5662                        goto err_nand_manuf_cleanup;
5663                }
5664                if (!ecc->read_oob)
5665                        ecc->read_oob = nand_read_oob_std;
5666                if (!ecc->write_oob)
5667                        ecc->write_oob = nand_write_oob_std;
5668                break;
5669
5670        case NAND_ECC_NONE:
5671                pr_warn("NAND_ECC_NONE selected by board driver. This is not recommended!\n");
5672                ecc->read_page = nand_read_page_raw;
5673                ecc->write_page = nand_write_page_raw;
5674                ecc->read_oob = nand_read_oob_std;
5675                ecc->read_page_raw = nand_read_page_raw;
5676                ecc->write_page_raw = nand_write_page_raw;
5677                ecc->write_oob = nand_write_oob_std;
5678                ecc->size = mtd->writesize;
5679                ecc->bytes = 0;
5680                ecc->strength = 0;
5681                break;
5682
5683        default:
5684                WARN(1, "Invalid NAND_ECC_MODE %d\n", ecc->mode);
5685                ret = -EINVAL;
5686                goto err_nand_manuf_cleanup;
5687        }
5688
5689        if (ecc->correct || ecc->calculate) {
5690                ecc->calc_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
5691                ecc->code_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
5692                if (!ecc->calc_buf || !ecc->code_buf) {
5693                        ret = -ENOMEM;
5694                        goto err_nand_manuf_cleanup;
5695                }
5696        }
5697
5698        /* For many systems, the standard OOB write also works for raw */
5699        if (!ecc->read_oob_raw)
5700                ecc->read_oob_raw = ecc->read_oob;
5701        if (!ecc->write_oob_raw)
5702                ecc->write_oob_raw = ecc->write_oob;
5703
5704        /* propagate ecc info to mtd_info */
5705        mtd->ecc_strength = ecc->strength;
5706        mtd->ecc_step_size = ecc->size;
5707
5708        /*
5709         * Set the number of read / write steps for one page depending on ECC
5710         * mode.
5711         */
5712        ecc->steps = mtd->writesize / ecc->size;
5713        if (ecc->steps * ecc->size != mtd->writesize) {
5714                WARN(1, "Invalid ECC parameters\n");
5715                ret = -EINVAL;
5716                goto err_nand_manuf_cleanup;
5717        }
5718        ecc->total = ecc->steps * ecc->bytes;
5719        if (ecc->total > mtd->oobsize) {
5720                WARN(1, "Total number of ECC bytes exceeded oobsize\n");
5721                ret = -EINVAL;
5722                goto err_nand_manuf_cleanup;
5723        }
5724
5725        /*
5726         * The number of bytes available for a client to place data into
5727         * the out of band area.
5728         */
5729        ret = mtd_ooblayout_count_freebytes(mtd);
5730        if (ret < 0)
5731                ret = 0;
5732
5733        mtd->oobavail = ret;
5734
5735        /* ECC sanity check: warn if it's too weak */
5736        if (!nand_ecc_strength_good(chip))
5737                pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
5738                        mtd->name);
5739
5740        /* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
5741        if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) {
5742                switch (ecc->steps) {
5743                case 2:
5744                        mtd->subpage_sft = 1;
5745                        break;
5746                case 4:
5747                case 8:
5748                case 16:
5749                        mtd->subpage_sft = 2;
5750                        break;
5751                }
5752        }
5753        chip->subpagesize = mtd->writesize >> mtd->subpage_sft;
5754
5755        /* Invalidate the pagebuffer reference */
5756        chip->pagecache.page = -1;
5757
5758        /* Large page NAND with SOFT_ECC should support subpage reads */
5759        switch (ecc->mode) {
5760        case NAND_ECC_SOFT:
5761                if (chip->page_shift > 9)
5762                        chip->options |= NAND_SUBPAGE_READ;
5763                break;
5764
5765        default:
5766                break;
5767        }
5768
5769        ret = nanddev_init(&chip->base, &rawnand_ops, mtd->owner);
5770        if (ret)
5771                goto err_nand_manuf_cleanup;
5772
5773        /* Adjust the MTD_CAP_ flags when NAND_ROM is set. */
5774        if (chip->options & NAND_ROM)
5775                mtd->flags = MTD_CAP_ROM;
5776
5777        /* Fill in remaining MTD driver data */
5778        mtd->_erase = nand_erase;
5779        mtd->_point = NULL;
5780        mtd->_unpoint = NULL;
5781        mtd->_panic_write = panic_nand_write;
5782        mtd->_read_oob = nand_read_oob;
5783        mtd->_write_oob = nand_write_oob;
5784        mtd->_sync = nand_sync;
5785        mtd->_lock = NULL;
5786        mtd->_unlock = NULL;
5787        mtd->_suspend = nand_suspend;
5788        mtd->_resume = nand_resume;
5789        mtd->_reboot = nand_shutdown;
5790        mtd->_block_isreserved = nand_block_isreserved;
5791        mtd->_block_isbad = nand_block_isbad;
5792        mtd->_block_markbad = nand_block_markbad;
5793        mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;
5794
5795        /*
5796         * Initialize bitflip_threshold to its default prior to the scan_bbt() call.
5797         * scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be
5798         * properly set.
5799         */
5800        if (!mtd->bitflip_threshold)
5801                mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);
5802
5803        /* Initialize the ->data_interface field. */
5804        ret = nand_init_data_interface(chip);
5805        if (ret)
5806                goto err_nanddev_cleanup;
5807
5808        /* Enter fastest possible mode on all dies. */
5809        for (i = 0; i < nanddev_ntargets(&chip->base); i++) {
5810                ret = nand_setup_data_interface(chip, i);
5811                if (ret)
5812                        goto err_nanddev_cleanup;
5813        }
5814
5815        /* Check if we should skip the bad block table scan */
5816        if (chip->options & NAND_SKIP_BBTSCAN)
5817                return 0;
5818
5819        /* Build bad block table */
5820        ret = nand_create_bbt(chip);
5821        if (ret)
5822                goto err_nanddev_cleanup;
5823
5824        return 0;
5825
5826
5827err_nanddev_cleanup:
5828        nanddev_cleanup(&chip->base);
5829
5830err_nand_manuf_cleanup:
5831        nand_manufacturer_cleanup(chip);
5832
5833err_free_buf:
5834        kfree(chip->data_buf);
5835        kfree(ecc->code_buf);
5836        kfree(ecc->calc_buf);
5837
5838        return ret;
5839}
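
/*
 * For the NAND_ECC_HW paths handled above, the controller driver is expected
 * to have populated at least the low-level ECC hooks before nand_scan_tail()
 * runs, typically from its ->attach_chip() callback (sketch, foo_* names are
 * hypothetical):
 *
 *   chip->ecc.mode      = NAND_ECC_HW;
 *   chip->ecc.size      = 512;
 *   chip->ecc.strength  = 8;
 *   chip->ecc.bytes     = 13;
 *   chip->ecc.hwctl     = foo_ecc_hwctl;
 *   chip->ecc.calculate = foo_ecc_calculate;
 *   chip->ecc.correct   = foo_ecc_correct;
 *
 * The remaining helpers (page/OOB accessors, ecc.steps, ecc.total, the OOB
 * layout) are then filled in with the defaults selected above.
 */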
5840
5841static int nand_attach(struct nand_chip *chip)
5842{
5843        if (chip->controller->ops && chip->controller->ops->attach_chip)
5844                return chip->controller->ops->attach_chip(chip);
5845
5846        return 0;
5847}
5848
5849static void nand_detach(struct nand_chip *chip)
5850{
5851        if (chip->controller->ops && chip->controller->ops->detach_chip)
5852                chip->controller->ops->detach_chip(chip);
5853}
5854
5855/**
5856 * nand_scan_with_ids - [NAND Interface] Scan for the NAND device
5857 * @chip: NAND chip object
5858 * @maxchips: number of chips to scan for.
5859 * @ids: optional flash IDs table
5860 *
5861 * This fills out all the uninitialized function pointers with the defaults.
5862 * The flash ID is read and the mtd/chip structures are filled with the
5863 * appropriate values.
5864 */
5865int nand_scan_with_ids(struct nand_chip *chip, unsigned int maxchips,
5866                       struct nand_flash_dev *ids)
5867{
5868        int ret;
5869
5870        if (!maxchips)
5871                return -EINVAL;
5872
5873        ret = nand_scan_ident(chip, maxchips, ids);
5874        if (ret)
5875                return ret;
5876
5877        ret = nand_attach(chip);
5878        if (ret)
5879                goto cleanup_ident;
5880
5881        ret = nand_scan_tail(chip);
5882        if (ret)
5883                goto detach_chip;
5884
5885        return 0;
5886
5887detach_chip:
5888        nand_detach(chip);
5889cleanup_ident:
5890        nand_scan_ident_cleanup(chip);
5891
5892        return ret;
5893}
5894EXPORT_SYMBOL(nand_scan_with_ids);
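
/*
 * Sketch of the usual probe-time sequence in a controller driver (foo and
 * pdev are hypothetical):
 *
 *   chip->controller = &foo->controller;
 *   nand_set_flash_node(chip, pdev->dev.of_node);
 *   nand_set_controller_data(chip, foo);
 *
 *   ret = nand_scan(chip, 1);    // wrapper around nand_scan_with_ids()
 *   if (ret)
 *           return ret;
 *
 *   ret = mtd_device_register(nand_to_mtd(chip), NULL, 0);
 *   if (ret)
 *           nand_cleanup(chip);
 *
 * nand_release() below undoes both steps on driver removal.
 */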
5895
5896/**
5897 * nand_cleanup - [NAND Interface] Free resources held by the NAND device
5898 * @chip: NAND chip object
5899 */
5900void nand_cleanup(struct nand_chip *chip)
5901{
5902        if (chip->ecc.mode == NAND_ECC_SOFT &&
5903            chip->ecc.algo == NAND_ECC_BCH)
5904                nand_bch_free((struct nand_bch_control *)chip->ecc.priv);
5905
5906        /* Free bad block table memory */
5907        kfree(chip->bbt);
5908        kfree(chip->data_buf);
5909        kfree(chip->ecc.code_buf);
5910        kfree(chip->ecc.calc_buf);
5911
5912        /* Free bad block descriptor memory */
5913        if (chip->badblock_pattern && chip->badblock_pattern->options
5914                        & NAND_BBT_DYNAMICSTRUCT)
5915                kfree(chip->badblock_pattern);
5916
5917        /* Free manufacturer priv data. */
5918        nand_manufacturer_cleanup(chip);
5919
5920        /* Free controller specific allocations after chip identification */
5921        nand_detach(chip);
5922
5923        /* Free identification phase allocations */
5924        nand_scan_ident_cleanup(chip);
5925}
5927EXPORT_SYMBOL_GPL(nand_cleanup);
5928
5929/**
5930 * nand_release - [NAND Interface] Unregister the MTD device and free resources
5931 *                held by the NAND device
5932 * @chip: NAND chip object
5933 */
5934void nand_release(struct nand_chip *chip)
5935{
5936        mtd_device_unregister(nand_to_mtd(chip));
5937        nand_cleanup(chip);
5938}
5939EXPORT_SYMBOL_GPL(nand_release);
5940
5941MODULE_LICENSE("GPL");
5942MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>");
5943MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
5944MODULE_DESCRIPTION("Generic NAND flash driver code");
5945