uboot/drivers/mtd/nand/spi/core.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016-2017 Micron Technology, Inc.
 *
 * Authors:
 *	Peter Pan <peterpandong@micron.com>
 *	Boris Brezillon <boris.brezillon@bootlin.com>
 */

#define pr_fmt(fmt)	"spi-nand: " fmt

#ifndef __UBOOT__
#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/spinand.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#else
#include <common.h>
#include <errno.h>
#include <spi.h>
#include <spi-mem.h>
#include <linux/mtd/spinand.h>
#endif

/* SPI NAND index visible in MTD names */
static int spi_nand_idx;
static void spinand_cache_op_adjust_column(struct spinand_device *spinand,
					   const struct nand_page_io_req *req,
					   u16 *column)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int shift;

	if (nand->memorg.planes_per_lun < 2)
		return;

	/* The plane number is passed in MSB just above the column address */
	shift = fls(nand->memorg.pagesize);
	*column |= req->pos.plane << shift;
}

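/*
 * Register accessors built on the GET/SET FEATURE commands. They go through
 * the DMA-safe scratch buffer required by the spi-mem interface.
 */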
static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
{
	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(reg,
						      spinand->scratchbuf);
	int ret;

	ret = spi_mem_exec_op(spinand->slave, &op);
	if (ret)
		return ret;

	*val = *spinand->scratchbuf;
	return 0;
}

static int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val)
{
	struct spi_mem_op op = SPINAND_SET_FEATURE_OP(reg,
						      spinand->scratchbuf);

	*spinand->scratchbuf = val;
	return spi_mem_exec_op(spinand->slave, &op);
}

static int spinand_read_status(struct spinand_device *spinand, u8 *status)
{
	return spinand_read_reg_op(spinand, REG_STATUS, status);
}

static int spinand_get_cfg(struct spinand_device *spinand, u8 *cfg)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	if (WARN_ON(spinand->cur_target < 0 ||
		    spinand->cur_target >= nand->memorg.ntargets))
		return -EINVAL;

	*cfg = spinand->cfg_cache[spinand->cur_target];
	return 0;
}

static int spinand_set_cfg(struct spinand_device *spinand, u8 cfg)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	if (WARN_ON(spinand->cur_target < 0 ||
		    spinand->cur_target >= nand->memorg.ntargets))
		return -EINVAL;

	if (spinand->cfg_cache[spinand->cur_target] == cfg)
		return 0;

	ret = spinand_write_reg_op(spinand, REG_CFG, cfg);
	if (ret)
		return ret;

	spinand->cfg_cache[spinand->cur_target] = cfg;
	return 0;
}

/**
 * spinand_upd_cfg() - Update the configuration register
 * @spinand: the spinand device
 * @mask: the mask encoding the bits to update in the config reg
 * @val: the new value to apply
 *
 * Update the configuration register.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val)
{
	int ret;
	u8 cfg;

	ret = spinand_get_cfg(spinand, &cfg);
	if (ret)
		return ret;

	cfg &= ~mask;
	cfg |= val;

	return spinand_set_cfg(spinand, cfg);
}

/**
 * spinand_select_target() - Select a specific NAND target/die
 * @spinand: the spinand device
 * @target: the target/die to select
 *
 * Select a new target/die. If the chip only has one die, this function is a
 * NOOP.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_select_target(struct spinand_device *spinand, unsigned int target)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	if (WARN_ON(target >= nand->memorg.ntargets))
		return -EINVAL;

	if (spinand->cur_target == target)
		return 0;

	if (nand->memorg.ntargets == 1) {
		spinand->cur_target = target;
		return 0;
	}

	ret = spinand->select_target(spinand, target);
	if (ret)
		return ret;

	spinand->cur_target = target;
	return 0;
}

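/*
 * Read the CFG register of every target/die once and cache the values, so
 * that later updates can skip redundant GET/SET FEATURE operations.
 */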
static int spinand_init_cfg_cache(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct udevice *dev = spinand->slave->dev;
	unsigned int target;
	int ret;

	spinand->cfg_cache = devm_kzalloc(dev,
					  sizeof(*spinand->cfg_cache) *
					  nand->memorg.ntargets,
					  GFP_KERNEL);
	if (!spinand->cfg_cache)
		return -ENOMEM;

	for (target = 0; target < nand->memorg.ntargets; target++) {
		ret = spinand_select_target(spinand, target);
		if (ret)
			return ret;

		/*
		 * We use spinand_read_reg_op() instead of spinand_get_cfg()
		 * here to bypass the config cache.
		 */
		ret = spinand_read_reg_op(spinand, REG_CFG,
					  &spinand->cfg_cache[target]);
		if (ret)
			return ret;
	}

	return 0;
}

static int spinand_init_quad_enable(struct spinand_device *spinand)
{
	bool enable = false;

	if (!(spinand->flags & SPINAND_HAS_QE_BIT))
		return 0;

	if (spinand->op_templates.read_cache->data.buswidth == 4 ||
	    spinand->op_templates.write_cache->data.buswidth == 4 ||
	    spinand->op_templates.update_cache->data.buswidth == 4)
		enable = true;

	return spinand_upd_cfg(spinand, CFG_QUAD_ENABLE,
			       enable ? CFG_QUAD_ENABLE : 0);
}

static int spinand_ecc_enable(struct spinand_device *spinand,
			      bool enable)
{
	return spinand_upd_cfg(spinand, CFG_ECC_ENABLE,
			       enable ? CFG_ECC_ENABLE : 0);
}

static int spinand_write_enable_op(struct spinand_device *spinand)
{
	struct spi_mem_op op = SPINAND_WR_EN_DIS_OP(true);

	return spi_mem_exec_op(spinand->slave, &op);
}

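/* Issue a PAGE READ command to load the requested page into the chip cache. */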
static int spinand_load_page_op(struct spinand_device *spinand,
				const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
	struct spi_mem_op op = SPINAND_PAGE_READ_OP(row);

	return spi_mem_exec_op(spinand->slave, &op);
}

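/*
 * Transfer the page and/or OOB data from the chip cache into the caller's
 * buffers, splitting the READ FROM CACHE operation when the controller
 * limits the transfer size, and applying the OOB layout if requested.
 */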
static int spinand_read_from_cache_op(struct spinand_device *spinand,
				      const struct nand_page_io_req *req)
{
	struct spi_mem_op op = *spinand->op_templates.read_cache;
	struct nand_device *nand = spinand_to_nand(spinand);
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	struct nand_page_io_req adjreq = *req;
	unsigned int nbytes = 0;
	void *buf = NULL;
	u16 column = 0;
	int ret;

	if (req->datalen) {
		adjreq.datalen = nanddev_page_size(nand);
		adjreq.dataoffs = 0;
		adjreq.databuf.in = spinand->databuf;
		buf = spinand->databuf;
		nbytes = adjreq.datalen;
	}

	if (req->ooblen) {
		adjreq.ooblen = nanddev_per_page_oobsize(nand);
		adjreq.ooboffs = 0;
		adjreq.oobbuf.in = spinand->oobbuf;
		nbytes += nanddev_per_page_oobsize(nand);
		if (!buf) {
			buf = spinand->oobbuf;
			column = nanddev_page_size(nand);
		}
	}

	spinand_cache_op_adjust_column(spinand, &adjreq, &column);
	op.addr.val = column;

	/*
	 * Some controllers are limited in terms of max RX data size. In this
	 * case, just repeat the READ_CACHE operation after updating the
	 * column.
	 */
	while (nbytes) {
		op.data.buf.in = buf;
		op.data.nbytes = nbytes;
		ret = spi_mem_adjust_op_size(spinand->slave, &op);
		if (ret)
			return ret;

		ret = spi_mem_exec_op(spinand->slave, &op);
		if (ret)
			return ret;

		buf += op.data.nbytes;
		nbytes -= op.data.nbytes;
		op.addr.val += op.data.nbytes;
	}

	if (req->datalen)
		memcpy(req->databuf.in, spinand->databuf + req->dataoffs,
		       req->datalen);

	if (req->ooblen) {
		if (req->mode == MTD_OPS_AUTO_OOB)
			mtd_ooblayout_get_databytes(mtd, req->oobbuf.in,
						    spinand->oobbuf,
						    req->ooboffs,
						    req->ooblen);
		else
			memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
			       req->ooblen);
	}

	return 0;
}

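/*
 * Fill the chip cache with the data to be programmed, padding untouched
 * bytes with 0xff, and split the transfer into LOAD CACHE plus LOAD RANDOM
 * CACHE operations when the controller limits the transfer size.
 */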
static int spinand_write_to_cache_op(struct spinand_device *spinand,
				     const struct nand_page_io_req *req)
{
	struct spi_mem_op op = *spinand->op_templates.write_cache;
	struct nand_device *nand = spinand_to_nand(spinand);
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	struct nand_page_io_req adjreq = *req;
	unsigned int nbytes = 0;
	void *buf = NULL;
	u16 column = 0;
	int ret;

	memset(spinand->databuf, 0xff,
	       nanddev_page_size(nand) +
	       nanddev_per_page_oobsize(nand));

	if (req->datalen) {
		memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
		       req->datalen);
		adjreq.dataoffs = 0;
		adjreq.datalen = nanddev_page_size(nand);
		adjreq.databuf.out = spinand->databuf;
		nbytes = adjreq.datalen;
		buf = spinand->databuf;
	}

	if (req->ooblen) {
		if (req->mode == MTD_OPS_AUTO_OOB)
			mtd_ooblayout_set_databytes(mtd, req->oobbuf.out,
						    spinand->oobbuf,
						    req->ooboffs,
						    req->ooblen);
		else
			memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
			       req->ooblen);

		adjreq.ooblen = nanddev_per_page_oobsize(nand);
		adjreq.ooboffs = 0;
		nbytes += nanddev_per_page_oobsize(nand);
		if (!buf) {
			buf = spinand->oobbuf;
			column = nanddev_page_size(nand);
		}
	}

	spinand_cache_op_adjust_column(spinand, &adjreq, &column);

	op = *spinand->op_templates.write_cache;
	op.addr.val = column;

	/*
	 * Some controllers are limited in terms of max TX data size. In this
	 * case, split the operation into one LOAD CACHE and one or more
	 * LOAD RANDOM CACHE.
	 */
	while (nbytes) {
		op.data.buf.out = buf;
		op.data.nbytes = nbytes;

		ret = spi_mem_adjust_op_size(spinand->slave, &op);
		if (ret)
			return ret;

		ret = spi_mem_exec_op(spinand->slave, &op);
		if (ret)
			return ret;

		buf += op.data.nbytes;
		nbytes -= op.data.nbytes;
		op.addr.val += op.data.nbytes;

		/*
		 * We need to use the RANDOM LOAD CACHE operation if there's
		 * more than one iteration, because the LOAD operation resets
		 * the cache to 0xff.
		 */
		if (nbytes) {
			column = op.addr.val;
			op = *spinand->op_templates.update_cache;
			op.addr.val = column;
		}
	}

	return 0;
}

static int spinand_program_op(struct spinand_device *spinand,
			      const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
	struct spi_mem_op op = SPINAND_PROG_EXEC_OP(row);

	return spi_mem_exec_op(spinand->slave, &op);
}

static int spinand_erase_op(struct spinand_device *spinand,
			    const struct nand_pos *pos)
{
	struct nand_device *nand = &spinand->base;
	unsigned int row = nanddev_pos_to_row(nand, pos);
	struct spi_mem_op op = SPINAND_BLK_ERASE_OP(row);

	return spi_mem_exec_op(spinand->slave, &op);
}

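/*
 * Poll the status register until the device leaves the busy state or the
 * 400 ms timeout expires. The last status value is returned through @s.
 */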
static int spinand_wait(struct spinand_device *spinand, u8 *s)
{
	unsigned long start, stop;
	u8 status;
	int ret;

	start = get_timer(0);
	stop = 400;
	do {
		ret = spinand_read_status(spinand, &status);
		if (ret)
			return ret;

		if (!(status & STATUS_BUSY))
			goto out;
	} while (get_timer(start) < stop);

	/*
	 * Extra read, just in case the STATUS_BUSY bit has changed
	 * since our last check
	 */
	ret = spinand_read_status(spinand, &status);
	if (ret)
		return ret;

out:
	if (s)
		*s = status;

	return status & STATUS_BUSY ? -ETIMEDOUT : 0;
}

static int spinand_read_id_op(struct spinand_device *spinand, u8 *buf)
{
	struct spi_mem_op op = SPINAND_READID_OP(0, spinand->scratchbuf,
						 SPINAND_MAX_ID_LEN);
	int ret;

	ret = spi_mem_exec_op(spinand->slave, &op);
	if (!ret)
		memcpy(buf, spinand->scratchbuf, SPINAND_MAX_ID_LEN);

	return ret;
}

static int spinand_reset_op(struct spinand_device *spinand)
{
	struct spi_mem_op op = SPINAND_RESET_OP;
	int ret;

	ret = spi_mem_exec_op(spinand->slave, &op);
	if (ret)
		return ret;

	return spinand_wait(spinand, NULL);
}

static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
{
	return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock);
}

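/*
 * Translate the ECC bits of the status register into a number of corrected
 * bitflips or -EBADMSG, deferring to the vendor-specific get_status() hook
 * when the manufacturer driver provides one.
 */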
static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	if (spinand->eccinfo.get_status)
		return spinand->eccinfo.get_status(spinand, status);

	switch (status & STATUS_ECC_MASK) {
	case STATUS_ECC_NO_BITFLIPS:
		return 0;

	case STATUS_ECC_HAS_BITFLIPS:
		/*
		 * We have no way to know exactly how many bitflips have been
		 * fixed, so let's return the maximum possible value so that
		 * wear-leveling layers move the data immediately.
		 */
		return nand->eccreq.strength;

	case STATUS_ECC_UNCOR_ERROR:
		return -EBADMSG;

	default:
		break;
	}

	return -EINVAL;
}

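/*
 * Read one page: load it into the chip cache, wait for the device to become
 * ready, transfer the data out and, if on-die ECC was enabled, report the
 * ECC status.
 */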
static int spinand_read_page(struct spinand_device *spinand,
			     const struct nand_page_io_req *req,
			     bool ecc_enabled)
{
	u8 status;
	int ret;

	ret = spinand_load_page_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_wait(spinand, &status);
	if (ret < 0)
		return ret;

	ret = spinand_read_from_cache_op(spinand, req);
	if (ret)
		return ret;

	if (!ecc_enabled)
		return 0;

	return spinand_check_ecc_status(spinand, status);
}

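/*
 * Program one page: WRITE ENABLE, fill the chip cache, issue PROGRAM EXECUTE
 * and wait for completion, reporting -EIO if the program operation failed.
 */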
static int spinand_write_page(struct spinand_device *spinand,
			      const struct nand_page_io_req *req)
{
	u8 status;
	int ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	ret = spinand_write_to_cache_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_program_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_wait(spinand, &status);
	if (!ret && (status & STATUS_PROG_FAILED))
		ret = -EIO;

	return ret;
}

static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
			    struct mtd_oob_ops *ops)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	unsigned int max_bitflips = 0;
	struct nand_io_iter iter;
	bool enable_ecc = false;
	bool ecc_failed = false;
	int ret = 0;

	if (ops->mode != MTD_OPS_RAW && spinand->eccinfo.ooblayout)
		enable_ecc = true;

#ifndef __UBOOT__
	mutex_lock(&spinand->lock);
#endif

	nanddev_io_for_each_page(nand, from, ops, &iter) {
		ret = spinand_select_target(spinand, iter.req.pos.target);
		if (ret)
			break;

		ret = spinand_ecc_enable(spinand, enable_ecc);
		if (ret)
			break;

		ret = spinand_read_page(spinand, &iter.req, enable_ecc);
		if (ret < 0 && ret != -EBADMSG)
			break;

		if (ret == -EBADMSG) {
			ecc_failed = true;
			mtd->ecc_stats.failed++;
			ret = 0;
		} else {
			mtd->ecc_stats.corrected += ret;
			max_bitflips = max_t(unsigned int, max_bitflips, ret);
		}

		ops->retlen += iter.req.datalen;
		ops->oobretlen += iter.req.ooblen;
	}

#ifndef __UBOOT__
	mutex_unlock(&spinand->lock);
#endif
	if (ecc_failed && !ret)
		ret = -EBADMSG;

	return ret ? ret : max_bitflips;
}

static int spinand_mtd_write(struct mtd_info *mtd, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_io_iter iter;
	bool enable_ecc = false;
	int ret = 0;

	if (ops->mode != MTD_OPS_RAW && mtd->ooblayout)
		enable_ecc = true;

#ifndef __UBOOT__
	mutex_lock(&spinand->lock);
#endif

	nanddev_io_for_each_page(nand, to, ops, &iter) {
		ret = spinand_select_target(spinand, iter.req.pos.target);
		if (ret)
			break;

		ret = spinand_ecc_enable(spinand, enable_ecc);
		if (ret)
			break;

		ret = spinand_write_page(spinand, &iter.req);
		if (ret)
			break;

		ops->retlen += iter.req.datalen;
		ops->oobretlen += iter.req.ooblen;
	}

#ifndef __UBOOT__
	mutex_unlock(&spinand->lock);
#endif

	return ret;
}

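/*
 * A block is considered bad when the two bad-block marker bytes at the start
 * of the OOB area of its first page are not both 0xff. The check is done
 * with on-die ECC disabled (raw mode).
 */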
static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct nand_page_io_req req = {
		.pos = *pos,
		.ooblen = 2,
		.ooboffs = 0,
		.oobbuf.in = spinand->oobbuf,
		.mode = MTD_OPS_RAW,
	};
	int ret;

	memset(spinand->oobbuf, 0, 2);
	ret = spinand_select_target(spinand, pos->target);
	if (ret)
		return ret;

	ret = spinand_read_page(spinand, &req, false);
	if (ret)
		return ret;

	if (spinand->oobbuf[0] != 0xff || spinand->oobbuf[1] != 0xff)
		return true;

	return false;
}

static int spinand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
#ifndef __UBOOT__
	struct spinand_device *spinand = nand_to_spinand(nand);
#endif
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
#ifndef __UBOOT__
	mutex_lock(&spinand->lock);
#endif
	ret = nanddev_isbad(nand, &pos);
#ifndef __UBOOT__
	mutex_unlock(&spinand->lock);
#endif
	return ret;
}

static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct nand_page_io_req req = {
		.pos = *pos,
		.ooboffs = 0,
		.ooblen = 2,
		.oobbuf.out = spinand->oobbuf,
	};
	int ret;

	/* Erase block before marking it bad. */
	ret = spinand_select_target(spinand, pos->target);
	if (ret)
		return ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	ret = spinand_erase_op(spinand, pos);
	if (ret)
		return ret;

	memset(spinand->oobbuf, 0, 2);
	return spinand_write_page(spinand, &req);
}

static int spinand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
#ifndef __UBOOT__
	struct spinand_device *spinand = nand_to_spinand(nand);
#endif
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
#ifndef __UBOOT__
	mutex_lock(&spinand->lock);
#endif
	ret = nanddev_markbad(nand, &pos);
#ifndef __UBOOT__
	mutex_unlock(&spinand->lock);
#endif
	return ret;
}

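/*
 * Erase one block: select the die, issue WRITE ENABLE followed by BLOCK
 * ERASE, and wait for completion, reporting -EIO on an erase failure.
 */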
static int spinand_erase(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	u8 status;
	int ret;

	ret = spinand_select_target(spinand, pos->target);
	if (ret)
		return ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	ret = spinand_erase_op(spinand, pos);
	if (ret)
		return ret;

	ret = spinand_wait(spinand, &status);
	if (!ret && (status & STATUS_ERASE_FAILED))
		ret = -EIO;

	return ret;
}

static int spinand_mtd_erase(struct mtd_info *mtd,
			     struct erase_info *einfo)
{
#ifndef __UBOOT__
	struct spinand_device *spinand = mtd_to_spinand(mtd);
#endif
	int ret;

#ifndef __UBOOT__
	mutex_lock(&spinand->lock);
#endif
	ret = nanddev_mtd_erase(mtd, einfo);
#ifndef __UBOOT__
	mutex_unlock(&spinand->lock);
#endif

	return ret;
}

static int spinand_mtd_block_isreserved(struct mtd_info *mtd, loff_t offs)
{
#ifndef __UBOOT__
	struct spinand_device *spinand = mtd_to_spinand(mtd);
#endif
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
#ifndef __UBOOT__
	mutex_lock(&spinand->lock);
#endif
	ret = nanddev_isreserved(nand, &pos);
#ifndef __UBOOT__
	mutex_unlock(&spinand->lock);
#endif

	return ret;
}

const struct spi_mem_op *
spinand_find_supported_op(struct spinand_device *spinand,
			  const struct spi_mem_op *ops,
			  unsigned int nops)
{
	unsigned int i;

	for (i = 0; i < nops; i++) {
		if (spi_mem_supports_op(spinand->slave, &ops[i]))
			return &ops[i];
	}

	return NULL;
}

static const struct nand_ops spinand_ops = {
	.erase = spinand_erase,
	.markbad = spinand_markbad,
	.isbad = spinand_isbad,
};

static const struct spinand_manufacturer *spinand_manufacturers[] = {
	&gigadevice_spinand_manufacturer,
	&macronix_spinand_manufacturer,
	&micron_spinand_manufacturer,
	&winbond_spinand_manufacturer,
};

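/*
 * Walk the list of known manufacturer drivers until one of them recognizes
 * the ID bytes returned by READ ID. A positive detect() return value means
 * a match, a negative value an error, and 0 means "not mine".
 */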
static int spinand_manufacturer_detect(struct spinand_device *spinand)
{
	unsigned int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(spinand_manufacturers); i++) {
		ret = spinand_manufacturers[i]->ops->detect(spinand);
		if (ret > 0) {
			spinand->manufacturer = spinand_manufacturers[i];
			return 0;
		} else if (ret < 0) {
			return ret;
		}
	}

	return -ENOTSUPP;
}

static int spinand_manufacturer_init(struct spinand_device *spinand)
{
	if (spinand->manufacturer->ops->init)
		return spinand->manufacturer->ops->init(spinand);

	return 0;
}

static void spinand_manufacturer_cleanup(struct spinand_device *spinand)
{
	/* Release manufacturer private data */
	if (spinand->manufacturer->ops->cleanup)
		return spinand->manufacturer->ops->cleanup(spinand);
}

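/*
 * Pick the first op variant that the SPI controller can execute for a full
 * page + OOB transfer, possibly split into several chunks by
 * spi_mem_adjust_op_size().
 */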
static const struct spi_mem_op *
spinand_select_op_variant(struct spinand_device *spinand,
			  const struct spinand_op_variants *variants)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int i;

	for (i = 0; i < variants->nops; i++) {
		struct spi_mem_op op = variants->ops[i];
		unsigned int nbytes;
		int ret;

		nbytes = nanddev_per_page_oobsize(nand) +
			 nanddev_page_size(nand);

		while (nbytes) {
			op.data.nbytes = nbytes;
			ret = spi_mem_adjust_op_size(spinand->slave, &op);
			if (ret)
				break;

			if (!spi_mem_supports_op(spinand->slave, &op))
				break;

			nbytes -= op.data.nbytes;
		}

		if (!nbytes)
			return &variants->ops[i];
	}

	return NULL;
}

/**
 * spinand_match_and_init() - Try to find a match between a device ID and an
 *                            entry in a spinand_info table
 * @spinand: SPI NAND object
 * @table: SPI NAND device description table
 * @table_size: size of the device description table
 * @devid: device ID read from the chip, to be matched against table entries
 *
 * Should be used by SPI NAND manufacturer drivers when they want to find a
 * match between a device ID retrieved through the READ_ID command and an
 * entry in the SPI NAND description table. If a match is found, the spinand
 * object will be initialized with information provided by the matching
 * spinand_info entry.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_match_and_init(struct spinand_device *spinand,
			   const struct spinand_info *table,
			   unsigned int table_size, u8 devid)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int i;

	for (i = 0; i < table_size; i++) {
		const struct spinand_info *info = &table[i];
		const struct spi_mem_op *op;

		if (devid != info->devid)
			continue;

		nand->memorg = table[i].memorg;
		nand->eccreq = table[i].eccreq;
		spinand->eccinfo = table[i].eccinfo;
		spinand->flags = table[i].flags;
		spinand->select_target = table[i].select_target;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.read_cache);
		if (!op)
			return -ENOTSUPP;

		spinand->op_templates.read_cache = op;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.write_cache);
		if (!op)
			return -ENOTSUPP;

		spinand->op_templates.write_cache = op;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.update_cache);
		spinand->op_templates.update_cache = op;

		return 0;
	}

	return -ENOTSUPP;
}

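/*
 * Reset the chip, read its ID bytes and let the manufacturer drivers
 * identify the exact device and fill in its description.
 */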
static int spinand_detect(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	ret = spinand_reset_op(spinand);
	if (ret)
		return ret;

	ret = spinand_read_id_op(spinand, spinand->id.data);
	if (ret)
		return ret;

	spinand->id.len = SPINAND_MAX_ID_LEN;

	ret = spinand_manufacturer_detect(spinand);
	if (ret) {
		dev_err(spinand->slave->dev, "unknown raw ID %*phN\n",
			SPINAND_MAX_ID_LEN, spinand->id.data);
		return ret;
	}

	if (nand->memorg.ntargets > 1 && !spinand->select_target) {
		dev_err(spinand->slave->dev,
			"SPI NANDs with more than one die must implement ->select_target()\n");
		return -EINVAL;
	}

	dev_info(spinand->slave->dev,
		 "%s SPI NAND was found.\n", spinand->manufacturer->name);
	dev_info(spinand->slave->dev,
		 "%llu MiB, block size: %zu KiB, page size: %zu, OOB size: %u\n",
		 nanddev_size(nand) >> 20, nanddev_eraseblock_size(nand) >> 10,
		 nanddev_page_size(nand), nanddev_per_page_oobsize(nand));

	return 0;
}

static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section,
				       struct mtd_oob_region *region)
{
	return -ERANGE;
}

static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section,
					struct mtd_oob_region *region)
{
	if (section)
		return -ERANGE;

	/* Reserve 2 bytes for the BBM. */
	region->offset = 2;
	region->length = 62;

	return 0;
}

static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = {
	.ecc = spinand_noecc_ooblayout_ecc,
	.free = spinand_noecc_ooblayout_free,
};

static int spinand_init(struct spinand_device *spinand)
{
	struct mtd_info *mtd = spinand_to_mtd(spinand);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	int ret, i;

	/*
	 * We need a scratch buffer because the spi_mem interface requires that
	 * buf passed in spi_mem_op->data.buf be DMA-able.
	 */
	spinand->scratchbuf = kzalloc(SPINAND_MAX_ID_LEN, GFP_KERNEL);
	if (!spinand->scratchbuf)
		return -ENOMEM;

	ret = spinand_detect(spinand);
	if (ret)
		goto err_free_bufs;

	/*
	 * Use kzalloc() instead of devm_kzalloc() here, because some drivers
	 * may use this buffer for DMA access.
	 * Memory allocated by devm_ does not guarantee DMA-safe alignment.
	 */
	spinand->databuf = kzalloc(nanddev_page_size(nand) +
				   nanddev_per_page_oobsize(nand),
				   GFP_KERNEL);
	if (!spinand->databuf) {
		ret = -ENOMEM;
		goto err_free_bufs;
	}

	spinand->oobbuf = spinand->databuf + nanddev_page_size(nand);

	ret = spinand_init_cfg_cache(spinand);
	if (ret)
		goto err_free_bufs;

	ret = spinand_init_quad_enable(spinand);
	if (ret)
		goto err_free_bufs;

	ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0);
	if (ret)
		goto err_free_bufs;

	ret = spinand_manufacturer_init(spinand);
	if (ret) {
		dev_err(spinand->slave->dev,
			"Failed to initialize the SPI NAND chip (err = %d)\n",
			ret);
		goto err_free_bufs;
	}

	/* After power up, all blocks are locked, so unlock them here. */
	for (i = 0; i < nand->memorg.ntargets; i++) {
		ret = spinand_select_target(spinand, i);
		if (ret)
			goto err_free_bufs;

		ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
		if (ret)
			goto err_free_bufs;
	}

	ret = nanddev_init(nand, &spinand_ops, THIS_MODULE);
	if (ret)
		goto err_manuf_cleanup;

	/*
	 * Right now, we don't support host-side ECC, so the whole OOB
	 * area is made available to the user.
	 */
	mtd->_read_oob = spinand_mtd_read;
	mtd->_write_oob = spinand_mtd_write;
	mtd->_block_isbad = spinand_mtd_block_isbad;
	mtd->_block_markbad = spinand_mtd_block_markbad;
	mtd->_block_isreserved = spinand_mtd_block_isreserved;
	mtd->_erase = spinand_mtd_erase;

	if (spinand->eccinfo.ooblayout)
		mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout);
	else
		mtd_set_ooblayout(mtd, &spinand_noecc_ooblayout);

	ret = mtd_ooblayout_count_freebytes(mtd);
	if (ret < 0)
		goto err_cleanup_nanddev;

	mtd->oobavail = ret;

	return 0;

err_cleanup_nanddev:
	nanddev_cleanup(nand);

err_manuf_cleanup:
	spinand_manufacturer_cleanup(spinand);

err_free_bufs:
	kfree(spinand->databuf);
	kfree(spinand->scratchbuf);
	return ret;
}

static void spinand_cleanup(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	nanddev_cleanup(nand);
	spinand_manufacturer_cleanup(spinand);
	kfree(spinand->databuf);
	kfree(spinand->scratchbuf);
}

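/*
 * U-Boot driver-model probe: wire up the MTD/NAND structures, give the
 * device a "spi-nandN" name, initialize the chip and register the MTD.
 */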
static int spinand_probe(struct udevice *dev)
{
	struct spinand_device *spinand = dev_get_priv(dev);
	struct spi_slave *slave = dev_get_parent_priv(dev);
	struct mtd_info *mtd = dev_get_uclass_priv(dev);
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

#ifndef __UBOOT__
	spinand = devm_kzalloc(&mem->spi->dev, sizeof(*spinand),
			       GFP_KERNEL);
	if (!spinand)
		return -ENOMEM;

	spinand->spimem = mem;
	spi_mem_set_drvdata(mem, spinand);
	spinand_set_of_node(spinand, mem->spi->dev.of_node);
	mutex_init(&spinand->lock);

	mtd = spinand_to_mtd(spinand);
	mtd->dev.parent = &mem->spi->dev;
#else
	nand->mtd = mtd;
	mtd->priv = nand;
	mtd->dev = dev;
	mtd->name = malloc(20);
	if (!mtd->name)
		return -ENOMEM;
	sprintf(mtd->name, "spi-nand%d", spi_nand_idx++);
	spinand->slave = slave;
	spinand_set_of_node(spinand, dev->node.np);
#endif

	ret = spinand_init(spinand);
	if (ret)
		return ret;

#ifndef __UBOOT__
	ret = mtd_device_register(mtd, NULL, 0);
#else
	ret = add_mtd_device(mtd);
#endif
	if (ret)
		goto err_spinand_cleanup;

	return 0;

err_spinand_cleanup:
	spinand_cleanup(spinand);

	return ret;
}

#ifndef __UBOOT__
static int spinand_remove(struct udevice *slave)
{
	struct spinand_device *spinand;
	struct mtd_info *mtd;
	int ret;

	spinand = spi_mem_get_drvdata(slave);
	mtd = spinand_to_mtd(spinand);
	free(mtd->name);

	ret = mtd_device_unregister(mtd);
	if (ret)
		return ret;

	spinand_cleanup(spinand);

	return 0;
}

static const struct spi_device_id spinand_ids[] = {
	{ .name = "spi-nand" },
	{ /* sentinel */ },
};

#ifdef CONFIG_OF
static const struct of_device_id spinand_of_ids[] = {
	{ .compatible = "spi-nand" },
	{ /* sentinel */ },
};
#endif

static struct spi_mem_driver spinand_drv = {
	.spidrv = {
		.id_table = spinand_ids,
		.driver = {
			.name = "spi-nand",
			.of_match_table = of_match_ptr(spinand_of_ids),
		},
	},
	.probe = spinand_probe,
	.remove = spinand_remove,
};
module_spi_mem_driver(spinand_drv);

MODULE_DESCRIPTION("SPI NAND framework");
MODULE_AUTHOR("Peter Pan<peterpandong@micron.com>");
MODULE_LICENSE("GPL v2");
#endif /* __UBOOT__ */

static const struct udevice_id spinand_ids[] = {
	{ .compatible = "spi-nand" },
	{ /* sentinel */ },
};

U_BOOT_DRIVER(spinand) = {
	.name = "spi_nand",
	.id = UCLASS_MTD,
	.of_match = spinand_ids,
	.priv_auto_alloc_size = sizeof(struct spinand_device),
	.probe = spinand_probe,
};