linux/drivers/mtd/nand/spi/core.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2016-2017 Micron Technology, Inc.
   4 *
   5 * Authors:
   6 *      Peter Pan <peterpandong@micron.com>
   7 *      Boris Brezillon <boris.brezillon@bootlin.com>
   8 */
   9
  10#define pr_fmt(fmt)     "spi-nand: " fmt
  11
  12#include <linux/device.h>
  13#include <linux/jiffies.h>
  14#include <linux/kernel.h>
  15#include <linux/module.h>
  16#include <linux/mtd/spinand.h>
  17#include <linux/of.h>
  18#include <linux/slab.h>
  19#include <linux/spi/spi.h>
  20#include <linux/spi/spi-mem.h>
  21
   22static void spinand_cache_op_adjust_column(struct spinand_device *spinand,
  23                                          const struct nand_page_io_req *req,
  24                                          u16 *column)
  25{
  26        struct nand_device *nand = spinand_to_nand(spinand);
  27        unsigned int shift;
  28
  29        if (nand->memorg.planes_per_lun < 2)
  30                return;
  31
   32        /* The plane number is passed in the MSBs, just above the column address */
  33        shift = fls(nand->memorg.pagesize);
  34        *column |= req->pos.plane << shift;
  35}
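
/*
 * Worked example for the plane adjustment above (illustrative only): on a
 * device with 2048-byte pages, fls(2048) == 12, so the plane number lands at
 * bit 12 of the column address. A request targeting plane 1 at column 0 then
 * uses column 0x1000, while plane 0 keeps the plain column value.
 */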
  36
  37static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
  38{
  39        struct spi_mem_op op = SPINAND_GET_FEATURE_OP(reg,
  40                                                      spinand->scratchbuf);
  41        int ret;
  42
  43        ret = spi_mem_exec_op(spinand->spimem, &op);
  44        if (ret)
  45                return ret;
  46
  47        *val = *spinand->scratchbuf;
  48        return 0;
  49}
  50
  51static int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val)
  52{
  53        struct spi_mem_op op = SPINAND_SET_FEATURE_OP(reg,
  54                                                      spinand->scratchbuf);
  55
  56        *spinand->scratchbuf = val;
  57        return spi_mem_exec_op(spinand->spimem, &op);
  58}
  59
  60static int spinand_read_status(struct spinand_device *spinand, u8 *status)
  61{
  62        return spinand_read_reg_op(spinand, REG_STATUS, status);
  63}
  64
  65static int spinand_get_cfg(struct spinand_device *spinand, u8 *cfg)
  66{
  67        struct nand_device *nand = spinand_to_nand(spinand);
  68
  69        if (WARN_ON(spinand->cur_target < 0 ||
  70                    spinand->cur_target >= nand->memorg.ntargets))
  71                return -EINVAL;
  72
  73        *cfg = spinand->cfg_cache[spinand->cur_target];
  74        return 0;
  75}
  76
  77static int spinand_set_cfg(struct spinand_device *spinand, u8 cfg)
  78{
  79        struct nand_device *nand = spinand_to_nand(spinand);
  80        int ret;
  81
  82        if (WARN_ON(spinand->cur_target < 0 ||
  83                    spinand->cur_target >= nand->memorg.ntargets))
  84                return -EINVAL;
  85
  86        if (spinand->cfg_cache[spinand->cur_target] == cfg)
  87                return 0;
  88
  89        ret = spinand_write_reg_op(spinand, REG_CFG, cfg);
  90        if (ret)
  91                return ret;
  92
  93        spinand->cfg_cache[spinand->cur_target] = cfg;
  94        return 0;
  95}
  96
  97/**
  98 * spinand_upd_cfg() - Update the configuration register
  99 * @spinand: the spinand device
 100 * @mask: the mask encoding the bits to update in the config reg
 101 * @val: the new value to apply
 102 *
  103 * Update the configuration register and the per-target CFG cache.
 104 *
 105 * Return: 0 on success, a negative error code otherwise.
 106 */
 107int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val)
 108{
 109        int ret;
 110        u8 cfg;
 111
 112        ret = spinand_get_cfg(spinand, &cfg);
 113        if (ret)
 114                return ret;
 115
 116        cfg &= ~mask;
 117        cfg |= val;
 118
 119        return spinand_set_cfg(spinand, cfg);
 120}
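
/*
 * Usage sketch for spinand_upd_cfg() (it mirrors spinand_ecc_enable() further
 * down in this file): callers pass the mask of the bits they own and the new
 * value for those bits; everything outside the mask is preserved by the
 * read-modify-write done on the cached CFG value, e.g.:
 *
 *	ret = spinand_upd_cfg(spinand, CFG_ECC_ENABLE,
 *			      enable ? CFG_ECC_ENABLE : 0);
 */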
 121
 122/**
 123 * spinand_select_target() - Select a specific NAND target/die
 124 * @spinand: the spinand device
 125 * @target: the target/die to select
 126 *
  127 * Select a new target/die. If the chip has only one die, this function is a NOOP.
 128 *
 129 * Return: 0 on success, a negative error code otherwise.
 130 */
 131int spinand_select_target(struct spinand_device *spinand, unsigned int target)
 132{
 133        struct nand_device *nand = spinand_to_nand(spinand);
 134        int ret;
 135
 136        if (WARN_ON(target >= nand->memorg.ntargets))
 137                return -EINVAL;
 138
 139        if (spinand->cur_target == target)
 140                return 0;
 141
 142        if (nand->memorg.ntargets == 1) {
 143                spinand->cur_target = target;
 144                return 0;
 145        }
 146
 147        ret = spinand->select_target(spinand, target);
 148        if (ret)
 149                return ret;
 150
 151        spinand->cur_target = target;
 152        return 0;
 153}
 154
 155static int spinand_init_cfg_cache(struct spinand_device *spinand)
 156{
 157        struct nand_device *nand = spinand_to_nand(spinand);
 158        struct device *dev = &spinand->spimem->spi->dev;
 159        unsigned int target;
 160        int ret;
 161
 162        spinand->cfg_cache = devm_kcalloc(dev,
 163                                          nand->memorg.ntargets,
 164                                          sizeof(*spinand->cfg_cache),
 165                                          GFP_KERNEL);
 166        if (!spinand->cfg_cache)
 167                return -ENOMEM;
 168
 169        for (target = 0; target < nand->memorg.ntargets; target++) {
 170                ret = spinand_select_target(spinand, target);
 171                if (ret)
 172                        return ret;
 173
 174                /*
 175                 * We use spinand_read_reg_op() instead of spinand_get_cfg()
 176                 * here to bypass the config cache.
 177                 */
 178                ret = spinand_read_reg_op(spinand, REG_CFG,
 179                                          &spinand->cfg_cache[target]);
 180                if (ret)
 181                        return ret;
 182        }
 183
 184        return 0;
 185}
 186
 187static int spinand_init_quad_enable(struct spinand_device *spinand)
 188{
 189        bool enable = false;
 190
 191        if (!(spinand->flags & SPINAND_HAS_QE_BIT))
 192                return 0;
 193
 194        if (spinand->op_templates.read_cache->data.buswidth == 4 ||
 195            spinand->op_templates.write_cache->data.buswidth == 4 ||
 196            spinand->op_templates.update_cache->data.buswidth == 4)
 197                enable = true;
 198
 199        return spinand_upd_cfg(spinand, CFG_QUAD_ENABLE,
 200                               enable ? CFG_QUAD_ENABLE : 0);
 201}
 202
 203static int spinand_ecc_enable(struct spinand_device *spinand,
 204                              bool enable)
 205{
 206        return spinand_upd_cfg(spinand, CFG_ECC_ENABLE,
 207                               enable ? CFG_ECC_ENABLE : 0);
 208}
 209
 210static int spinand_write_enable_op(struct spinand_device *spinand)
 211{
 212        struct spi_mem_op op = SPINAND_WR_EN_DIS_OP(true);
 213
 214        return spi_mem_exec_op(spinand->spimem, &op);
 215}
 216
 217static int spinand_load_page_op(struct spinand_device *spinand,
 218                                const struct nand_page_io_req *req)
 219{
 220        struct nand_device *nand = spinand_to_nand(spinand);
 221        unsigned int row = nanddev_pos_to_row(nand, &req->pos);
 222        struct spi_mem_op op = SPINAND_PAGE_READ_OP(row);
 223
 224        return spi_mem_exec_op(spinand->spimem, &op);
 225}
 226
 227static int spinand_read_from_cache_op(struct spinand_device *spinand,
 228                                      const struct nand_page_io_req *req)
 229{
 230        struct spi_mem_op op = *spinand->op_templates.read_cache;
 231        struct nand_device *nand = spinand_to_nand(spinand);
 232        struct mtd_info *mtd = nanddev_to_mtd(nand);
 233        struct nand_page_io_req adjreq = *req;
 234        unsigned int nbytes = 0;
 235        void *buf = NULL;
 236        u16 column = 0;
 237        int ret;
 238
 239        if (req->datalen) {
 240                adjreq.datalen = nanddev_page_size(nand);
 241                adjreq.dataoffs = 0;
 242                adjreq.databuf.in = spinand->databuf;
 243                buf = spinand->databuf;
 244                nbytes = adjreq.datalen;
 245        }
 246
 247        if (req->ooblen) {
 248                adjreq.ooblen = nanddev_per_page_oobsize(nand);
 249                adjreq.ooboffs = 0;
 250                adjreq.oobbuf.in = spinand->oobbuf;
 251                nbytes += nanddev_per_page_oobsize(nand);
 252                if (!buf) {
 253                        buf = spinand->oobbuf;
 254                        column = nanddev_page_size(nand);
 255                }
 256        }
 257
  258        spinand_cache_op_adjust_column(spinand, &adjreq, &column);
 259        op.addr.val = column;
 260
 261        /*
  262         * Some controllers are limited in terms of max RX data size. In this
 263         * case, just repeat the READ_CACHE operation after updating the
 264         * column.
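              *
              * For example (hypothetical controller limit): with a 2048 + 64
              * byte page and a controller capped at 512-byte transfers, the
              * loop below issues five READ_CACHE ops at columns 0, 512, 1024,
              * 1536 and 2048, the last one moving the remaining 64 OOB bytes.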
 265         */
 266        while (nbytes) {
 267                op.data.buf.in = buf;
 268                op.data.nbytes = nbytes;
 269                ret = spi_mem_adjust_op_size(spinand->spimem, &op);
 270                if (ret)
 271                        return ret;
 272
 273                ret = spi_mem_exec_op(spinand->spimem, &op);
 274                if (ret)
 275                        return ret;
 276
 277                buf += op.data.nbytes;
 278                nbytes -= op.data.nbytes;
 279                op.addr.val += op.data.nbytes;
 280        }
 281
 282        if (req->datalen)
 283                memcpy(req->databuf.in, spinand->databuf + req->dataoffs,
 284                       req->datalen);
 285
 286        if (req->ooblen) {
 287                if (req->mode == MTD_OPS_AUTO_OOB)
 288                        mtd_ooblayout_get_databytes(mtd, req->oobbuf.in,
 289                                                    spinand->oobbuf,
 290                                                    req->ooboffs,
 291                                                    req->ooblen);
 292                else
 293                        memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
 294                               req->ooblen);
 295        }
 296
 297        return 0;
 298}
 299
 300static int spinand_write_to_cache_op(struct spinand_device *spinand,
 301                                     const struct nand_page_io_req *req)
 302{
 303        struct spi_mem_op op = *spinand->op_templates.write_cache;
 304        struct nand_device *nand = spinand_to_nand(spinand);
 305        struct mtd_info *mtd = nanddev_to_mtd(nand);
 306        struct nand_page_io_req adjreq = *req;
 307        unsigned int nbytes = 0;
 308        void *buf = NULL;
 309        u16 column = 0;
 310        int ret;
 311
 312        memset(spinand->databuf, 0xff,
 313               nanddev_page_size(nand) +
 314               nanddev_per_page_oobsize(nand));
 315
 316        if (req->datalen) {
 317                memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
 318                       req->datalen);
 319                adjreq.dataoffs = 0;
 320                adjreq.datalen = nanddev_page_size(nand);
 321                adjreq.databuf.out = spinand->databuf;
 322                nbytes = adjreq.datalen;
 323                buf = spinand->databuf;
 324        }
 325
 326        if (req->ooblen) {
 327                if (req->mode == MTD_OPS_AUTO_OOB)
 328                        mtd_ooblayout_set_databytes(mtd, req->oobbuf.out,
 329                                                    spinand->oobbuf,
 330                                                    req->ooboffs,
 331                                                    req->ooblen);
 332                else
 333                        memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
 334                               req->ooblen);
 335
 336                adjreq.ooblen = nanddev_per_page_oobsize(nand);
 337                adjreq.ooboffs = 0;
 338                nbytes += nanddev_per_page_oobsize(nand);
 339                if (!buf) {
 340                        buf = spinand->oobbuf;
 341                        column = nanddev_page_size(nand);
 342                }
 343        }
 344
  345        spinand_cache_op_adjust_column(spinand, &adjreq, &column);
 346
 347        op = *spinand->op_templates.write_cache;
 348        op.addr.val = column;
 349
 350        /*
  351         * Some controllers are limited in terms of max TX data size. In this
 352         * case, split the operation into one LOAD CACHE and one or more
 353         * LOAD RANDOM CACHE.
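              *
              * For example (hypothetical 512-byte controller limit): a full
              * 2048 + 64 byte page is sent as one 512-byte LOAD CACHE op
              * followed by four LOAD RANDOM CACHE ops at columns 512, 1024,
              * 1536 and 2048, the last one carrying the remaining 64 OOB bytes.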
 354         */
 355        while (nbytes) {
 356                op.data.buf.out = buf;
 357                op.data.nbytes = nbytes;
 358
 359                ret = spi_mem_adjust_op_size(spinand->spimem, &op);
 360                if (ret)
 361                        return ret;
 362
 363                ret = spi_mem_exec_op(spinand->spimem, &op);
 364                if (ret)
 365                        return ret;
 366
 367                buf += op.data.nbytes;
 368                nbytes -= op.data.nbytes;
 369                op.addr.val += op.data.nbytes;
 370
 371                /*
 372                 * We need to use the RANDOM LOAD CACHE operation if there's
 373                 * more than one iteration, because the LOAD operation resets
 374                 * the cache to 0xff.
 375                 */
 376                if (nbytes) {
 377                        column = op.addr.val;
 378                        op = *spinand->op_templates.update_cache;
 379                        op.addr.val = column;
 380                }
 381        }
 382
 383        return 0;
 384}
 385
 386static int spinand_program_op(struct spinand_device *spinand,
 387                              const struct nand_page_io_req *req)
 388{
 389        struct nand_device *nand = spinand_to_nand(spinand);
 390        unsigned int row = nanddev_pos_to_row(nand, &req->pos);
 391        struct spi_mem_op op = SPINAND_PROG_EXEC_OP(row);
 392
 393        return spi_mem_exec_op(spinand->spimem, &op);
 394}
 395
 396static int spinand_erase_op(struct spinand_device *spinand,
 397                            const struct nand_pos *pos)
 398{
 399        struct nand_device *nand = spinand_to_nand(spinand);
 400        unsigned int row = nanddev_pos_to_row(nand, pos);
 401        struct spi_mem_op op = SPINAND_BLK_ERASE_OP(row);
 402
 403        return spi_mem_exec_op(spinand->spimem, &op);
 404}
 405
 406static int spinand_wait(struct spinand_device *spinand, u8 *s)
 407{
  408        unsigned long timeo = jiffies + msecs_to_jiffies(400);
 409        u8 status;
 410        int ret;
 411
 412        do {
 413                ret = spinand_read_status(spinand, &status);
 414                if (ret)
 415                        return ret;
 416
 417                if (!(status & STATUS_BUSY))
 418                        goto out;
 419        } while (time_before(jiffies, timeo));
 420
 421        /*
  422         * Extra read, just in case the STATUS_BUSY bit has changed
 423         * since our last check
 424         */
 425        ret = spinand_read_status(spinand, &status);
 426        if (ret)
 427                return ret;
 428
 429out:
 430        if (s)
 431                *s = status;
 432
 433        return status & STATUS_BUSY ? -ETIMEDOUT : 0;
 434}
 435
 436static int spinand_read_id_op(struct spinand_device *spinand, u8 *buf)
 437{
 438        struct spi_mem_op op = SPINAND_READID_OP(0, spinand->scratchbuf,
 439                                                 SPINAND_MAX_ID_LEN);
 440        int ret;
 441
 442        ret = spi_mem_exec_op(spinand->spimem, &op);
 443        if (!ret)
 444                memcpy(buf, spinand->scratchbuf, SPINAND_MAX_ID_LEN);
 445
 446        return ret;
 447}
 448
 449static int spinand_reset_op(struct spinand_device *spinand)
 450{
 451        struct spi_mem_op op = SPINAND_RESET_OP;
 452        int ret;
 453
 454        ret = spi_mem_exec_op(spinand->spimem, &op);
 455        if (ret)
 456                return ret;
 457
 458        return spinand_wait(spinand, NULL);
 459}
 460
 461static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
 462{
 463        return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock);
 464}
 465
 466static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
 467{
 468        struct nand_device *nand = spinand_to_nand(spinand);
 469
 470        if (spinand->eccinfo.get_status)
 471                return spinand->eccinfo.get_status(spinand, status);
 472
 473        switch (status & STATUS_ECC_MASK) {
 474        case STATUS_ECC_NO_BITFLIPS:
 475                return 0;
 476
 477        case STATUS_ECC_HAS_BITFLIPS:
 478                /*
 479                 * We have no way to know exactly how many bitflips have been
 480                 * fixed, so let's return the maximum possible value so that
 481                 * wear-leveling layers move the data immediately.
 482                 */
 483                return nand->eccreq.strength;
 484
 485        case STATUS_ECC_UNCOR_ERROR:
 486                return -EBADMSG;
 487
 488        default:
 489                break;
 490        }
 491
 492        return -EINVAL;
 493}
 494
 495static int spinand_read_page(struct spinand_device *spinand,
 496                             const struct nand_page_io_req *req,
 497                             bool ecc_enabled)
 498{
 499        u8 status;
 500        int ret;
 501
 502        ret = spinand_load_page_op(spinand, req);
 503        if (ret)
 504                return ret;
 505
 506        ret = spinand_wait(spinand, &status);
 507        if (ret < 0)
 508                return ret;
 509
 510        ret = spinand_read_from_cache_op(spinand, req);
 511        if (ret)
 512                return ret;
 513
 514        if (!ecc_enabled)
 515                return 0;
 516
 517        return spinand_check_ecc_status(spinand, status);
 518}
 519
 520static int spinand_write_page(struct spinand_device *spinand,
 521                              const struct nand_page_io_req *req)
 522{
 523        u8 status;
 524        int ret;
 525
 526        ret = spinand_write_enable_op(spinand);
 527        if (ret)
 528                return ret;
 529
 530        ret = spinand_write_to_cache_op(spinand, req);
 531        if (ret)
 532                return ret;
 533
 534        ret = spinand_program_op(spinand, req);
 535        if (ret)
 536                return ret;
 537
 538        ret = spinand_wait(spinand, &status);
 539        if (!ret && (status & STATUS_PROG_FAILED))
 540                ret = -EIO;
 541
 542        return ret;
 543}
 544
 545static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
 546                            struct mtd_oob_ops *ops)
 547{
 548        struct spinand_device *spinand = mtd_to_spinand(mtd);
 549        struct nand_device *nand = mtd_to_nanddev(mtd);
 550        unsigned int max_bitflips = 0;
 551        struct nand_io_iter iter;
 552        bool enable_ecc = false;
 553        bool ecc_failed = false;
 554        int ret = 0;
 555
 556        if (ops->mode != MTD_OPS_RAW && spinand->eccinfo.ooblayout)
 557                enable_ecc = true;
 558
 559        mutex_lock(&spinand->lock);
 560
 561        nanddev_io_for_each_page(nand, from, ops, &iter) {
 562                ret = spinand_select_target(spinand, iter.req.pos.target);
 563                if (ret)
 564                        break;
 565
 566                ret = spinand_ecc_enable(spinand, enable_ecc);
 567                if (ret)
 568                        break;
 569
 570                ret = spinand_read_page(spinand, &iter.req, enable_ecc);
 571                if (ret < 0 && ret != -EBADMSG)
 572                        break;
 573
 574                if (ret == -EBADMSG) {
 575                        ecc_failed = true;
 576                        mtd->ecc_stats.failed++;
 577                        ret = 0;
 578                } else {
 579                        mtd->ecc_stats.corrected += ret;
 580                        max_bitflips = max_t(unsigned int, max_bitflips, ret);
 581                }
 582
 583                ops->retlen += iter.req.datalen;
 584                ops->oobretlen += iter.req.ooblen;
 585        }
 586
 587        mutex_unlock(&spinand->lock);
 588
 589        if (ecc_failed && !ret)
 590                ret = -EBADMSG;
 591
 592        return ret ? ret : max_bitflips;
 593}
 594
 595static int spinand_mtd_write(struct mtd_info *mtd, loff_t to,
 596                             struct mtd_oob_ops *ops)
 597{
 598        struct spinand_device *spinand = mtd_to_spinand(mtd);
 599        struct nand_device *nand = mtd_to_nanddev(mtd);
 600        struct nand_io_iter iter;
 601        bool enable_ecc = false;
 602        int ret = 0;
 603
 604        if (ops->mode != MTD_OPS_RAW && mtd->ooblayout)
 605                enable_ecc = true;
 606
 607        mutex_lock(&spinand->lock);
 608
 609        nanddev_io_for_each_page(nand, to, ops, &iter) {
 610                ret = spinand_select_target(spinand, iter.req.pos.target);
 611                if (ret)
 612                        break;
 613
 614                ret = spinand_ecc_enable(spinand, enable_ecc);
 615                if (ret)
 616                        break;
 617
 618                ret = spinand_write_page(spinand, &iter.req);
 619                if (ret)
 620                        break;
 621
 622                ops->retlen += iter.req.datalen;
 623                ops->oobretlen += iter.req.ooblen;
 624        }
 625
 626        mutex_unlock(&spinand->lock);
 627
 628        return ret;
 629}
 630
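/*
 * A block is considered bad when either of the first two OOB bytes of the
 * checked page differs from 0xff, which is where bad block markers are
 * conventionally placed.
 */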
 631static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos)
 632{
 633        struct spinand_device *spinand = nand_to_spinand(nand);
 634        struct nand_page_io_req req = {
 635                .pos = *pos,
 636                .ooblen = 2,
 637                .ooboffs = 0,
 638                .oobbuf.in = spinand->oobbuf,
 639                .mode = MTD_OPS_RAW,
 640        };
 641
 642        memset(spinand->oobbuf, 0, 2);
 643        spinand_select_target(spinand, pos->target);
 644        spinand_read_page(spinand, &req, false);
 645        if (spinand->oobbuf[0] != 0xff || spinand->oobbuf[1] != 0xff)
 646                return true;
 647
 648        return false;
 649}
 650
 651static int spinand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
 652{
 653        struct nand_device *nand = mtd_to_nanddev(mtd);
 654        struct spinand_device *spinand = nand_to_spinand(nand);
 655        struct nand_pos pos;
 656        int ret;
 657
 658        nanddev_offs_to_pos(nand, offs, &pos);
 659        mutex_lock(&spinand->lock);
 660        ret = nanddev_isbad(nand, &pos);
 661        mutex_unlock(&spinand->lock);
 662
 663        return ret;
 664}
 665
 666static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
 667{
 668        struct spinand_device *spinand = nand_to_spinand(nand);
 669        struct nand_page_io_req req = {
 670                .pos = *pos,
 671                .ooboffs = 0,
 672                .ooblen = 2,
 673                .oobbuf.out = spinand->oobbuf,
 674        };
 675        int ret;
 676
 677        /* Erase block before marking it bad. */
 678        ret = spinand_select_target(spinand, pos->target);
 679        if (ret)
 680                return ret;
 681
 682        ret = spinand_write_enable_op(spinand);
 683        if (ret)
 684                return ret;
 685
 686        spinand_erase_op(spinand, pos);
 687
 688        memset(spinand->oobbuf, 0, 2);
 689        return spinand_write_page(spinand, &req);
 690}
 691
 692static int spinand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
 693{
 694        struct nand_device *nand = mtd_to_nanddev(mtd);
 695        struct spinand_device *spinand = nand_to_spinand(nand);
 696        struct nand_pos pos;
 697        int ret;
 698
 699        nanddev_offs_to_pos(nand, offs, &pos);
 700        mutex_lock(&spinand->lock);
 701        ret = nanddev_markbad(nand, &pos);
 702        mutex_unlock(&spinand->lock);
 703
 704        return ret;
 705}
 706
 707static int spinand_erase(struct nand_device *nand, const struct nand_pos *pos)
 708{
 709        struct spinand_device *spinand = nand_to_spinand(nand);
 710        u8 status;
 711        int ret;
 712
 713        ret = spinand_select_target(spinand, pos->target);
 714        if (ret)
 715                return ret;
 716
 717        ret = spinand_write_enable_op(spinand);
 718        if (ret)
 719                return ret;
 720
 721        ret = spinand_erase_op(spinand, pos);
 722        if (ret)
 723                return ret;
 724
 725        ret = spinand_wait(spinand, &status);
 726        if (!ret && (status & STATUS_ERASE_FAILED))
 727                ret = -EIO;
 728
 729        return ret;
 730}
 731
 732static int spinand_mtd_erase(struct mtd_info *mtd,
 733                             struct erase_info *einfo)
 734{
 735        struct spinand_device *spinand = mtd_to_spinand(mtd);
 736        int ret;
 737
 738        mutex_lock(&spinand->lock);
 739        ret = nanddev_mtd_erase(mtd, einfo);
 740        mutex_unlock(&spinand->lock);
 741
 742        return ret;
 743}
 744
 745static int spinand_mtd_block_isreserved(struct mtd_info *mtd, loff_t offs)
 746{
 747        struct spinand_device *spinand = mtd_to_spinand(mtd);
 748        struct nand_device *nand = mtd_to_nanddev(mtd);
 749        struct nand_pos pos;
 750        int ret;
 751
 752        nanddev_offs_to_pos(nand, offs, &pos);
 753        mutex_lock(&spinand->lock);
 754        ret = nanddev_isreserved(nand, &pos);
 755        mutex_unlock(&spinand->lock);
 756
 757        return ret;
 758}
 759
 760static const struct nand_ops spinand_ops = {
 761        .erase = spinand_erase,
 762        .markbad = spinand_markbad,
 763        .isbad = spinand_isbad,
 764};
 765
 766static const struct spinand_manufacturer *spinand_manufacturers[] = {
 767        &macronix_spinand_manufacturer,
 768        &micron_spinand_manufacturer,
 769        &winbond_spinand_manufacturer,
 770};
 771
 772static int spinand_manufacturer_detect(struct spinand_device *spinand)
 773{
 774        unsigned int i;
 775        int ret;
 776
 777        for (i = 0; i < ARRAY_SIZE(spinand_manufacturers); i++) {
 778                ret = spinand_manufacturers[i]->ops->detect(spinand);
 779                if (ret > 0) {
 780                        spinand->manufacturer = spinand_manufacturers[i];
 781                        return 0;
 782                } else if (ret < 0) {
 783                        return ret;
 784                }
 785        }
 786
 787        return -ENOTSUPP;
 788}
 789
 790static int spinand_manufacturer_init(struct spinand_device *spinand)
 791{
 792        if (spinand->manufacturer->ops->init)
 793                return spinand->manufacturer->ops->init(spinand);
 794
 795        return 0;
 796}
 797
 798static void spinand_manufacturer_cleanup(struct spinand_device *spinand)
 799{
 800        /* Release manufacturer private data */
 801        if (spinand->manufacturer->ops->cleanup)
 802                return spinand->manufacturer->ops->cleanup(spinand);
 803}
 804
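/*
 * Pick the first variant of a SPINAND_OP_VARIANTS() table that the controller
 * can execute for a full page + OOB transfer. Tables are expected to list
 * variants from the most to the least preferred one (e.g. quad I/O first), so
 * the fastest supported op wins.
 */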
 805static const struct spi_mem_op *
 806spinand_select_op_variant(struct spinand_device *spinand,
 807                          const struct spinand_op_variants *variants)
 808{
 809        struct nand_device *nand = spinand_to_nand(spinand);
 810        unsigned int i;
 811
 812        for (i = 0; i < variants->nops; i++) {
 813                struct spi_mem_op op = variants->ops[i];
 814                unsigned int nbytes;
 815                int ret;
 816
 817                nbytes = nanddev_per_page_oobsize(nand) +
 818                         nanddev_page_size(nand);
 819
 820                while (nbytes) {
 821                        op.data.nbytes = nbytes;
 822                        ret = spi_mem_adjust_op_size(spinand->spimem, &op);
 823                        if (ret)
 824                                break;
 825
 826                        if (!spi_mem_supports_op(spinand->spimem, &op))
 827                                break;
 828
 829                        nbytes -= op.data.nbytes;
 830                }
 831
 832                if (!nbytes)
 833                        return &variants->ops[i];
 834        }
 835
 836        return NULL;
 837}
 838
 839/**
 840 * spinand_match_and_init() - Try to find a match between a device ID and an
 841 *                            entry in a spinand_info table
 842 * @spinand: SPI NAND object
 843 * @table: SPI NAND device description table
  844 * @table_size: size of the device description table
      * @devid: device ID to match against the table entries
 845 *
 846 * Should be used by SPI NAND manufacturer drivers when they want to find a
 847 * match between a device ID retrieved through the READ_ID command and an
 848 * entry in the SPI NAND description table. If a match is found, the spinand
 849 * object will be initialized with information provided by the matching
 850 * spinand_info entry.
 851 *
 852 * Return: 0 on success, a negative error code otherwise.
 853 */
 854int spinand_match_and_init(struct spinand_device *spinand,
 855                           const struct spinand_info *table,
 856                           unsigned int table_size, u8 devid)
 857{
 858        struct nand_device *nand = spinand_to_nand(spinand);
 859        unsigned int i;
 860
 861        for (i = 0; i < table_size; i++) {
 862                const struct spinand_info *info = &table[i];
 863                const struct spi_mem_op *op;
 864
 865                if (devid != info->devid)
 866                        continue;
 867
 868                nand->memorg = table[i].memorg;
 869                nand->eccreq = table[i].eccreq;
 870                spinand->eccinfo = table[i].eccinfo;
 871                spinand->flags = table[i].flags;
 872                spinand->select_target = table[i].select_target;
 873
 874                op = spinand_select_op_variant(spinand,
 875                                               info->op_variants.read_cache);
 876                if (!op)
 877                        return -ENOTSUPP;
 878
 879                spinand->op_templates.read_cache = op;
 880
 881                op = spinand_select_op_variant(spinand,
 882                                               info->op_variants.write_cache);
 883                if (!op)
 884                        return -ENOTSUPP;
 885
 886                spinand->op_templates.write_cache = op;
 887
 888                op = spinand_select_op_variant(spinand,
 889                                               info->op_variants.update_cache);
 890                spinand->op_templates.update_cache = op;
 891
 892                return 0;
 893        }
 894
 895        return -ENOTSUPP;
 896}
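
/*
 * Typical use from a manufacturer driver's ->detect() hook (sketch modelled
 * on the in-tree drivers; the "foo" names are illustrative):
 *
 *	static int foo_spinand_detect(struct spinand_device *spinand)
 *	{
 *		u8 *id = spinand->id.data;
 *		int ret;
 *
 *		if (id[0] != SPINAND_MFR_FOO)
 *			return 0;
 *
 *		ret = spinand_match_and_init(spinand, foo_spinand_table,
 *					     ARRAY_SIZE(foo_spinand_table),
 *					     id[1]);
 *		if (ret)
 *			return ret;
 *
 *		return 1;
 *	}
 *
 * Returning 1 tells spinand_manufacturer_detect() that a match was found,
 * 0 means the chip is not handled by this driver, and a negative value
 * propagates the error.
 */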
 897
 898static int spinand_detect(struct spinand_device *spinand)
 899{
 900        struct device *dev = &spinand->spimem->spi->dev;
 901        struct nand_device *nand = spinand_to_nand(spinand);
 902        int ret;
 903
 904        ret = spinand_reset_op(spinand);
 905        if (ret)
 906                return ret;
 907
 908        ret = spinand_read_id_op(spinand, spinand->id.data);
 909        if (ret)
 910                return ret;
 911
 912        spinand->id.len = SPINAND_MAX_ID_LEN;
 913
 914        ret = spinand_manufacturer_detect(spinand);
 915        if (ret) {
 916                dev_err(dev, "unknown raw ID %*phN\n", SPINAND_MAX_ID_LEN,
 917                        spinand->id.data);
 918                return ret;
 919        }
 920
 921        if (nand->memorg.ntargets > 1 && !spinand->select_target) {
 922                dev_err(dev,
 923                        "SPI NANDs with more than one die must implement ->select_target()\n");
 924                return -EINVAL;
 925        }
 926
 927        dev_info(&spinand->spimem->spi->dev,
 928                 "%s SPI NAND was found.\n", spinand->manufacturer->name);
 929        dev_info(&spinand->spimem->spi->dev,
 930                 "%llu MiB, block size: %zu KiB, page size: %zu, OOB size: %u\n",
 931                 nanddev_size(nand) >> 20, nanddev_eraseblock_size(nand) >> 10,
 932                 nanddev_page_size(nand), nanddev_per_page_oobsize(nand));
 933
 934        return 0;
 935}
 936
 937static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section,
 938                                       struct mtd_oob_region *region)
 939{
 940        return -ERANGE;
 941}
 942
 943static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section,
 944                                        struct mtd_oob_region *region)
 945{
 946        if (section)
 947                return -ERANGE;
 948
 949        /* Reserve 2 bytes for the BBM. */
 950        region->offset = 2;
  951        region->length = mtd->oobsize - 2;
 952
 953        return 0;
 954}
 955
 956static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = {
 957        .ecc = spinand_noecc_ooblayout_ecc,
 958        .free = spinand_noecc_ooblayout_free,
 959};
 960
 961static int spinand_init(struct spinand_device *spinand)
 962{
 963        struct device *dev = &spinand->spimem->spi->dev;
 964        struct mtd_info *mtd = spinand_to_mtd(spinand);
 965        struct nand_device *nand = mtd_to_nanddev(mtd);
 966        int ret, i;
 967
 968        /*
 969         * We need a scratch buffer because the spi_mem interface requires that
  970         * the buffer passed in spi_mem_op->data.buf be DMA-able.
 971         */
 972        spinand->scratchbuf = kzalloc(SPINAND_MAX_ID_LEN, GFP_KERNEL);
 973        if (!spinand->scratchbuf)
 974                return -ENOMEM;
 975
 976        ret = spinand_detect(spinand);
 977        if (ret)
 978                goto err_free_bufs;
 979
 980        /*
 981         * Use kzalloc() instead of devm_kzalloc() here, because some drivers
 982         * may use this buffer for DMA access.
  983         * devm_* allocations are not guaranteed to have DMA-safe alignment.
 984         */
 985        spinand->databuf = kzalloc(nanddev_page_size(nand) +
 986                               nanddev_per_page_oobsize(nand),
 987                               GFP_KERNEL);
 988        if (!spinand->databuf) {
 989                ret = -ENOMEM;
 990                goto err_free_bufs;
 991        }
 992
 993        spinand->oobbuf = spinand->databuf + nanddev_page_size(nand);
 994
 995        ret = spinand_init_cfg_cache(spinand);
 996        if (ret)
 997                goto err_free_bufs;
 998
 999        ret = spinand_init_quad_enable(spinand);
1000        if (ret)
1001                goto err_free_bufs;
1002
1003        ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0);
1004        if (ret)
1005                goto err_free_bufs;
1006
1007        ret = spinand_manufacturer_init(spinand);
1008        if (ret) {
1009                dev_err(dev,
1010                        "Failed to initialize the SPI NAND chip (err = %d)\n",
1011                        ret);
1012                goto err_free_bufs;
1013        }
1014
1015        /* After power up, all blocks are locked, so unlock them here. */
1016        for (i = 0; i < nand->memorg.ntargets; i++) {
1017                ret = spinand_select_target(spinand, i);
1018                if (ret)
1019                        goto err_free_bufs;
1020
1021                ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
1022                if (ret)
1023                        goto err_free_bufs;
1024        }
1025
1026        ret = nanddev_init(nand, &spinand_ops, THIS_MODULE);
1027        if (ret)
1028                goto err_manuf_cleanup;
1029
1030        /*
 1031         * Right now, we don't support ECC, so make the whole OOB
 1032         * area available to the user.
1033         */
1034        mtd->_read_oob = spinand_mtd_read;
1035        mtd->_write_oob = spinand_mtd_write;
1036        mtd->_block_isbad = spinand_mtd_block_isbad;
1037        mtd->_block_markbad = spinand_mtd_block_markbad;
1038        mtd->_block_isreserved = spinand_mtd_block_isreserved;
1039        mtd->_erase = spinand_mtd_erase;
1040
1041        if (spinand->eccinfo.ooblayout)
1042                mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout);
1043        else
1044                mtd_set_ooblayout(mtd, &spinand_noecc_ooblayout);
1045
1046        ret = mtd_ooblayout_count_freebytes(mtd);
1047        if (ret < 0)
1048                goto err_cleanup_nanddev;
1049
1050        mtd->oobavail = ret;
1051
1052        return 0;
1053
1054err_cleanup_nanddev:
1055        nanddev_cleanup(nand);
1056
1057err_manuf_cleanup:
1058        spinand_manufacturer_cleanup(spinand);
1059
1060err_free_bufs:
1061        kfree(spinand->databuf);
1062        kfree(spinand->scratchbuf);
1063        return ret;
1064}
1065
1066static void spinand_cleanup(struct spinand_device *spinand)
1067{
1068        struct nand_device *nand = spinand_to_nand(spinand);
1069
1070        nanddev_cleanup(nand);
1071        spinand_manufacturer_cleanup(spinand);
1072        kfree(spinand->databuf);
1073        kfree(spinand->scratchbuf);
1074}
1075
1076static int spinand_probe(struct spi_mem *mem)
1077{
1078        struct spinand_device *spinand;
1079        struct mtd_info *mtd;
1080        int ret;
1081
1082        spinand = devm_kzalloc(&mem->spi->dev, sizeof(*spinand),
1083                               GFP_KERNEL);
1084        if (!spinand)
1085                return -ENOMEM;
1086
1087        spinand->spimem = mem;
1088        spi_mem_set_drvdata(mem, spinand);
1089        spinand_set_of_node(spinand, mem->spi->dev.of_node);
1090        mutex_init(&spinand->lock);
1091        mtd = spinand_to_mtd(spinand);
1092        mtd->dev.parent = &mem->spi->dev;
1093
1094        ret = spinand_init(spinand);
1095        if (ret)
1096                return ret;
1097
1098        ret = mtd_device_register(mtd, NULL, 0);
1099        if (ret)
1100                goto err_spinand_cleanup;
1101
1102        return 0;
1103
1104err_spinand_cleanup:
1105        spinand_cleanup(spinand);
1106
1107        return ret;
1108}
1109
1110static int spinand_remove(struct spi_mem *mem)
1111{
1112        struct spinand_device *spinand;
1113        struct mtd_info *mtd;
1114        int ret;
1115
1116        spinand = spi_mem_get_drvdata(mem);
1117        mtd = spinand_to_mtd(spinand);
1118
1119        ret = mtd_device_unregister(mtd);
1120        if (ret)
1121                return ret;
1122
1123        spinand_cleanup(spinand);
1124
1125        return 0;
1126}
1127
1128static const struct spi_device_id spinand_ids[] = {
1129        { .name = "spi-nand" },
1130        { /* sentinel */ },
1131};
1132
1133#ifdef CONFIG_OF
1134static const struct of_device_id spinand_of_ids[] = {
1135        { .compatible = "spi-nand" },
1136        { /* sentinel */ },
1137};
1138#endif
1139
1140static struct spi_mem_driver spinand_drv = {
1141        .spidrv = {
1142                .id_table = spinand_ids,
1143                .driver = {
1144                        .name = "spi-nand",
1145                        .of_match_table = of_match_ptr(spinand_of_ids),
1146                },
1147        },
1148        .probe = spinand_probe,
1149        .remove = spinand_remove,
1150};
1151module_spi_mem_driver(spinand_drv);
1152
1153MODULE_DESCRIPTION("SPI NAND framework");
 1154MODULE_AUTHOR("Peter Pan <peterpandong@micron.com>");
1155MODULE_LICENSE("GPL v2");
1156