linux/drivers/mtd/spi-nor/core.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Based on m25p80.c, by Mike Lavender (mike@steroidmicros.com), with
   4 * influence from lart.c (Abraham Van Der Merwe) and mtd_dataflash.c
   5 *
   6 * Copyright (C) 2005, Intec Automation Inc.
   7 * Copyright (C) 2014, Freescale Semiconductor, Inc.
   8 */
   9
  10#include <linux/err.h>
  11#include <linux/errno.h>
  12#include <linux/module.h>
  13#include <linux/device.h>
  14#include <linux/mutex.h>
  15#include <linux/math64.h>
  16#include <linux/sizes.h>
  17#include <linux/slab.h>
  18
  19#include <linux/mtd/mtd.h>
  20#include <linux/of_platform.h>
  21#include <linux/sched/task_stack.h>
  22#include <linux/spi/flash.h>
  23#include <linux/mtd/spi-nor.h>
  24
  25#include "core.h"
  26
  27/* Define max times to check status register before we give up. */
  28
  29/*
  30 * For everything but full-chip erase; probably could be much smaller, but kept
  31 * around for safety for now
  32 */
  33#define DEFAULT_READY_WAIT_JIFFIES              (40UL * HZ)
  34
  35/*
  36 * For full-chip erase, calibrated to a 2MB flash (M25P16); should be scaled up
  37 * for larger flash
  38 */
  39#define CHIP_ERASE_2MB_READY_WAIT_JIFFIES       (40UL * HZ)
  40
  41#define SPI_NOR_MAX_ADDR_WIDTH  4
  42
  43/**
  44 * spi_nor_spimem_bounce() - check if a bounce buffer is needed for the data
  45 *                           transfer
  46 * @nor:        pointer to 'struct spi_nor'
  47 * @op:         pointer to 'struct spi_mem_op' template for transfer
  48 *
  49 * If we have to use the bounce buffer, the data field in @op will be updated.
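      * The buffers handed to spi-mem must be DMA-able, so data that lives on
      * the stack or outside the kernel linear map (e.g. vmalloc() memory) is
      * staged through nor->bouncebuf instead.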
  50 *
  51 * Return: true if the bounce buffer is needed, false if not
  52 */
  53static bool spi_nor_spimem_bounce(struct spi_nor *nor, struct spi_mem_op *op)
  54{
  55        /* op->data.buf.in occupies the same memory as op->data.buf.out */
  56        if (object_is_on_stack(op->data.buf.in) ||
  57            !virt_addr_valid(op->data.buf.in)) {
  58                if (op->data.nbytes > nor->bouncebuf_size)
  59                        op->data.nbytes = nor->bouncebuf_size;
  60                op->data.buf.in = nor->bouncebuf;
  61                return true;
  62        }
  63
  64        return false;
  65}
  66
  67/**
  68 * spi_nor_spimem_exec_op() - execute a memory operation
  69 * @nor:        pointer to 'struct spi_nor'
  70 * @op:         pointer to 'struct spi_mem_op' template for transfer
  71 *
  72 * Return: 0 on success, -error otherwise.
  73 */
  74static int spi_nor_spimem_exec_op(struct spi_nor *nor, struct spi_mem_op *op)
  75{
  76        int error;
  77
  78        error = spi_mem_adjust_op_size(nor->spimem, op);
  79        if (error)
  80                return error;
  81
  82        return spi_mem_exec_op(nor->spimem, op);
  83}
  84
  85/**
  86 * spi_nor_spimem_read_data() - read data from flash's memory region via
  87 *                              spi-mem
  88 * @nor:        pointer to 'struct spi_nor'
  89 * @from:       offset to read from
  90 * @len:        number of bytes to read
  91 * @buf:        pointer to dst buffer
  92 *
  93 * Return: number of bytes read successfully, -errno otherwise
  94 */
  95static ssize_t spi_nor_spimem_read_data(struct spi_nor *nor, loff_t from,
  96                                        size_t len, u8 *buf)
  97{
  98        struct spi_mem_op op =
  99                SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 1),
 100                           SPI_MEM_OP_ADDR(nor->addr_width, from, 1),
 101                           SPI_MEM_OP_DUMMY(nor->read_dummy, 1),
 102                           SPI_MEM_OP_DATA_IN(len, buf, 1));
 103        bool usebouncebuf;
 104        ssize_t nbytes;
 105        int error;
 106
 107        /* get transfer protocols. */
 108        op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->read_proto);
 109        op.addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->read_proto);
 110        op.dummy.buswidth = op.addr.buswidth;
 111        op.data.buswidth = spi_nor_get_protocol_data_nbits(nor->read_proto);
 112
 113        /* convert the dummy cycles to the number of bytes */
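             /*
              * For example, nor->read_dummy = 8 clock cycles on a quad (x4)
              * bus works out to 8 * 4 / 8 = 4 dummy bytes.
              */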
 114        op.dummy.nbytes = (nor->read_dummy * op.dummy.buswidth) / 8;
 115
 116        usebouncebuf = spi_nor_spimem_bounce(nor, &op);
 117
 118        if (nor->dirmap.rdesc) {
 119                nbytes = spi_mem_dirmap_read(nor->dirmap.rdesc, op.addr.val,
 120                                             op.data.nbytes, op.data.buf.in);
 121        } else {
 122                error = spi_nor_spimem_exec_op(nor, &op);
 123                if (error)
 124                        return error;
 125                nbytes = op.data.nbytes;
 126        }
 127
 128        if (usebouncebuf && nbytes > 0)
 129                memcpy(buf, op.data.buf.in, nbytes);
 130
 131        return nbytes;
 132}
 133
 134/**
 135 * spi_nor_read_data() - read data from flash memory
 136 * @nor:        pointer to 'struct spi_nor'
 137 * @from:       offset to read from
 138 * @len:        number of bytes to read
 139 * @buf:        pointer to dst buffer
 140 *
 141 * Return: number of bytes read successfully, -errno otherwise
 142 */
 143ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len, u8 *buf)
 144{
 145        if (nor->spimem)
 146                return spi_nor_spimem_read_data(nor, from, len, buf);
 147
 148        return nor->controller_ops->read(nor, from, len, buf);
 149}
 150
 151/**
 152 * spi_nor_spimem_write_data() - write data to flash memory via
 153 *                               spi-mem
 154 * @nor:        pointer to 'struct spi_nor'
 155 * @to:         offset to write to
 156 * @len:        number of bytes to write
 157 * @buf:        pointer to src buffer
 158 *
 159 * Return: number of bytes written successfully, -errno otherwise
 160 */
 161static ssize_t spi_nor_spimem_write_data(struct spi_nor *nor, loff_t to,
 162                                         size_t len, const u8 *buf)
 163{
 164        struct spi_mem_op op =
 165                SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 1),
 166                           SPI_MEM_OP_ADDR(nor->addr_width, to, 1),
 167                           SPI_MEM_OP_NO_DUMMY,
 168                           SPI_MEM_OP_DATA_OUT(len, buf, 1));
 169        ssize_t nbytes;
 170        int error;
 171
 172        op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->write_proto);
 173        op.addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->write_proto);
 174        op.data.buswidth = spi_nor_get_protocol_data_nbits(nor->write_proto);
 175
 176        if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
 177                op.addr.nbytes = 0;
 178
 179        if (spi_nor_spimem_bounce(nor, &op))
 180                memcpy(nor->bouncebuf, buf, op.data.nbytes);
 181
 182        if (nor->dirmap.wdesc) {
 183                nbytes = spi_mem_dirmap_write(nor->dirmap.wdesc, op.addr.val,
 184                                              op.data.nbytes, op.data.buf.out);
 185        } else {
 186                error = spi_nor_spimem_exec_op(nor, &op);
 187                if (error)
 188                        return error;
 189                nbytes = op.data.nbytes;
 190        }
 191
 192        return nbytes;
 193}
 194
 195/**
 196 * spi_nor_write_data() - write data to flash memory
 197 * @nor:        pointer to 'struct spi_nor'
 198 * @to:         offset to write to
 199 * @len:        number of bytes to write
 200 * @buf:        pointer to src buffer
 201 *
 202 * Return: number of bytes written successfully, -errno otherwise
 203 */
 204ssize_t spi_nor_write_data(struct spi_nor *nor, loff_t to, size_t len,
 205                           const u8 *buf)
 206{
 207        if (nor->spimem)
 208                return spi_nor_spimem_write_data(nor, to, len, buf);
 209
 210        return nor->controller_ops->write(nor, to, len, buf);
 211}
 212
 213/**
 214 * spi_nor_write_enable() - Set write enable latch with Write Enable command.
 215 * @nor:        pointer to 'struct spi_nor'.
 216 *
 217 * Return: 0 on success, -errno otherwise.
 218 */
 219int spi_nor_write_enable(struct spi_nor *nor)
 220{
 221        int ret;
 222
 223        if (nor->spimem) {
 224                struct spi_mem_op op =
 225                        SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREN, 1),
 226                                   SPI_MEM_OP_NO_ADDR,
 227                                   SPI_MEM_OP_NO_DUMMY,
 228                                   SPI_MEM_OP_NO_DATA);
 229
 230                ret = spi_mem_exec_op(nor->spimem, &op);
 231        } else {
 232                ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WREN,
 233                                                     NULL, 0);
 234        }
 235
 236        if (ret)
 237                dev_dbg(nor->dev, "error %d on Write Enable\n", ret);
 238
 239        return ret;
 240}
 241
 242/**
 243 * spi_nor_write_disable() - Send Write Disable instruction to the chip.
 244 * @nor:        pointer to 'struct spi_nor'.
 245 *
 246 * Return: 0 on success, -errno otherwise.
 247 */
 248int spi_nor_write_disable(struct spi_nor *nor)
 249{
 250        int ret;
 251
 252        if (nor->spimem) {
 253                struct spi_mem_op op =
 254                        SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRDI, 1),
 255                                   SPI_MEM_OP_NO_ADDR,
 256                                   SPI_MEM_OP_NO_DUMMY,
 257                                   SPI_MEM_OP_NO_DATA);
 258
 259                ret = spi_mem_exec_op(nor->spimem, &op);
 260        } else {
 261                ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WRDI,
 262                                                     NULL, 0);
 263        }
 264
 265        if (ret)
 266                dev_dbg(nor->dev, "error %d on Write Disable\n", ret);
 267
 268        return ret;
 269}
 270
 271/**
 272 * spi_nor_read_sr() - Read the Status Register.
 273 * @nor:        pointer to 'struct spi_nor'.
 274 * @sr:         pointer to a DMA-able buffer where the value of the
 275 *              Status Register will be written.
 276 *
 277 * Return: 0 on success, -errno otherwise.
 278 */
 279static int spi_nor_read_sr(struct spi_nor *nor, u8 *sr)
 280{
 281        int ret;
 282
 283        if (nor->spimem) {
 284                struct spi_mem_op op =
 285                        SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR, 1),
 286                                   SPI_MEM_OP_NO_ADDR,
 287                                   SPI_MEM_OP_NO_DUMMY,
 288                                   SPI_MEM_OP_DATA_IN(1, sr, 1));
 289
 290                ret = spi_mem_exec_op(nor->spimem, &op);
 291        } else {
 292                ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDSR,
 293                                                    sr, 1);
 294        }
 295
 296        if (ret)
 297                dev_dbg(nor->dev, "error %d reading SR\n", ret);
 298
 299        return ret;
 300}
 301
 302/**
 303 * spi_nor_read_fsr() - Read the Flag Status Register.
 304 * @nor:        pointer to 'struct spi_nor'
 305 * @fsr:        pointer to a DMA-able buffer where the value of the
 306 *              Flag Status Register will be written.
 307 *
 308 * Return: 0 on success, -errno otherwise.
 309 */
 310static int spi_nor_read_fsr(struct spi_nor *nor, u8 *fsr)
 311{
 312        int ret;
 313
 314        if (nor->spimem) {
 315                struct spi_mem_op op =
 316                        SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDFSR, 1),
 317                                   SPI_MEM_OP_NO_ADDR,
 318                                   SPI_MEM_OP_NO_DUMMY,
 319                                   SPI_MEM_OP_DATA_IN(1, fsr, 1));
 320
 321                ret = spi_mem_exec_op(nor->spimem, &op);
 322        } else {
 323                ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDFSR,
 324                                                    fsr, 1);
 325        }
 326
 327        if (ret)
 328                dev_dbg(nor->dev, "error %d reading FSR\n", ret);
 329
 330        return ret;
 331}
 332
 333/**
 334 * spi_nor_read_cr() - Read the Configuration Register using the
 335 * SPINOR_OP_RDCR (35h) command.
 336 * @nor:        pointer to 'struct spi_nor'
 337 * @cr:         pointer to a DMA-able buffer where the value of the
 338 *              Configuration Register will be written.
 339 *
 340 * Return: 0 on success, -errno otherwise.
 341 */
 342static int spi_nor_read_cr(struct spi_nor *nor, u8 *cr)
 343{
 344        int ret;
 345
 346        if (nor->spimem) {
 347                struct spi_mem_op op =
 348                        SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDCR, 1),
 349                                   SPI_MEM_OP_NO_ADDR,
 350                                   SPI_MEM_OP_NO_DUMMY,
 351                                   SPI_MEM_OP_DATA_IN(1, cr, 1));
 352
 353                ret = spi_mem_exec_op(nor->spimem, &op);
 354        } else {
 355                ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDCR, cr, 1);
 356        }
 357
 358        if (ret)
 359                dev_dbg(nor->dev, "error %d reading CR\n", ret);
 360
 361        return ret;
 362}
 363
 364/**
 365 * spi_nor_set_4byte_addr_mode() - Enter/Exit 4-byte address mode.
 366 * @nor:        pointer to 'struct spi_nor'.
 367 * @enable:     true to enter the 4-byte address mode, false to exit the 4-byte
 368 *              address mode.
 369 *
 370 * Return: 0 on success, -errno otherwise.
 371 */
 372int spi_nor_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
 373{
 374        int ret;
 375
 376        if (nor->spimem) {
 377                struct spi_mem_op op =
 378                        SPI_MEM_OP(SPI_MEM_OP_CMD(enable ?
 379                                                  SPINOR_OP_EN4B :
 380                                                  SPINOR_OP_EX4B,
 381                                                  1),
 382                                  SPI_MEM_OP_NO_ADDR,
 383                                  SPI_MEM_OP_NO_DUMMY,
 384                                  SPI_MEM_OP_NO_DATA);
 385
 386                ret = spi_mem_exec_op(nor->spimem, &op);
 387        } else {
 388                ret = nor->controller_ops->write_reg(nor,
 389                                                     enable ? SPINOR_OP_EN4B :
 390                                                              SPINOR_OP_EX4B,
 391                                                     NULL, 0);
 392        }
 393
 394        if (ret)
 395                dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret);
 396
 397        return ret;
 398}
 399
 400/**
 401 * spansion_set_4byte_addr_mode() - Set 4-byte address mode for Spansion
 402 * flashes.
 403 * @nor:        pointer to 'struct spi_nor'.
 404 * @enable:     true to enter the 4-byte address mode, false to exit the 4-byte
 405 *              address mode.
 406 *
 407 * Return: 0 on success, -errno otherwise.
 408 */
 409static int spansion_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
 410{
 411        int ret;
 412
 413        nor->bouncebuf[0] = enable << 7;
 414
 415        if (nor->spimem) {
 416                struct spi_mem_op op =
 417                        SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_BRWR, 1),
 418                                   SPI_MEM_OP_NO_ADDR,
 419                                   SPI_MEM_OP_NO_DUMMY,
 420                                   SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 1));
 421
 422                ret = spi_mem_exec_op(nor->spimem, &op);
 423        } else {
 424                ret = nor->controller_ops->write_reg(nor, SPINOR_OP_BRWR,
 425                                                     nor->bouncebuf, 1);
 426        }
 427
 428        if (ret)
 429                dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret);
 430
 431        return ret;
 432}
 433
 434/**
 435 * spi_nor_write_ear() - Write Extended Address Register.
 436 * @nor:        pointer to 'struct spi_nor'.
 437 * @ear:        value to write to the Extended Address Register.
 438 *
 439 * Return: 0 on success, -errno otherwise.
 440 */
 441int spi_nor_write_ear(struct spi_nor *nor, u8 ear)
 442{
 443        int ret;
 444
 445        nor->bouncebuf[0] = ear;
 446
 447        if (nor->spimem) {
 448                struct spi_mem_op op =
 449                        SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREAR, 1),
 450                                   SPI_MEM_OP_NO_ADDR,
 451                                   SPI_MEM_OP_NO_DUMMY,
 452                                   SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 1));
 453
 454                ret = spi_mem_exec_op(nor->spimem, &op);
 455        } else {
 456                ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WREAR,
 457                                                     nor->bouncebuf, 1);
 458        }
 459
 460        if (ret)
 461                dev_dbg(nor->dev, "error %d writing EAR\n", ret);
 462
 463        return ret;
 464}
 465
 466/**
 467 * spi_nor_xread_sr() - Read the Status Register on S3AN flashes.
 468 * @nor:        pointer to 'struct spi_nor'.
 469 * @sr:         pointer to a DMA-able buffer where the value of the
 470 *              Status Register will be written.
 471 *
 472 * Return: 0 on success, -errno otherwise.
 473 */
 474int spi_nor_xread_sr(struct spi_nor *nor, u8 *sr)
 475{
 476        int ret;
 477
 478        if (nor->spimem) {
 479                struct spi_mem_op op =
 480                        SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_XRDSR, 1),
 481                                   SPI_MEM_OP_NO_ADDR,
 482                                   SPI_MEM_OP_NO_DUMMY,
 483                                   SPI_MEM_OP_DATA_IN(1, sr, 1));
 484
 485                ret = spi_mem_exec_op(nor->spimem, &op);
 486        } else {
 487                ret = nor->controller_ops->read_reg(nor, SPINOR_OP_XRDSR,
 488                                                    sr, 1);
 489        }
 490
 491        if (ret)
 492                dev_dbg(nor->dev, "error %d reading XRDSR\n", ret);
 493
 494        return ret;
 495}
 496
 497/**
 498 * spi_nor_xsr_ready() - Query the Status Register of the S3AN flash to see if
 499 * the flash is ready for new commands.
 500 * @nor:        pointer to 'struct spi_nor'.
 501 *
  502 * Return: 1 if ready, 0 if not ready, -errno on errors.
 503 */
 504static int spi_nor_xsr_ready(struct spi_nor *nor)
 505{
 506        int ret;
 507
 508        ret = spi_nor_xread_sr(nor, nor->bouncebuf);
 509        if (ret)
 510                return ret;
 511
 512        return !!(nor->bouncebuf[0] & XSR_RDY);
 513}
 514
 515/**
 516 * spi_nor_clear_sr() - Clear the Status Register.
 517 * @nor:        pointer to 'struct spi_nor'.
 518 */
 519static void spi_nor_clear_sr(struct spi_nor *nor)
 520{
 521        int ret;
 522
 523        if (nor->spimem) {
 524                struct spi_mem_op op =
 525                        SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLSR, 1),
 526                                   SPI_MEM_OP_NO_ADDR,
 527                                   SPI_MEM_OP_NO_DUMMY,
 528                                   SPI_MEM_OP_NO_DATA);
 529
 530                ret = spi_mem_exec_op(nor->spimem, &op);
 531        } else {
 532                ret = nor->controller_ops->write_reg(nor, SPINOR_OP_CLSR,
 533                                                     NULL, 0);
 534        }
 535
 536        if (ret)
 537                dev_dbg(nor->dev, "error %d clearing SR\n", ret);
 538}
 539
 540/**
 541 * spi_nor_sr_ready() - Query the Status Register to see if the flash is ready
 542 * for new commands.
 543 * @nor:        pointer to 'struct spi_nor'.
 544 *
  545 * Return: 1 if ready, 0 if not ready, -errno on errors.
 546 */
 547static int spi_nor_sr_ready(struct spi_nor *nor)
 548{
 549        int ret = spi_nor_read_sr(nor, nor->bouncebuf);
 550
 551        if (ret)
 552                return ret;
 553
 554        if (nor->flags & SNOR_F_USE_CLSR &&
 555            nor->bouncebuf[0] & (SR_E_ERR | SR_P_ERR)) {
 556                if (nor->bouncebuf[0] & SR_E_ERR)
 557                        dev_err(nor->dev, "Erase Error occurred\n");
 558                else
 559                        dev_err(nor->dev, "Programming Error occurred\n");
 560
 561                spi_nor_clear_sr(nor);
 562
 563                /*
 564                 * WEL bit remains set to one when an erase or page program
 565                 * error occurs. Issue a Write Disable command to protect
 566                 * against inadvertent writes that can possibly corrupt the
 567                 * contents of the memory.
 568                 */
 569                ret = spi_nor_write_disable(nor);
 570                if (ret)
 571                        return ret;
 572
 573                return -EIO;
 574        }
 575
 576        return !(nor->bouncebuf[0] & SR_WIP);
 577}
 578
 579/**
 580 * spi_nor_clear_fsr() - Clear the Flag Status Register.
 581 * @nor:        pointer to 'struct spi_nor'.
 582 */
 583static void spi_nor_clear_fsr(struct spi_nor *nor)
 584{
 585        int ret;
 586
 587        if (nor->spimem) {
 588                struct spi_mem_op op =
 589                        SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLFSR, 1),
 590                                   SPI_MEM_OP_NO_ADDR,
 591                                   SPI_MEM_OP_NO_DUMMY,
 592                                   SPI_MEM_OP_NO_DATA);
 593
 594                ret = spi_mem_exec_op(nor->spimem, &op);
 595        } else {
 596                ret = nor->controller_ops->write_reg(nor, SPINOR_OP_CLFSR,
 597                                                     NULL, 0);
 598        }
 599
 600        if (ret)
 601                dev_dbg(nor->dev, "error %d clearing FSR\n", ret);
 602}
 603
 604/**
 605 * spi_nor_fsr_ready() - Query the Flag Status Register to see if the flash is
 606 * ready for new commands.
 607 * @nor:        pointer to 'struct spi_nor'.
 608 *
  609 * Return: 1 if ready, 0 if not ready, -errno on errors.
 610 */
 611static int spi_nor_fsr_ready(struct spi_nor *nor)
 612{
 613        int ret = spi_nor_read_fsr(nor, nor->bouncebuf);
 614
 615        if (ret)
 616                return ret;
 617
 618        if (nor->bouncebuf[0] & (FSR_E_ERR | FSR_P_ERR)) {
 619                if (nor->bouncebuf[0] & FSR_E_ERR)
 620                        dev_err(nor->dev, "Erase operation failed.\n");
 621                else
 622                        dev_err(nor->dev, "Program operation failed.\n");
 623
 624                if (nor->bouncebuf[0] & FSR_PT_ERR)
 625                        dev_err(nor->dev,
 626                        "Attempted to modify a protected sector.\n");
 627
 628                spi_nor_clear_fsr(nor);
 629
 630                /*
 631                 * WEL bit remains set to one when an erase or page program
 632                 * error occurs. Issue a Write Disable command to protect
 633                 * against inadvertent writes that can possibly corrupt the
 634                 * contents of the memory.
 635                 */
 636                ret = spi_nor_write_disable(nor);
 637                if (ret)
 638                        return ret;
 639
 640                return -EIO;
 641        }
 642
  643        return !!(nor->bouncebuf[0] & FSR_READY);
 644}
 645
 646/**
 647 * spi_nor_ready() - Query the flash to see if it is ready for new commands.
 648 * @nor:        pointer to 'struct spi_nor'.
 649 *
  650 * Return: 1 if ready, 0 if not ready, -errno on errors.
 651 */
 652static int spi_nor_ready(struct spi_nor *nor)
 653{
 654        int sr, fsr;
 655
 656        if (nor->flags & SNOR_F_READY_XSR_RDY)
 657                sr = spi_nor_xsr_ready(nor);
 658        else
 659                sr = spi_nor_sr_ready(nor);
 660        if (sr < 0)
 661                return sr;
 662        fsr = nor->flags & SNOR_F_USE_FSR ? spi_nor_fsr_ready(nor) : 1;
 663        if (fsr < 0)
 664                return fsr;
 665        return sr && fsr;
 666}
 667
 668/**
 669 * spi_nor_wait_till_ready_with_timeout() - Service routine to read the
 670 * Status Register until ready, or timeout occurs.
 671 * @nor:                pointer to "struct spi_nor".
 672 * @timeout_jiffies:    jiffies to wait until timeout.
 673 *
 674 * Return: 0 on success, -errno otherwise.
 675 */
 676static int spi_nor_wait_till_ready_with_timeout(struct spi_nor *nor,
 677                                                unsigned long timeout_jiffies)
 678{
 679        unsigned long deadline;
 680        int timeout = 0, ret;
 681
 682        deadline = jiffies + timeout_jiffies;
 683
 684        while (!timeout) {
 685                if (time_after_eq(jiffies, deadline))
 686                        timeout = 1;
 687
 688                ret = spi_nor_ready(nor);
 689                if (ret < 0)
 690                        return ret;
 691                if (ret)
 692                        return 0;
 693
 694                cond_resched();
 695        }
 696
 697        dev_dbg(nor->dev, "flash operation timed out\n");
 698
 699        return -ETIMEDOUT;
 700}
 701
 702/**
 703 * spi_nor_wait_till_ready() - Wait for a predefined amount of time for the
 704 * flash to be ready, or timeout occurs.
 705 * @nor:        pointer to "struct spi_nor".
 706 *
 707 * Return: 0 on success, -errno otherwise.
 708 */
 709int spi_nor_wait_till_ready(struct spi_nor *nor)
 710{
 711        return spi_nor_wait_till_ready_with_timeout(nor,
 712                                                    DEFAULT_READY_WAIT_JIFFIES);
 713}
 714
 715/**
 716 * spi_nor_write_sr() - Write the Status Register.
 717 * @nor:        pointer to 'struct spi_nor'.
 718 * @sr:         pointer to DMA-able buffer to write to the Status Register.
 719 * @len:        number of bytes to write to the Status Register.
 720 *
 721 * Return: 0 on success, -errno otherwise.
 722 */
 723static int spi_nor_write_sr(struct spi_nor *nor, const u8 *sr, size_t len)
 724{
 725        int ret;
 726
 727        ret = spi_nor_write_enable(nor);
 728        if (ret)
 729                return ret;
 730
 731        if (nor->spimem) {
 732                struct spi_mem_op op =
 733                        SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR, 1),
 734                                   SPI_MEM_OP_NO_ADDR,
 735                                   SPI_MEM_OP_NO_DUMMY,
 736                                   SPI_MEM_OP_DATA_OUT(len, sr, 1));
 737
 738                ret = spi_mem_exec_op(nor->spimem, &op);
 739        } else {
 740                ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WRSR,
 741                                                     sr, len);
 742        }
 743
 744        if (ret) {
 745                dev_dbg(nor->dev, "error %d writing SR\n", ret);
 746                return ret;
 747        }
 748
 749        return spi_nor_wait_till_ready(nor);
 750}
 751
 752/**
 753 * spi_nor_write_sr1_and_check() - Write one byte to the Status Register 1 and
  754 * ensure that the byte written matches the received value.
 755 * @nor:        pointer to a 'struct spi_nor'.
 756 * @sr1:        byte value to be written to the Status Register.
 757 *
 758 * Return: 0 on success, -errno otherwise.
 759 */
 760static int spi_nor_write_sr1_and_check(struct spi_nor *nor, u8 sr1)
 761{
 762        int ret;
 763
 764        nor->bouncebuf[0] = sr1;
 765
 766        ret = spi_nor_write_sr(nor, nor->bouncebuf, 1);
 767        if (ret)
 768                return ret;
 769
 770        ret = spi_nor_read_sr(nor, nor->bouncebuf);
 771        if (ret)
 772                return ret;
 773
 774        if (nor->bouncebuf[0] != sr1) {
 775                dev_dbg(nor->dev, "SR1: read back test failed\n");
 776                return -EIO;
 777        }
 778
 779        return 0;
 780}
 781
 782/**
 783 * spi_nor_write_16bit_sr_and_check() - Write the Status Register 1 and the
 784 * Status Register 2 in one shot. Ensure that the byte written in the Status
  785 * Register 1 matches the received value, and that the 16-bit write did not
 786 * affect what was already in the Status Register 2.
 787 * @nor:        pointer to a 'struct spi_nor'.
 788 * @sr1:        byte value to be written to the Status Register 1.
 789 *
 790 * Return: 0 on success, -errno otherwise.
 791 */
 792static int spi_nor_write_16bit_sr_and_check(struct spi_nor *nor, u8 sr1)
 793{
 794        int ret;
 795        u8 *sr_cr = nor->bouncebuf;
 796        u8 cr_written;
 797
 798        /* Make sure we don't overwrite the contents of Status Register 2. */
 799        if (!(nor->flags & SNOR_F_NO_READ_CR)) {
 800                ret = spi_nor_read_cr(nor, &sr_cr[1]);
 801                if (ret)
 802                        return ret;
 803        } else if (nor->params->quad_enable) {
 804                /*
 805                 * If the Status Register 2 Read command (35h) is not
 806                 * supported, we should at least be sure we don't
 807                 * change the value of the SR2 Quad Enable bit.
 808                 *
 809                 * We can safely assume that when the Quad Enable method is
 810                 * set, the value of the QE bit is one, as a consequence of the
 811                 * nor->params->quad_enable() call.
 812                 *
 813                 * We can safely assume that the Quad Enable bit is present in
 814                 * the Status Register 2 at BIT(1). According to the JESD216
 815                 * revB standard, BFPT DWORDS[15], bits 22:20, the 16-bit
 816                 * Write Status (01h) command is available just for the cases
 817                 * in which the QE bit is described in SR2 at BIT(1).
 818                 */
 819                sr_cr[1] = SR2_QUAD_EN_BIT1;
 820        } else {
 821                sr_cr[1] = 0;
 822        }
 823
 824        sr_cr[0] = sr1;
 825
 826        ret = spi_nor_write_sr(nor, sr_cr, 2);
 827        if (ret)
 828                return ret;
 829
 830        if (nor->flags & SNOR_F_NO_READ_CR)
 831                return 0;
 832
 833        cr_written = sr_cr[1];
 834
 835        ret = spi_nor_read_cr(nor, &sr_cr[1]);
 836        if (ret)
 837                return ret;
 838
 839        if (cr_written != sr_cr[1]) {
 840                dev_dbg(nor->dev, "CR: read back test failed\n");
 841                return -EIO;
 842        }
 843
 844        return 0;
 845}
 846
 847/**
 848 * spi_nor_write_16bit_cr_and_check() - Write the Status Register 1 and the
 849 * Configuration Register in one shot. Ensure that the byte written in the
 850 * Configuration Register match the received value, and that the 16-bit Write
 851 * did not affect what was already in the Status Register 1.
 852 * @nor:        pointer to a 'struct spi_nor'.
 853 * @cr:         byte value to be written to the Configuration Register.
 854 *
 855 * Return: 0 on success, -errno otherwise.
 856 */
 857static int spi_nor_write_16bit_cr_and_check(struct spi_nor *nor, u8 cr)
 858{
 859        int ret;
 860        u8 *sr_cr = nor->bouncebuf;
 861        u8 sr_written;
 862
 863        /* Keep the current value of the Status Register 1. */
 864        ret = spi_nor_read_sr(nor, sr_cr);
 865        if (ret)
 866                return ret;
 867
 868        sr_cr[1] = cr;
 869
 870        ret = spi_nor_write_sr(nor, sr_cr, 2);
 871        if (ret)
 872                return ret;
 873
 874        sr_written = sr_cr[0];
 875
 876        ret = spi_nor_read_sr(nor, sr_cr);
 877        if (ret)
 878                return ret;
 879
 880        if (sr_written != sr_cr[0]) {
 881                dev_dbg(nor->dev, "SR: Read back test failed\n");
 882                return -EIO;
 883        }
 884
 885        if (nor->flags & SNOR_F_NO_READ_CR)
 886                return 0;
 887
 888        ret = spi_nor_read_cr(nor, &sr_cr[1]);
 889        if (ret)
 890                return ret;
 891
 892        if (cr != sr_cr[1]) {
 893                dev_dbg(nor->dev, "CR: read back test failed\n");
 894                return -EIO;
 895        }
 896
 897        return 0;
 898}
 899
 900/**
 901 * spi_nor_write_sr_and_check() - Write the Status Register 1 and ensure that
  902 * the byte written matches the received value without affecting other bits in
  903 * Status Registers 1 and 2.
 904 * @nor:        pointer to a 'struct spi_nor'.
 905 * @sr1:        byte value to be written to the Status Register.
 906 *
 907 * Return: 0 on success, -errno otherwise.
 908 */
 909static int spi_nor_write_sr_and_check(struct spi_nor *nor, u8 sr1)
 910{
 911        if (nor->flags & SNOR_F_HAS_16BIT_SR)
 912                return spi_nor_write_16bit_sr_and_check(nor, sr1);
 913
 914        return spi_nor_write_sr1_and_check(nor, sr1);
 915}
 916
 917/**
 918 * spi_nor_write_sr2() - Write the Status Register 2 using the
 919 * SPINOR_OP_WRSR2 (3eh) command.
 920 * @nor:        pointer to 'struct spi_nor'.
 921 * @sr2:        pointer to DMA-able buffer to write to the Status Register 2.
 922 *
 923 * Return: 0 on success, -errno otherwise.
 924 */
 925static int spi_nor_write_sr2(struct spi_nor *nor, const u8 *sr2)
 926{
 927        int ret;
 928
 929        ret = spi_nor_write_enable(nor);
 930        if (ret)
 931                return ret;
 932
 933        if (nor->spimem) {
 934                struct spi_mem_op op =
 935                        SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR2, 1),
 936                                   SPI_MEM_OP_NO_ADDR,
 937                                   SPI_MEM_OP_NO_DUMMY,
 938                                   SPI_MEM_OP_DATA_OUT(1, sr2, 1));
 939
 940                ret = spi_mem_exec_op(nor->spimem, &op);
 941        } else {
 942                ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WRSR2,
 943                                                     sr2, 1);
 944        }
 945
 946        if (ret) {
 947                dev_dbg(nor->dev, "error %d writing SR2\n", ret);
 948                return ret;
 949        }
 950
 951        return spi_nor_wait_till_ready(nor);
 952}
 953
 954/**
 955 * spi_nor_read_sr2() - Read the Status Register 2 using the
 956 * SPINOR_OP_RDSR2 (3fh) command.
 957 * @nor:        pointer to 'struct spi_nor'.
 958 * @sr2:        pointer to DMA-able buffer where the value of the
 959 *              Status Register 2 will be written.
 960 *
 961 * Return: 0 on success, -errno otherwise.
 962 */
 963static int spi_nor_read_sr2(struct spi_nor *nor, u8 *sr2)
 964{
 965        int ret;
 966
 967        if (nor->spimem) {
 968                struct spi_mem_op op =
 969                        SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR2, 1),
 970                                   SPI_MEM_OP_NO_ADDR,
 971                                   SPI_MEM_OP_NO_DUMMY,
 972                                   SPI_MEM_OP_DATA_IN(1, sr2, 1));
 973
 974                ret = spi_mem_exec_op(nor->spimem, &op);
 975        } else {
 976                ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDSR2,
 977                                                    sr2, 1);
 978        }
 979
 980        if (ret)
 981                dev_dbg(nor->dev, "error %d reading SR2\n", ret);
 982
 983        return ret;
 984}
 985
 986/**
 987 * spi_nor_erase_chip() - Erase the entire flash memory.
 988 * @nor:        pointer to 'struct spi_nor'.
 989 *
 990 * Return: 0 on success, -errno otherwise.
 991 */
 992static int spi_nor_erase_chip(struct spi_nor *nor)
 993{
 994        int ret;
 995
 996        dev_dbg(nor->dev, " %lldKiB\n", (long long)(nor->mtd.size >> 10));
 997
 998        if (nor->spimem) {
 999                struct spi_mem_op op =
1000                        SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CHIP_ERASE, 1),
1001                                   SPI_MEM_OP_NO_ADDR,
1002                                   SPI_MEM_OP_NO_DUMMY,
1003                                   SPI_MEM_OP_NO_DATA);
1004
1005                ret = spi_mem_exec_op(nor->spimem, &op);
1006        } else {
1007                ret = nor->controller_ops->write_reg(nor, SPINOR_OP_CHIP_ERASE,
1008                                                     NULL, 0);
1009        }
1010
1011        if (ret)
1012                dev_dbg(nor->dev, "error %d erasing chip\n", ret);
1013
1014        return ret;
1015}
1016
1017static u8 spi_nor_convert_opcode(u8 opcode, const u8 table[][2], size_t size)
1018{
1019        size_t i;
1020
1021        for (i = 0; i < size; i++)
1022                if (table[i][0] == opcode)
1023                        return table[i][1];
1024
1025        /* No conversion found, keep input op code. */
1026        return opcode;
1027}
1028
1029u8 spi_nor_convert_3to4_read(u8 opcode)
1030{
1031        static const u8 spi_nor_3to4_read[][2] = {
1032                { SPINOR_OP_READ,       SPINOR_OP_READ_4B },
1033                { SPINOR_OP_READ_FAST,  SPINOR_OP_READ_FAST_4B },
1034                { SPINOR_OP_READ_1_1_2, SPINOR_OP_READ_1_1_2_4B },
1035                { SPINOR_OP_READ_1_2_2, SPINOR_OP_READ_1_2_2_4B },
1036                { SPINOR_OP_READ_1_1_4, SPINOR_OP_READ_1_1_4_4B },
1037                { SPINOR_OP_READ_1_4_4, SPINOR_OP_READ_1_4_4_4B },
1038                { SPINOR_OP_READ_1_1_8, SPINOR_OP_READ_1_1_8_4B },
1039                { SPINOR_OP_READ_1_8_8, SPINOR_OP_READ_1_8_8_4B },
1040
1041                { SPINOR_OP_READ_1_1_1_DTR,     SPINOR_OP_READ_1_1_1_DTR_4B },
1042                { SPINOR_OP_READ_1_2_2_DTR,     SPINOR_OP_READ_1_2_2_DTR_4B },
1043                { SPINOR_OP_READ_1_4_4_DTR,     SPINOR_OP_READ_1_4_4_DTR_4B },
1044        };
1045
1046        return spi_nor_convert_opcode(opcode, spi_nor_3to4_read,
1047                                      ARRAY_SIZE(spi_nor_3to4_read));
1048}
1049
1050static u8 spi_nor_convert_3to4_program(u8 opcode)
1051{
1052        static const u8 spi_nor_3to4_program[][2] = {
1053                { SPINOR_OP_PP,         SPINOR_OP_PP_4B },
1054                { SPINOR_OP_PP_1_1_4,   SPINOR_OP_PP_1_1_4_4B },
1055                { SPINOR_OP_PP_1_4_4,   SPINOR_OP_PP_1_4_4_4B },
1056                { SPINOR_OP_PP_1_1_8,   SPINOR_OP_PP_1_1_8_4B },
1057                { SPINOR_OP_PP_1_8_8,   SPINOR_OP_PP_1_8_8_4B },
1058        };
1059
1060        return spi_nor_convert_opcode(opcode, spi_nor_3to4_program,
1061                                      ARRAY_SIZE(spi_nor_3to4_program));
1062}
1063
1064static u8 spi_nor_convert_3to4_erase(u8 opcode)
1065{
1066        static const u8 spi_nor_3to4_erase[][2] = {
1067                { SPINOR_OP_BE_4K,      SPINOR_OP_BE_4K_4B },
1068                { SPINOR_OP_BE_32K,     SPINOR_OP_BE_32K_4B },
1069                { SPINOR_OP_SE,         SPINOR_OP_SE_4B },
1070        };
1071
1072        return spi_nor_convert_opcode(opcode, spi_nor_3to4_erase,
1073                                      ARRAY_SIZE(spi_nor_3to4_erase));
1074}
1075
1076static bool spi_nor_has_uniform_erase(const struct spi_nor *nor)
1077{
1078        return !!nor->params->erase_map.uniform_erase_type;
1079}
1080
1081static void spi_nor_set_4byte_opcodes(struct spi_nor *nor)
1082{
1083        nor->read_opcode = spi_nor_convert_3to4_read(nor->read_opcode);
1084        nor->program_opcode = spi_nor_convert_3to4_program(nor->program_opcode);
1085        nor->erase_opcode = spi_nor_convert_3to4_erase(nor->erase_opcode);
1086
1087        if (!spi_nor_has_uniform_erase(nor)) {
1088                struct spi_nor_erase_map *map = &nor->params->erase_map;
1089                struct spi_nor_erase_type *erase;
1090                int i;
1091
1092                for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
1093                        erase = &map->erase_type[i];
1094                        erase->opcode =
1095                                spi_nor_convert_3to4_erase(erase->opcode);
1096                }
1097        }
1098}
1099
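     /*
      * spi_nor_lock_and_prep() and spi_nor_unlock_and_unprep() bracket a
      * sequence of flash commands: the nor->lock mutex is held and the
      * controller's optional prepare/unprepare hooks are invoked around the
      * whole operation. A typical caller (see spi_nor_erase() below) does:
      *
      *        ret = spi_nor_lock_and_prep(nor);
      *        if (ret)
      *                return ret;
      *        ...issue flash commands...
      *        spi_nor_unlock_and_unprep(nor);
      */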
1100int spi_nor_lock_and_prep(struct spi_nor *nor)
1101{
1102        int ret = 0;
1103
1104        mutex_lock(&nor->lock);
1105
 1106        if (nor->controller_ops && nor->controller_ops->prepare) {
1107                ret = nor->controller_ops->prepare(nor);
1108                if (ret) {
1109                        mutex_unlock(&nor->lock);
1110                        return ret;
1111                }
1112        }
1113        return ret;
1114}
1115
1116void spi_nor_unlock_and_unprep(struct spi_nor *nor)
1117{
1118        if (nor->controller_ops && nor->controller_ops->unprepare)
1119                nor->controller_ops->unprepare(nor);
1120        mutex_unlock(&nor->lock);
1121}
1122
1123static u32 spi_nor_convert_addr(struct spi_nor *nor, loff_t addr)
1124{
1125        if (!nor->params->convert_addr)
1126                return addr;
1127
1128        return nor->params->convert_addr(nor, addr);
1129}
1130
1131/*
1132 * Initiate the erasure of a single sector
1133 */
1134static int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
1135{
1136        int i;
1137
1138        addr = spi_nor_convert_addr(nor, addr);
1139
1140        if (nor->spimem) {
1141                struct spi_mem_op op =
1142                        SPI_MEM_OP(SPI_MEM_OP_CMD(nor->erase_opcode, 1),
1143                                   SPI_MEM_OP_ADDR(nor->addr_width, addr, 1),
1144                                   SPI_MEM_OP_NO_DUMMY,
1145                                   SPI_MEM_OP_NO_DATA);
1146
1147                return spi_mem_exec_op(nor->spimem, &op);
1148        } else if (nor->controller_ops->erase) {
1149                return nor->controller_ops->erase(nor, addr);
1150        }
1151
1152        /*
1153         * Default implementation, if driver doesn't have a specialized HW
1154         * control
1155         */
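             /*
              * The address is serialized big-endian into bouncebuf, e.g.
              * addr = 0x123456 with addr_width = 3 becomes {0x12, 0x34, 0x56}.
              */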
1156        for (i = nor->addr_width - 1; i >= 0; i--) {
1157                nor->bouncebuf[i] = addr & 0xff;
1158                addr >>= 8;
1159        }
1160
1161        return nor->controller_ops->write_reg(nor, nor->erase_opcode,
1162                                              nor->bouncebuf, nor->addr_width);
1163}
1164
1165/**
1166 * spi_nor_div_by_erase_size() - calculate remainder and update new dividend
1167 * @erase:      pointer to a structure that describes a SPI NOR erase type
1168 * @dividend:   dividend value
1169 * @remainder:  pointer to u32 remainder (will be updated)
1170 *
1171 * Return: the result of the division
1172 */
1173static u64 spi_nor_div_by_erase_size(const struct spi_nor_erase_type *erase,
1174                                     u64 dividend, u32 *remainder)
1175{
1176        /* JEDEC JESD216B Standard imposes erase sizes to be power of 2. */
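             /*
              * For example, a 64 KiB erase type uses size_shift = 16 and
              * size_mask = 0xFFFF, so dividend 0x30100 gives 3 with a
              * remainder of 0x100.
              */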
1177        *remainder = (u32)dividend & erase->size_mask;
1178        return dividend >> erase->size_shift;
1179}
1180
1181/**
1182 * spi_nor_find_best_erase_type() - find the best erase type for the given
1183 *                                  offset in the serial flash memory and the
1184 *                                  number of bytes to erase. The region in
1185 *                                  which the address fits is expected to be
1186 *                                  provided.
1187 * @map:        the erase map of the SPI NOR
1188 * @region:     pointer to a structure that describes a SPI NOR erase region
1189 * @addr:       offset in the serial flash memory
1190 * @len:        number of bytes to erase
1191 *
 1192 * Return: a pointer to the best-fitting erase type, NULL otherwise.
1193 */
1194static const struct spi_nor_erase_type *
1195spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map,
1196                             const struct spi_nor_erase_region *region,
1197                             u64 addr, u32 len)
1198{
1199        const struct spi_nor_erase_type *erase;
1200        u32 rem;
1201        int i;
1202        u8 erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;
1203
1204        /*
1205         * Erase types are ordered by size, with the smallest erase type at
1206         * index 0.
1207         */
1208        for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
1209                /* Does the erase region support the tested erase type? */
1210                if (!(erase_mask & BIT(i)))
1211                        continue;
1212
1213                erase = &map->erase_type[i];
1214
1215                /* Don't erase more than what the user has asked for. */
1216                if (erase->size > len)
1217                        continue;
1218
1219                /* Alignment is not mandatory for overlaid regions */
1220                if (region->offset & SNOR_OVERLAID_REGION)
1221                        return erase;
1222
1223                spi_nor_div_by_erase_size(erase, addr, &rem);
1224                if (rem)
1225                        continue;
1226                else
1227                        return erase;
1228        }
1229
1230        return NULL;
1231}
1232
1233static u64 spi_nor_region_is_last(const struct spi_nor_erase_region *region)
1234{
1235        return region->offset & SNOR_LAST_REGION;
1236}
1237
1238static u64 spi_nor_region_end(const struct spi_nor_erase_region *region)
1239{
1240        return (region->offset & ~SNOR_ERASE_FLAGS_MASK) + region->size;
1241}
1242
1243/**
1244 * spi_nor_region_next() - get the next spi nor region
1245 * @region:     pointer to a structure that describes a SPI NOR erase region
1246 *
1247 * Return: the next spi nor region or NULL if last region.
1248 */
1249struct spi_nor_erase_region *
1250spi_nor_region_next(struct spi_nor_erase_region *region)
1251{
1252        if (spi_nor_region_is_last(region))
1253                return NULL;
1254        region++;
1255        return region;
1256}
1257
1258/**
1259 * spi_nor_find_erase_region() - find the region of the serial flash memory in
1260 *                               which the offset fits
1261 * @map:        the erase map of the SPI NOR
1262 * @addr:       offset in the serial flash memory
1263 *
1264 * Return: a pointer to the spi_nor_erase_region struct, ERR_PTR(-errno)
1265 *         otherwise.
1266 */
1267static struct spi_nor_erase_region *
1268spi_nor_find_erase_region(const struct spi_nor_erase_map *map, u64 addr)
1269{
1270        struct spi_nor_erase_region *region = map->regions;
1271        u64 region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
1272        u64 region_end = region_start + region->size;
1273
1274        while (addr < region_start || addr >= region_end) {
1275                region = spi_nor_region_next(region);
1276                if (!region)
1277                        return ERR_PTR(-EINVAL);
1278
1279                region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
1280                region_end = region_start + region->size;
1281        }
1282
1283        return region;
1284}
1285
1286/**
1287 * spi_nor_init_erase_cmd() - initialize an erase command
1288 * @region:     pointer to a structure that describes a SPI NOR erase region
1289 * @erase:      pointer to a structure that describes a SPI NOR erase type
1290 *
1291 * Return: the pointer to the allocated erase command, ERR_PTR(-errno)
1292 *         otherwise.
1293 */
1294static struct spi_nor_erase_command *
1295spi_nor_init_erase_cmd(const struct spi_nor_erase_region *region,
1296                       const struct spi_nor_erase_type *erase)
1297{
1298        struct spi_nor_erase_command *cmd;
1299
1300        cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
1301        if (!cmd)
1302                return ERR_PTR(-ENOMEM);
1303
1304        INIT_LIST_HEAD(&cmd->list);
1305        cmd->opcode = erase->opcode;
1306        cmd->count = 1;
1307
1308        if (region->offset & SNOR_OVERLAID_REGION)
1309                cmd->size = region->size;
1310        else
1311                cmd->size = erase->size;
1312
1313        return cmd;
1314}
1315
1316/**
1317 * spi_nor_destroy_erase_cmd_list() - destroy erase command list
1318 * @erase_list: list of erase commands
1319 */
1320static void spi_nor_destroy_erase_cmd_list(struct list_head *erase_list)
1321{
1322        struct spi_nor_erase_command *cmd, *next;
1323
1324        list_for_each_entry_safe(cmd, next, erase_list, list) {
1325                list_del(&cmd->list);
1326                kfree(cmd);
1327        }
1328}
1329
1330/**
1331 * spi_nor_init_erase_cmd_list() - initialize erase command list
1332 * @nor:        pointer to a 'struct spi_nor'
1333 * @erase_list: list of erase commands to be executed once we validate that the
1334 *              erase can be performed
1335 * @addr:       offset in the serial flash memory
1336 * @len:        number of bytes to erase
1337 *
 1338 * Builds the list of best-fitting erase commands and verifies that the erase can
1339 * be performed.
1340 *
1341 * Return: 0 on success, -errno otherwise.
1342 */
1343static int spi_nor_init_erase_cmd_list(struct spi_nor *nor,
1344                                       struct list_head *erase_list,
1345                                       u64 addr, u32 len)
1346{
1347        const struct spi_nor_erase_map *map = &nor->params->erase_map;
1348        const struct spi_nor_erase_type *erase, *prev_erase = NULL;
1349        struct spi_nor_erase_region *region;
1350        struct spi_nor_erase_command *cmd = NULL;
1351        u64 region_end;
1352        int ret = -EINVAL;
1353
1354        region = spi_nor_find_erase_region(map, addr);
1355        if (IS_ERR(region))
1356                return PTR_ERR(region);
1357
1358        region_end = spi_nor_region_end(region);
1359
1360        while (len) {
1361                erase = spi_nor_find_best_erase_type(map, region, addr, len);
1362                if (!erase)
1363                        goto destroy_erase_cmd_list;
1364
1365                if (prev_erase != erase ||
1366                    region->offset & SNOR_OVERLAID_REGION) {
1367                        cmd = spi_nor_init_erase_cmd(region, erase);
1368                        if (IS_ERR(cmd)) {
1369                                ret = PTR_ERR(cmd);
1370                                goto destroy_erase_cmd_list;
1371                        }
1372
1373                        list_add_tail(&cmd->list, erase_list);
1374                } else {
1375                        cmd->count++;
1376                }
1377
1378                addr += cmd->size;
1379                len -= cmd->size;
1380
1381                if (len && addr >= region_end) {
1382                        region = spi_nor_region_next(region);
1383                        if (!region)
1384                                goto destroy_erase_cmd_list;
1385                        region_end = spi_nor_region_end(region);
1386                }
1387
1388                prev_erase = erase;
1389        }
1390
1391        return 0;
1392
1393destroy_erase_cmd_list:
1394        spi_nor_destroy_erase_cmd_list(erase_list);
1395        return ret;
1396}
1397
1398/**
1399 * spi_nor_erase_multi_sectors() - perform a non-uniform erase
1400 * @nor:        pointer to a 'struct spi_nor'
1401 * @addr:       offset in the serial flash memory
1402 * @len:        number of bytes to erase
1403 *
 1404 * Build a list of best-fitting erase commands and execute it once we validate
1405 * that the erase can be performed.
1406 *
1407 * Return: 0 on success, -errno otherwise.
1408 */
1409static int spi_nor_erase_multi_sectors(struct spi_nor *nor, u64 addr, u32 len)
1410{
1411        LIST_HEAD(erase_list);
1412        struct spi_nor_erase_command *cmd, *next;
1413        int ret;
1414
1415        ret = spi_nor_init_erase_cmd_list(nor, &erase_list, addr, len);
1416        if (ret)
1417                return ret;
1418
1419        list_for_each_entry_safe(cmd, next, &erase_list, list) {
1420                nor->erase_opcode = cmd->opcode;
1421                while (cmd->count) {
1422                        ret = spi_nor_write_enable(nor);
1423                        if (ret)
1424                                goto destroy_erase_cmd_list;
1425
1426                        ret = spi_nor_erase_sector(nor, addr);
1427                        if (ret)
1428                                goto destroy_erase_cmd_list;
1429
1430                        addr += cmd->size;
1431                        cmd->count--;
1432
1433                        ret = spi_nor_wait_till_ready(nor);
1434                        if (ret)
1435                                goto destroy_erase_cmd_list;
1436                }
1437                list_del(&cmd->list);
1438                kfree(cmd);
1439        }
1440
1441        return 0;
1442
1443destroy_erase_cmd_list:
1444        spi_nor_destroy_erase_cmd_list(&erase_list);
1445        return ret;
1446}
1447
1448/*
 1449 * Erase an address range on the NOR chip.  The address range may extend across
 1450 * one or more erase sectors.  Return an error if there is a problem erasing.
1451 */
1452static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
1453{
1454        struct spi_nor *nor = mtd_to_spi_nor(mtd);
1455        u32 addr, len;
1456        uint32_t rem;
1457        int ret;
1458
1459        dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr,
1460                        (long long)instr->len);
1461
1462        if (spi_nor_has_uniform_erase(nor)) {
1463                div_u64_rem(instr->len, mtd->erasesize, &rem);
1464                if (rem)
1465                        return -EINVAL;
1466        }
1467
1468        addr = instr->addr;
1469        len = instr->len;
1470
1471        ret = spi_nor_lock_and_prep(nor);
1472        if (ret)
1473                return ret;
1474
1475        /* whole-chip erase? */
1476        if (len == mtd->size && !(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) {
1477                unsigned long timeout;
1478
1479                ret = spi_nor_write_enable(nor);
1480                if (ret)
1481                        goto erase_err;
1482
1483                ret = spi_nor_erase_chip(nor);
1484                if (ret)
1485                        goto erase_err;
1486
1487                /*
1488                 * Scale the timeout linearly with the size of the flash, with
1489                 * a minimum calibrated to an old 2MB flash. We could try to
1490                 * pull these from CFI/SFDP, but these values should be good
1491                 * enough for now.
1492                 */
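                     /*
                      * e.g. a 16 MiB part is given up to 8 * 40 s = 320 s for
                      * the chip erase to complete.
                      */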
1493                timeout = max(CHIP_ERASE_2MB_READY_WAIT_JIFFIES,
1494                              CHIP_ERASE_2MB_READY_WAIT_JIFFIES *
1495                              (unsigned long)(mtd->size / SZ_2M));
1496                ret = spi_nor_wait_till_ready_with_timeout(nor, timeout);
1497                if (ret)
1498                        goto erase_err;
1499
1500        /* REVISIT in some cases we could speed up erasing large regions
1501         * by using SPINOR_OP_SE instead of SPINOR_OP_BE_4K.  We may have set up
1502         * to use "small sector erase", but that's not always optimal.
1503         */
1504
1505        /* "sector"-at-a-time erase */
1506        } else if (spi_nor_has_uniform_erase(nor)) {
1507                while (len) {
1508                        ret = spi_nor_write_enable(nor);
1509                        if (ret)
1510                                goto erase_err;
1511
1512                        ret = spi_nor_erase_sector(nor, addr);
1513                        if (ret)
1514                                goto erase_err;
1515
1516                        addr += mtd->erasesize;
1517                        len -= mtd->erasesize;
1518
1519                        ret = spi_nor_wait_till_ready(nor);
1520                        if (ret)
1521                                goto erase_err;
1522                }
1523
1524        /* erase multiple sectors */
1525        } else {
1526                ret = spi_nor_erase_multi_sectors(nor, addr, len);
1527                if (ret)
1528                        goto erase_err;
1529        }
1530
1531        ret = spi_nor_write_disable(nor);
1532
1533erase_err:
1534        spi_nor_unlock_and_unprep(nor);
1535
1536        return ret;
1537}
1538
1539static u8 spi_nor_get_sr_bp_mask(struct spi_nor *nor)
1540{
1541        u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
1542
1543        if (nor->flags & SNOR_F_HAS_SR_BP3_BIT6)
1544                return mask | SR_BP3_BIT6;
1545
1546        if (nor->flags & SNOR_F_HAS_4BIT_BP)
1547                return mask | SR_BP3;
1548
1549        return mask;
1550}
1551
1552static u8 spi_nor_get_sr_tb_mask(struct spi_nor *nor)
1553{
1554        if (nor->flags & SNOR_F_HAS_SR_TB_BIT6)
1555                return SR_TB_BIT6;
1556        else
1557                return SR_TB_BIT5;
1558}
1559
1560static u64 spi_nor_get_min_prot_length_sr(struct spi_nor *nor)
1561{
1562        unsigned int bp_slots, bp_slots_needed;
1563        u8 mask = spi_nor_get_sr_bp_mask(nor);
1564
 1565        /* Reserve one slot for "protect none" and one for "protect all". */
1566        bp_slots = (1 << hweight8(mask)) - 2;
1567        bp_slots_needed = ilog2(nor->info->n_sectors);
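            /*
             * Example: an 8 MB flash with 128 x 64 KB sectors and a plain
             * 3-bit BP field has bp_slots = 6 but bp_slots_needed = ilog2(128)
             * = 7, so the minimum protection granularity is 64 KB << 1 =
             * 128 KB (the "Upper 1/64" row in the table further down).
             */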
1568
1569        if (bp_slots_needed > bp_slots)
1570                return nor->info->sector_size <<
1571                        (bp_slots_needed - bp_slots);
1572        else
1573                return nor->info->sector_size;
1574}
1575
1576static void spi_nor_get_locked_range_sr(struct spi_nor *nor, u8 sr, loff_t *ofs,
1577                                        uint64_t *len)
1578{
1579        struct mtd_info *mtd = &nor->mtd;
1580        u64 min_prot_len;
1581        u8 mask = spi_nor_get_sr_bp_mask(nor);
1582        u8 tb_mask = spi_nor_get_sr_tb_mask(nor);
1583        u8 bp, val = sr & mask;
1584
1585        if (nor->flags & SNOR_F_HAS_SR_BP3_BIT6 && val & SR_BP3_BIT6)
1586                val = (val & ~SR_BP3_BIT6) | SR_BP3;
1587
1588        bp = val >> SR_BP_SHIFT;
1589
1590        if (!bp) {
1591                /* No protection */
1592                *ofs = 0;
1593                *len = 0;
1594                return;
1595        }
1596
1597        min_prot_len = spi_nor_get_min_prot_length_sr(nor);
1598        *len = min_prot_len << (bp - 1);
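            /*
             * e.g. with a 128 KB granularity, BP = 3 decodes to
             * 128 KB << 2 = 512 KB, the "Upper 1/16" row of the table below.
             */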
1599
1600        if (*len > mtd->size)
1601                *len = mtd->size;
1602
1603        if (nor->flags & SNOR_F_HAS_SR_TB && sr & tb_mask)
1604                *ofs = 0;
1605        else
1606                *ofs = mtd->size - *len;
1607}
1608
1609/*
1610 * Return 1 if the entire region is locked (if @locked is true) or unlocked (if
1611 * @locked is false); 0 otherwise
1612 */
1613static int spi_nor_check_lock_status_sr(struct spi_nor *nor, loff_t ofs,
1614                                        uint64_t len, u8 sr, bool locked)
1615{
1616        loff_t lock_offs;
1617        uint64_t lock_len;
1618
1619        if (!len)
1620                return 1;
1621
1622        spi_nor_get_locked_range_sr(nor, sr, &lock_offs, &lock_len);
1623
1624        if (locked)
1625                /* Requested range is a sub-range of locked range */
1626                return (ofs + len <= lock_offs + lock_len) && (ofs >= lock_offs);
1627        else
1628                /* Requested range does not overlap with locked range */
1629                return (ofs >= lock_offs + lock_len) || (ofs + len <= lock_offs);
1630}
1631
1632static int spi_nor_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
1633                                u8 sr)
1634{
1635        return spi_nor_check_lock_status_sr(nor, ofs, len, sr, true);
1636}
1637
1638static int spi_nor_is_unlocked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
1639                                  u8 sr)
1640{
1641        return spi_nor_check_lock_status_sr(nor, ofs, len, sr, false);
1642}
1643
1644/*
1645 * Lock a region of the flash. Compatible with ST Micro and similar flash.
1646 * Supports the block protection bits BP{0,1,2}/BP{0,1,2,3} in the status
1647 * register (SR). Does not support these features found in newer SR
1648 * bitfields:
1649 *   - SEC: sector/block protect - only handle SEC=0 (block protect)
1650 *   - CMP: complement protect - only support CMP=0 (range is not complemented)
1651 *
1652 * Support for the following is provided conditionally for some flash:
1653 *   - TB: top/bottom protect
1654 *
1655 * Sample table portion for 8MB flash (Winbond w25q64fw):
1656 *
1657 *   SEC  |  TB   |  BP2  |  BP1  |  BP0  |  Prot Length  | Protected Portion
1658 *  --------------------------------------------------------------------------
1659 *    X   |   X   |   0   |   0   |   0   |  NONE         | NONE
1660 *    0   |   0   |   0   |   0   |   1   |  128 KB       | Upper 1/64
1661 *    0   |   0   |   0   |   1   |   0   |  256 KB       | Upper 1/32
1662 *    0   |   0   |   0   |   1   |   1   |  512 KB       | Upper 1/16
1663 *    0   |   0   |   1   |   0   |   0   |  1 MB         | Upper 1/8
1664 *    0   |   0   |   1   |   0   |   1   |  2 MB         | Upper 1/4
1665 *    0   |   0   |   1   |   1   |   0   |  4 MB         | Upper 1/2
1666 *    X   |   X   |   1   |   1   |   1   |  8 MB         | ALL
1667 *  ------|-------|-------|-------|-------|---------------|-------------------
1668 *    0   |   1   |   0   |   0   |   1   |  128 KB       | Lower 1/64
1669 *    0   |   1   |   0   |   1   |   0   |  256 KB       | Lower 1/32
1670 *    0   |   1   |   0   |   1   |   1   |  512 KB       | Lower 1/16
1671 *    0   |   1   |   1   |   0   |   0   |  1 MB         | Lower 1/8
1672 *    0   |   1   |   1   |   0   |   1   |  2 MB         | Lower 1/4
1673 *    0   |   1   |   1   |   1   |   0   |  4 MB         | Lower 1/2
1674 *
1675 * Returns negative on errors, 0 on success.
1676 */
1677static int spi_nor_sr_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
1678{
1679        struct mtd_info *mtd = &nor->mtd;
1680        u64 min_prot_len;
1681        int ret, status_old, status_new;
1682        u8 mask = spi_nor_get_sr_bp_mask(nor);
1683        u8 tb_mask = spi_nor_get_sr_tb_mask(nor);
1684        u8 pow, val;
1685        loff_t lock_len;
1686        bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
1687        bool use_top;
1688
1689        ret = spi_nor_read_sr(nor, nor->bouncebuf);
1690        if (ret)
1691                return ret;
1692
1693        status_old = nor->bouncebuf[0];
1694
1695        /* If nothing in our range is unlocked, we don't need to do anything */
1696        if (spi_nor_is_locked_sr(nor, ofs, len, status_old))
1697                return 0;
1698
1699        /* If anything below us is unlocked, we can't use 'bottom' protection */
1700        if (!spi_nor_is_locked_sr(nor, 0, ofs, status_old))
1701                can_be_bottom = false;
1702
1703        /* If anything above us is unlocked, we can't use 'top' protection */
1704        if (!spi_nor_is_locked_sr(nor, ofs + len, mtd->size - (ofs + len),
1705                                  status_old))
1706                can_be_top = false;
1707
1708        if (!can_be_bottom && !can_be_top)
1709                return -EINVAL;
1710
1711        /* Prefer top, if both are valid */
1712        use_top = can_be_top;
1713
1714        /* lock_len: length of region that should end up locked */
1715        if (use_top)
1716                lock_len = mtd->size - ofs;
1717        else
1718                lock_len = ofs + len;
1719
1720        if (lock_len == mtd->size) {
1721                val = mask;
1722        } else {
1723                min_prot_len = spi_nor_get_min_prot_length_sr(nor);
1724                pow = ilog2(lock_len) - ilog2(min_prot_len) + 1;
1725                val = pow << SR_BP_SHIFT;
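                    /*
                     * e.g. locking the upper 2 MB of an 8 MB flash with a
                     * 128 KB granularity: pow = 21 - 17 + 1 = 5, i.e.
                     * BP2..BP0 = 101, the "2 MB / Upper 1/4" row above.
                     */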
1726
1727                if (nor->flags & SNOR_F_HAS_SR_BP3_BIT6 && val & SR_BP3)
1728                        val = (val & ~SR_BP3) | SR_BP3_BIT6;
1729
1730                if (val & ~mask)
1731                        return -EINVAL;
1732
1733                /* Don't "lock" with no region! */
1734                if (!(val & mask))
1735                        return -EINVAL;
1736        }
1737
1738        status_new = (status_old & ~mask & ~tb_mask) | val;
1739
1740        /* Disallow further writes if WP pin is asserted */
1741        status_new |= SR_SRWD;
1742
1743        if (!use_top)
1744                status_new |= tb_mask;
1745
1746        /* Don't bother if they're the same */
1747        if (status_new == status_old)
1748                return 0;
1749
1750        /* Only modify protection if it will not unlock other areas */
1751        if ((status_new & mask) < (status_old & mask))
1752                return -EINVAL;
1753
1754        return spi_nor_write_sr_and_check(nor, status_new);
1755}
1756
1757/*
1758 * Unlock a region of the flash. See spi_nor_sr_lock() for more info
1759 *
1760 * Returns negative on errors, 0 on success.
1761 */
1762static int spi_nor_sr_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
1763{
1764        struct mtd_info *mtd = &nor->mtd;
1765        u64 min_prot_len;
1766        int ret, status_old, status_new;
1767        u8 mask = spi_nor_get_sr_bp_mask(nor);
1768        u8 tb_mask = spi_nor_get_sr_tb_mask(nor);
1769        u8 pow, val;
1770        loff_t lock_len;
1771        bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
1772        bool use_top;
1773
1774        ret = spi_nor_read_sr(nor, nor->bouncebuf);
1775        if (ret)
1776                return ret;
1777
1778        status_old = nor->bouncebuf[0];
1779
1780        /* If nothing in our range is locked, we don't need to do anything */
1781        if (spi_nor_is_unlocked_sr(nor, ofs, len, status_old))
1782                return 0;
1783
1784        /* If anything below us is locked, we can't use 'top' protection */
1785        if (!spi_nor_is_unlocked_sr(nor, 0, ofs, status_old))
1786                can_be_top = false;
1787
1788        /* If anything above us is locked, we can't use 'bottom' protection */
1789        if (!spi_nor_is_unlocked_sr(nor, ofs + len, mtd->size - (ofs + len),
1790                                    status_old))
1791                can_be_bottom = false;
1792
1793        if (!can_be_bottom && !can_be_top)
1794                return -EINVAL;
1795
1796        /* Prefer top, if both are valid */
1797        use_top = can_be_top;
1798
1799        /* lock_len: length of region that should remain locked */
1800        if (use_top)
1801                lock_len = mtd->size - (ofs + len);
1802        else
1803                lock_len = ofs;
1804
1805        if (lock_len == 0) {
1806                val = 0; /* fully unlocked */
1807        } else {
1808                min_prot_len = spi_nor_get_min_prot_length_sr(nor);
1809                pow = ilog2(lock_len) - ilog2(min_prot_len) + 1;
1810                val = pow << SR_BP_SHIFT;
1811
1812                if (nor->flags & SNOR_F_HAS_SR_BP3_BIT6 && val & SR_BP3)
1813                        val = (val & ~SR_BP3) | SR_BP3_BIT6;
1814
1815                /* Some power-of-two sizes are not supported */
1816                if (val & ~mask)
1817                        return -EINVAL;
1818        }
1819
1820        status_new = (status_old & ~mask & ~tb_mask) | val;
1821
1822        /* Don't protect status register if we're fully unlocked */
1823        if (lock_len == 0)
1824                status_new &= ~SR_SRWD;
1825
1826        if (!use_top)
1827                status_new |= tb_mask;
1828
1829        /* Don't bother if they're the same */
1830        if (status_new == status_old)
1831                return 0;
1832
1833        /* Only modify protection if it will not lock other areas */
1834        if ((status_new & mask) > (status_old & mask))
1835                return -EINVAL;
1836
1837        return spi_nor_write_sr_and_check(nor, status_new);
1838}
1839
1840/*
1841 * Check if a region of the flash is (completely) locked. See spi_nor_sr_lock()
1842 * for more info.
1843 *
1844 * Returns 1 if entire region is locked, 0 if any portion is unlocked, and
1845 * negative on errors.
1846 */
1847static int spi_nor_sr_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len)
1848{
1849        int ret;
1850
1851        ret = spi_nor_read_sr(nor, nor->bouncebuf);
1852        if (ret)
1853                return ret;
1854
1855        return spi_nor_is_locked_sr(nor, ofs, len, nor->bouncebuf[0]);
1856}
1857
1858static const struct spi_nor_locking_ops spi_nor_sr_locking_ops = {
1859        .lock = spi_nor_sr_lock,
1860        .unlock = spi_nor_sr_unlock,
1861        .is_locked = spi_nor_sr_is_locked,
1862};
1863
1864static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1865{
1866        struct spi_nor *nor = mtd_to_spi_nor(mtd);
1867        int ret;
1868
1869        ret = spi_nor_lock_and_prep(nor);
1870        if (ret)
1871                return ret;
1872
1873        ret = nor->params->locking_ops->lock(nor, ofs, len);
1874
1875        spi_nor_unlock_and_unprep(nor);
1876        return ret;
1877}
1878
1879static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1880{
1881        struct spi_nor *nor = mtd_to_spi_nor(mtd);
1882        int ret;
1883
1884        ret = spi_nor_lock_and_prep(nor);
1885        if (ret)
1886                return ret;
1887
1888        ret = nor->params->locking_ops->unlock(nor, ofs, len);
1889
1890        spi_nor_unlock_and_unprep(nor);
1891        return ret;
1892}
1893
1894static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1895{
1896        struct spi_nor *nor = mtd_to_spi_nor(mtd);
1897        int ret;
1898
1899        ret = spi_nor_lock_and_prep(nor);
1900        if (ret)
1901                return ret;
1902
1903        ret = nor->params->locking_ops->is_locked(nor, ofs, len);
1904
1905        spi_nor_unlock_and_unprep(nor);
1906        return ret;
1907}
1908
1909/**
1910 * spi_nor_sr1_bit6_quad_enable() - Set the Quad Enable BIT(6) in the Status
1911 * Register 1.
1912 * @nor:        pointer to a 'struct spi_nor'
1913 *
1914 * Bit 6 of the Status Register 1 is the QE bit for Macronix-like QSPI memories.
1915 *
1916 * Return: 0 on success, -errno otherwise.
1917 */
1918int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor)
1919{
1920        int ret;
1921
1922        ret = spi_nor_read_sr(nor, nor->bouncebuf);
1923        if (ret)
1924                return ret;
1925
1926        if (nor->bouncebuf[0] & SR1_QUAD_EN_BIT6)
1927                return 0;
1928
1929        nor->bouncebuf[0] |= SR1_QUAD_EN_BIT6;
1930
1931        return spi_nor_write_sr1_and_check(nor, nor->bouncebuf[0]);
1932}
1933
1934/**
1935 * spi_nor_sr2_bit1_quad_enable() - set the Quad Enable BIT(1) in the Status
1936 * Register 2.
1937 * @nor:       pointer to a 'struct spi_nor'.
1938 *
1939 * Bit 1 of the Status Register 2 is the QE bit for Spansion-like QSPI memories.
1940 *
1941 * Return: 0 on success, -errno otherwise.
1942 */
1943int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor)
1944{
1945        int ret;
1946
1947        if (nor->flags & SNOR_F_NO_READ_CR)
1948                return spi_nor_write_16bit_cr_and_check(nor, SR2_QUAD_EN_BIT1);
1949
1950        ret = spi_nor_read_cr(nor, nor->bouncebuf);
1951        if (ret)
1952                return ret;
1953
1954        if (nor->bouncebuf[0] & SR2_QUAD_EN_BIT1)
1955                return 0;
1956
1957        nor->bouncebuf[0] |= SR2_QUAD_EN_BIT1;
1958
1959        return spi_nor_write_16bit_cr_and_check(nor, nor->bouncebuf[0]);
1960}
1961
1962/**
1963 * spi_nor_sr2_bit7_quad_enable() - set QE bit in Status Register 2.
1964 * @nor:        pointer to a 'struct spi_nor'
1965 *
1966 * Set the Quad Enable (QE) bit in the Status Register 2.
1967 *
1968 * This is one of the procedures to set the QE bit described in the SFDP
1969 * (JESD216 rev B) specification but no manufacturer using this procedure has
1970 * been identified yet, hence the name of the function.
1971 *
1972 * Return: 0 on success, -errno otherwise.
1973 */
1974int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor)
1975{
1976        u8 *sr2 = nor->bouncebuf;
1977        int ret;
1978        u8 sr2_written;
1979
1980        /* Check current Quad Enable bit value. */
1981        ret = spi_nor_read_sr2(nor, sr2);
1982        if (ret)
1983                return ret;
1984        if (*sr2 & SR2_QUAD_EN_BIT7)
1985                return 0;
1986
1987        /* Update the Quad Enable bit. */
1988        *sr2 |= SR2_QUAD_EN_BIT7;
1989
1990        ret = spi_nor_write_sr2(nor, sr2);
1991        if (ret)
1992                return ret;
1993
1994        sr2_written = *sr2;
1995
1996        /* Read back and check it. */
1997        ret = spi_nor_read_sr2(nor, sr2);
1998        if (ret)
1999                return ret;
2000
2001        if (*sr2 != sr2_written) {
2002                dev_dbg(nor->dev, "SR2: Read back test failed\n");
2003                return -EIO;
2004        }
2005
2006        return 0;
2007}
2008
2009static const struct spi_nor_manufacturer *manufacturers[] = {
2010        &spi_nor_atmel,
2011        &spi_nor_catalyst,
2012        &spi_nor_eon,
2013        &spi_nor_esmt,
2014        &spi_nor_everspin,
2015        &spi_nor_fujitsu,
2016        &spi_nor_gigadevice,
2017        &spi_nor_intel,
2018        &spi_nor_issi,
2019        &spi_nor_macronix,
2020        &spi_nor_micron,
2021        &spi_nor_st,
2022        &spi_nor_spansion,
2023        &spi_nor_sst,
2024        &spi_nor_winbond,
2025        &spi_nor_xilinx,
2026        &spi_nor_xmc,
2027};
2028
2029static const struct flash_info *
2030spi_nor_search_part_by_id(const struct flash_info *parts, unsigned int nparts,
2031                          const u8 *id)
2032{
2033        unsigned int i;
2034
2035        for (i = 0; i < nparts; i++) {
2036                if (parts[i].id_len &&
2037                    !memcmp(parts[i].id, id, parts[i].id_len))
2038                        return &parts[i];
2039        }
2040
2041        return NULL;
2042}
2043
2044static const struct flash_info *spi_nor_read_id(struct spi_nor *nor)
2045{
2046        const struct flash_info *info;
2047        u8 *id = nor->bouncebuf;
2048        unsigned int i;
2049        int ret;
2050
2051        if (nor->spimem) {
2052                struct spi_mem_op op =
2053                        SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDID, 1),
2054                                   SPI_MEM_OP_NO_ADDR,
2055                                   SPI_MEM_OP_NO_DUMMY,
2056                                   SPI_MEM_OP_DATA_IN(SPI_NOR_MAX_ID_LEN, id, 1));
2057
2058                ret = spi_mem_exec_op(nor->spimem, &op);
2059        } else {
2060                ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDID, id,
2061                                                    SPI_NOR_MAX_ID_LEN);
2062        }
2063        if (ret) {
2064                dev_dbg(nor->dev, "error %d reading JEDEC ID\n", ret);
2065                return ERR_PTR(ret);
2066        }
2067
2068        for (i = 0; i < ARRAY_SIZE(manufacturers); i++) {
2069                info = spi_nor_search_part_by_id(manufacturers[i]->parts,
2070                                                 manufacturers[i]->nparts,
2071                                                 id);
2072                if (info) {
2073                        nor->manufacturer = manufacturers[i];
2074                        return info;
2075                }
2076        }
2077
2078        dev_err(nor->dev, "unrecognized JEDEC id bytes: %*ph\n",
2079                SPI_NOR_MAX_ID_LEN, id);
2080        return ERR_PTR(-ENODEV);
2081}
2082
2083static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
2084                        size_t *retlen, u_char *buf)
2085{
2086        struct spi_nor *nor = mtd_to_spi_nor(mtd);
2087        ssize_t ret;
2088
2089        dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);
2090
2091        ret = spi_nor_lock_and_prep(nor);
2092        if (ret)
2093                return ret;
2094
2095        while (len) {
2096                loff_t addr = from;
2097
2098                addr = spi_nor_convert_addr(nor, addr);
2099
2100                ret = spi_nor_read_data(nor, addr, len, buf);
2101                if (ret == 0) {
2102                        /* We shouldn't see 0-length reads */
2103                        ret = -EIO;
2104                        goto read_err;
2105                }
2106                if (ret < 0)
2107                        goto read_err;
2108
2109                WARN_ON(ret > len);
2110                *retlen += ret;
2111                buf += ret;
2112                from += ret;
2113                len -= ret;
2114        }
2115        ret = 0;
2116
2117read_err:
2118        spi_nor_unlock_and_unprep(nor);
2119        return ret;
2120}
2121
2122/*
2123 * Write an address range to the nor chip.  Data must be written in
2124 * FLASH_PAGESIZE chunks.  The address range may be any size provided
2125 * it is within the physical boundaries.
2126 */
2127static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
2128        size_t *retlen, const u_char *buf)
2129{
2130        struct spi_nor *nor = mtd_to_spi_nor(mtd);
2131        size_t page_offset, page_remain, i;
2132        ssize_t ret;
2133
2134        dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
2135
2136        ret = spi_nor_lock_and_prep(nor);
2137        if (ret)
2138                return ret;
2139
2140        for (i = 0; i < len; ) {
2141                ssize_t written;
2142                loff_t addr = to + i;
2143
2144                /*
2145                 * If page_size is a power of two, the offset can be quickly
2146         * calculated with an AND operation. In the other cases we
2147                 * need to do a modulus operation (more expensive).
2148                 * Power of two numbers have only one bit set and we can use
2149                 * the instruction hweight32 to detect if we need to do a
2150                 * modulus (do_div()) or not.
2151                 */
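                    /*
                     * e.g. a common 256-byte page reduces to addr & 0xff; a
                     * (hypothetical) non-power-of-two page size would take the
                     * do_div() path instead.
                     */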
2152                if (hweight32(nor->page_size) == 1) {
2153                        page_offset = addr & (nor->page_size - 1);
2154                } else {
2155                        uint64_t aux = addr;
2156
2157                        page_offset = do_div(aux, nor->page_size);
2158                }
2159                /* the size of data remaining on the first page */
2160                page_remain = min_t(size_t,
2161                                    nor->page_size - page_offset, len - i);
2162
2163                addr = spi_nor_convert_addr(nor, addr);
2164
2165                ret = spi_nor_write_enable(nor);
2166                if (ret)
2167                        goto write_err;
2168
2169                ret = spi_nor_write_data(nor, addr, page_remain, buf + i);
2170                if (ret < 0)
2171                        goto write_err;
2172                written = ret;
2173
2174                ret = spi_nor_wait_till_ready(nor);
2175                if (ret)
2176                        goto write_err;
2177                *retlen += written;
2178                i += written;
2179        }
2180
2181write_err:
2182        spi_nor_unlock_and_unprep(nor);
2183        return ret;
2184}
2185
2186static int spi_nor_check(struct spi_nor *nor)
2187{
2188        if (!nor->dev ||
2189            (!nor->spimem && !nor->controller_ops) ||
2190            (!nor->spimem && nor->controller_ops &&
2191            (!nor->controller_ops->read ||
2192             !nor->controller_ops->write ||
2193             !nor->controller_ops->read_reg ||
2194             !nor->controller_ops->write_reg))) {
2195                pr_err("spi-nor: please fill all the necessary fields!\n");
2196                return -EINVAL;
2197        }
2198
2199        if (nor->spimem && nor->controller_ops) {
2200                dev_err(nor->dev, "nor->spimem and nor->controller_ops are mutually exclusive, please set just one of them.\n");
2201                return -EINVAL;
2202        }
2203
2204        return 0;
2205}
2206
2207static void
2208spi_nor_set_read_settings(struct spi_nor_read_command *read,
2209                          u8 num_mode_clocks,
2210                          u8 num_wait_states,
2211                          u8 opcode,
2212                          enum spi_nor_protocol proto)
2213{
2214        read->num_mode_clocks = num_mode_clocks;
2215        read->num_wait_states = num_wait_states;
2216        read->opcode = opcode;
2217        read->proto = proto;
2218}
2219
2220void spi_nor_set_pp_settings(struct spi_nor_pp_command *pp, u8 opcode,
2221                             enum spi_nor_protocol proto)
2222{
2223        pp->opcode = opcode;
2224        pp->proto = proto;
2225}
2226
2227static int spi_nor_hwcaps2cmd(u32 hwcaps, const int table[][2], size_t size)
2228{
2229        size_t i;
2230
2231        for (i = 0; i < size; i++)
2232                if (table[i][0] == (int)hwcaps)
2233                        return table[i][1];
2234
2235        return -EINVAL;
2236}
2237
2238int spi_nor_hwcaps_read2cmd(u32 hwcaps)
2239{
2240        static const int hwcaps_read2cmd[][2] = {
2241                { SNOR_HWCAPS_READ,             SNOR_CMD_READ },
2242                { SNOR_HWCAPS_READ_FAST,        SNOR_CMD_READ_FAST },
2243                { SNOR_HWCAPS_READ_1_1_1_DTR,   SNOR_CMD_READ_1_1_1_DTR },
2244                { SNOR_HWCAPS_READ_1_1_2,       SNOR_CMD_READ_1_1_2 },
2245                { SNOR_HWCAPS_READ_1_2_2,       SNOR_CMD_READ_1_2_2 },
2246                { SNOR_HWCAPS_READ_2_2_2,       SNOR_CMD_READ_2_2_2 },
2247                { SNOR_HWCAPS_READ_1_2_2_DTR,   SNOR_CMD_READ_1_2_2_DTR },
2248                { SNOR_HWCAPS_READ_1_1_4,       SNOR_CMD_READ_1_1_4 },
2249                { SNOR_HWCAPS_READ_1_4_4,       SNOR_CMD_READ_1_4_4 },
2250                { SNOR_HWCAPS_READ_4_4_4,       SNOR_CMD_READ_4_4_4 },
2251                { SNOR_HWCAPS_READ_1_4_4_DTR,   SNOR_CMD_READ_1_4_4_DTR },
2252                { SNOR_HWCAPS_READ_1_1_8,       SNOR_CMD_READ_1_1_8 },
2253                { SNOR_HWCAPS_READ_1_8_8,       SNOR_CMD_READ_1_8_8 },
2254                { SNOR_HWCAPS_READ_8_8_8,       SNOR_CMD_READ_8_8_8 },
2255                { SNOR_HWCAPS_READ_1_8_8_DTR,   SNOR_CMD_READ_1_8_8_DTR },
2256        };
2257
2258        return spi_nor_hwcaps2cmd(hwcaps, hwcaps_read2cmd,
2259                                  ARRAY_SIZE(hwcaps_read2cmd));
2260}
2261
2262static int spi_nor_hwcaps_pp2cmd(u32 hwcaps)
2263{
2264        static const int hwcaps_pp2cmd[][2] = {
2265                { SNOR_HWCAPS_PP,               SNOR_CMD_PP },
2266                { SNOR_HWCAPS_PP_1_1_4,         SNOR_CMD_PP_1_1_4 },
2267                { SNOR_HWCAPS_PP_1_4_4,         SNOR_CMD_PP_1_4_4 },
2268                { SNOR_HWCAPS_PP_4_4_4,         SNOR_CMD_PP_4_4_4 },
2269                { SNOR_HWCAPS_PP_1_1_8,         SNOR_CMD_PP_1_1_8 },
2270                { SNOR_HWCAPS_PP_1_8_8,         SNOR_CMD_PP_1_8_8 },
2271                { SNOR_HWCAPS_PP_8_8_8,         SNOR_CMD_PP_8_8_8 },
2272        };
2273
2274        return spi_nor_hwcaps2cmd(hwcaps, hwcaps_pp2cmd,
2275                                  ARRAY_SIZE(hwcaps_pp2cmd));
2276}
2277
2278/**
2279 * spi_nor_spimem_check_op() - check if the operation is supported
2280 *                             by controller
2281 * @nor:        pointer to a 'struct spi_nor'
2282 * @op:         pointer to op template to be checked
2283 *
2284 * Returns 0 if operation is supported, -ENOTSUPP otherwise.
2285 */
2286static int spi_nor_spimem_check_op(struct spi_nor *nor,
2287                                   struct spi_mem_op *op)
2288{
2289        /*
2290         * First test with 4 address bytes. The opcode itself might
2291         * be a 3B addressing opcode but we don't care, because the
2292         * SPI controller implementation should not check the opcode,
2293         * but just the sequence.
2294         */
2295        op->addr.nbytes = 4;
2296        if (!spi_mem_supports_op(nor->spimem, op)) {
2297                if (nor->mtd.size > SZ_16M)
2298                        return -ENOTSUPP;
2299
2300                /* If flash size <= 16MB, 3 address bytes are sufficient */
2301                op->addr.nbytes = 3;
2302                if (!spi_mem_supports_op(nor->spimem, op))
2303                        return -ENOTSUPP;
2304        }
2305
2306        return 0;
2307}
2308
2309/**
2310 * spi_nor_spimem_check_readop() - check if the read op is supported
2311 *                                 by controller
2312 * @nor:         pointer to a 'struct spi_nor'
2313 * @read:        pointer to op template to be checked
2314 *
2315 * Returns 0 if operation is supported, -ENOTSUPP otherwise.
2316 */
2317static int spi_nor_spimem_check_readop(struct spi_nor *nor,
2318                                       const struct spi_nor_read_command *read)
2319{
2320        struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(read->opcode, 1),
2321                                          SPI_MEM_OP_ADDR(3, 0, 1),
2322                                          SPI_MEM_OP_DUMMY(0, 1),
2323                                          SPI_MEM_OP_DATA_IN(0, NULL, 1));
2324
2325        op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(read->proto);
2326        op.addr.buswidth = spi_nor_get_protocol_addr_nbits(read->proto);
2327        op.data.buswidth = spi_nor_get_protocol_data_nbits(read->proto);
2328        op.dummy.buswidth = op.addr.buswidth;
2329        op.dummy.nbytes = (read->num_mode_clocks + read->num_wait_states) *
2330                          op.dummy.buswidth / 8;
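            /*
             * e.g. 8 dummy cycles on a x4 address/dummy bus translate to
             * 8 * 4 / 8 = 4 dummy bytes.
             */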
2331
2332        return spi_nor_spimem_check_op(nor, &op);
2333}
2334
2335/**
2336 * spi_nor_spimem_check_pp() - check if the page program op is supported
2337 *                             by controller
2338 * @nor:         pointer to a 'struct spi_nor'
2339 * @pp:          pointer to op template to be checked
2340 *
2341 * Returns 0 if operation is supported, -ENOTSUPP otherwise.
2342 */
2343static int spi_nor_spimem_check_pp(struct spi_nor *nor,
2344                                   const struct spi_nor_pp_command *pp)
2345{
2346        struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(pp->opcode, 1),
2347                                          SPI_MEM_OP_ADDR(3, 0, 1),
2348                                          SPI_MEM_OP_NO_DUMMY,
2349                                          SPI_MEM_OP_DATA_OUT(0, NULL, 1));
2350
2351        op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(pp->proto);
2352        op.addr.buswidth = spi_nor_get_protocol_addr_nbits(pp->proto);
2353        op.data.buswidth = spi_nor_get_protocol_data_nbits(pp->proto);
2354
2355        return spi_nor_spimem_check_op(nor, &op);
2356}
2357
2358/**
2359 * spi_nor_spimem_adjust_hwcaps() - Find optimal Read/Write protocol
2360 *                                based on SPI controller capabilities
2361 * @nor:        pointer to a 'struct spi_nor'
2362 * @hwcaps:     pointer to resulting capabilities after adjusting
2363 *              according to controller and flash's capability
2364 */
2365static void
2366spi_nor_spimem_adjust_hwcaps(struct spi_nor *nor, u32 *hwcaps)
2367{
2368        struct spi_nor_flash_parameter *params = nor->params;
2369        unsigned int cap;
2370
2371        /* DTR modes are not supported yet, mask them all. */
2372        *hwcaps &= ~SNOR_HWCAPS_DTR;
2373
2374        /* X-X-X modes are not supported yet, mask them all. */
2375        *hwcaps &= ~SNOR_HWCAPS_X_X_X;
2376
2377        for (cap = 0; cap < sizeof(*hwcaps) * BITS_PER_BYTE; cap++) {
2378                int rdidx, ppidx;
2379
2380                if (!(*hwcaps & BIT(cap)))
2381                        continue;
2382
2383                rdidx = spi_nor_hwcaps_read2cmd(BIT(cap));
2384                if (rdidx >= 0 &&
2385                    spi_nor_spimem_check_readop(nor, &params->reads[rdidx]))
2386                        *hwcaps &= ~BIT(cap);
2387
2388                ppidx = spi_nor_hwcaps_pp2cmd(BIT(cap));
2389                if (ppidx < 0)
2390                        continue;
2391
2392                if (spi_nor_spimem_check_pp(nor,
2393                                            &params->page_programs[ppidx]))
2394                        *hwcaps &= ~BIT(cap);
2395        }
2396}
2397
2398/**
2399 * spi_nor_set_erase_type() - set a SPI NOR erase type
2400 * @erase:      pointer to a structure that describes a SPI NOR erase type
2401 * @size:       the size of the sector/block erased by the erase type
2402 * @opcode:     the SPI command op code to erase the sector/block
2403 */
2404void spi_nor_set_erase_type(struct spi_nor_erase_type *erase, u32 size,
2405                            u8 opcode)
2406{
2407        erase->size = size;
2408        erase->opcode = opcode;
2409        /* The JEDEC JESD216B standard requires erase sizes to be a power of 2. */
2410        erase->size_shift = ffs(erase->size) - 1;
2411        erase->size_mask = (1 << erase->size_shift) - 1;
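            /* e.g. a 4096-byte erase type gets size_shift = 12, size_mask = 0xfff. */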
2412}
2413
2414/**
2415 * spi_nor_init_uniform_erase_map() - Initialize uniform erase map
2416 * @map:                the erase map of the SPI NOR
2417 * @erase_mask:         bitmask encoding erase types that can erase the entire
2418 *                      flash memory
2419 * @flash_size:         the spi nor flash memory size
2420 */
2421void spi_nor_init_uniform_erase_map(struct spi_nor_erase_map *map,
2422                                    u8 erase_mask, u64 flash_size)
2423{
2424        /* Offset 0 with erase_mask and SNOR_LAST_REGION bit set */
2425        map->uniform_region.offset = (erase_mask & SNOR_ERASE_TYPE_MASK) |
2426                                     SNOR_LAST_REGION;
2427        map->uniform_region.size = flash_size;
2428        map->regions = &map->uniform_region;
2429        map->uniform_erase_type = erase_mask;
2430}
2431
2432int spi_nor_post_bfpt_fixups(struct spi_nor *nor,
2433                             const struct sfdp_parameter_header *bfpt_header,
2434                             const struct sfdp_bfpt *bfpt,
2435                             struct spi_nor_flash_parameter *params)
2436{
2437        int ret;
2438
2439        if (nor->manufacturer && nor->manufacturer->fixups &&
2440            nor->manufacturer->fixups->post_bfpt) {
2441                ret = nor->manufacturer->fixups->post_bfpt(nor, bfpt_header,
2442                                                           bfpt, params);
2443                if (ret)
2444                        return ret;
2445        }
2446
2447        if (nor->info->fixups && nor->info->fixups->post_bfpt)
2448                return nor->info->fixups->post_bfpt(nor, bfpt_header, bfpt,
2449                                                    params);
2450
2451        return 0;
2452}
2453
2454static int spi_nor_select_read(struct spi_nor *nor,
2455                               u32 shared_hwcaps)
2456{
2457        int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_READ_MASK) - 1;
2458        const struct spi_nor_read_command *read;
2459
2460        if (best_match < 0)
2461                return -EINVAL;
2462
2463        cmd = spi_nor_hwcaps_read2cmd(BIT(best_match));
2464        if (cmd < 0)
2465                return -EINVAL;
2466
2467        read = &nor->params->reads[cmd];
2468        nor->read_opcode = read->opcode;
2469        nor->read_proto = read->proto;
2470
2471        /*
2472         * In the spi-nor framework, we don't need to distinguish
2473         * between mode clock cycles and wait state clock cycles.
2474         * Indeed, the value of the mode clock cycles is used by a QSPI
2475         * flash memory to know whether it should enter or leave its 0-4-4
2476         * (Continuous Read / XIP) mode.
2477         * eXecution In Place is outside the scope of the mtd sub-system.
2478         * Hence we choose to merge both mode and wait state clock cycles
2479         * into the so called dummy clock cycles.
2480         */
2481        nor->read_dummy = read->num_mode_clocks + read->num_wait_states;
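            /*
             * e.g. the legacy Fast Read settings (0 mode clocks, 8 wait
             * states) yield read_dummy = 8.
             */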
2482        return 0;
2483}
2484
2485static int spi_nor_select_pp(struct spi_nor *nor,
2486                             u32 shared_hwcaps)
2487{
2488        int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_PP_MASK) - 1;
2489        const struct spi_nor_pp_command *pp;
2490
2491        if (best_match < 0)
2492                return -EINVAL;
2493
2494        cmd = spi_nor_hwcaps_pp2cmd(BIT(best_match));
2495        if (cmd < 0)
2496                return -EINVAL;
2497
2498        pp = &nor->params->page_programs[cmd];
2499        nor->program_opcode = pp->opcode;
2500        nor->write_proto = pp->proto;
2501        return 0;
2502}
2503
2504/**
2505 * spi_nor_select_uniform_erase() - select optimum uniform erase type
2506 * @map:                the erase map of the SPI NOR
2507 * @wanted_size:        the erase type size to search for. Contains the value of
2508 *                      info->sector_size or of the "small sector" size in case
2509 *                      CONFIG_MTD_SPI_NOR_USE_4K_SECTORS is defined.
2510 *
2511 * Once the optimum uniform sector erase command is found, disable all the
2512 * other.
2513 *
2514 * Return: pointer to erase type on success, NULL otherwise.
2515 */
2516static const struct spi_nor_erase_type *
2517spi_nor_select_uniform_erase(struct spi_nor_erase_map *map,
2518                             const u32 wanted_size)
2519{
2520        const struct spi_nor_erase_type *tested_erase, *erase = NULL;
2521        int i;
2522        u8 uniform_erase_type = map->uniform_erase_type;
2523
2524        for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
2525                if (!(uniform_erase_type & BIT(i)))
2526                        continue;
2527
2528                tested_erase = &map->erase_type[i];
2529
2530                /*
2531                 * If the current erase size is the wanted one, stop here:
2532                 * we have found the right uniform Sector Erase command.
2533                 */
2534                if (tested_erase->size == wanted_size) {
2535                        erase = tested_erase;
2536                        break;
2537                }
2538
2539                /*
2540                 * Otherwise, the current erase size is still a valid candidate.
2541                 * Select the biggest valid candidate.
2542                 */
2543                if (!erase && tested_erase->size)
2544                        erase = tested_erase;
2545                        /* keep iterating to find the wanted_size */
2546        }
2547
2548        if (!erase)
2549                return NULL;
2550
2551        /* Disable all other Sector Erase commands. */
2552        map->uniform_erase_type &= ~SNOR_ERASE_TYPE_MASK;
2553        map->uniform_erase_type |= BIT(erase - map->erase_type);
2554        return erase;
2555}
2556
2557static int spi_nor_select_erase(struct spi_nor *nor)
2558{
2559        struct spi_nor_erase_map *map = &nor->params->erase_map;
2560        const struct spi_nor_erase_type *erase = NULL;
2561        struct mtd_info *mtd = &nor->mtd;
2562        u32 wanted_size = nor->info->sector_size;
2563        int i;
2564
2565        /*
2566         * The previous implementation handling Sector Erase commands assumed
2567         * that the SPI flash memory has a uniform layout and then used only one
2568         * of the supported erase sizes for all Sector Erase commands.
2569         * So to be backward compatible, the new implementation also tries to
2570         * manage the SPI flash memory as uniform with a single erase sector
2571         * size, when possible.
2572         */
2573#ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
2574        /* prefer "small sector" erase if possible */
2575        wanted_size = 4096u;
2576#endif
2577
2578        if (spi_nor_has_uniform_erase(nor)) {
2579                erase = spi_nor_select_uniform_erase(map, wanted_size);
2580                if (!erase)
2581                        return -EINVAL;
2582                nor->erase_opcode = erase->opcode;
2583                mtd->erasesize = erase->size;
2584                return 0;
2585        }
2586
2587        /*
2588         * For non-uniform SPI flash memory, set mtd->erasesize to the
2589         * maximum erase sector size. No need to set nor->erase_opcode.
2590         */
2591        for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
2592                if (map->erase_type[i].size) {
2593                        erase = &map->erase_type[i];
2594                        break;
2595                }
2596        }
2597
2598        if (!erase)
2599                return -EINVAL;
2600
2601        mtd->erasesize = erase->size;
2602        return 0;
2603}
2604
2605static int spi_nor_default_setup(struct spi_nor *nor,
2606                                 const struct spi_nor_hwcaps *hwcaps)
2607{
2608        struct spi_nor_flash_parameter *params = nor->params;
2609        u32 ignored_mask, shared_mask;
2610        int err;
2611
2612        /*
2613         * Keep only the hardware capabilities supported by both the SPI
2614         * controller and the SPI flash memory.
2615         */
2616        shared_mask = hwcaps->mask & params->hwcaps.mask;
2617
2618        if (nor->spimem) {
2619                /*
2620                 * When called from spi_nor_probe(), all caps are set and we
2621                 * need to discard some of them based on what the SPI
2622                 * controller actually supports (using spi_mem_supports_op()).
2623                 */
2624                spi_nor_spimem_adjust_hwcaps(nor, &shared_mask);
2625        } else {
2626                /*
2627                 * SPI n-n-n protocols are not supported when the SPI
2628                 * controller directly implements the spi_nor interface.
2629                 * Yet another reason to switch to spi-mem.
2630                 */
2631                ignored_mask = SNOR_HWCAPS_X_X_X;
2632                if (shared_mask & ignored_mask) {
2633                        dev_dbg(nor->dev,
2634                                "SPI n-n-n protocols are not supported.\n");
2635                        shared_mask &= ~ignored_mask;
2636                }
2637        }
2638
2639        /* Select the (Fast) Read command. */
2640        err = spi_nor_select_read(nor, shared_mask);
2641        if (err) {
2642                dev_dbg(nor->dev,
2643                        "can't select read settings supported by both the SPI controller and memory.\n");
2644                return err;
2645        }
2646
2647        /* Select the Page Program command. */
2648        err = spi_nor_select_pp(nor, shared_mask);
2649        if (err) {
2650                dev_dbg(nor->dev,
2651                        "can't select write settings supported by both the SPI controller and memory.\n");
2652                return err;
2653        }
2654
2655        /* Select the Sector Erase command. */
2656        err = spi_nor_select_erase(nor);
2657        if (err) {
2658                dev_dbg(nor->dev,
2659                        "can't select erase settings supported by both the SPI controller and memory.\n");
2660                return err;
2661        }
2662
2663        return 0;
2664}
2665
2666static int spi_nor_setup(struct spi_nor *nor,
2667                         const struct spi_nor_hwcaps *hwcaps)
2668{
2669        if (!nor->params->setup)
2670                return 0;
2671
2672        return nor->params->setup(nor, hwcaps);
2673}
2674
2675/**
2676 * spi_nor_manufacturer_init_params() - Initialize the flash's parameters and
2677 * settings based on MFR register and ->default_init() hook.
2678 * @nor:        pointer to a 'struct spi-nor'.
2679 */
2680static void spi_nor_manufacturer_init_params(struct spi_nor *nor)
2681{
2682        if (nor->manufacturer && nor->manufacturer->fixups &&
2683            nor->manufacturer->fixups->default_init)
2684                nor->manufacturer->fixups->default_init(nor);
2685
2686        if (nor->info->fixups && nor->info->fixups->default_init)
2687                nor->info->fixups->default_init(nor);
2688}
2689
2690/**
2691 * spi_nor_sfdp_init_params() - Initialize the flash's parameters and settings
2692 * based on JESD216 SFDP standard.
2693 * @nor:        pointer to a 'struct spi_nor'.
2694 *
2695 * The method has a roll-back mechanism: in case the SFDP parsing fails, the
2696 * legacy flash parameters and settings will be restored.
2697 */
2698static void spi_nor_sfdp_init_params(struct spi_nor *nor)
2699{
2700        struct spi_nor_flash_parameter sfdp_params;
2701
2702        memcpy(&sfdp_params, nor->params, sizeof(sfdp_params));
2703
2704        if (spi_nor_parse_sfdp(nor, &sfdp_params)) {
2705                nor->addr_width = 0;
2706                nor->flags &= ~SNOR_F_4B_OPCODES;
2707        } else {
2708                memcpy(nor->params, &sfdp_params, sizeof(*nor->params));
2709        }
2710}
2711
2712/**
2713 * spi_nor_info_init_params() - Initialize the flash's parameters and settings
2714 * based on nor->info data.
2715 * @nor:        pointer to a 'struct spi_nor'.
2716 */
2717static void spi_nor_info_init_params(struct spi_nor *nor)
2718{
2719        struct spi_nor_flash_parameter *params = nor->params;
2720        struct spi_nor_erase_map *map = &params->erase_map;
2721        const struct flash_info *info = nor->info;
2722        struct device_node *np = spi_nor_get_flash_node(nor);
2723        u8 i, erase_mask;
2724
2725        /* Initialize legacy flash parameters and settings. */
2726        params->quad_enable = spi_nor_sr2_bit1_quad_enable;
2727        params->set_4byte_addr_mode = spansion_set_4byte_addr_mode;
2728        params->setup = spi_nor_default_setup;
2729        /* Default to 16-bit Write Status (01h) Command */
2730        nor->flags |= SNOR_F_HAS_16BIT_SR;
2731
2732        /* Set SPI NOR sizes. */
2733        params->size = (u64)info->sector_size * info->n_sectors;
2734        params->page_size = info->page_size;
2735
2736        if (!(info->flags & SPI_NOR_NO_FR)) {
2737                /* Default to Fast Read for DT and non-DT platform devices. */
2738                params->hwcaps.mask |= SNOR_HWCAPS_READ_FAST;
2739
2740                /* Mask out Fast Read if not requested at DT instantiation. */
2741                if (np && !of_property_read_bool(np, "m25p,fast-read"))
2742                        params->hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST;
2743        }
2744
2745        /* (Fast) Read settings. */
2746        params->hwcaps.mask |= SNOR_HWCAPS_READ;
2747        spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ],
2748                                  0, 0, SPINOR_OP_READ,
2749                                  SNOR_PROTO_1_1_1);
2750
2751        if (params->hwcaps.mask & SNOR_HWCAPS_READ_FAST)
2752                spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_FAST],
2753                                          0, 8, SPINOR_OP_READ_FAST,
2754                                          SNOR_PROTO_1_1_1);
2755
2756        if (info->flags & SPI_NOR_DUAL_READ) {
2757                params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2;
2758                spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_2],
2759                                          0, 8, SPINOR_OP_READ_1_1_2,
2760                                          SNOR_PROTO_1_1_2);
2761        }
2762
2763        if (info->flags & SPI_NOR_QUAD_READ) {
2764                params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;
2765                spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_4],
2766                                          0, 8, SPINOR_OP_READ_1_1_4,
2767                                          SNOR_PROTO_1_1_4);
2768        }
2769
2770        if (info->flags & SPI_NOR_OCTAL_READ) {
2771                params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_8;
2772                spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_8],
2773                                          0, 8, SPINOR_OP_READ_1_1_8,
2774                                          SNOR_PROTO_1_1_8);
2775        }
2776
2777        /* Page Program settings. */
2778        params->hwcaps.mask |= SNOR_HWCAPS_PP;
2779        spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP],
2780                                SPINOR_OP_PP, SNOR_PROTO_1_1_1);
2781
2782        /*
2783         * Sector Erase settings. Sort Erase Types in ascending order, with the
2784         * smallest erase size starting at BIT(0).
2785         */
2786        erase_mask = 0;
2787        i = 0;
2788        if (info->flags & SECT_4K_PMC) {
2789                erase_mask |= BIT(i);
2790                spi_nor_set_erase_type(&map->erase_type[i], 4096u,
2791                                       SPINOR_OP_BE_4K_PMC);
2792                i++;
2793        } else if (info->flags & SECT_4K) {
2794                erase_mask |= BIT(i);
2795                spi_nor_set_erase_type(&map->erase_type[i], 4096u,
2796                                       SPINOR_OP_BE_4K);
2797                i++;
2798        }
2799        erase_mask |= BIT(i);
2800        spi_nor_set_erase_type(&map->erase_type[i], info->sector_size,
2801                               SPINOR_OP_SE);
2802        spi_nor_init_uniform_erase_map(map, erase_mask, params->size);
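            /*
             * Illustrative result for a SECT_4K flash with 64 KB sectors:
             * erase_type[0] = 4 KB / SPINOR_OP_BE_4K, erase_type[1] = 64 KB /
             * SPINOR_OP_SE, both enabled in a single uniform region covering
             * params->size.
             */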
2803}
2804
2805/**
2806 * spi_nor_post_sfdp_fixups() - Updates the flash's parameters and settings
2807 * after SFDP has been parsed (it is also called for SPI NORs that do not
2808 * support RDSFDP).
2809 * @nor:        pointer to a 'struct spi_nor'
2810 *
2811 * Typically used to tweak various parameters that could not be extracted by
2812 * other means (i.e. when the information provided by the SFDP/flash_info
2813 * tables is incomplete or wrong).
2814 */
2815static void spi_nor_post_sfdp_fixups(struct spi_nor *nor)
2816{
2817        if (nor->manufacturer && nor->manufacturer->fixups &&
2818            nor->manufacturer->fixups->post_sfdp)
2819                nor->manufacturer->fixups->post_sfdp(nor);
2820
2821        if (nor->info->fixups && nor->info->fixups->post_sfdp)
2822                nor->info->fixups->post_sfdp(nor);
2823}
2824
2825/**
2826 * spi_nor_late_init_params() - Late initialization of default flash parameters.
2827 * @nor:        pointer to a 'struct spi_nor'
2828 *
2829 * Used to set default flash parameters and settings when the ->default_init()
2830 * hook or the SFDP parser leave gaps.
2831 */
2832static void spi_nor_late_init_params(struct spi_nor *nor)
2833{
2834        /*
2835         * NOR protection support. When locking_ops are not provided, we pick
2836         * the default ones.
2837         */
2838        if (nor->flags & SNOR_F_HAS_LOCK && !nor->params->locking_ops)
2839                nor->params->locking_ops = &spi_nor_sr_locking_ops;
2840}
2841
2842/**
2843 * spi_nor_init_params() - Initialize the flash's parameters and settings.
2844 * @nor:        pointer to a 'struct spi_nor'.
2845 *
2846 * The flash parameters and settings are initialized based on a sequence of
2847 * calls that are ordered by priority:
2848 *
2849 * 1/ Default flash parameters initialization. The initializations are done
2850 *    based on nor->info data:
2851 *              spi_nor_info_init_params()
2852 *
2853 * which can be overwritten by:
2854 * 2/ Manufacturer flash parameters initialization. The initializations are
2855 *    done based on the MFR register, or when the decisions cannot be made solely
2856 *    based on MFR, by using specific flash_info tweaks, ->default_init():
2857 *              spi_nor_manufacturer_init_params()
2858 *
2859 * which can be overwritten by:
2860 * 3/ SFDP flash parameters initialization. JESD216 SFDP is a standard and
2861 *    should be more accurate than the above.
2862 *              spi_nor_sfdp_init_params()
2863 *
2864 *    Please note that there is a ->post_bfpt() fixup hook that can overwrite
2865 *    the flash parameters and settings immediately after parsing the Basic
2866 *    Flash Parameter Table.
2867 *
2868 * which can be overwritten by:
2869 * 4/ Post SFDP flash parameters initialization. Used to tweak various
2870 *    parameters that could not be extracted by other means (i.e. when
2871 *    information provided by the SFDP/flash_info tables is incomplete or
2872 *    wrong).
2873 *              spi_nor_post_sfdp_fixups()
2874 *
2875 * 5/ Late default flash parameters initialization, used when the
2876 * ->default_init() hook or the SFDP parser do not set specific params.
2877 *              spi_nor_late_init_params()
2878 */
2879static int spi_nor_init_params(struct spi_nor *nor)
2880{
2881        nor->params = devm_kzalloc(nor->dev, sizeof(*nor->params), GFP_KERNEL);
2882        if (!nor->params)
2883                return -ENOMEM;
2884
2885        spi_nor_info_init_params(nor);
2886
2887        spi_nor_manufacturer_init_params(nor);
2888
2889        if ((nor->info->flags & (SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)) &&
2890            !(nor->info->flags & SPI_NOR_SKIP_SFDP))
2891                spi_nor_sfdp_init_params(nor);
2892
2893        spi_nor_post_sfdp_fixups(nor);
2894
2895        spi_nor_late_init_params(nor);
2896
2897        return 0;
2898}
2899
2900/**
2901 * spi_nor_quad_enable() - enable Quad I/O if needed.
2902 * @nor:                pointer to a 'struct spi_nor'
2903 *
2904 * Return: 0 on success, -errno otherwise.
2905 */
2906static int spi_nor_quad_enable(struct spi_nor *nor)
2907{
2908        if (!nor->params->quad_enable)
2909                return 0;
2910
2911        if (!(spi_nor_get_protocol_width(nor->read_proto) == 4 ||
2912              spi_nor_get_protocol_width(nor->write_proto) == 4))
2913                return 0;
2914
2915        return nor->params->quad_enable(nor);
2916}
2917
2918/**
2919 * spi_nor_unlock_all() - Unlocks the entire flash memory array.
2920 * @nor:        pointer to a 'struct spi_nor'.
2921 *
2922 * Some SPI NOR flashes are write protected by default after a power-on reset
2923 * cycle, in order to avoid inadvertent writes during power-up. Backward
2924 * compatibility requires unlocking the entire flash memory array at power-up
2925 * by default.
2926 */
2927static int spi_nor_unlock_all(struct spi_nor *nor)
2928{
2929        if (nor->flags & SNOR_F_HAS_LOCK)
2930                return spi_nor_unlock(&nor->mtd, 0, nor->params->size);
2931
2932        return 0;
2933}
2934
2935static int spi_nor_init(struct spi_nor *nor)
2936{
2937        int err;
2938
2939        err = spi_nor_quad_enable(nor);
2940        if (err) {
2941                dev_dbg(nor->dev, "quad mode not supported\n");
2942                return err;
2943        }
2944
2945        err = spi_nor_unlock_all(nor);
2946        if (err) {
2947                dev_dbg(nor->dev, "Failed to unlock the entire flash memory array\n");
2948                return err;
2949        }
2950
2951        if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES)) {
2952                /*
2953                 * If the RESET# pin isn't hooked up properly, or the system
2954                 * otherwise doesn't perform a reset command in the boot
2955                 * sequence, it's impossible to 100% protect against unexpected
2956                 * reboots (e.g., crashes). Warn the user (or hopefully, system
2957                 * designer) that this is bad.
2958                 */
2959                WARN_ONCE(nor->flags & SNOR_F_BROKEN_RESET,
2960                          "enabling reset hack; may not recover from unexpected reboots\n");
2961                nor->params->set_4byte_addr_mode(nor, true);
2962        }
2963
2964        return 0;
2965}
2966
2967/* mtd resume handler */
2968static void spi_nor_resume(struct mtd_info *mtd)
2969{
2970        struct spi_nor *nor = mtd_to_spi_nor(mtd);
2971        struct device *dev = nor->dev;
2972        int ret;
2973
2974        /* re-initialize the nor chip */
2975        ret = spi_nor_init(nor);
2976        if (ret)
2977                dev_err(dev, "resume() failed\n");
2978}
2979
2980void spi_nor_restore(struct spi_nor *nor)
2981{
2982        /* restore the addressing mode */
2983        if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES) &&
2984            nor->flags & SNOR_F_BROKEN_RESET)
2985                nor->params->set_4byte_addr_mode(nor, false);
2986}
2987EXPORT_SYMBOL_GPL(spi_nor_restore);
2988
2989static const struct flash_info *spi_nor_match_id(struct spi_nor *nor,
2990                                                 const char *name)
2991{
2992        unsigned int i, j;
2993
2994        for (i = 0; i < ARRAY_SIZE(manufacturers); i++) {
2995                for (j = 0; j < manufacturers[i]->nparts; j++) {
2996                        if (!strcmp(name, manufacturers[i]->parts[j].name)) {
2997                                nor->manufacturer = manufacturers[i];
2998                                return &manufacturers[i]->parts[j];
2999                        }
3000                }
3001        }
3002
3003        return NULL;
3004}
3005
3006static int spi_nor_set_addr_width(struct spi_nor *nor)
3007{
3008        if (nor->addr_width) {
3009                /* already configured from SFDP */
3010        } else if (nor->info->addr_width) {
3011                nor->addr_width = nor->info->addr_width;
3012        } else if (nor->mtd.size > 0x1000000) {
3013                /* enable 4-byte addressing if the device exceeds 16MiB */
3014                nor->addr_width = 4;
3015        } else {
3016                nor->addr_width = 3;
3017        }
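            /*
             * e.g. a 32 MiB part with no SFDP- or flash_info-provided value
             * ends up with addr_width = 4 here.
             */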
3018
3019        if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) {
3020                dev_dbg(nor->dev, "address width is too large: %u\n",
3021                        nor->addr_width);
3022                return -EINVAL;
3023        }
3024
3025        /* Set 4byte opcodes when possible. */
3026        if (nor->addr_width == 4 && nor->flags & SNOR_F_4B_OPCODES &&
3027            !(nor->flags & SNOR_F_HAS_4BAIT))
3028                spi_nor_set_4byte_opcodes(nor);
3029
3030        return 0;
3031}
3032
3033static void spi_nor_debugfs_init(struct spi_nor *nor,
3034                                 const struct flash_info *info)
3035{
3036        struct mtd_info *mtd = &nor->mtd;
3037
3038        mtd->dbg.partname = info->name;
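            /* Expose the JEDEC ID bytes as a hex string, e.g. "spi-nor:ef4018". */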
3039        mtd->dbg.partid = devm_kasprintf(nor->dev, GFP_KERNEL, "spi-nor:%*phN",
3040                                         info->id_len, info->id);
3041}
3042
3043static const struct flash_info *spi_nor_get_flash_info(struct spi_nor *nor,
3044                                                       const char *name)
3045{
3046        const struct flash_info *info = NULL;
3047
3048        if (name)
3049                info = spi_nor_match_id(nor, name);
3050        /* Try to auto-detect if the chip name wasn't specified or wasn't found. */
3051        if (!info)
3052                info = spi_nor_read_id(nor);
3053        if (IS_ERR_OR_NULL(info))
3054                return ERR_PTR(-ENOENT);
3055
3056        /*
3057         * If the caller has specified the name of a flash model that can
3058         * normally be detected by its JEDEC ID, verify it.
3059         */
3060        if (name && info->id_len) {
3061                const struct flash_info *jinfo;
3062
3063                jinfo = spi_nor_read_id(nor);
3064                if (IS_ERR(jinfo)) {
3065                        return jinfo;
3066                } else if (jinfo != info) {
3067                        /*
3068                         * JEDEC knows better, so overwrite platform ID. We
3069                         * can't trust partitions any longer, but we'll let
3070                         * mtd apply them anyway, since some partitions may be
3071                         * marked read-only, and we don't want to lose that
3072                         * information, even if it's not 100% accurate.
3073                         */
3074                        dev_warn(nor->dev, "found %s, expected %s\n",
3075                                 jinfo->name, info->name);
3076                        info = jinfo;
3077                }
3078        }
3079
3080        return info;
3081}
3082
3083int spi_nor_scan(struct spi_nor *nor, const char *name,
3084                 const struct spi_nor_hwcaps *hwcaps)
3085{
3086        const struct flash_info *info;
3087        struct device *dev = nor->dev;
3088        struct mtd_info *mtd = &nor->mtd;
3089        struct device_node *np = spi_nor_get_flash_node(nor);
3090        int ret;
3091        int i;
3092
3093        ret = spi_nor_check(nor);
3094        if (ret)
3095                return ret;
3096
3097        /* Reset SPI protocol for all commands. */
3098        nor->reg_proto = SNOR_PROTO_1_1_1;
3099        nor->read_proto = SNOR_PROTO_1_1_1;
3100        nor->write_proto = SNOR_PROTO_1_1_1;
3101
3102        /*
3103         * We need the bounce buffer early to read/write registers when going
3104         * through the spi-mem layer (buffers have to be DMA-able).
3105         * For spi-mem drivers, the buffer is reallocated after
3106         * spi_nor_scan() returns if nor->page_size turns out to be greater
3107         * than PAGE_SIZE, which is unlikely any time soon since NOR pages
3108         * are usually smaller than 1KB.
3109         */
3110        nor->bouncebuf_size = PAGE_SIZE;
3111        nor->bouncebuf = devm_kmalloc(dev, nor->bouncebuf_size,
3112                                      GFP_KERNEL);
3113        if (!nor->bouncebuf)
3114                return -ENOMEM;
3115
3116        info = spi_nor_get_flash_info(nor, name);
3117        if (IS_ERR(info))
3118                return PTR_ERR(info);
3119
3120        nor->info = info;
3121
3122        spi_nor_debugfs_init(nor, info);
3123
3124        mutex_init(&nor->lock);
3125
3126        /*
3127         * Make sure the XSR_RDY flag is set before calling
3128         * spi_nor_wait_till_ready(): Xilinx S3AN flashes share their
3129         * manufacturer ID (MFR) with Atmel SPI NOR parts.
3130         */
3131        if (info->flags & SPI_NOR_XSR_RDY)
3132                nor->flags |= SNOR_F_READY_XSR_RDY;
3133
3134        if (info->flags & SPI_NOR_HAS_LOCK)
3135                nor->flags |= SNOR_F_HAS_LOCK;
3136
3137        mtd->_write = spi_nor_write;
3138
3139        /* Init flash parameters based on flash_info struct and SFDP */
3140        ret = spi_nor_init_params(nor);
3141        if (ret)
3142                return ret;
3143
3144        if (!mtd->name)
3145                mtd->name = dev_name(dev);
3146        mtd->priv = nor;
3147        mtd->type = MTD_NORFLASH;
3148        mtd->writesize = 1;
3149        mtd->flags = MTD_CAP_NORFLASH;
3150        mtd->size = nor->params->size;
3151        mtd->_erase = spi_nor_erase;
3152        mtd->_read = spi_nor_read;
3153        mtd->_resume = spi_nor_resume;
3154
3155        if (nor->params->locking_ops) {
3156                mtd->_lock = spi_nor_lock;
3157                mtd->_unlock = spi_nor_unlock;
3158                mtd->_is_locked = spi_nor_is_locked;
3159        }
3160
3161        if (info->flags & USE_FSR)
3162                nor->flags |= SNOR_F_USE_FSR;
3163        if (info->flags & SPI_NOR_HAS_TB) {
3164                nor->flags |= SNOR_F_HAS_SR_TB;
3165                if (info->flags & SPI_NOR_TB_SR_BIT6)
3166                        nor->flags |= SNOR_F_HAS_SR_TB_BIT6;
3167        }
3168
3169        if (info->flags & NO_CHIP_ERASE)
3170                nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
3171        if (info->flags & USE_CLSR)
3172                nor->flags |= SNOR_F_USE_CLSR;
3173
3174        if (info->flags & SPI_NOR_4BIT_BP) {
3175                nor->flags |= SNOR_F_HAS_4BIT_BP;
3176                if (info->flags & SPI_NOR_BP3_SR_BIT6)
3177                        nor->flags |= SNOR_F_HAS_SR_BP3_BIT6;
3178        }
3179
3180        if (info->flags & SPI_NOR_NO_ERASE)
3181                mtd->flags |= MTD_NO_ERASE;
3182
3183        mtd->dev.parent = dev;
3184        nor->page_size = nor->params->page_size;
3185        mtd->writebufsize = nor->page_size;
3186
3187        if (of_property_read_bool(np, "broken-flash-reset"))
3188                nor->flags |= SNOR_F_BROKEN_RESET;
3189
3190        /*
3191         * Configure the SPI memory:
3192         * - select op codes for (Fast) Read, Page Program and Sector Erase.
3193         * - set the number of dummy cycles (mode cycles + wait states).
3194         * - set the SPI protocols for register and memory accesses.
3195         */
3196        ret = spi_nor_setup(nor, hwcaps);
3197        if (ret)
3198                return ret;
3199
3200        if (info->flags & SPI_NOR_4B_OPCODES)
3201                nor->flags |= SNOR_F_4B_OPCODES;
3202
3203        ret = spi_nor_set_addr_width(nor);
3204        if (ret)
3205                return ret;
3206
3207        /* Send all the required SPI flash commands to initialize the device. */
3208        ret = spi_nor_init(nor);
3209        if (ret)
3210                return ret;
3211
3212        dev_info(dev, "%s (%lld Kbytes)\n", info->name,
3213                        (long long)mtd->size >> 10);
3214
3215        dev_dbg(dev,
3216                "mtd .name = %s, .size = 0x%llx (%lldMiB), "
3217                ".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
3218                mtd->name, (long long)mtd->size, (long long)(mtd->size >> 20),
3219                mtd->erasesize, mtd->erasesize / 1024, mtd->numeraseregions);
3220
3221        if (mtd->numeraseregions)
3222                for (i = 0; i < mtd->numeraseregions; i++)
3223                        dev_dbg(dev,
3224                                "mtd.eraseregions[%d] = { .offset = 0x%llx, "
3225                                ".erasesize = 0x%.8x (%uKiB), "
3226                                ".numblocks = %d }\n",
3227                                i, (long long)mtd->eraseregions[i].offset,
3228                                mtd->eraseregions[i].erasesize,
3229                                mtd->eraseregions[i].erasesize / 1024,
3230                                mtd->eraseregions[i].numblocks);
3231        return 0;
3232}
3233EXPORT_SYMBOL_GPL(spi_nor_scan);
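
    /*
     * Illustrative sketch (not part of this driver): a SPI NOR controller
     * driver hands the device over to the core with spi_nor_scan() once the
     * spi_nor hooks are set up. The capability mask below is an assumption
     * made for the example; a real driver only advertises what its controller
     * actually supports.
     *
     *        static const struct spi_nor_hwcaps hwcaps = {
     *                .mask = SNOR_HWCAPS_READ |
     *                        SNOR_HWCAPS_READ_FAST |
     *                        SNOR_HWCAPS_PP,
     *        };
     *
     *        ret = spi_nor_scan(nor, NULL, &hwcaps);
     *        if (ret)
     *                return ret;
     *
     * Passing a NULL name requests JEDEC auto-detection, as handled by
     * spi_nor_get_flash_info() above.
     */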
3234
3235static int spi_nor_create_read_dirmap(struct spi_nor *nor)
3236{
3237        struct spi_mem_dirmap_info info = {
3238                .op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 1),
3239                                      SPI_MEM_OP_ADDR(nor->addr_width, 0, 1),
3240                                      SPI_MEM_OP_DUMMY(nor->read_dummy, 1),
3241                                      SPI_MEM_OP_DATA_IN(0, NULL, 1)),
3242                .offset = 0,
3243                .length = nor->mtd.size,
3244        };
3245        struct spi_mem_op *op = &info.op_tmpl;
3246
3247        /* get transfer protocols. */
3248        op->cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->read_proto);
3249        op->addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->read_proto);
3250        op->dummy.buswidth = op->addr.buswidth;
3251        op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->read_proto);
3252
3253        /* convert the dummy cycles to the number of bytes */
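            /* E.g. 8 dummy cycles on a 4-bit-wide bus take (8 * 4) / 8 = 4 bytes. */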
3254        op->dummy.nbytes = (nor->read_dummy * op->dummy.buswidth) / 8;
3255
3256        nor->dirmap.rdesc = devm_spi_mem_dirmap_create(nor->dev, nor->spimem,
3257                                                       &info);
3258        return PTR_ERR_OR_ZERO(nor->dirmap.rdesc);
3259}
3260
3261static int spi_nor_create_write_dirmap(struct spi_nor *nor)
3262{
3263        struct spi_mem_dirmap_info info = {
3264                .op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 1),
3265                                      SPI_MEM_OP_ADDR(nor->addr_width, 0, 1),
3266                                      SPI_MEM_OP_NO_DUMMY,
3267                                      SPI_MEM_OP_DATA_OUT(0, NULL, 1)),
3268                .offset = 0,
3269                .length = nor->mtd.size,
3270        };
3271        struct spi_mem_op *op = &info.op_tmpl;
3272
3273        /* get transfer protocols. */
3274        op->cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->write_proto);
3275        op->addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->write_proto);
3276        op->dummy.buswidth = op->addr.buswidth;
3277        op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->write_proto);
3278
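            /*
             * In SST AAI word programming mode (sst_write_second), only the
             * first program cycle carries an address; follow-up cycles must
             * not send one.
             */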
3279        if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
3280                op->addr.nbytes = 0;
3281
3282        nor->dirmap.wdesc = devm_spi_mem_dirmap_create(nor->dev, nor->spimem,
3283                                                       &info);
3284        return PTR_ERR_OR_ZERO(nor->dirmap.wdesc);
3285}
3286
3287static int spi_nor_probe(struct spi_mem *spimem)
3288{
3289        struct spi_device *spi = spimem->spi;
3290        struct flash_platform_data *data = dev_get_platdata(&spi->dev);
3291        struct spi_nor *nor;
3292        /*
3293         * Enable all caps by default. The core will mask them after
3294         * checking what's really supported using spi_mem_supports_op().
3295         */
3296        const struct spi_nor_hwcaps hwcaps = { .mask = SNOR_HWCAPS_ALL };
3297        char *flash_name;
3298        int ret;
3299
3300        nor = devm_kzalloc(&spi->dev, sizeof(*nor), GFP_KERNEL);
3301        if (!nor)
3302                return -ENOMEM;
3303
3304        nor->spimem = spimem;
3305        nor->dev = &spi->dev;
3306        spi_nor_set_flash_node(nor, spi->dev.of_node);
3307
3308        spi_mem_set_drvdata(spimem, nor);
3309
3310        if (data && data->name)
3311                nor->mtd.name = data->name;
3312
3313        if (!nor->mtd.name)
3314                nor->mtd.name = spi_mem_get_name(spimem);
3315
3316        /*
3317         * For some (historical?) reason many platforms provide two different
3318         * names in flash_platform_data: "name" and "type". Quite often name is
3319         * set to "m25p80" and then "type" provides a real chip name.
3320         * If that's the case, respect "type" and ignore a "name".
3321         */
3322        if (data && data->type)
3323                flash_name = data->type;
3324        else if (!strcmp(spi->modalias, "spi-nor"))
3325                flash_name = NULL; /* auto-detect */
3326        else
3327                flash_name = spi->modalias;
3328
3329        ret = spi_nor_scan(nor, flash_name, &hwcaps);
3330        if (ret)
3331                return ret;
3332
3333        /*
3334         * None of the existing parts have > 512B pages, but let's play it safe
3335         * and add this logic so that if anyone ever adds support for such
3336         * a NOR we don't end up with buffer overflows.
3337         */
3338        if (nor->page_size > PAGE_SIZE) {
3339                nor->bouncebuf_size = nor->page_size;
3340                devm_kfree(nor->dev, nor->bouncebuf);
3341                nor->bouncebuf = devm_kmalloc(nor->dev,
3342                                              nor->bouncebuf_size,
3343                                              GFP_KERNEL);
3344                if (!nor->bouncebuf)
3345                        return -ENOMEM;
3346        }
3347
3348        ret = spi_nor_create_read_dirmap(nor);
3349        if (ret)
3350                return ret;
3351
3352        ret = spi_nor_create_write_dirmap(nor);
3353        if (ret)
3354                return ret;
3355
3356        return mtd_device_register(&nor->mtd, data ? data->parts : NULL,
3357                                   data ? data->nr_parts : 0);
3358}
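
    /*
     * Illustrative sketch (not part of this driver): as the name/type comment
     * in spi_nor_probe() notes, non-DT board code can still name the flash
     * and describe partitions through flash_platform_data. The part names and
     * layout below are assumptions made for the example only.
     *
     *        static struct mtd_partition board_parts[] = {
     *                { .name = "boot", .offset = 0,       .size = SZ_512K },
     *                { .name = "data", .offset = SZ_512K, .size = MTDPART_SIZ_FULL },
     *        };
     *
     *        static struct flash_platform_data board_flash = {
     *                .name     = "m25p80",
     *                .type     = "w25q128",
     *                .parts    = board_parts,
     *                .nr_parts = ARRAY_SIZE(board_parts),
     *        };
     *
     * When both are set, spi_nor_probe() respects "type" and ignores "name".
     */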
3359
3360static int spi_nor_remove(struct spi_mem *spimem)
3361{
3362        struct spi_nor *nor = spi_mem_get_drvdata(spimem);
3363
3364        spi_nor_restore(nor);
3365
3366        /* Clean up MTD stuff. */
3367        return mtd_device_unregister(&nor->mtd);
3368}
3369
3370static void spi_nor_shutdown(struct spi_mem *spimem)
3371{
3372        struct spi_nor *nor = spi_mem_get_drvdata(spimem);
3373
3374        spi_nor_restore(nor);
3375}
3376
3377/*
3378 * Do NOT add to this array without reading the following:
3379 *
3380 * Historically, many flash devices are bound to this driver by their name. But
3381 * since most of these flashes are compatible to some extent, and their
3382 * differences can often be distinguished by the JEDEC read-ID command, we
3383 * encourage new users to add support to the spi-nor library, and simply bind
3384 * against a generic string here (e.g., "jedec,spi-nor").
3385 *
3386 * Many flash names are kept in this list (as well as in spi-nor.c) to keep
3387 * them available as module aliases for existing platforms.
3388 */
3389static const struct spi_device_id spi_nor_dev_ids[] = {
3390        /*
3391         * Allow non-DT platform devices to bind to the "spi-nor" modalias, and
3392         * hack around the fact that the SPI core does not provide uevent
3393         * matching for .of_match_table
3394         */
3395        {"spi-nor"},
3396
3397        /*
3398         * Entries not used in DTs that should be safe to drop after replacing
3399         * them with "spi-nor" in platform data.
3400         */
3401        {"s25sl064a"},  {"w25x16"},     {"m25p10"},     {"m25px64"},
3402
3403        /*
3404         * Entries that were used in DTs without "jedec,spi-nor" fallback and
3405         * should be kept for backward compatibility.
3406         */
3407        {"at25df321a"}, {"at25df641"},  {"at26df081a"},
3408        {"mx25l4005a"}, {"mx25l1606e"}, {"mx25l6405d"}, {"mx25l12805d"},
3409        {"mx25l25635e"},{"mx66l51235l"},
3410        {"n25q064"},    {"n25q128a11"}, {"n25q128a13"}, {"n25q512a"},
3411        {"s25fl256s1"}, {"s25fl512s"},  {"s25sl12801"}, {"s25fl008k"},
3412        {"s25fl064k"},
3413        {"sst25vf040b"},{"sst25vf016b"},{"sst25vf032b"},{"sst25wf040"},
3414        {"m25p40"},     {"m25p80"},     {"m25p16"},     {"m25p32"},
3415        {"m25p64"},     {"m25p128"},
3416        {"w25x80"},     {"w25x32"},     {"w25q32"},     {"w25q32dw"},
3417        {"w25q80bl"},   {"w25q128"},    {"w25q256"},
3418
3419        /* Flashes that can't be detected using JEDEC */
3420        {"m25p05-nonjedec"},    {"m25p10-nonjedec"},    {"m25p20-nonjedec"},
3421        {"m25p40-nonjedec"},    {"m25p80-nonjedec"},    {"m25p16-nonjedec"},
3422        {"m25p32-nonjedec"},    {"m25p64-nonjedec"},    {"m25p128-nonjedec"},
3423
3424        /* Everspin MRAMs (non-JEDEC) */
3425        { "mr25h128" }, /* 128 Kib, 40 MHz */
3426        { "mr25h256" }, /* 256 Kib, 40 MHz */
3427        { "mr25h10" },  /*   1 Mib, 40 MHz */
3428        { "mr25h40" },  /*   4 Mib, 40 MHz */
3429
3430        { },
3431};
3432MODULE_DEVICE_TABLE(spi, spi_nor_dev_ids);
3433
3434static const struct of_device_id spi_nor_of_table[] = {
3435        /*
3436         * Generic compatibility for SPI NOR that can be identified by the
3437         * JEDEC READ ID opcode (0x9F). Use this, if possible.
3438         */
3439        { .compatible = "jedec,spi-nor" },
3440        { /* sentinel */ },
3441};
3442MODULE_DEVICE_TABLE(of, spi_nor_of_table);
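
    /*
     * Illustrative device tree sketch (an assumption, not taken from this
     * file): new boards should use the generic compatible above and let the
     * JEDEC ID drive detection instead of adding part names to
     * spi_nor_dev_ids[]. The property values below are examples only.
     *
     *        flash@0 {
     *                compatible = "jedec,spi-nor";
     *                reg = <0>;
     *                spi-max-frequency = <50000000>;
     *        };
     */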
3443
3444/*
3445 * REVISIT: many of these chips have deep power-down modes, which
3446 * should clearly be entered on suspend() to minimize power use.
3447 * And also when they're otherwise idle...
3448 */
3449static struct spi_mem_driver spi_nor_driver = {
3450        .spidrv = {
3451                .driver = {
3452                        .name = "spi-nor",
3453                        .of_match_table = spi_nor_of_table,
3454                },
3455                .id_table = spi_nor_dev_ids,
3456        },
3457        .probe = spi_nor_probe,
3458        .remove = spi_nor_remove,
3459        .shutdown = spi_nor_shutdown,
3460};
3461module_spi_mem_driver(spi_nor_driver);
3462
3463MODULE_LICENSE("GPL v2");
3464MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>");
3465MODULE_AUTHOR("Mike Lavender");
3466MODULE_DESCRIPTION("framework for SPI NOR");
3467