linux/drivers/mtd/spi-nor/spi-nor.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Based on m25p80.c, by Mike Lavender (mike@steroidmicros.com), with
   4 * influence from lart.c (Abraham Van Der Merwe) and mtd_dataflash.c
   5 *
   6 * Copyright (C) 2005, Intec Automation Inc.
   7 * Copyright (C) 2014, Freescale Semiconductor, Inc.
   8 */
   9
  10#include <linux/err.h>
  11#include <linux/errno.h>
  12#include <linux/module.h>
  13#include <linux/device.h>
  14#include <linux/mutex.h>
  15#include <linux/math64.h>
  16#include <linux/sizes.h>
  17#include <linux/slab.h>
  18#include <linux/sort.h>
  19
  20#include <linux/mtd/mtd.h>
  21#include <linux/of_platform.h>
  22#include <linux/spi/flash.h>
  23#include <linux/mtd/spi-nor.h>
  24
   25/* Define the max timeouts used to poll the status register before we give up. */
  26
  27/*
  28 * For everything but full-chip erase; probably could be much smaller, but kept
  29 * around for safety for now
  30 */
  31#define DEFAULT_READY_WAIT_JIFFIES              (40UL * HZ)
  32
  33/*
  34 * For full-chip erase, calibrated to a 2MB flash (M25P16); should be scaled up
  35 * for larger flash
  36 */
  37#define CHIP_ERASE_2MB_READY_WAIT_JIFFIES       (40UL * HZ)
  38
  39#define SPI_NOR_MAX_ID_LEN      6
  40#define SPI_NOR_MAX_ADDR_WIDTH  4
  41
  42struct spi_nor_read_command {
  43        u8                      num_mode_clocks;
  44        u8                      num_wait_states;
  45        u8                      opcode;
  46        enum spi_nor_protocol   proto;
  47};
  48
  49struct spi_nor_pp_command {
  50        u8                      opcode;
  51        enum spi_nor_protocol   proto;
  52};
  53
  54enum spi_nor_read_command_index {
  55        SNOR_CMD_READ,
  56        SNOR_CMD_READ_FAST,
  57        SNOR_CMD_READ_1_1_1_DTR,
  58
  59        /* Dual SPI */
  60        SNOR_CMD_READ_1_1_2,
  61        SNOR_CMD_READ_1_2_2,
  62        SNOR_CMD_READ_2_2_2,
  63        SNOR_CMD_READ_1_2_2_DTR,
  64
  65        /* Quad SPI */
  66        SNOR_CMD_READ_1_1_4,
  67        SNOR_CMD_READ_1_4_4,
  68        SNOR_CMD_READ_4_4_4,
  69        SNOR_CMD_READ_1_4_4_DTR,
  70
  71        /* Octal SPI */
  72        SNOR_CMD_READ_1_1_8,
  73        SNOR_CMD_READ_1_8_8,
  74        SNOR_CMD_READ_8_8_8,
  75        SNOR_CMD_READ_1_8_8_DTR,
  76
  77        SNOR_CMD_READ_MAX
  78};
  79
  80enum spi_nor_pp_command_index {
  81        SNOR_CMD_PP,
  82
  83        /* Quad SPI */
  84        SNOR_CMD_PP_1_1_4,
  85        SNOR_CMD_PP_1_4_4,
  86        SNOR_CMD_PP_4_4_4,
  87
  88        /* Octal SPI */
  89        SNOR_CMD_PP_1_1_8,
  90        SNOR_CMD_PP_1_8_8,
  91        SNOR_CMD_PP_8_8_8,
  92
  93        SNOR_CMD_PP_MAX
  94};
  95
  96struct spi_nor_flash_parameter {
  97        u64                             size;
  98        u32                             page_size;
  99
 100        struct spi_nor_hwcaps           hwcaps;
 101        struct spi_nor_read_command     reads[SNOR_CMD_READ_MAX];
 102        struct spi_nor_pp_command       page_programs[SNOR_CMD_PP_MAX];
 103
 104        int (*quad_enable)(struct spi_nor *nor);
 105};
 106
 107struct sfdp_parameter_header {
 108        u8              id_lsb;
 109        u8              minor;
 110        u8              major;
 111        u8              length; /* in double words */
 112        u8              parameter_table_pointer[3]; /* byte address */
 113        u8              id_msb;
 114};
 115
 116#define SFDP_PARAM_HEADER_ID(p) (((p)->id_msb << 8) | (p)->id_lsb)
 117#define SFDP_PARAM_HEADER_PTP(p) \
 118        (((p)->parameter_table_pointer[2] << 16) | \
 119         ((p)->parameter_table_pointer[1] <<  8) | \
 120         ((p)->parameter_table_pointer[0] <<  0))
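/*
 * Illustration with made-up header contents: the Parameter Table Pointer is a
 * 24-bit byte address stored little-endian, so a header whose
 * parameter_table_pointer[] is { 0x30, 0x01, 0x00 } points at byte address
 * 0x000130:
 *
 *	SFDP_PARAM_HEADER_PTP(&hdr) == (0x00 << 16) | (0x01 << 8) | 0x30
 *				    == 0x000130
 */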
 121
 122#define SFDP_BFPT_ID            0xff00  /* Basic Flash Parameter Table */
 123#define SFDP_SECTOR_MAP_ID      0xff81  /* Sector Map Table */
 124#define SFDP_4BAIT_ID           0xff84  /* 4-byte Address Instruction Table */
 125
 126#define SFDP_SIGNATURE          0x50444653U
 127#define SFDP_JESD216_MAJOR      1
 128#define SFDP_JESD216_MINOR      0
 129#define SFDP_JESD216A_MINOR     5
 130#define SFDP_JESD216B_MINOR     6
 131
 132struct sfdp_header {
  133        u32             signature; /* 0x50444653U <=> "SFDP" */
 134        u8              minor;
 135        u8              major;
  136        u8              nph; /* 0-based number of parameter headers */
 137        u8              unused;
 138
 139        /* Basic Flash Parameter Table. */
 140        struct sfdp_parameter_header    bfpt_header;
 141};
 142
 143/* Basic Flash Parameter Table */
 144
 145/*
 146 * JESD216 rev B defines a Basic Flash Parameter Table of 16 DWORDs.
 147 * They are indexed from 1 but C arrays are indexed from 0.
 148 */
 149#define BFPT_DWORD(i)           ((i) - 1)
 150#define BFPT_DWORD_MAX          16
 151
  152/* The first version of JESD216 defined only 9 DWORDs. */
 153#define BFPT_DWORD_MAX_JESD216                  9
 154
 155/* 1st DWORD. */
 156#define BFPT_DWORD1_FAST_READ_1_1_2             BIT(16)
 157#define BFPT_DWORD1_ADDRESS_BYTES_MASK          GENMASK(18, 17)
 158#define BFPT_DWORD1_ADDRESS_BYTES_3_ONLY        (0x0UL << 17)
 159#define BFPT_DWORD1_ADDRESS_BYTES_3_OR_4        (0x1UL << 17)
 160#define BFPT_DWORD1_ADDRESS_BYTES_4_ONLY        (0x2UL << 17)
 161#define BFPT_DWORD1_DTR                         BIT(19)
 162#define BFPT_DWORD1_FAST_READ_1_2_2             BIT(20)
 163#define BFPT_DWORD1_FAST_READ_1_4_4             BIT(21)
 164#define BFPT_DWORD1_FAST_READ_1_1_4             BIT(22)
 165
 166/* 5th DWORD. */
 167#define BFPT_DWORD5_FAST_READ_2_2_2             BIT(0)
 168#define BFPT_DWORD5_FAST_READ_4_4_4             BIT(4)
 169
 170/* 11th DWORD. */
 171#define BFPT_DWORD11_PAGE_SIZE_SHIFT            4
 172#define BFPT_DWORD11_PAGE_SIZE_MASK             GENMASK(7, 4)
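/*
 * The page size is encoded in this field as a power of two, i.e. the field
 * stores N and the page size is 2^N bytes (a value of 8 means 256-byte pages).
 * A decoding sketch, assuming "bfpt" holds an already parsed struct sfdp_bfpt;
 * the BFPT parser further down in this file performs an equivalent computation:
 *
 *	u32 val = bfpt.dwords[BFPT_DWORD(11)];
 *
 *	val = (val & BFPT_DWORD11_PAGE_SIZE_MASK) >> BFPT_DWORD11_PAGE_SIZE_SHIFT;
 *	page_size = 1UL << val;
 */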
 173
 174/* 15th DWORD. */
 175
 176/*
 177 * (from JESD216 rev B)
 178 * Quad Enable Requirements (QER):
 179 * - 000b: Device does not have a QE bit. Device detects 1-1-4 and 1-4-4
  180 *         reads based on instruction. The DQ3/HOLD# pin functions as hold
  181 *         during the instruction phase.
 182 * - 001b: QE is bit 1 of status register 2. It is set via Write Status with
 183 *         two data bytes where bit 1 of the second byte is one.
 184 *         [...]
 185 *         Writing only one byte to the status register has the side-effect of
 186 *         clearing status register 2, including the QE bit. The 100b code is
 187 *         used if writing one byte to the status register does not modify
 188 *         status register 2.
 189 * - 010b: QE is bit 6 of status register 1. It is set via Write Status with
 190 *         one data byte where bit 6 is one.
 191 *         [...]
 192 * - 011b: QE is bit 7 of status register 2. It is set via Write status
 193 *         register 2 instruction 3Eh with one data byte where bit 7 is one.
 194 *         [...]
 195 *         The status register 2 is read using instruction 3Fh.
 196 * - 100b: QE is bit 1 of status register 2. It is set via Write Status with
 197 *         two data bytes where bit 1 of the second byte is one.
 198 *         [...]
 199 *         In contrast to the 001b code, writing one byte to the status
 200 *         register does not modify status register 2.
 201 * - 101b: QE is bit 1 of status register 2. Status register 1 is read using
  202 *         Read Status instruction 05h. Status register 2 is read using
 203 *         instruction 35h. QE is set via Write Status instruction 01h with
 204 *         two data bytes where bit 1 of the second byte is one.
 205 *         [...]
 206 */
 207#define BFPT_DWORD15_QER_MASK                   GENMASK(22, 20)
 208#define BFPT_DWORD15_QER_NONE                   (0x0UL << 20) /* Micron */
 209#define BFPT_DWORD15_QER_SR2_BIT1_BUGGY         (0x1UL << 20)
 210#define BFPT_DWORD15_QER_SR1_BIT6               (0x2UL << 20) /* Macronix */
 211#define BFPT_DWORD15_QER_SR2_BIT7               (0x3UL << 20)
 212#define BFPT_DWORD15_QER_SR2_BIT1_NO_RD         (0x4UL << 20)
 213#define BFPT_DWORD15_QER_SR2_BIT1               (0x5UL << 20) /* Spansion */
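/*
 * Rough sketch of how the QER field selects a quad_enable() hook. The actual
 * mapping lives in the BFPT parser further down in this file; this only
 * illustrates the intent of the codes above (the SR2 bit 1 codes select the
 * spansion_*_quad_enable() helpers):
 *
 *	switch (bfpt.dwords[BFPT_DWORD(15)] & BFPT_DWORD15_QER_MASK) {
 *	case BFPT_DWORD15_QER_NONE:
 *		params->quad_enable = NULL;
 *		break;
 *	case BFPT_DWORD15_QER_SR1_BIT6:
 *		params->quad_enable = macronix_quad_enable;
 *		break;
 *	case BFPT_DWORD15_QER_SR2_BIT7:
 *		params->quad_enable = sr2_bit7_quad_enable;
 *		break;
 *	}
 */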
 214
 215struct sfdp_bfpt {
 216        u32     dwords[BFPT_DWORD_MAX];
 217};
 218
 219/**
 220 * struct spi_nor_fixups - SPI NOR fixup hooks
 221 * @post_bfpt: called after the BFPT table has been parsed
 222 *
 223 * Those hooks can be used to tweak the SPI NOR configuration when the SFDP
 224 * table is broken or not available.
 225 */
 226struct spi_nor_fixups {
 227        int (*post_bfpt)(struct spi_nor *nor,
 228                         const struct sfdp_parameter_header *bfpt_header,
 229                         const struct sfdp_bfpt *bfpt,
 230                         struct spi_nor_flash_parameter *params);
 231};
 232
 233struct flash_info {
 234        char            *name;
 235
 236        /*
 237         * This array stores the ID bytes.
  238         * The first three bytes are the JEDEC ID.
 239         * JEDEC ID zero means "no ID" (mostly older chips).
 240         */
 241        u8              id[SPI_NOR_MAX_ID_LEN];
 242        u8              id_len;
 243
 244        /* The size listed here is what works with SPINOR_OP_SE, which isn't
 245         * necessarily called a "sector" by the vendor.
 246         */
 247        unsigned        sector_size;
 248        u16             n_sectors;
 249
 250        u16             page_size;
 251        u16             addr_width;
 252
 253        u16             flags;
 254#define SECT_4K                 BIT(0)  /* SPINOR_OP_BE_4K works uniformly */
 255#define SPI_NOR_NO_ERASE        BIT(1)  /* No erase command needed */
 256#define SST_WRITE               BIT(2)  /* use SST byte programming */
 257#define SPI_NOR_NO_FR           BIT(3)  /* Can't do fastread */
 258#define SECT_4K_PMC             BIT(4)  /* SPINOR_OP_BE_4K_PMC works uniformly */
 259#define SPI_NOR_DUAL_READ       BIT(5)  /* Flash supports Dual Read */
 260#define SPI_NOR_QUAD_READ       BIT(6)  /* Flash supports Quad Read */
 261#define USE_FSR                 BIT(7)  /* use flag status register */
 262#define SPI_NOR_HAS_LOCK        BIT(8)  /* Flash supports lock/unlock via SR */
 263#define SPI_NOR_HAS_TB          BIT(9)  /*
 264                                         * Flash SR has Top/Bottom (TB) protect
 265                                         * bit. Must be used with
 266                                         * SPI_NOR_HAS_LOCK.
 267                                         */
 268#define SPI_S3AN                BIT(10) /*
 269                                         * Xilinx Spartan 3AN In-System Flash
 270                                         * (MFR cannot be used for probing
 271                                         * because it has the same value as
 272                                         * ATMEL flashes)
 273                                         */
 274#define SPI_NOR_4B_OPCODES      BIT(11) /*
 275                                         * Use dedicated 4byte address op codes
 276                                         * to support memory size above 128Mib.
 277                                         */
 278#define NO_CHIP_ERASE           BIT(12) /* Chip does not support chip erase */
 279#define SPI_NOR_SKIP_SFDP       BIT(13) /* Skip parsing of SFDP tables */
 280#define USE_CLSR                BIT(14) /* use CLSR command */
 281#define SPI_NOR_OCTAL_READ      BIT(15) /* Flash supports Octal Read */
 282
 283        /* Part specific fixup hooks. */
 284        const struct spi_nor_fixups *fixups;
 285
 286        int     (*quad_enable)(struct spi_nor *nor);
 287};
 288
 289#define JEDEC_MFR(info) ((info)->id[0])
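/*
 * Example with illustrative ID bytes: for an info entry whose id[] starts with
 * { 0xc2, 0x20, 0x19 }, JEDEC_MFR(info) evaluates to 0xc2, the manufacturer
 * byte (SNOR_MFR_MACRONIX), which is what the manufacturer fixups below
 * switch on.
 */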
 290
 291/*
  292 * Read the status register.
  293 * Return the status register value, or a negative errno if an error
  294 * occurred.
 295 */
 296static int read_sr(struct spi_nor *nor)
 297{
 298        int ret;
 299        u8 val;
 300
 301        ret = nor->read_reg(nor, SPINOR_OP_RDSR, &val, 1);
 302        if (ret < 0) {
 303                pr_err("error %d reading SR\n", (int) ret);
 304                return ret;
 305        }
 306
 307        return val;
 308}
 309
 310/*
  311 * Read the flag status register.
  312 * Return the flag status register value, or a negative errno if an error
  313 * occurred.
 314 */
 315static int read_fsr(struct spi_nor *nor)
 316{
 317        int ret;
 318        u8 val;
 319
 320        ret = nor->read_reg(nor, SPINOR_OP_RDFSR, &val, 1);
 321        if (ret < 0) {
 322                pr_err("error %d reading FSR\n", ret);
 323                return ret;
 324        }
 325
 326        return val;
 327}
 328
 329/*
  330 * Read the configuration register.
  331 * Return the configuration register value, or a negative errno if an
  332 * error occurred.
 333 */
 334static int read_cr(struct spi_nor *nor)
 335{
 336        int ret;
 337        u8 val;
 338
 339        ret = nor->read_reg(nor, SPINOR_OP_RDCR, &val, 1);
 340        if (ret < 0) {
 341                dev_err(nor->dev, "error %d reading CR\n", ret);
 342                return ret;
 343        }
 344
 345        return val;
 346}
 347
 348/*
  349 * Write 1 byte to the status register.
 350 * Returns negative if error occurred.
 351 */
 352static int write_sr(struct spi_nor *nor, u8 val)
 353{
 354        nor->cmd_buf[0] = val;
 355        return nor->write_reg(nor, SPINOR_OP_WRSR, nor->cmd_buf, 1);
 356}
 357
 358/*
 359 * Set write enable latch with Write Enable command.
 360 * Returns negative if error occurred.
 361 */
 362static int write_enable(struct spi_nor *nor)
 363{
 364        return nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0);
 365}
 366
 367/*
 368 * Send write disable instruction to the chip.
 369 */
 370static int write_disable(struct spi_nor *nor)
 371{
 372        return nor->write_reg(nor, SPINOR_OP_WRDI, NULL, 0);
 373}
 374
 375static struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd)
 376{
 377        return mtd->priv;
 378}
 379
 380
 381static u8 spi_nor_convert_opcode(u8 opcode, const u8 table[][2], size_t size)
 382{
 383        size_t i;
 384
 385        for (i = 0; i < size; i++)
 386                if (table[i][0] == opcode)
 387                        return table[i][1];
 388
 389        /* No conversion found, keep input op code. */
 390        return opcode;
 391}
 392
 393static u8 spi_nor_convert_3to4_read(u8 opcode)
 394{
 395        static const u8 spi_nor_3to4_read[][2] = {
 396                { SPINOR_OP_READ,       SPINOR_OP_READ_4B },
 397                { SPINOR_OP_READ_FAST,  SPINOR_OP_READ_FAST_4B },
 398                { SPINOR_OP_READ_1_1_2, SPINOR_OP_READ_1_1_2_4B },
 399                { SPINOR_OP_READ_1_2_2, SPINOR_OP_READ_1_2_2_4B },
 400                { SPINOR_OP_READ_1_1_4, SPINOR_OP_READ_1_1_4_4B },
 401                { SPINOR_OP_READ_1_4_4, SPINOR_OP_READ_1_4_4_4B },
 402                { SPINOR_OP_READ_1_1_8, SPINOR_OP_READ_1_1_8_4B },
 403                { SPINOR_OP_READ_1_8_8, SPINOR_OP_READ_1_8_8_4B },
 404
 405                { SPINOR_OP_READ_1_1_1_DTR,     SPINOR_OP_READ_1_1_1_DTR_4B },
 406                { SPINOR_OP_READ_1_2_2_DTR,     SPINOR_OP_READ_1_2_2_DTR_4B },
 407                { SPINOR_OP_READ_1_4_4_DTR,     SPINOR_OP_READ_1_4_4_DTR_4B },
 408        };
 409
 410        return spi_nor_convert_opcode(opcode, spi_nor_3to4_read,
 411                                      ARRAY_SIZE(spi_nor_3to4_read));
 412}
 413
 414static u8 spi_nor_convert_3to4_program(u8 opcode)
 415{
 416        static const u8 spi_nor_3to4_program[][2] = {
 417                { SPINOR_OP_PP,         SPINOR_OP_PP_4B },
 418                { SPINOR_OP_PP_1_1_4,   SPINOR_OP_PP_1_1_4_4B },
 419                { SPINOR_OP_PP_1_4_4,   SPINOR_OP_PP_1_4_4_4B },
 420                { SPINOR_OP_PP_1_1_8,   SPINOR_OP_PP_1_1_8_4B },
 421                { SPINOR_OP_PP_1_8_8,   SPINOR_OP_PP_1_8_8_4B },
 422        };
 423
 424        return spi_nor_convert_opcode(opcode, spi_nor_3to4_program,
 425                                      ARRAY_SIZE(spi_nor_3to4_program));
 426}
 427
 428static u8 spi_nor_convert_3to4_erase(u8 opcode)
 429{
 430        static const u8 spi_nor_3to4_erase[][2] = {
 431                { SPINOR_OP_BE_4K,      SPINOR_OP_BE_4K_4B },
 432                { SPINOR_OP_BE_32K,     SPINOR_OP_BE_32K_4B },
 433                { SPINOR_OP_SE,         SPINOR_OP_SE_4B },
 434        };
 435
 436        return spi_nor_convert_opcode(opcode, spi_nor_3to4_erase,
 437                                      ARRAY_SIZE(spi_nor_3to4_erase));
 438}
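/*
 * For instance, spi_nor_convert_3to4_erase(SPINOR_OP_SE) returns
 * SPINOR_OP_SE_4B, while an opcode with no 4-byte counterpart in the table
 * (or one that has already been converted) is passed back unchanged by
 * spi_nor_convert_opcode().
 */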
 439
 440static void spi_nor_set_4byte_opcodes(struct spi_nor *nor)
 441{
 442        /* Do some manufacturer fixups first */
 443        switch (JEDEC_MFR(nor->info)) {
 444        case SNOR_MFR_SPANSION:
 445                /* No small sector erase for 4-byte command set */
 446                nor->erase_opcode = SPINOR_OP_SE;
 447                nor->mtd.erasesize = nor->info->sector_size;
 448                break;
 449
 450        default:
 451                break;
 452        }
 453
 454        nor->read_opcode = spi_nor_convert_3to4_read(nor->read_opcode);
 455        nor->program_opcode = spi_nor_convert_3to4_program(nor->program_opcode);
 456        nor->erase_opcode = spi_nor_convert_3to4_erase(nor->erase_opcode);
 457
 458        if (!spi_nor_has_uniform_erase(nor)) {
 459                struct spi_nor_erase_map *map = &nor->erase_map;
 460                struct spi_nor_erase_type *erase;
 461                int i;
 462
 463                for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
 464                        erase = &map->erase_type[i];
 465                        erase->opcode =
 466                                spi_nor_convert_3to4_erase(erase->opcode);
 467                }
 468        }
 469}
 470
 471/* Enable/disable 4-byte addressing mode. */
 472static int set_4byte(struct spi_nor *nor, bool enable)
 473{
 474        int status;
 475        bool need_wren = false;
 476        u8 cmd;
 477
 478        switch (JEDEC_MFR(nor->info)) {
 479        case SNOR_MFR_ST:
 480        case SNOR_MFR_MICRON:
 481                /* Some Micron need WREN command; all will accept it */
 482                need_wren = true;
 483                /* fall through */
 484        case SNOR_MFR_MACRONIX:
 485        case SNOR_MFR_WINBOND:
 486                if (need_wren)
 487                        write_enable(nor);
 488
 489                cmd = enable ? SPINOR_OP_EN4B : SPINOR_OP_EX4B;
 490                status = nor->write_reg(nor, cmd, NULL, 0);
 491                if (need_wren)
 492                        write_disable(nor);
 493
 494                if (!status && !enable &&
 495                    JEDEC_MFR(nor->info) == SNOR_MFR_WINBOND) {
 496                        /*
 497                         * On Winbond W25Q256FV, leaving 4byte mode causes
 498                         * the Extended Address Register to be set to 1, so all
 499                         * 3-byte-address reads come from the second 16M.
 500                         * We must clear the register to enable normal behavior.
 501                         */
 502                        write_enable(nor);
 503                        nor->cmd_buf[0] = 0;
 504                        nor->write_reg(nor, SPINOR_OP_WREAR, nor->cmd_buf, 1);
 505                        write_disable(nor);
 506                }
 507
 508                return status;
 509        default:
 510                /* Spansion style */
 511                nor->cmd_buf[0] = enable << 7;
 512                return nor->write_reg(nor, SPINOR_OP_BRWR, nor->cmd_buf, 1);
 513        }
 514}
 515
 516static int s3an_sr_ready(struct spi_nor *nor)
 517{
 518        int ret;
 519        u8 val;
 520
 521        ret = nor->read_reg(nor, SPINOR_OP_XRDSR, &val, 1);
 522        if (ret < 0) {
 523                dev_err(nor->dev, "error %d reading XRDSR\n", (int) ret);
 524                return ret;
 525        }
 526
 527        return !!(val & XSR_RDY);
 528}
 529
 530static int spi_nor_sr_ready(struct spi_nor *nor)
 531{
 532        int sr = read_sr(nor);
 533        if (sr < 0)
 534                return sr;
 535
 536        if (nor->flags & SNOR_F_USE_CLSR && sr & (SR_E_ERR | SR_P_ERR)) {
 537                if (sr & SR_E_ERR)
 538                        dev_err(nor->dev, "Erase Error occurred\n");
 539                else
 540                        dev_err(nor->dev, "Programming Error occurred\n");
 541
 542                nor->write_reg(nor, SPINOR_OP_CLSR, NULL, 0);
 543                return -EIO;
 544        }
 545
 546        return !(sr & SR_WIP);
 547}
 548
 549static int spi_nor_fsr_ready(struct spi_nor *nor)
 550{
 551        int fsr = read_fsr(nor);
 552        if (fsr < 0)
 553                return fsr;
 554
 555        if (fsr & (FSR_E_ERR | FSR_P_ERR)) {
 556                if (fsr & FSR_E_ERR)
 557                        dev_err(nor->dev, "Erase operation failed.\n");
 558                else
 559                        dev_err(nor->dev, "Program operation failed.\n");
 560
 561                if (fsr & FSR_PT_ERR)
 562                        dev_err(nor->dev,
 563                        "Attempted to modify a protected sector.\n");
 564
 565                nor->write_reg(nor, SPINOR_OP_CLFSR, NULL, 0);
 566                return -EIO;
 567        }
 568
 569        return fsr & FSR_READY;
 570}
 571
 572static int spi_nor_ready(struct spi_nor *nor)
 573{
 574        int sr, fsr;
 575
 576        if (nor->flags & SNOR_F_READY_XSR_RDY)
 577                sr = s3an_sr_ready(nor);
 578        else
 579                sr = spi_nor_sr_ready(nor);
 580        if (sr < 0)
 581                return sr;
 582        fsr = nor->flags & SNOR_F_USE_FSR ? spi_nor_fsr_ready(nor) : 1;
 583        if (fsr < 0)
 584                return fsr;
 585        return sr && fsr;
 586}
 587
 588/*
 589 * Service routine to read status register until ready, or timeout occurs.
 590 * Returns non-zero if error.
 591 */
 592static int spi_nor_wait_till_ready_with_timeout(struct spi_nor *nor,
 593                                                unsigned long timeout_jiffies)
 594{
 595        unsigned long deadline;
 596        int timeout = 0, ret;
 597
 598        deadline = jiffies + timeout_jiffies;
 599
 600        while (!timeout) {
 601                if (time_after_eq(jiffies, deadline))
 602                        timeout = 1;
 603
 604                ret = spi_nor_ready(nor);
 605                if (ret < 0)
 606                        return ret;
 607                if (ret)
 608                        return 0;
 609
 610                cond_resched();
 611        }
 612
 613        dev_err(nor->dev, "flash operation timed out\n");
 614
 615        return -ETIMEDOUT;
 616}
 617
 618static int spi_nor_wait_till_ready(struct spi_nor *nor)
 619{
 620        return spi_nor_wait_till_ready_with_timeout(nor,
 621                                                    DEFAULT_READY_WAIT_JIFFIES);
 622}
 623
 624/*
 625 * Erase the whole flash memory
 626 *
 627 * Returns 0 if successful, non-zero otherwise.
 628 */
 629static int erase_chip(struct spi_nor *nor)
 630{
 631        dev_dbg(nor->dev, " %lldKiB\n", (long long)(nor->mtd.size >> 10));
 632
 633        return nor->write_reg(nor, SPINOR_OP_CHIP_ERASE, NULL, 0);
 634}
 635
 636static int spi_nor_lock_and_prep(struct spi_nor *nor, enum spi_nor_ops ops)
 637{
 638        int ret = 0;
 639
 640        mutex_lock(&nor->lock);
 641
 642        if (nor->prepare) {
 643                ret = nor->prepare(nor, ops);
 644                if (ret) {
 645                        dev_err(nor->dev, "failed in the preparation.\n");
 646                        mutex_unlock(&nor->lock);
 647                        return ret;
 648                }
 649        }
 650        return ret;
 651}
 652
 653static void spi_nor_unlock_and_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
 654{
 655        if (nor->unprepare)
 656                nor->unprepare(nor, ops);
 657        mutex_unlock(&nor->lock);
 658}
 659
 660/*
  661 * This code converts an address to the Default Address Mode, which has
  662 * non-power-of-two page sizes. We must support this mode because it is the
  663 * default mode of the Xilinx tools, it can access the whole flash area, and
  664 * changing over to the Power-of-two mode is irreversible and corrupts the
  665 * original data.
  666 * Addr can safely be an unsigned int: the biggest S3AN device is smaller
  667 * than 4 MiB.
 668 */
 669static loff_t spi_nor_s3an_addr_convert(struct spi_nor *nor, unsigned int addr)
 670{
 671        unsigned int offset;
 672        unsigned int page;
 673
 674        offset = addr % nor->page_size;
 675        page = addr / nor->page_size;
 676        page <<= (nor->page_size > 512) ? 10 : 9;
 677
 678        return page | offset;
 679}
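/*
 * Worked example with illustrative numbers: for an S3AN part using 264-byte
 * pages in the Default Address Mode, addr = 1000 gives page = 3 and
 * offset = 208. Since page_size <= 512 the page index is shifted left by 9,
 * so the function returns (3 << 9) | 208 = 0x6d0, i.e. byte 208 of page 3.
 */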
 680
 681/*
 682 * Initiate the erasure of a single sector
 683 */
 684static int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
 685{
 686        u8 buf[SPI_NOR_MAX_ADDR_WIDTH];
 687        int i;
 688
 689        if (nor->flags & SNOR_F_S3AN_ADDR_DEFAULT)
 690                addr = spi_nor_s3an_addr_convert(nor, addr);
 691
 692        if (nor->erase)
 693                return nor->erase(nor, addr);
 694
 695        /*
 696         * Default implementation, if driver doesn't have a specialized HW
 697         * control
 698         */
 699        for (i = nor->addr_width - 1; i >= 0; i--) {
 700                buf[i] = addr & 0xff;
 701                addr >>= 8;
 702        }
 703
 704        return nor->write_reg(nor, nor->erase_opcode, buf, nor->addr_width);
 705}
 706
 707/**
  708 * spi_nor_div_by_erase_size() - divide the dividend by the erase type size
 709 * @erase:      pointer to a structure that describes a SPI NOR erase type
 710 * @dividend:   dividend value
 711 * @remainder:  pointer to u32 remainder (will be updated)
 712 *
  713 * Return: the quotient of the division
 714 */
 715static u64 spi_nor_div_by_erase_size(const struct spi_nor_erase_type *erase,
 716                                     u64 dividend, u32 *remainder)
 717{
 718        /* JEDEC JESD216B Standard imposes erase sizes to be power of 2. */
 719        *remainder = (u32)dividend & erase->size_mask;
 720        return dividend >> erase->size_shift;
 721}
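/*
 * Worked example with an illustrative erase type: for a 64 KiB erase type,
 * size_shift = 16 and size_mask = 0xffff, so a dividend of 0x90000 returns a
 * quotient of 9 with *remainder == 0, while 0x91000 returns 9 with
 * *remainder == 0x1000 (the offset is not aligned to the erase size).
 */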
 722
 723/**
 724 * spi_nor_find_best_erase_type() - find the best erase type for the given
 725 *                                  offset in the serial flash memory and the
 726 *                                  number of bytes to erase. The region in
 727 *                                  which the address fits is expected to be
 728 *                                  provided.
 729 * @map:        the erase map of the SPI NOR
 730 * @region:     pointer to a structure that describes a SPI NOR erase region
 731 * @addr:       offset in the serial flash memory
 732 * @len:        number of bytes to erase
 733 *
 734 * Return: a pointer to the best fitted erase type, NULL otherwise.
 735 */
 736static const struct spi_nor_erase_type *
 737spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map,
 738                             const struct spi_nor_erase_region *region,
 739                             u64 addr, u32 len)
 740{
 741        const struct spi_nor_erase_type *erase;
 742        u32 rem;
 743        int i;
 744        u8 erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;
 745
 746        /*
 747         * Erase types are ordered by size, with the smallest erase type at
 748         * index 0.
 749         */
 750        for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
 751                /* Does the erase region support the tested erase type? */
 752                if (!(erase_mask & BIT(i)))
 753                        continue;
 754
 755                erase = &map->erase_type[i];
 756
 757                /* Don't erase more than what the user has asked for. */
 758                if (erase->size > len)
 759                        continue;
 760
 761                /* Alignment is not mandatory for overlaid regions */
 762                if (region->offset & SNOR_OVERLAID_REGION)
 763                        return erase;
 764
 765                spi_nor_div_by_erase_size(erase, addr, &rem);
 766                if (rem)
 767                        continue;
 768                else
 769                        return erase;
 770        }
 771
 772        return NULL;
 773}
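/*
 * Illustration with a hypothetical map: if the region advertises both a 4 KiB
 * and a 64 KiB erase type, then for addr = 0x10000 and len = 0x20000 the
 * 64 KiB type is returned (it fits in len and addr is aligned to it), whereas
 * for len = 0x1000 the 64 KiB type is skipped as too large and the 4 KiB type
 * is returned instead.
 */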
 774
 775/**
 776 * spi_nor_region_next() - get the next spi nor region
 777 * @region:     pointer to a structure that describes a SPI NOR erase region
 778 *
 779 * Return: the next spi nor region or NULL if last region.
 780 */
 781static struct spi_nor_erase_region *
 782spi_nor_region_next(struct spi_nor_erase_region *region)
 783{
 784        if (spi_nor_region_is_last(region))
 785                return NULL;
 786        region++;
 787        return region;
 788}
 789
 790/**
 791 * spi_nor_find_erase_region() - find the region of the serial flash memory in
 792 *                               which the offset fits
 793 * @map:        the erase map of the SPI NOR
 794 * @addr:       offset in the serial flash memory
 795 *
 796 * Return: a pointer to the spi_nor_erase_region struct, ERR_PTR(-errno)
 797 *         otherwise.
 798 */
 799static struct spi_nor_erase_region *
 800spi_nor_find_erase_region(const struct spi_nor_erase_map *map, u64 addr)
 801{
 802        struct spi_nor_erase_region *region = map->regions;
 803        u64 region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
 804        u64 region_end = region_start + region->size;
 805
 806        while (addr < region_start || addr >= region_end) {
 807                region = spi_nor_region_next(region);
 808                if (!region)
 809                        return ERR_PTR(-EINVAL);
 810
 811                region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
 812                region_end = region_start + region->size;
 813        }
 814
 815        return region;
 816}
 817
 818/**
 819 * spi_nor_init_erase_cmd() - initialize an erase command
 820 * @region:     pointer to a structure that describes a SPI NOR erase region
 821 * @erase:      pointer to a structure that describes a SPI NOR erase type
 822 *
 823 * Return: the pointer to the allocated erase command, ERR_PTR(-errno)
 824 *         otherwise.
 825 */
 826static struct spi_nor_erase_command *
 827spi_nor_init_erase_cmd(const struct spi_nor_erase_region *region,
 828                       const struct spi_nor_erase_type *erase)
 829{
 830        struct spi_nor_erase_command *cmd;
 831
 832        cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
 833        if (!cmd)
 834                return ERR_PTR(-ENOMEM);
 835
 836        INIT_LIST_HEAD(&cmd->list);
 837        cmd->opcode = erase->opcode;
 838        cmd->count = 1;
 839
 840        if (region->offset & SNOR_OVERLAID_REGION)
 841                cmd->size = region->size;
 842        else
 843                cmd->size = erase->size;
 844
 845        return cmd;
 846}
 847
 848/**
 849 * spi_nor_destroy_erase_cmd_list() - destroy erase command list
 850 * @erase_list: list of erase commands
 851 */
 852static void spi_nor_destroy_erase_cmd_list(struct list_head *erase_list)
 853{
 854        struct spi_nor_erase_command *cmd, *next;
 855
 856        list_for_each_entry_safe(cmd, next, erase_list, list) {
 857                list_del(&cmd->list);
 858                kfree(cmd);
 859        }
 860}
 861
 862/**
 863 * spi_nor_init_erase_cmd_list() - initialize erase command list
 864 * @nor:        pointer to a 'struct spi_nor'
 865 * @erase_list: list of erase commands to be executed once we validate that the
 866 *              erase can be performed
 867 * @addr:       offset in the serial flash memory
 868 * @len:        number of bytes to erase
 869 *
 870 * Builds the list of best fitted erase commands and verifies if the erase can
 871 * be performed.
 872 *
 873 * Return: 0 on success, -errno otherwise.
 874 */
 875static int spi_nor_init_erase_cmd_list(struct spi_nor *nor,
 876                                       struct list_head *erase_list,
 877                                       u64 addr, u32 len)
 878{
 879        const struct spi_nor_erase_map *map = &nor->erase_map;
 880        const struct spi_nor_erase_type *erase, *prev_erase = NULL;
 881        struct spi_nor_erase_region *region;
 882        struct spi_nor_erase_command *cmd = NULL;
 883        u64 region_end;
 884        int ret = -EINVAL;
 885
 886        region = spi_nor_find_erase_region(map, addr);
 887        if (IS_ERR(region))
 888                return PTR_ERR(region);
 889
 890        region_end = spi_nor_region_end(region);
 891
 892        while (len) {
 893                erase = spi_nor_find_best_erase_type(map, region, addr, len);
 894                if (!erase)
 895                        goto destroy_erase_cmd_list;
 896
 897                if (prev_erase != erase ||
 898                    region->offset & SNOR_OVERLAID_REGION) {
 899                        cmd = spi_nor_init_erase_cmd(region, erase);
 900                        if (IS_ERR(cmd)) {
 901                                ret = PTR_ERR(cmd);
 902                                goto destroy_erase_cmd_list;
 903                        }
 904
 905                        list_add_tail(&cmd->list, erase_list);
 906                } else {
 907                        cmd->count++;
 908                }
 909
 910                addr += cmd->size;
 911                len -= cmd->size;
 912
 913                if (len && addr >= region_end) {
 914                        region = spi_nor_region_next(region);
 915                        if (!region)
 916                                goto destroy_erase_cmd_list;
 917                        region_end = spi_nor_region_end(region);
 918                }
 919
 920                prev_erase = erase;
 921        }
 922
 923        return 0;
 924
 925destroy_erase_cmd_list:
 926        spi_nor_destroy_erase_cmd_list(erase_list);
 927        return ret;
 928}
 929
 930/**
 931 * spi_nor_erase_multi_sectors() - perform a non-uniform erase
 932 * @nor:        pointer to a 'struct spi_nor'
 933 * @addr:       offset in the serial flash memory
 934 * @len:        number of bytes to erase
 935 *
 936 * Build a list of best fitted erase commands and execute it once we validate
 937 * that the erase can be performed.
 938 *
 939 * Return: 0 on success, -errno otherwise.
 940 */
 941static int spi_nor_erase_multi_sectors(struct spi_nor *nor, u64 addr, u32 len)
 942{
 943        LIST_HEAD(erase_list);
 944        struct spi_nor_erase_command *cmd, *next;
 945        int ret;
 946
 947        ret = spi_nor_init_erase_cmd_list(nor, &erase_list, addr, len);
 948        if (ret)
 949                return ret;
 950
 951        list_for_each_entry_safe(cmd, next, &erase_list, list) {
 952                nor->erase_opcode = cmd->opcode;
 953                while (cmd->count) {
 954                        write_enable(nor);
 955
 956                        ret = spi_nor_erase_sector(nor, addr);
 957                        if (ret)
 958                                goto destroy_erase_cmd_list;
 959
 960                        addr += cmd->size;
 961                        cmd->count--;
 962
 963                        ret = spi_nor_wait_till_ready(nor);
 964                        if (ret)
 965                                goto destroy_erase_cmd_list;
 966                }
 967                list_del(&cmd->list);
 968                kfree(cmd);
 969        }
 970
 971        return 0;
 972
 973destroy_erase_cmd_list:
 974        spi_nor_destroy_erase_cmd_list(&erase_list);
 975        return ret;
 976}
 977
 978/*
  979 * Erase an address range on the nor chip.  The address range may span
  980 * one or more erase sectors.  Return an error if there is a problem erasing.
 981 */
 982static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
 983{
 984        struct spi_nor *nor = mtd_to_spi_nor(mtd);
 985        u32 addr, len;
 986        uint32_t rem;
 987        int ret;
 988
 989        dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr,
 990                        (long long)instr->len);
 991
 992        if (spi_nor_has_uniform_erase(nor)) {
 993                div_u64_rem(instr->len, mtd->erasesize, &rem);
 994                if (rem)
 995                        return -EINVAL;
 996        }
 997
 998        addr = instr->addr;
 999        len = instr->len;
1000
1001        ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_ERASE);
1002        if (ret)
1003                return ret;
1004
1005        /* whole-chip erase? */
1006        if (len == mtd->size && !(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) {
1007                unsigned long timeout;
1008
1009                write_enable(nor);
1010
1011                if (erase_chip(nor)) {
1012                        ret = -EIO;
1013                        goto erase_err;
1014                }
1015
1016                /*
1017                 * Scale the timeout linearly with the size of the flash, with
1018                 * a minimum calibrated to an old 2MB flash. We could try to
1019                 * pull these from CFI/SFDP, but these values should be good
1020                 * enough for now.
1021                 */
1022                timeout = max(CHIP_ERASE_2MB_READY_WAIT_JIFFIES,
1023                              CHIP_ERASE_2MB_READY_WAIT_JIFFIES *
1024                              (unsigned long)(mtd->size / SZ_2M));
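                /*
                 * For example, a 32 MiB part gets 16 times the 40 second
                 * base, i.e. roughly a ten minute ceiling; generous, but
                 * chip erase of large parts really can take minutes.
                 */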
1025                ret = spi_nor_wait_till_ready_with_timeout(nor, timeout);
1026                if (ret)
1027                        goto erase_err;
1028
1029        /* REVISIT in some cases we could speed up erasing large regions
1030         * by using SPINOR_OP_SE instead of SPINOR_OP_BE_4K.  We may have set up
1031         * to use "small sector erase", but that's not always optimal.
1032         */
1033
1034        /* "sector"-at-a-time erase */
1035        } else if (spi_nor_has_uniform_erase(nor)) {
1036                while (len) {
1037                        write_enable(nor);
1038
1039                        ret = spi_nor_erase_sector(nor, addr);
1040                        if (ret)
1041                                goto erase_err;
1042
1043                        addr += mtd->erasesize;
1044                        len -= mtd->erasesize;
1045
1046                        ret = spi_nor_wait_till_ready(nor);
1047                        if (ret)
1048                                goto erase_err;
1049                }
1050
1051        /* erase multiple sectors */
1052        } else {
1053                ret = spi_nor_erase_multi_sectors(nor, addr, len);
1054                if (ret)
1055                        goto erase_err;
1056        }
1057
1058        write_disable(nor);
1059
1060erase_err:
1061        spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_ERASE);
1062
1063        return ret;
1064}
1065
1066/* Write status register and ensure bits in mask match written values */
1067static int write_sr_and_check(struct spi_nor *nor, u8 status_new, u8 mask)
1068{
1069        int ret;
1070
1071        write_enable(nor);
1072        ret = write_sr(nor, status_new);
1073        if (ret)
1074                return ret;
1075
1076        ret = spi_nor_wait_till_ready(nor);
1077        if (ret)
1078                return ret;
1079
1080        ret = read_sr(nor);
1081        if (ret < 0)
1082                return ret;
1083
1084        return ((ret & mask) != (status_new & mask)) ? -EIO : 0;
1085}
1086
1087static void stm_get_locked_range(struct spi_nor *nor, u8 sr, loff_t *ofs,
1088                                 uint64_t *len)
1089{
1090        struct mtd_info *mtd = &nor->mtd;
1091        u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
1092        int shift = ffs(mask) - 1;
1093        int pow;
1094
1095        if (!(sr & mask)) {
1096                /* No protection */
1097                *ofs = 0;
1098                *len = 0;
1099        } else {
1100                pow = ((sr & mask) ^ mask) >> shift;
1101                *len = mtd->size >> pow;
1102                if (nor->flags & SNOR_F_HAS_SR_TB && sr & SR_TB)
1103                        *ofs = 0;
1104                else
1105                        *ofs = mtd->size - *len;
1106        }
1107}
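/*
 * Worked example for an 8 MiB part: with BP2..BP0 = 101b in the status
 * register, pow = (101b ^ 111b) = 010b = 2, so *len = 8 MiB >> 2 = 2 MiB.
 * With TB clear (or unsupported) the protected range is at the top of the
 * device, i.e. *ofs = 6 MiB; with TB set it starts at *ofs = 0.
 */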
1108
1109/*
1110 * Return 1 if the entire region is locked (if @locked is true) or unlocked (if
1111 * @locked is false); 0 otherwise
1112 */
1113static int stm_check_lock_status_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
1114                                    u8 sr, bool locked)
1115{
1116        loff_t lock_offs;
1117        uint64_t lock_len;
1118
1119        if (!len)
1120                return 1;
1121
1122        stm_get_locked_range(nor, sr, &lock_offs, &lock_len);
1123
1124        if (locked)
1125                /* Requested range is a sub-range of locked range */
1126                return (ofs + len <= lock_offs + lock_len) && (ofs >= lock_offs);
1127        else
1128                /* Requested range does not overlap with locked range */
1129                return (ofs >= lock_offs + lock_len) || (ofs + len <= lock_offs);
1130}
1131
1132static int stm_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
1133                            u8 sr)
1134{
1135        return stm_check_lock_status_sr(nor, ofs, len, sr, true);
1136}
1137
1138static int stm_is_unlocked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
1139                              u8 sr)
1140{
1141        return stm_check_lock_status_sr(nor, ofs, len, sr, false);
1142}
1143
1144/*
1145 * Lock a region of the flash. Compatible with ST Micro and similar flash.
1146 * Supports the block protection bits BP{0,1,2} in the status register
1147 * (SR). Does not support these features found in newer SR bitfields:
1148 *   - SEC: sector/block protect - only handle SEC=0 (block protect)
1149 *   - CMP: complement protect - only support CMP=0 (range is not complemented)
1150 *
1151 * Support for the following is provided conditionally for some flash:
1152 *   - TB: top/bottom protect
1153 *
1154 * Sample table portion for 8MB flash (Winbond w25q64fw):
1155 *
1156 *   SEC  |  TB   |  BP2  |  BP1  |  BP0  |  Prot Length  | Protected Portion
1157 *  --------------------------------------------------------------------------
1158 *    X   |   X   |   0   |   0   |   0   |  NONE         | NONE
1159 *    0   |   0   |   0   |   0   |   1   |  128 KB       | Upper 1/64
1160 *    0   |   0   |   0   |   1   |   0   |  256 KB       | Upper 1/32
1161 *    0   |   0   |   0   |   1   |   1   |  512 KB       | Upper 1/16
1162 *    0   |   0   |   1   |   0   |   0   |  1 MB         | Upper 1/8
1163 *    0   |   0   |   1   |   0   |   1   |  2 MB         | Upper 1/4
1164 *    0   |   0   |   1   |   1   |   0   |  4 MB         | Upper 1/2
1165 *    X   |   X   |   1   |   1   |   1   |  8 MB         | ALL
1166 *  ------|-------|-------|-------|-------|---------------|-------------------
1167 *    0   |   1   |   0   |   0   |   1   |  128 KB       | Lower 1/64
1168 *    0   |   1   |   0   |   1   |   0   |  256 KB       | Lower 1/32
1169 *    0   |   1   |   0   |   1   |   1   |  512 KB       | Lower 1/16
1170 *    0   |   1   |   1   |   0   |   0   |  1 MB         | Lower 1/8
1171 *    0   |   1   |   1   |   0   |   1   |  2 MB         | Lower 1/4
1172 *    0   |   1   |   1   |   1   |   0   |  4 MB         | Lower 1/2
1173 *
1174 * Returns negative on errors, 0 on success.
1175 */
1176static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
1177{
1178        struct mtd_info *mtd = &nor->mtd;
1179        int status_old, status_new;
1180        u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
1181        u8 shift = ffs(mask) - 1, pow, val;
1182        loff_t lock_len;
1183        bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
1184        bool use_top;
1185
1186        status_old = read_sr(nor);
1187        if (status_old < 0)
1188                return status_old;
1189
1190        /* If nothing in our range is unlocked, we don't need to do anything */
1191        if (stm_is_locked_sr(nor, ofs, len, status_old))
1192                return 0;
1193
1194        /* If anything below us is unlocked, we can't use 'bottom' protection */
1195        if (!stm_is_locked_sr(nor, 0, ofs, status_old))
1196                can_be_bottom = false;
1197
1198        /* If anything above us is unlocked, we can't use 'top' protection */
1199        if (!stm_is_locked_sr(nor, ofs + len, mtd->size - (ofs + len),
1200                                status_old))
1201                can_be_top = false;
1202
1203        if (!can_be_bottom && !can_be_top)
1204                return -EINVAL;
1205
1206        /* Prefer top, if both are valid */
1207        use_top = can_be_top;
1208
1209        /* lock_len: length of region that should end up locked */
1210        if (use_top)
1211                lock_len = mtd->size - ofs;
1212        else
1213                lock_len = ofs + len;
1214
1215        /*
1216         * Need smallest pow such that:
1217         *
1218         *   1 / (2^pow) <= (len / size)
1219         *
1220         * so (assuming power-of-2 size) we do:
1221         *
1222         *   pow = ceil(log2(size / len)) = log2(size) - floor(log2(len))
1223         */
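        /*
         * Worked example for an 8 MiB part: locking the upper 2 MiB gives
         * pow = ilog2(8 MiB) - ilog2(2 MiB) = 23 - 21 = 2, so
         * val = mask - (2 << shift) = 0x1c - 0x08 = 0x14, i.e. BP2..BP0 =
         * 101b, matching the "Upper 1/4" row of the table above.
         */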
1224        pow = ilog2(mtd->size) - ilog2(lock_len);
1225        val = mask - (pow << shift);
1226        if (val & ~mask)
1227                return -EINVAL;
1228        /* Don't "lock" with no region! */
1229        if (!(val & mask))
1230                return -EINVAL;
1231
1232        status_new = (status_old & ~mask & ~SR_TB) | val;
1233
1234        /* Disallow further writes if WP pin is asserted */
1235        status_new |= SR_SRWD;
1236
1237        if (!use_top)
1238                status_new |= SR_TB;
1239
1240        /* Don't bother if they're the same */
1241        if (status_new == status_old)
1242                return 0;
1243
1244        /* Only modify protection if it will not unlock other areas */
1245        if ((status_new & mask) < (status_old & mask))
1246                return -EINVAL;
1247
1248        return write_sr_and_check(nor, status_new, mask);
1249}
1250
1251/*
1252 * Unlock a region of the flash. See stm_lock() for more info
1253 *
1254 * Returns negative on errors, 0 on success.
1255 */
1256static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
1257{
1258        struct mtd_info *mtd = &nor->mtd;
1259        int status_old, status_new;
1260        u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
1261        u8 shift = ffs(mask) - 1, pow, val;
1262        loff_t lock_len;
1263        bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
1264        bool use_top;
1265
1266        status_old = read_sr(nor);
1267        if (status_old < 0)
1268                return status_old;
1269
1270        /* If nothing in our range is locked, we don't need to do anything */
1271        if (stm_is_unlocked_sr(nor, ofs, len, status_old))
1272                return 0;
1273
1274        /* If anything below us is locked, we can't use 'top' protection */
1275        if (!stm_is_unlocked_sr(nor, 0, ofs, status_old))
1276                can_be_top = false;
1277
1278        /* If anything above us is locked, we can't use 'bottom' protection */
1279        if (!stm_is_unlocked_sr(nor, ofs + len, mtd->size - (ofs + len),
1280                                status_old))
1281                can_be_bottom = false;
1282
1283        if (!can_be_bottom && !can_be_top)
1284                return -EINVAL;
1285
1286        /* Prefer top, if both are valid */
1287        use_top = can_be_top;
1288
1289        /* lock_len: length of region that should remain locked */
1290        if (use_top)
1291                lock_len = mtd->size - (ofs + len);
1292        else
1293                lock_len = ofs;
1294
1295        /*
1296         * Need largest pow such that:
1297         *
1298         *   1 / (2^pow) >= (len / size)
1299         *
1300         * so (assuming power-of-2 size) we do:
1301         *
1302         *   pow = floor(log2(size / len)) = log2(size) - ceil(log2(len))
1303         */
1304        pow = ilog2(mtd->size) - order_base_2(lock_len);
1305        if (lock_len == 0) {
1306                val = 0; /* fully unlocked */
1307        } else {
1308                val = mask - (pow << shift);
1309                /* Some power-of-two sizes are not supported */
1310                if (val & ~mask)
1311                        return -EINVAL;
1312        }
1313
1314        status_new = (status_old & ~mask & ~SR_TB) | val;
1315
1316        /* Don't protect status register if we're fully unlocked */
1317        if (lock_len == 0)
1318                status_new &= ~SR_SRWD;
1319
1320        if (!use_top)
1321                status_new |= SR_TB;
1322
1323        /* Don't bother if they're the same */
1324        if (status_new == status_old)
1325                return 0;
1326
1327        /* Only modify protection if it will not lock other areas */
1328        if ((status_new & mask) > (status_old & mask))
1329                return -EINVAL;
1330
1331        return write_sr_and_check(nor, status_new, mask);
1332}
1333
1334/*
1335 * Check if a region of the flash is (completely) locked. See stm_lock() for
1336 * more info.
1337 *
1338 * Returns 1 if entire region is locked, 0 if any portion is unlocked, and
1339 * negative on errors.
1340 */
1341static int stm_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len)
1342{
1343        int status;
1344
1345        status = read_sr(nor);
1346        if (status < 0)
1347                return status;
1348
1349        return stm_is_locked_sr(nor, ofs, len, status);
1350}
1351
1352static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1353{
1354        struct spi_nor *nor = mtd_to_spi_nor(mtd);
1355        int ret;
1356
1357        ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_LOCK);
1358        if (ret)
1359                return ret;
1360
1361        ret = nor->flash_lock(nor, ofs, len);
1362
1363        spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_UNLOCK);
1364        return ret;
1365}
1366
1367static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1368{
1369        struct spi_nor *nor = mtd_to_spi_nor(mtd);
1370        int ret;
1371
1372        ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_UNLOCK);
1373        if (ret)
1374                return ret;
1375
1376        ret = nor->flash_unlock(nor, ofs, len);
1377
1378        spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_LOCK);
1379        return ret;
1380}
1381
1382static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1383{
1384        struct spi_nor *nor = mtd_to_spi_nor(mtd);
1385        int ret;
1386
1387        ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_UNLOCK);
1388        if (ret)
1389                return ret;
1390
1391        ret = nor->flash_is_locked(nor, ofs, len);
1392
1393        spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_LOCK);
1394        return ret;
1395}
1396
1397/*
 1398 * Write the Status Register and the Configuration Register with 2 bytes:
 1399 * the first byte will be written to the status register, while the
 1400 * second byte will be written to the configuration register.
 1401 * Returns a negative errno if an error occurred.
1402 */
1403static int write_sr_cr(struct spi_nor *nor, u8 *sr_cr)
1404{
1405        int ret;
1406
1407        write_enable(nor);
1408
1409        ret = nor->write_reg(nor, SPINOR_OP_WRSR, sr_cr, 2);
1410        if (ret < 0) {
1411                dev_err(nor->dev,
1412                        "error while writing configuration register\n");
1413                return -EINVAL;
1414        }
1415
1416        ret = spi_nor_wait_till_ready(nor);
1417        if (ret) {
1418                dev_err(nor->dev,
1419                        "timeout while writing configuration register\n");
1420                return ret;
1421        }
1422
1423        return 0;
1424}
1425
1426/**
1427 * macronix_quad_enable() - set QE bit in Status Register.
1428 * @nor:        pointer to a 'struct spi_nor'
1429 *
1430 * Set the Quad Enable (QE) bit in the Status Register.
1431 *
1432 * bit 6 of the Status Register is the QE bit for Macronix like QSPI memories.
1433 *
1434 * Return: 0 on success, -errno otherwise.
1435 */
1436static int macronix_quad_enable(struct spi_nor *nor)
1437{
1438        int ret, val;
1439
1440        val = read_sr(nor);
1441        if (val < 0)
1442                return val;
1443        if (val & SR_QUAD_EN_MX)
1444                return 0;
1445
1446        write_enable(nor);
1447
1448        write_sr(nor, val | SR_QUAD_EN_MX);
1449
1450        ret = spi_nor_wait_till_ready(nor);
1451        if (ret)
1452                return ret;
1453
1454        ret = read_sr(nor);
1455        if (!(ret > 0 && (ret & SR_QUAD_EN_MX))) {
1456                dev_err(nor->dev, "Macronix Quad bit not set\n");
1457                return -EINVAL;
1458        }
1459
1460        return 0;
1461}
1462
1463/**
 1464 * spansion_quad_enable() - set QE bit in Configuration Register.
1465 * @nor:        pointer to a 'struct spi_nor'
1466 *
1467 * Set the Quad Enable (QE) bit in the Configuration Register.
1468 * This function is kept for legacy purpose because it has been used for a
1469 * long time without anybody complaining but it should be considered as
1470 * deprecated and maybe buggy.
1471 * First, this function doesn't care about the previous values of the Status
1472 * and Configuration Registers when it sets the QE bit (bit 1) in the
1473 * Configuration Register: all other bits are cleared, which may have unwanted
1474 * side effects like removing some block protections.
 1475 * Secondly, it uses the Read Configuration Register (35h) instruction even
 1476 * though a few very old memories don't support it. If a pull-up resistor is
 1477 * present on the MISO/IO1 line, we might still be able to pass the
 1478 * "read back" test because the QSPI memory doesn't recognize the command and
 1479 * leaves the MISO/IO1 line state unchanged, hence read_cr() returns 0xFF.
1480 *
1481 * bit 1 of the Configuration Register is the QE bit for Spansion like QSPI
1482 * memories.
1483 *
1484 * Return: 0 on success, -errno otherwise.
1485 */
1486static int spansion_quad_enable(struct spi_nor *nor)
1487{
1488        u8 sr_cr[2] = {0, CR_QUAD_EN_SPAN};
1489        int ret;
1490
1491        ret = write_sr_cr(nor, sr_cr);
1492        if (ret)
1493                return ret;
1494
1495        /* read back and check it */
1496        ret = read_cr(nor);
1497        if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) {
1498                dev_err(nor->dev, "Spansion Quad bit not set\n");
1499                return -EINVAL;
1500        }
1501
1502        return 0;
1503}
1504
1505/**
1506 * spansion_no_read_cr_quad_enable() - set QE bit in Configuration Register.
1507 * @nor:        pointer to a 'struct spi_nor'
1508 *
1509 * Set the Quad Enable (QE) bit in the Configuration Register.
1510 * This function should be used with QSPI memories not supporting the Read
1511 * Configuration Register (35h) instruction.
1512 *
1513 * bit 1 of the Configuration Register is the QE bit for Spansion like QSPI
1514 * memories.
1515 *
1516 * Return: 0 on success, -errno otherwise.
1517 */
1518static int spansion_no_read_cr_quad_enable(struct spi_nor *nor)
1519{
1520        u8 sr_cr[2];
1521        int ret;
1522
1523        /* Keep the current value of the Status Register. */
1524        ret = read_sr(nor);
1525        if (ret < 0) {
1526                dev_err(nor->dev, "error while reading status register\n");
1527                return -EINVAL;
1528        }
1529        sr_cr[0] = ret;
1530        sr_cr[1] = CR_QUAD_EN_SPAN;
1531
1532        return write_sr_cr(nor, sr_cr);
1533}
1534
1535/**
1536 * spansion_read_cr_quad_enable() - set QE bit in Configuration Register.
1537 * @nor:        pointer to a 'struct spi_nor'
1538 *
1539 * Set the Quad Enable (QE) bit in the Configuration Register.
1540 * This function should be used with QSPI memories supporting the Read
1541 * Configuration Register (35h) instruction.
1542 *
1543 * bit 1 of the Configuration Register is the QE bit for Spansion like QSPI
1544 * memories.
1545 *
1546 * Return: 0 on success, -errno otherwise.
1547 */
1548static int spansion_read_cr_quad_enable(struct spi_nor *nor)
1549{
1550        struct device *dev = nor->dev;
1551        u8 sr_cr[2];
1552        int ret;
1553
1554        /* Check current Quad Enable bit value. */
1555        ret = read_cr(nor);
1556        if (ret < 0) {
1557                dev_err(dev, "error while reading configuration register\n");
1558                return -EINVAL;
1559        }
1560
1561        if (ret & CR_QUAD_EN_SPAN)
1562                return 0;
1563
1564        sr_cr[1] = ret | CR_QUAD_EN_SPAN;
1565
1566        /* Keep the current value of the Status Register. */
1567        ret = read_sr(nor);
1568        if (ret < 0) {
1569                dev_err(dev, "error while reading status register\n");
1570                return -EINVAL;
1571        }
1572        sr_cr[0] = ret;
1573
1574        ret = write_sr_cr(nor, sr_cr);
1575        if (ret)
1576                return ret;
1577
1578        /* Read back and check it. */
1579        ret = read_cr(nor);
1580        if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) {
1581                dev_err(nor->dev, "Spansion Quad bit not set\n");
1582                return -EINVAL;
1583        }
1584
1585        return 0;
1586}
1587
1588/**
1589 * sr2_bit7_quad_enable() - set QE bit in Status Register 2.
1590 * @nor:        pointer to a 'struct spi_nor'
1591 *
1592 * Set the Quad Enable (QE) bit in the Status Register 2.
1593 *
1594 * This is one of the procedures to set the QE bit described in the SFDP
1595 * (JESD216 rev B) specification but no manufacturer using this procedure has
1596 * been identified yet, hence the name of the function.
1597 *
1598 * Return: 0 on success, -errno otherwise.
1599 */
1600static int sr2_bit7_quad_enable(struct spi_nor *nor)
1601{
1602        u8 sr2;
1603        int ret;
1604
1605        /* Check current Quad Enable bit value. */
1606        ret = nor->read_reg(nor, SPINOR_OP_RDSR2, &sr2, 1);
1607        if (ret)
1608                return ret;
1609        if (sr2 & SR2_QUAD_EN_BIT7)
1610                return 0;
1611
1612        /* Update the Quad Enable bit. */
1613        sr2 |= SR2_QUAD_EN_BIT7;
1614
1615        write_enable(nor);
1616
1617        ret = nor->write_reg(nor, SPINOR_OP_WRSR2, &sr2, 1);
1618        if (ret < 0) {
1619                dev_err(nor->dev, "error while writing status register 2\n");
1620                return -EINVAL;
1621        }
1622
1623        ret = spi_nor_wait_till_ready(nor);
1624        if (ret < 0) {
1625                dev_err(nor->dev, "timeout while writing status register 2\n");
1626                return ret;
1627        }
1628
1629        /* Read back and check it. */
1630        ret = nor->read_reg(nor, SPINOR_OP_RDSR2, &sr2, 1);
1631        if (!(ret > 0 && (sr2 & SR2_QUAD_EN_BIT7))) {
1632                dev_err(nor->dev, "SR2 Quad bit not set\n");
1633                return -EINVAL;
1634        }
1635
1636        return 0;
1637}
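
/*
 * Note (added for clarity): the *_quad_enable() helpers above are alternative
 * procedures for the same job; one of them ends up installed as the flash's
 * quad_enable() hook, as selected from the SFDP tables or overridden by a
 * flash_info entry (the gd25q256 entry below uses macronix_quad_enable, for
 * instance), so each memory gets the Quad Enable procedure its datasheet
 * mandates.
 */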
1638
1639/**
1640 * spi_nor_clear_sr_bp() - clear the Status Register Block Protection bits.
1641 * @nor:        pointer to a 'struct spi_nor'
1642 *
1643 * Read-modify-write function that clears the Block Protection bits from the
1644 * Status Register without affecting other bits.
1645 *
1646 * Return: 0 on success, -errno otherwise.
1647 */
1648static int spi_nor_clear_sr_bp(struct spi_nor *nor)
1649{
1650        int ret;
1651        u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
1652
1653        ret = read_sr(nor);
1654        if (ret < 0) {
1655                dev_err(nor->dev, "error while reading status register\n");
1656                return ret;
1657        }
1658
1659        write_enable(nor);
1660
1661        ret = write_sr(nor, ret & ~mask);
1662        if (ret) {
1663                dev_err(nor->dev, "write to status register failed\n");
1664                return ret;
1665        }
1666
1667        ret = spi_nor_wait_till_ready(nor);
1668        if (ret)
1669                dev_err(nor->dev, "timeout while writing status register\n");
1670        return ret;
1671}
1672
1673/**
1674 * spi_nor_spansion_clear_sr_bp() - clear the Status Register Block Protection
1675 * bits on spansion flashes.
1676 * @nor:        pointer to a 'struct spi_nor'
1677 *
1678 * Read-modify-write function that clears the Block Protection bits from the
1679 * Status Register without affecting other bits. The function is tightly
1680 * coupled with the spansion_quad_enable() function. Both assume that the Write
1681 * Register with 16 bits, together with the Read Configuration Register (35h)
1682 * instructions are supported.
1683 *
1684 * Return: 0 on success, -errno otherwise.
1685 */
1686static int spi_nor_spansion_clear_sr_bp(struct spi_nor *nor)
1687{
1688        int ret;
1689        u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
1690        u8 sr_cr[2] = {0};
1691
1692        /* Check current Quad Enable bit value. */
1693        ret = read_cr(nor);
1694        if (ret < 0) {
1695                dev_err(nor->dev,
1696                        "error while reading configuration register\n");
1697                return ret;
1698        }
1699
1700        /*
1701         * When the configuration register Quad Enable bit is one, only the
1702         * Write Status (01h) command with two data bytes may be used.
1703         */
1704        if (ret & CR_QUAD_EN_SPAN) {
1705                sr_cr[1] = ret;
1706
1707                ret = read_sr(nor);
1708                if (ret < 0) {
1709                        dev_err(nor->dev,
1710                                "error while reading status register\n");
1711                        return ret;
1712                }
1713                sr_cr[0] = ret & ~mask;
1714
1715                ret = write_sr_cr(nor, sr_cr);
1716                if (ret)
1717                        dev_err(nor->dev, "16-bit write register failed\n");
1718                return ret;
1719        }
1720
1721        /*
1722         * If the Quad Enable bit is zero, use the Write Status (01h) command
1723         * with one data byte.
1724         */
1725        return spi_nor_clear_sr_bp(nor);
1726}
1727
1728/* Used when the "_ext_id" is two bytes at most */
1729#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags)      \
1730                .id = {                                                 \
1731                        ((_jedec_id) >> 16) & 0xff,                     \
1732                        ((_jedec_id) >> 8) & 0xff,                      \
1733                        (_jedec_id) & 0xff,                             \
1734                        ((_ext_id) >> 8) & 0xff,                        \
1735                        (_ext_id) & 0xff,                               \
1736                        },                                              \
1737                .id_len = (!(_jedec_id) ? 0 : (3 + ((_ext_id) ? 2 : 0))),       \
1738                .sector_size = (_sector_size),                          \
1739                .n_sectors = (_n_sectors),                              \
1740                .page_size = 256,                                       \
1741                .flags = (_flags),
1742
1743#define INFO6(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags)     \
1744                .id = {                                                 \
1745                        ((_jedec_id) >> 16) & 0xff,                     \
1746                        ((_jedec_id) >> 8) & 0xff,                      \
1747                        (_jedec_id) & 0xff,                             \
1748                        ((_ext_id) >> 16) & 0xff,                       \
1749                        ((_ext_id) >> 8) & 0xff,                        \
1750                        (_ext_id) & 0xff,                               \
1751                        },                                              \
1752                .id_len = 6,                                            \
1753                .sector_size = (_sector_size),                          \
1754                .n_sectors = (_n_sectors),                              \
1755                .page_size = 256,                                       \
1756                .flags = (_flags),
1757
1758#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_width, _flags)   \
1759                .sector_size = (_sector_size),                          \
1760                .n_sectors = (_n_sectors),                              \
1761                .page_size = (_page_size),                              \
1762                .addr_width = (_addr_width),                            \
1763                .flags = (_flags),
1764
1765#define S3AN_INFO(_jedec_id, _n_sectors, _page_size)                    \
1766                .id = {                                                 \
1767                        ((_jedec_id) >> 16) & 0xff,                     \
1768                        ((_jedec_id) >> 8) & 0xff,                      \
1769                        (_jedec_id) & 0xff                              \
1770                        },                                              \
1771                .id_len = 3,                                            \
1772                .sector_size = (8*_page_size),                          \
1773                .n_sectors = (_n_sectors),                              \
1774                .page_size = _page_size,                                \
1775                .addr_width = 3,                                        \
1776                .flags = SPI_NOR_NO_FR | SPI_S3AN,
1777
1778static int
1779is25lp256_post_bfpt_fixups(struct spi_nor *nor,
1780                           const struct sfdp_parameter_header *bfpt_header,
1781                           const struct sfdp_bfpt *bfpt,
1782                           struct spi_nor_flash_parameter *params)
1783{
1784        /*
1785         * IS25LP256 supports 4B opcodes, but the BFPT advertises a
1786         * BFPT_DWORD1_ADDRESS_BYTES_3_ONLY address width.
1787         * Overwrite the address width advertised by the BFPT.
1788         */
1789        if ((bfpt->dwords[BFPT_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) ==
1790                BFPT_DWORD1_ADDRESS_BYTES_3_ONLY)
1791                nor->addr_width = 4;
1792
1793        return 0;
1794}
1795
1796static struct spi_nor_fixups is25lp256_fixups = {
1797        .post_bfpt = is25lp256_post_bfpt_fixups,
1798};
1799
1800static int
1801mx25l25635_post_bfpt_fixups(struct spi_nor *nor,
1802                            const struct sfdp_parameter_header *bfpt_header,
1803                            const struct sfdp_bfpt *bfpt,
1804                            struct spi_nor_flash_parameter *params)
1805{
1806        /*
1807         * MX25L25635F supports 4B opcodes but MX25L25635E does not.
1808         * Unfortunately, Macronix has re-used the same JEDEC ID for both
1809         * variants which prevents us from defining a new entry in the parts
1810         * table.
1811         * We need a way to differentiate MX25L25635E and MX25L25635F, and it
1812         * seems that the F version advertises support for Fast Read 4-4-4 in
1813         * its BFPT table.
1814         */
1815        if (bfpt->dwords[BFPT_DWORD(5)] & BFPT_DWORD5_FAST_READ_4_4_4)
1816                nor->flags |= SNOR_F_4B_OPCODES;
1817
1818        return 0;
1819}
1820
1821static struct spi_nor_fixups mx25l25635_fixups = {
1822        .post_bfpt = mx25l25635_post_bfpt_fixups,
1823};
1824
1825/* NOTE: double check command sets and memory organization when you add
1826 * more nor chips.  This current list focuses on newer chips, which
1827 * have been converging on command sets that include the JEDEC ID.
1828 *
1829 * All newly added entries should describe *hardware* and should use SECT_4K
1830 * (or SECT_4K_PMC) if the hardware supports erasing 4 KiB sectors. For usage
1831 * scenarios excluding small sectors there is a config option that can be
1832 * disabled: CONFIG_MTD_SPI_NOR_USE_4K_SECTORS.
1833 * For historical (and compatibility) reasons (before we got the above config)
1834 * some old entries may be missing the 4K flag.
1835 */
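
/*
 * Purely illustrative, not a real table entry: a hypothetical INFO() based
 * entry such as
 *
 *        { "example25q32", INFO(0x123456, 0, 64 * 1024, 64, SECT_4K) },
 *
 * expands, following the macro definition above, to roughly:
 *
 *        {
 *                "example25q32",
 *                .id = { 0x12, 0x34, 0x56, 0x00, 0x00 },
 *                .id_len = 3,
 *                .sector_size = 64 * 1024,
 *                .n_sectors = 64,
 *                .page_size = 256,
 *                .flags = SECT_4K,
 *        },
 *
 * where .id_len is 3 because the extension ID is zero; the flash size encoded
 * by such an entry is sector_size * n_sectors, i.e. 4 MiB here.
 */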
1836static const struct flash_info spi_nor_ids[] = {
1837        /* Atmel -- some are (confusingly) marketed as "DataFlash" */
1838        { "at25fs010",  INFO(0x1f6601, 0, 32 * 1024,   4, SECT_4K) },
1839        { "at25fs040",  INFO(0x1f6604, 0, 64 * 1024,   8, SECT_4K) },
1840
1841        { "at25df041a", INFO(0x1f4401, 0, 64 * 1024,   8, SECT_4K) },
1842        { "at25df321",  INFO(0x1f4700, 0, 64 * 1024,  64, SECT_4K) },
1843        { "at25df321a", INFO(0x1f4701, 0, 64 * 1024,  64, SECT_4K) },
1844        { "at25df641",  INFO(0x1f4800, 0, 64 * 1024, 128, SECT_4K) },
1845
1846        { "at26f004",   INFO(0x1f0400, 0, 64 * 1024,  8, SECT_4K) },
1847        { "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16, SECT_4K) },
1848        { "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) },
1849        { "at26df321",  INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) },
1850
1851        { "at45db081d", INFO(0x1f2500, 0, 64 * 1024, 16, SECT_4K) },
1852
1853        /* EON -- en25xxx */
1854        { "en25f32",    INFO(0x1c3116, 0, 64 * 1024,   64, SECT_4K) },
1855        { "en25p32",    INFO(0x1c2016, 0, 64 * 1024,   64, 0) },
1856        { "en25q32b",   INFO(0x1c3016, 0, 64 * 1024,   64, 0) },
1857        { "en25p64",    INFO(0x1c2017, 0, 64 * 1024,  128, 0) },
1858        { "en25q64",    INFO(0x1c3017, 0, 64 * 1024,  128, SECT_4K) },
1859        { "en25q80a",   INFO(0x1c3014, 0, 64 * 1024,   16,
1860                        SECT_4K | SPI_NOR_DUAL_READ) },
1861        { "en25qh32",   INFO(0x1c7016, 0, 64 * 1024,   64, 0) },
1862        { "en25qh64",   INFO(0x1c7017, 0, 64 * 1024,  128,
1863                        SECT_4K | SPI_NOR_DUAL_READ) },
1864        { "en25qh128",  INFO(0x1c7018, 0, 64 * 1024,  256, 0) },
1865        { "en25qh256",  INFO(0x1c7019, 0, 64 * 1024,  512, 0) },
1866        { "en25s64",    INFO(0x1c3817, 0, 64 * 1024,  128, SECT_4K) },
1867
1868        /* ESMT */
1869        { "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_HAS_LOCK) },
1870        { "f25l32qa", INFO(0x8c4116, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_HAS_LOCK) },
1871        { "f25l64qa", INFO(0x8c4117, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_HAS_LOCK) },
1872
1873        /* Everspin */
1874        { "mr25h128", CAT25_INFO( 16 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
1875        { "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
1876        { "mr25h10",  CAT25_INFO(128 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
1877        { "mr25h40",  CAT25_INFO(512 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
1878
1879        /* Fujitsu */
1880        { "mb85rs1mt", INFO(0x047f27, 0, 128 * 1024, 1, SPI_NOR_NO_ERASE) },
1881
1882        /* GigaDevice */
1883        {
1884                "gd25q16", INFO(0xc84015, 0, 64 * 1024,  32,
1885                        SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
1886                        SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
1887        },
1888        {
1889                "gd25q32", INFO(0xc84016, 0, 64 * 1024,  64,
1890                        SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
1891                        SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
1892        },
1893        {
1894                "gd25lq32", INFO(0xc86016, 0, 64 * 1024, 64,
1895                        SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
1896                        SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
1897        },
1898        {
1899                "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128,
1900                        SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
1901                        SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
1902        },
1903        {
1904                "gd25lq64c", INFO(0xc86017, 0, 64 * 1024, 128,
1905                        SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
1906                        SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
1907        },
1908        {
1909                "gd25q128", INFO(0xc84018, 0, 64 * 1024, 256,
1910                        SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
1911                        SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
1912        },
1913        {
1914                "gd25q256", INFO(0xc84019, 0, 64 * 1024, 512,
1915                        SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
1916                        SPI_NOR_4B_OPCODES | SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
1917                        .quad_enable = macronix_quad_enable,
1918        },
1919
1920        /* Intel/Numonyx -- xxxs33b */
1921        { "160s33b",  INFO(0x898911, 0, 64 * 1024,  32, 0) },
1922        { "320s33b",  INFO(0x898912, 0, 64 * 1024,  64, 0) },
1923        { "640s33b",  INFO(0x898913, 0, 64 * 1024, 128, 0) },
1924
1925        /* ISSI */
1926        { "is25cd512",  INFO(0x7f9d20, 0, 32 * 1024,   2, SECT_4K) },
1927        { "is25lq040b", INFO(0x9d4013, 0, 64 * 1024,   8,
1928                        SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
1929        { "is25lp016d", INFO(0x9d6015, 0, 64 * 1024,  32,
1930                        SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
1931        { "is25lp080d", INFO(0x9d6014, 0, 64 * 1024,  16,
1932                        SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
1933        { "is25lp032",  INFO(0x9d6016, 0, 64 * 1024,  64,
1934                        SECT_4K | SPI_NOR_DUAL_READ) },
1935        { "is25lp064",  INFO(0x9d6017, 0, 64 * 1024, 128,
1936                        SECT_4K | SPI_NOR_DUAL_READ) },
1937        { "is25lp128",  INFO(0x9d6018, 0, 64 * 1024, 256,
1938                        SECT_4K | SPI_NOR_DUAL_READ) },
1939        { "is25lp256",  INFO(0x9d6019, 0, 64 * 1024, 512,
1940                        SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
1941                        SPI_NOR_4B_OPCODES)
1942                        .fixups = &is25lp256_fixups },
1943        { "is25wp032",  INFO(0x9d7016, 0, 64 * 1024,  64,
1944                        SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
1945        { "is25wp064",  INFO(0x9d7017, 0, 64 * 1024, 128,
1946                        SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
1947        { "is25wp128",  INFO(0x9d7018, 0, 64 * 1024, 256,
1948                        SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
1949
1950        /* Macronix */
1951        { "mx25l512e",   INFO(0xc22010, 0, 64 * 1024,   1, SECT_4K) },
1952        { "mx25l2005a",  INFO(0xc22012, 0, 64 * 1024,   4, SECT_4K) },
1953        { "mx25l4005a",  INFO(0xc22013, 0, 64 * 1024,   8, SECT_4K) },
1954        { "mx25l8005",   INFO(0xc22014, 0, 64 * 1024,  16, 0) },
1955        { "mx25l1606e",  INFO(0xc22015, 0, 64 * 1024,  32, SECT_4K) },
1956        { "mx25l3205d",  INFO(0xc22016, 0, 64 * 1024,  64, SECT_4K) },
1957        { "mx25l3255e",  INFO(0xc29e16, 0, 64 * 1024,  64, SECT_4K) },
1958        { "mx25l6405d",  INFO(0xc22017, 0, 64 * 1024, 128, SECT_4K) },
1959        { "mx25u2033e",  INFO(0xc22532, 0, 64 * 1024,   4, SECT_4K) },
1960        { "mx25u3235f",  INFO(0xc22536, 0, 64 * 1024,  64,
1961                         SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
1962        { "mx25u4035",   INFO(0xc22533, 0, 64 * 1024,   8, SECT_4K) },
1963        { "mx25u8035",   INFO(0xc22534, 0, 64 * 1024,  16, SECT_4K) },
1964        { "mx25u6435f",  INFO(0xc22537, 0, 64 * 1024, 128, SECT_4K) },
1965        { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
1966        { "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
1967        { "mx25u12835f", INFO(0xc22538, 0, 64 * 1024, 256,
1968                         SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
1969        { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512,
1970                         SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
1971                         .fixups = &mx25l25635_fixups },
1972        { "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_4B_OPCODES) },
1973        { "mx25v8035f",  INFO(0xc22314, 0, 64 * 1024,  16,
1974                         SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
1975        { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
1976        { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
1977        { "mx66u51235f", INFO(0xc2253a, 0, 64 * 1024, 1024, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
1978        { "mx66l1g45g",  INFO(0xc2201b, 0, 64 * 1024, 2048, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
1979        { "mx66l1g55g",  INFO(0xc2261b, 0, 64 * 1024, 2048, SPI_NOR_QUAD_READ) },
1980
1981        /* Micron <--> ST Micro */
1982        { "n25q016a",    INFO(0x20bb15, 0, 64 * 1024,   32, SECT_4K | SPI_NOR_QUAD_READ) },
1983        { "n25q032",     INFO(0x20ba16, 0, 64 * 1024,   64, SPI_NOR_QUAD_READ) },
1984        { "n25q032a",    INFO(0x20bb16, 0, 64 * 1024,   64, SPI_NOR_QUAD_READ) },
1985        { "n25q064",     INFO(0x20ba17, 0, 64 * 1024,  128, SECT_4K | SPI_NOR_QUAD_READ) },
1986        { "n25q064a",    INFO(0x20bb17, 0, 64 * 1024,  128, SECT_4K | SPI_NOR_QUAD_READ) },
1987        { "n25q128a11",  INFO(0x20bb18, 0, 64 * 1024,  256, SECT_4K | SPI_NOR_QUAD_READ) },
1988        { "n25q128a13",  INFO(0x20ba18, 0, 64 * 1024,  256, SECT_4K | SPI_NOR_QUAD_READ) },
1989        { "n25q256a",    INFO(0x20ba19, 0, 64 * 1024,  512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
1990        { "n25q256ax1",  INFO(0x20bb19, 0, 64 * 1024,  512, SECT_4K | SPI_NOR_QUAD_READ) },
1991        { "n25q512a",    INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
1992        { "n25q512ax3",  INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
1993        { "n25q00",      INFO(0x20ba21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
1994        { "n25q00a",     INFO(0x20bb21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
1995        { "mt25ql02g",   INFO(0x20ba22, 0, 64 * 1024, 4096,
1996                              SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
1997                              NO_CHIP_ERASE) },
1998        { "mt25qu02g",   INFO(0x20bb22, 0, 64 * 1024, 4096, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
1999
2000        /* Micron */
2001        {
2002                "mt35xu512aba", INFO(0x2c5b1a, 0, 128 * 1024, 512,
2003                        SECT_4K | USE_FSR | SPI_NOR_OCTAL_READ |
2004                        SPI_NOR_4B_OPCODES)
2005        },
2006
2007        /* PMC */
2008        { "pm25lv512",   INFO(0,        0, 32 * 1024,    2, SECT_4K_PMC) },
2009        { "pm25lv010",   INFO(0,        0, 32 * 1024,    4, SECT_4K_PMC) },
2010        { "pm25lq032",   INFO(0x7f9d46, 0, 64 * 1024,   64, SECT_4K) },
2011
2012        /* Spansion/Cypress -- single (large) sector size only, at least
2013         * for the chips listed here (without boot sectors).
2014         */
2015        { "s25sl032p",  INFO(0x010215, 0x4d00,  64 * 1024,  64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
2016        { "s25sl064p",  INFO(0x010216, 0x4d00,  64 * 1024, 128, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
2017        { "s25fl128s0", INFO6(0x012018, 0x4d0080, 256 * 1024, 64,
2018                        SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
2019        { "s25fl128s1", INFO6(0x012018, 0x4d0180, 64 * 1024, 256,
2020                        SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
2021        { "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, USE_CLSR) },
2022        { "s25fl256s1", INFO(0x010219, 0x4d01,  64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
2023        { "s25fl512s",  INFO6(0x010220, 0x4d0080, 256 * 1024, 256,
2024                        SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
2025                        SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | USE_CLSR) },
2026        { "s25fs512s",  INFO6(0x010220, 0x4d0081, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
2027        { "s70fl01gs",  INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) },
2028        { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024,  64, 0) },
2029        { "s25sl12801", INFO(0x012018, 0x0301,  64 * 1024, 256, 0) },
2030        { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024,  64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
2031        { "s25fl129p1", INFO(0x012018, 0x4d01,  64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
2032        { "s25sl004a",  INFO(0x010212,      0,  64 * 1024,   8, 0) },
2033        { "s25sl008a",  INFO(0x010213,      0,  64 * 1024,  16, 0) },
2034        { "s25sl016a",  INFO(0x010214,      0,  64 * 1024,  32, 0) },
2035        { "s25sl032a",  INFO(0x010215,      0,  64 * 1024,  64, 0) },
2036        { "s25sl064a",  INFO(0x010216,      0,  64 * 1024, 128, 0) },
2037        { "s25fl004k",  INFO(0xef4013,      0,  64 * 1024,   8, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
2038        { "s25fl008k",  INFO(0xef4014,      0,  64 * 1024,  16, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
2039        { "s25fl016k",  INFO(0xef4015,      0,  64 * 1024,  32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
2040        { "s25fl064k",  INFO(0xef4017,      0,  64 * 1024, 128, SECT_4K) },
2041        { "s25fl116k",  INFO(0x014015,      0,  64 * 1024,  32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
2042        { "s25fl132k",  INFO(0x014016,      0,  64 * 1024,  64, SECT_4K) },
2043        { "s25fl164k",  INFO(0x014017,      0,  64 * 1024, 128, SECT_4K) },
2044        { "s25fl204k",  INFO(0x014013,      0,  64 * 1024,   8, SECT_4K | SPI_NOR_DUAL_READ) },
2045        { "s25fl208k",  INFO(0x014014,      0,  64 * 1024,  16, SECT_4K | SPI_NOR_DUAL_READ) },
2046        { "s25fl064l",  INFO(0x016017,      0,  64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
2047        { "s25fl128l",  INFO(0x016018,      0,  64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
2048        { "s25fl256l",  INFO(0x016019,      0,  64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
2049
2050        /* SST -- large erase sizes are "overlays", "sectors" are 4K */
2051        { "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024,  8, SECT_4K | SST_WRITE) },
2052        { "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) },
2053        { "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32, SECT_4K | SST_WRITE) },
2054        { "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64, SECT_4K | SST_WRITE) },
2055        { "sst25vf064c", INFO(0xbf254b, 0, 64 * 1024, 128, SECT_4K) },
2056        { "sst25wf512",  INFO(0xbf2501, 0, 64 * 1024,  1, SECT_4K | SST_WRITE) },
2057        { "sst25wf010",  INFO(0xbf2502, 0, 64 * 1024,  2, SECT_4K | SST_WRITE) },
2058        { "sst25wf020",  INFO(0xbf2503, 0, 64 * 1024,  4, SECT_4K | SST_WRITE) },
2059        { "sst25wf020a", INFO(0x621612, 0, 64 * 1024,  4, SECT_4K) },
2060        { "sst25wf040b", INFO(0x621613, 0, 64 * 1024,  8, SECT_4K) },
2061        { "sst25wf040",  INFO(0xbf2504, 0, 64 * 1024,  8, SECT_4K | SST_WRITE) },
2062        { "sst25wf080",  INFO(0xbf2505, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) },
2063        { "sst26vf064b", INFO(0xbf2643, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
2064
2065        /* ST Microelectronics -- newer production may have feature updates */
2066        { "m25p05",  INFO(0x202010,  0,  32 * 1024,   2, 0) },
2067        { "m25p10",  INFO(0x202011,  0,  32 * 1024,   4, 0) },
2068        { "m25p20",  INFO(0x202012,  0,  64 * 1024,   4, 0) },
2069        { "m25p40",  INFO(0x202013,  0,  64 * 1024,   8, 0) },
2070        { "m25p80",  INFO(0x202014,  0,  64 * 1024,  16, 0) },
2071        { "m25p16",  INFO(0x202015,  0,  64 * 1024,  32, 0) },
2072        { "m25p32",  INFO(0x202016,  0,  64 * 1024,  64, 0) },
2073        { "m25p64",  INFO(0x202017,  0,  64 * 1024, 128, 0) },
2074        { "m25p128", INFO(0x202018,  0, 256 * 1024,  64, 0) },
2075
2076        { "m25p05-nonjedec",  INFO(0, 0,  32 * 1024,   2, 0) },
2077        { "m25p10-nonjedec",  INFO(0, 0,  32 * 1024,   4, 0) },
2078        { "m25p20-nonjedec",  INFO(0, 0,  64 * 1024,   4, 0) },
2079        { "m25p40-nonjedec",  INFO(0, 0,  64 * 1024,   8, 0) },
2080        { "m25p80-nonjedec",  INFO(0, 0,  64 * 1024,  16, 0) },
2081        { "m25p16-nonjedec",  INFO(0, 0,  64 * 1024,  32, 0) },
2082        { "m25p32-nonjedec",  INFO(0, 0,  64 * 1024,  64, 0) },
2083        { "m25p64-nonjedec",  INFO(0, 0,  64 * 1024, 128, 0) },
2084        { "m25p128-nonjedec", INFO(0, 0, 256 * 1024,  64, 0) },
2085
2086        { "m45pe10", INFO(0x204011,  0, 64 * 1024,    2, 0) },
2087        { "m45pe80", INFO(0x204014,  0, 64 * 1024,   16, 0) },
2088        { "m45pe16", INFO(0x204015,  0, 64 * 1024,   32, 0) },
2089
2090        { "m25pe20", INFO(0x208012,  0, 64 * 1024,  4,       0) },
2091        { "m25pe80", INFO(0x208014,  0, 64 * 1024, 16,       0) },
2092        { "m25pe16", INFO(0x208015,  0, 64 * 1024, 32, SECT_4K) },
2093
2094        { "m25px16",    INFO(0x207115,  0, 64 * 1024, 32, SECT_4K) },
2095        { "m25px32",    INFO(0x207116,  0, 64 * 1024, 64, SECT_4K) },
2096        { "m25px32-s0", INFO(0x207316,  0, 64 * 1024, 64, SECT_4K) },
2097        { "m25px32-s1", INFO(0x206316,  0, 64 * 1024, 64, SECT_4K) },
2098        { "m25px64",    INFO(0x207117,  0, 64 * 1024, 128, 0) },
2099        { "m25px80",    INFO(0x207114,  0, 64 * 1024, 16, 0) },
2100
2101        /* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */
2102        { "w25x05", INFO(0xef3010, 0, 64 * 1024,  1,  SECT_4K) },
2103        { "w25x10", INFO(0xef3011, 0, 64 * 1024,  2,  SECT_4K) },
2104        { "w25x20", INFO(0xef3012, 0, 64 * 1024,  4,  SECT_4K) },
2105        { "w25x40", INFO(0xef3013, 0, 64 * 1024,  8,  SECT_4K) },
2106        { "w25x80", INFO(0xef3014, 0, 64 * 1024,  16, SECT_4K) },
2107        { "w25x16", INFO(0xef3015, 0, 64 * 1024,  32, SECT_4K) },
2108        {
2109                "w25q16dw", INFO(0xef6015, 0, 64 * 1024,  32,
2110                        SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
2111                        SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
2112        },
2113        { "w25x32", INFO(0xef3016, 0, 64 * 1024,  64, SECT_4K) },
2114        {
2115                "w25q16jv-im/jm", INFO(0xef7015, 0, 64 * 1024,  32,
2116                        SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
2117                        SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
2118        },
2119        { "w25q20cl", INFO(0xef4012, 0, 64 * 1024,  4, SECT_4K) },
2120        { "w25q20bw", INFO(0xef5012, 0, 64 * 1024,  4, SECT_4K) },
2121        { "w25q20ew", INFO(0xef6012, 0, 64 * 1024,  4, SECT_4K) },
2122        { "w25q32", INFO(0xef4016, 0, 64 * 1024,  64, SECT_4K) },
2123        {
2124                "w25q32dw", INFO(0xef6016, 0, 64 * 1024,  64,
2125                        SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
2126                        SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
2127        },
2128        {
2129                "w25q32jv", INFO(0xef7016, 0, 64 * 1024,  64,
2130                        SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
2131                        SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
2132        },
2133        { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
2134        { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
2135        {
2136                "w25q64dw", INFO(0xef6017, 0, 64 * 1024, 128,
2137                        SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
2138                        SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
2139        },
2140        {
2141                "w25q128fw", INFO(0xef6018, 0, 64 * 1024, 256,
2142                        SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
2143                        SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
2144        },
2145        {
2146                "w25q128jv", INFO(0xef7018, 0, 64 * 1024, 256,
2147                        SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
2148                        SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
2149        },
2150        { "w25q80", INFO(0xef5014, 0, 64 * 1024,  16, SECT_4K) },
2151        { "w25q80bl", INFO(0xef4014, 0, 64 * 1024,  16, SECT_4K) },
2152        { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
2153        { "w25q256", INFO(0xef4019, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
2154        { "w25m512jv", INFO(0xef7119, 0, 64 * 1024, 1024,
2155                        SECT_4K | SPI_NOR_QUAD_READ | SPI_NOR_DUAL_READ) },
2156
2157        /* Catalyst / On Semiconductor -- non-JEDEC */
2158        { "cat25c11", CAT25_INFO(  16, 8, 16, 1, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
2159        { "cat25c03", CAT25_INFO(  32, 8, 16, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
2160        { "cat25c09", CAT25_INFO( 128, 8, 32, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
2161        { "cat25c17", CAT25_INFO( 256, 8, 32, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
2162        { "cat25128", CAT25_INFO(2048, 8, 64, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
2163
2164        /* Xilinx S3AN Internal Flash */
2165        { "3S50AN", S3AN_INFO(0x1f2200, 64, 264) },
2166        { "3S200AN", S3AN_INFO(0x1f2400, 256, 264) },
2167        { "3S400AN", S3AN_INFO(0x1f2400, 256, 264) },
2168        { "3S700AN", S3AN_INFO(0x1f2500, 512, 264) },
2169        { "3S1400AN", S3AN_INFO(0x1f2600, 512, 528) },
2170
2171        /* XMC (Wuhan Xinxin Semiconductor Manufacturing Corp.) */
2172        { "XM25QH64A", INFO(0x207017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
2173        { "XM25QH128A", INFO(0x207018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
2174        { },
2175};
2176
2177static const struct flash_info *spi_nor_read_id(struct spi_nor *nor)
2178{
2179        int                     tmp;
2180        u8                      id[SPI_NOR_MAX_ID_LEN];
2181        const struct flash_info *info;
2182
2183        tmp = nor->read_reg(nor, SPINOR_OP_RDID, id, SPI_NOR_MAX_ID_LEN);
2184        if (tmp < 0) {
2185                dev_err(nor->dev, "error %d reading JEDEC ID\n", tmp);
2186                return ERR_PTR(tmp);
2187        }
2188
2189        for (tmp = 0; tmp < ARRAY_SIZE(spi_nor_ids) - 1; tmp++) {
2190                info = &spi_nor_ids[tmp];
2191                if (info->id_len) {
2192                        if (!memcmp(info->id, id, info->id_len))
2193                                return &spi_nor_ids[tmp];
2194                }
2195        }
2196        dev_err(nor->dev, "unrecognized JEDEC id bytes: %*ph\n",
2197                SPI_NOR_MAX_ID_LEN, id);
2198        return ERR_PTR(-ENODEV);
2199}
2200
2201static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
2202                        size_t *retlen, u_char *buf)
2203{
2204        struct spi_nor *nor = mtd_to_spi_nor(mtd);
2205        int ret;
2206
2207        dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);
2208
2209        ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_READ);
2210        if (ret)
2211                return ret;
2212
2213        while (len) {
2214                loff_t addr = from;
2215
2216                if (nor->flags & SNOR_F_S3AN_ADDR_DEFAULT)
2217                        addr = spi_nor_s3an_addr_convert(nor, addr);
2218
2219                ret = nor->read(nor, addr, len, buf);
2220                if (ret == 0) {
2221                        /* We shouldn't see 0-length reads */
2222                        ret = -EIO;
2223                        goto read_err;
2224                }
2225                if (ret < 0)
2226                        goto read_err;
2227
2228                WARN_ON(ret > len);
2229                *retlen += ret;
2230                buf += ret;
2231                from += ret;
2232                len -= ret;
2233        }
2234        ret = 0;
2235
2236read_err:
2237        spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_READ);
2238        return ret;
2239}
2240
2241static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
2242                size_t *retlen, const u_char *buf)
2243{
2244        struct spi_nor *nor = mtd_to_spi_nor(mtd);
2245        size_t actual;
2246        int ret;
2247
2248        dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
2249
2250        ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_WRITE);
2251        if (ret)
2252                return ret;
2253
2254        write_enable(nor);
2255
2256        nor->sst_write_second = false;
2257
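        /*
         * Illustrative walk-through of the sequence below (not a spec quote):
         * for a 5-byte write starting at an odd address, one leading byte is
         * sent with Byte Program (BP), the remaining four bytes go out as two
         * 2-byte AAI word programs and no trailing byte is left over. For a
         * 5-byte write starting at an even address, two AAI word programs
         * cover the first four bytes and the last byte is sent with a final
         * Byte Program.
         */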
2258        actual = to % 2;
2259        /* Start write from odd address. */
2260        if (actual) {
2261                nor->program_opcode = SPINOR_OP_BP;
2262
2263                /* write one byte. */
2264                ret = nor->write(nor, to, 1, buf);
2265                if (ret < 0)
2266                        goto sst_write_err;
2267                WARN(ret != 1, "While writing 1 byte written %i bytes\n",
2268                     (int)ret);
2269                ret = spi_nor_wait_till_ready(nor);
2270                if (ret)
2271                        goto sst_write_err;
2272        }
2273        to += actual;
2274
2275        /* Write out most of the data here. */
2276        for (; actual < len - 1; actual += 2) {
2277                nor->program_opcode = SPINOR_OP_AAI_WP;
2278
2279                /* write two bytes. */
2280                ret = nor->write(nor, to, 2, buf + actual);
2281                if (ret < 0)
2282                        goto sst_write_err;
2283                WARN(ret != 2, "While writing 2 bytes written %i bytes\n",
2284                     (int)ret);
2285                ret = spi_nor_wait_till_ready(nor);
2286                if (ret)
2287                        goto sst_write_err;
2288                to += 2;
2289                nor->sst_write_second = true;
2290        }
2291        nor->sst_write_second = false;
2292
2293        write_disable(nor);
2294        ret = spi_nor_wait_till_ready(nor);
2295        if (ret)
2296                goto sst_write_err;
2297
2298        /* Write out trailing byte if it exists. */
2299        if (actual != len) {
2300                write_enable(nor);
2301
2302                nor->program_opcode = SPINOR_OP_BP;
2303                ret = nor->write(nor, to, 1, buf + actual);
2304                if (ret < 0)
2305                        goto sst_write_err;
2306                WARN(ret != 1, "While writing 1 byte written %i bytes\n",
2307                     (int)ret);
2308                ret = spi_nor_wait_till_ready(nor);
2309                if (ret)
2310                        goto sst_write_err;
2311                write_disable(nor);
2312                actual += 1;
2313        }
2314sst_write_err:
2315        *retlen += actual;
2316        spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE);
2317        return ret;
2318}
2319
2320/*
2321 * Write an address range to the nor chip.  Data must be written in
2322 * FLASH_PAGESIZE chunks.  The address range may be any size provided
2323 * it is within the physical boundaries.
2324 */
2325static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
2326        size_t *retlen, const u_char *buf)
2327{
2328        struct spi_nor *nor = mtd_to_spi_nor(mtd);
2329        size_t page_offset, page_remain, i;
2330        ssize_t ret;
2331
2332        dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
2333
2334        ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_WRITE);
2335        if (ret)
2336                return ret;
2337
2338        for (i = 0; i < len; ) {
2339                ssize_t written;
2340                loff_t addr = to + i;
2341
2342                /*
2343                 * If page_size is a power of two, the offset can be quickly
2344                 * calculated with an AND operation. In the other cases we
2345                 * need to do a modulus operation (more expensive).
2346                 * Power of two numbers have only one bit set and we can use
2347                 * the instruction hweight32 to detect if we need to do a
2348                 * modulus (do_div()) or not.
2349                 */
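                /*
                 * For example (illustrative values): with the common 256-byte
                 * page, hweight32(256) == 1 and the offset is simply
                 * addr & 0xff, while a 264-byte page (an S3AN device in its
                 * default addressing mode) has hweight32(264) == 2 and goes
                 * through do_div() instead.
                 */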
2350                if (hweight32(nor->page_size) == 1) {
2351                        page_offset = addr & (nor->page_size - 1);
2352                } else {
2353                        uint64_t aux = addr;
2354
2355                        page_offset = do_div(aux, nor->page_size);
2356                }
2357                /* the size of data remaining on the first page */
2358                page_remain = min_t(size_t,
2359                                    nor->page_size - page_offset, len - i);
2360
2361                if (nor->flags & SNOR_F_S3AN_ADDR_DEFAULT)
2362                        addr = spi_nor_s3an_addr_convert(nor, addr);
2363
2364                write_enable(nor);
2365                ret = nor->write(nor, addr, page_remain, buf + i);
2366                if (ret < 0)
2367                        goto write_err;
2368                written = ret;
2369
2370                ret = spi_nor_wait_till_ready(nor);
2371                if (ret)
2372                        goto write_err;
2373                *retlen += written;
2374                i += written;
2375        }
2376
2377write_err:
2378        spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE);
2379        return ret;
2380}
2381
2382static int spi_nor_check(struct spi_nor *nor)
2383{
2384        if (!nor->dev || !nor->read || !nor->write ||
2385                !nor->read_reg || !nor->write_reg) {
2386                pr_err("spi-nor: please fill all the necessary fields!\n");
2387                return -EINVAL;
2388        }
2389
2390        return 0;
2391}
2392
2393static int s3an_nor_scan(struct spi_nor *nor)
2394{
2395        int ret;
2396        u8 val;
2397
2398        ret = nor->read_reg(nor, SPINOR_OP_XRDSR, &val, 1);
2399        if (ret < 0) {
2400                dev_err(nor->dev, "error %d reading XRDSR\n", (int) ret);
2401                return ret;
2402        }
2403
2404        nor->erase_opcode = SPINOR_OP_XSE;
2405        nor->program_opcode = SPINOR_OP_XPP;
2406        nor->read_opcode = SPINOR_OP_READ;
2407        nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
2408
2409        /*
2410         * These flashes have a page size of 264 or 528 bytes (known as
2411         * Default addressing mode). It can be changed to a more standard
2412         * Power of two mode where the page size is 256/512. This comes
2413         * at a price: about 3% of the space is lost, the data already on
2414         * the flash is corrupted and the page size cannot be changed back
2415         * to Default addressing mode.
2416         *
2417         * The current addressing mode can be read from the XRDSR register
2418         * and should not be changed, because changing it is a destructive
2419         * operation.
2420        if (val & XSR_PAGESIZE) {
2421                /* Flash in Power of 2 mode */
2422                nor->page_size = (nor->page_size == 264) ? 256 : 512;
2423                nor->mtd.writebufsize = nor->page_size;
2424                nor->mtd.size = 8 * nor->page_size * nor->info->n_sectors;
2425                nor->mtd.erasesize = 8 * nor->page_size;
2426        } else {
2427                /* Flash in Default addressing mode */
2428                nor->flags |= SNOR_F_S3AN_ADDR_DEFAULT;
2429        }
2430
2431        return 0;
2432}
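
/*
 * Worked example for the scan above (values taken from the 3S700AN entry in
 * spi_nor_ids[]): with 264-byte pages and 512 sectors, Power of 2 mode yields
 * page_size = 256, writebufsize = 256, erasesize = 8 * 256 = 2 KiB and an MTD
 * size of 8 * 256 * 512 = 1 MiB.
 */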
2433
2434static void
2435spi_nor_set_read_settings(struct spi_nor_read_command *read,
2436                          u8 num_mode_clocks,
2437                          u8 num_wait_states,
2438                          u8 opcode,
2439                          enum spi_nor_protocol proto)
2440{
2441        read->num_mode_clocks = num_mode_clocks;
2442        read->num_wait_states = num_wait_states;
2443        read->opcode = opcode;
2444        read->proto = proto;
2445}
2446
2447static void
2448spi_nor_set_pp_settings(struct spi_nor_pp_command *pp,
2449                        u8 opcode,
2450                        enum spi_nor_protocol proto)
2451{
2452        pp->opcode = opcode;
2453        pp->proto = proto;
2454}
2455
2456static int spi_nor_hwcaps2cmd(u32 hwcaps, const int table[][2], size_t size)
2457{
2458        size_t i;
2459
2460        for (i = 0; i < size; i++)
2461                if (table[i][0] == (int)hwcaps)
2462                        return table[i][1];
2463
2464        return -EINVAL;
2465}
2466
2467static int spi_nor_hwcaps_read2cmd(u32 hwcaps)
2468{
2469        static const int hwcaps_read2cmd[][2] = {
2470                { SNOR_HWCAPS_READ,             SNOR_CMD_READ },
2471                { SNOR_HWCAPS_READ_FAST,        SNOR_CMD_READ_FAST },
2472                { SNOR_HWCAPS_READ_1_1_1_DTR,   SNOR_CMD_READ_1_1_1_DTR },
2473                { SNOR_HWCAPS_READ_1_1_2,       SNOR_CMD_READ_1_1_2 },
2474                { SNOR_HWCAPS_READ_1_2_2,       SNOR_CMD_READ_1_2_2 },
2475                { SNOR_HWCAPS_READ_2_2_2,       SNOR_CMD_READ_2_2_2 },
2476                { SNOR_HWCAPS_READ_1_2_2_DTR,   SNOR_CMD_READ_1_2_2_DTR },
2477                { SNOR_HWCAPS_READ_1_1_4,       SNOR_CMD_READ_1_1_4 },
2478                { SNOR_HWCAPS_READ_1_4_4,       SNOR_CMD_READ_1_4_4 },
2479                { SNOR_HWCAPS_READ_4_4_4,       SNOR_CMD_READ_4_4_4 },
2480                { SNOR_HWCAPS_READ_1_4_4_DTR,   SNOR_CMD_READ_1_4_4_DTR },
2481                { SNOR_HWCAPS_READ_1_1_8,       SNOR_CMD_READ_1_1_8 },
2482                { SNOR_HWCAPS_READ_1_8_8,       SNOR_CMD_READ_1_8_8 },
2483                { SNOR_HWCAPS_READ_8_8_8,       SNOR_CMD_READ_8_8_8 },
2484                { SNOR_HWCAPS_READ_1_8_8_DTR,   SNOR_CMD_READ_1_8_8_DTR },
2485        };
2486
2487        return spi_nor_hwcaps2cmd(hwcaps, hwcaps_read2cmd,
2488                                  ARRAY_SIZE(hwcaps_read2cmd));
2489}
2490
2491static int spi_nor_hwcaps_pp2cmd(u32 hwcaps)
2492{
2493        static const int hwcaps_pp2cmd[][2] = {
2494                { SNOR_HWCAPS_PP,               SNOR_CMD_PP },
2495                { SNOR_HWCAPS_PP_1_1_4,         SNOR_CMD_PP_1_1_4 },
2496                { SNOR_HWCAPS_PP_1_4_4,         SNOR_CMD_PP_1_4_4 },
2497                { SNOR_HWCAPS_PP_4_4_4,         SNOR_CMD_PP_4_4_4 },
2498                { SNOR_HWCAPS_PP_1_1_8,         SNOR_CMD_PP_1_1_8 },
2499                { SNOR_HWCAPS_PP_1_8_8,         SNOR_CMD_PP_1_8_8 },
2500                { SNOR_HWCAPS_PP_8_8_8,         SNOR_CMD_PP_8_8_8 },
2501        };
2502
2503        return spi_nor_hwcaps2cmd(hwcaps, hwcaps_pp2cmd,
2504                                  ARRAY_SIZE(hwcaps_pp2cmd));
2505}
2506
2507/*
2508 * Serial Flash Discoverable Parameters (SFDP) parsing.
2509 */
2510
2511/**
2512 * spi_nor_read_raw() - raw read of serial flash memory. The read_opcode,
2513 *                      addr_width and read_dummy members of the struct
2514 *                      spi_nor should be previously set.
2515 *
2516 * @nor:        pointer to a 'struct spi_nor'
2517 * @addr:       offset in the serial flash memory
2518 * @len:        number of bytes to read
2519 * @buf:        buffer where the data is copied into (dma-safe memory)
2520 *
2521 * Return: 0 on success, -errno otherwise.
2522 */
2523static int spi_nor_read_raw(struct spi_nor *nor, u32 addr, size_t len, u8 *buf)
2524{
2525        int ret;
2526
2527        while (len) {
2528                ret = nor->read(nor, addr, len, buf);
2529                if (!ret || ret > len)
2530                        return -EIO;
2531                if (ret < 0)
2532                        return ret;
2533
2534                buf += ret;
2535                addr += ret;
2536                len -= ret;
2537        }
2538        return 0;
2539}
2540
2541/**
2542 * spi_nor_read_sfdp() - read Serial Flash Discoverable Parameters.
2543 * @nor:        pointer to a 'struct spi_nor'
2544 * @addr:       offset in the SFDP area to start reading data from
2545 * @len:        number of bytes to read
2546 * @buf:        buffer where the SFDP data are copied into (dma-safe memory)
2547 *
2548 * Whatever address width and number of dummy cycles the (Fast) Read
2549 * commands actually use, the Read SFDP (5Ah) instruction is always
2550 * followed by a 3-byte address and 8 dummy clock cycles.
2551 *
2552 * Return: 0 on success, -errno otherwise.
2553 */
2554static int spi_nor_read_sfdp(struct spi_nor *nor, u32 addr,
2555                             size_t len, void *buf)
2556{
2557        u8 addr_width, read_opcode, read_dummy;
2558        int ret;
2559
2560        read_opcode = nor->read_opcode;
2561        addr_width = nor->addr_width;
2562        read_dummy = nor->read_dummy;
2563
2564        nor->read_opcode = SPINOR_OP_RDSFDP;
2565        nor->addr_width = 3;
2566        nor->read_dummy = 8;
2567
2568        ret = spi_nor_read_raw(nor, addr, len, buf);
2569
2570        nor->read_opcode = read_opcode;
2571        nor->addr_width = addr_width;
2572        nor->read_dummy = read_dummy;
2573
2574        return ret;
2575}
2576
2577/**
2578 * spi_nor_read_sfdp_dma_unsafe() - read Serial Flash Discoverable Parameters.
2579 * @nor:        pointer to a 'struct spi_nor'
2580 * @addr:       offset in the SFDP area to start reading data from
2581 * @len:        number of bytes to read
2582 * @buf:        buffer where the SFDP data are copied into
2583 *
2584 * Wrap spi_nor_read_sfdp() using a kmalloc'ed bounce buffer, since @buf is
2585 * not guaranteed to be dma-safe.
2586 *
2587 * Return: -ENOMEM if kmalloc() fails, the return code of spi_nor_read_sfdp()
2588 *          otherwise.
2589 */
2590static int spi_nor_read_sfdp_dma_unsafe(struct spi_nor *nor, u32 addr,
2591                                        size_t len, void *buf)
2592{
2593        void *dma_safe_buf;
2594        int ret;
2595
2596        dma_safe_buf = kmalloc(len, GFP_KERNEL);
2597        if (!dma_safe_buf)
2598                return -ENOMEM;
2599
2600        ret = spi_nor_read_sfdp(nor, addr, len, dma_safe_buf);
2601        memcpy(buf, dma_safe_buf, len);
2602        kfree(dma_safe_buf);
2603
2604        return ret;
2605}
2606
2607/* Fast Read settings. */
2608
2609static void
2610spi_nor_set_read_settings_from_bfpt(struct spi_nor_read_command *read,
2611                                    u16 half,
2612                                    enum spi_nor_protocol proto)
2613{
2614        read->num_mode_clocks = (half >> 5) & 0x07;
2615        read->num_wait_states = (half >> 0) & 0x1f;
2616        read->opcode = (half >> 8) & 0xff;
2617        read->proto = proto;
2618}
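
/*
 * Decoding example (made-up half-word, not from a real BFPT): a value of
 * 0x6b08 yields opcode 0x6b (bits 15:8), 0 mode clocks (bits 7:5) and 8 wait
 * states (bits 4:0), i.e. a Fast Read command with 8 dummy clock cycles.
 */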
2619
2620struct sfdp_bfpt_read {
2621        /* The Fast Read x-y-z hardware capability in params->hwcaps.mask. */
2622        u32                     hwcaps;
2623
2624        /*
2625         * The <supported_bit> bit in <supported_dword> BFPT DWORD tells us
2626         * whether the Fast Read x-y-z command is supported.
2627         */
2628        u32                     supported_dword;
2629        u32                     supported_bit;
2630
2631        /*
2632         * The half-word at offset <settings_shift> in the <settings_dword> BFPT
2633         * DWORD encodes the op code, the number of mode clocks and the number
2634         * of wait states to be used by the Fast Read x-y-z command.
2635         */
2636        u32                     settings_dword;
2637        u32                     settings_shift;
2638
2639        /* The SPI protocol for this Fast Read x-y-z command. */
2640        enum spi_nor_protocol   proto;
2641};
2642
2643static const struct sfdp_bfpt_read sfdp_bfpt_reads[] = {
2644        /* Fast Read 1-1-2 */
2645        {
2646                SNOR_HWCAPS_READ_1_1_2,
2647                BFPT_DWORD(1), BIT(16), /* Supported bit */
2648                BFPT_DWORD(4), 0,       /* Settings */
2649                SNOR_PROTO_1_1_2,
2650        },
2651
2652        /* Fast Read 1-2-2 */
2653        {
2654                SNOR_HWCAPS_READ_1_2_2,
2655                BFPT_DWORD(1), BIT(20), /* Supported bit */
2656                BFPT_DWORD(4), 16,      /* Settings */
2657                SNOR_PROTO_1_2_2,
2658        },
2659
2660        /* Fast Read 2-2-2 */
2661        {
2662                SNOR_HWCAPS_READ_2_2_2,
2663                BFPT_DWORD(5),  BIT(0), /* Supported bit */
2664                BFPT_DWORD(6), 16,      /* Settings */
2665                SNOR_PROTO_2_2_2,
2666        },
2667
2668        /* Fast Read 1-1-4 */
2669        {
2670                SNOR_HWCAPS_READ_1_1_4,
2671                BFPT_DWORD(1), BIT(22), /* Supported bit */
2672                BFPT_DWORD(3), 16,      /* Settings */
2673                SNOR_PROTO_1_1_4,
2674        },
2675
2676        /* Fast Read 1-4-4 */
2677        {
2678                SNOR_HWCAPS_READ_1_4_4,
2679                BFPT_DWORD(1), BIT(21), /* Supported bit */
2680                BFPT_DWORD(3), 0,       /* Settings */
2681                SNOR_PROTO_1_4_4,
2682        },
2683
2684        /* Fast Read 4-4-4 */
2685        {
2686                SNOR_HWCAPS_READ_4_4_4,
2687                BFPT_DWORD(5), BIT(4),  /* Supported bit */
2688                BFPT_DWORD(7), 16,      /* Settings */
2689                SNOR_PROTO_4_4_4,
2690        },
2691};
2692
2693struct sfdp_bfpt_erase {
2694        /*
2695         * The half-word at offset <shift> in DWORD <dword> encodes the
2696         * op code and erase sector size to be used by Sector Erase commands.
2697         */
2698        u32                     dword;
2699        u32                     shift;
2700};
2701
2702static const struct sfdp_bfpt_erase sfdp_bfpt_erases[] = {
2703        /* Erase Type 1 in DWORD8 bits[15:0] */
2704        {BFPT_DWORD(8), 0},
2705
2706        /* Erase Type 2 in DWORD8 bits[31:16] */
2707        {BFPT_DWORD(8), 16},
2708
2709        /* Erase Type 3 in DWORD9 bits[15:0] */
2710        {BFPT_DWORD(9), 0},
2711
2712        /* Erase Type 4 in DWORD9 bits[31:16] */
2713        {BFPT_DWORD(9), 16},
2714};
2715
2716/**
2717 * spi_nor_set_erase_type() - set a SPI NOR erase type
2718 * @erase:      pointer to a structure that describes a SPI NOR erase type
2719 * @size:       the size of the sector/block erased by the erase type
2720 * @opcode:     the SPI command op code to erase the sector/block
2721 */
2722static void spi_nor_set_erase_type(struct spi_nor_erase_type *erase,
2723                                   u32 size, u8 opcode)
2724{
2725        erase->size = size;
2726        erase->opcode = opcode;
2727        /* JEDEC JESD216B Standard imposes erase sizes to be power of 2. */
2728        erase->size_shift = ffs(erase->size) - 1;
2729        erase->size_mask = (1 << erase->size_shift) - 1;
2730}
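
/*
 * Worked example (illustrative): for a 64 KiB erase type, size = 0x10000 and
 * ffs(0x10000) == 17, so size_shift = 16 and size_mask = 0xffff, which lets
 * the driver replace divisions by the erase size with shifts and masks.
 */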
2731
2732/**
2733 * spi_nor_set_erase_settings_from_bfpt() - set erase type settings from BFPT
2734 * @erase:      pointer to a structure that describes a SPI NOR erase type
2735 * @size:       the size of the sector/block erased by the erase type
2736 * @opcode:     the SPI command op code to erase the sector/block
2737 * @i:          erase type index as sorted in the Basic Flash Parameter Table
2738 *
2739 * The supported Erase Types will be sorted at init in ascending order, with
2740 * the smallest Erase Type size being the first member in the erase_type array
2741 * of the spi_nor_erase_map structure. Save the Erase Type index as sorted in
2742 * the Basic Flash Parameter Table since it will be used later on to
2743 * synchronize with the supported Erase Types defined in SFDP optional tables.
2744 */
2745static void
2746spi_nor_set_erase_settings_from_bfpt(struct spi_nor_erase_type *erase,
2747                                     u32 size, u8 opcode, u8 i)
2748{
2749        erase->idx = i;
2750        spi_nor_set_erase_type(erase, size, opcode);
2751}
2752
2753/**
2754 * spi_nor_map_cmp_erase_type() - compare the map's erase types by size
2755 * @l:  member in the left half of the map's erase_type array
2756 * @r:  member in the right half of the map's erase_type array
2757 *
2758 * Comparison function used in the sort() call to sort in ascending order the
2759 * map's erase types, the smallest erase type size being the first member in the
2760 * sorted erase_type array.
2761 *
2762 * Return: the result of @l->size - @r->size
2763 */
2764static int spi_nor_map_cmp_erase_type(const void *l, const void *r)
2765{
2766        const struct spi_nor_erase_type *left = l, *right = r;
2767
2768        return left->size - right->size;
2769}
2770
2771/**
2772 * spi_nor_sort_erase_mask() - sort erase mask
2773 * @map:        the erase map of the SPI NOR
2774 * @erase_mask: the erase type mask to be sorted
2775 *
2776 * Replicate the sort done for the map's erase types in BFPT: sort the erase
2777 * mask in ascending order with the smallest erase type size starting from
2778 * BIT(0) in the sorted erase mask.
2779 *
2780 * Return: sorted erase mask.
2781 */
2782static u8 spi_nor_sort_erase_mask(struct spi_nor_erase_map *map, u8 erase_mask)
2783{
2784        struct spi_nor_erase_type *erase_type = map->erase_type;
2785        int i;
2786        u8 sorted_erase_mask = 0;
2787
2788        if (!erase_mask)
2789                return 0;
2790
2791        /* Replicate the sort done for the map's erase types. */
2792        for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
2793                if (erase_type[i].size && erase_mask & BIT(erase_type[i].idx))
2794                        sorted_erase_mask |= BIT(i);
2795
2796        return sorted_erase_mask;
2797}
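
/*
 * Worked example (editorial addition): assume the BFPT advertised Erase
 * Type 1 (idx 0) as 64 KiB and Erase Type 2 (idx 1) as 4 KiB. After the
 * sort() in spi_nor_parse_bfpt(), erase_type[0] is the 4 KiB type (idx 1)
 * and erase_type[1] is the 64 KiB type (idx 0). An input mask of BIT(0),
 * i.e. "BFPT Erase Type 1 only", is therefore returned as BIT(1) in
 * sorted order.
 */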
2798
2799/**
2800 * spi_nor_regions_sort_erase_types() - sort erase types in each region
2801 * @map:        the erase map of the SPI NOR
2802 *
2803 * Function assumes that the erase types defined in the erase map are already
2804 * sorted in ascending order, with the smallest erase type size being the first
2805 * member in the erase_type array. It replicates the sort done for the map's
2806 * erase types. Each region's erase bitmask will indicate which erase types are
2807 * supported from the sorted erase types defined in the erase map.
2808 * Sort all the regions' erase types at init in order to speed up the
2809 * process of finding the best erase command at runtime.
2810 */
2811static void spi_nor_regions_sort_erase_types(struct spi_nor_erase_map *map)
2812{
2813        struct spi_nor_erase_region *region = map->regions;
2814        u8 region_erase_mask, sorted_erase_mask;
2815
2816        while (region) {
2817                region_erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;
2818
2819                sorted_erase_mask = spi_nor_sort_erase_mask(map,
2820                                                            region_erase_mask);
2821
2822                /* Overwrite erase mask. */
2823                region->offset = (region->offset & ~SNOR_ERASE_TYPE_MASK) |
2824                                 sorted_erase_mask;
2825
2826                region = spi_nor_region_next(region);
2827        }
2828}
2829
2830/**
2831 * spi_nor_init_uniform_erase_map() - Initialize uniform erase map
2832 * @map:                the erase map of the SPI NOR
2833 * @erase_mask:         bitmask encoding erase types that can erase the entire
2834 *                      flash memory
2835 * @flash_size:         the spi nor flash memory size
2836 */
2837static void spi_nor_init_uniform_erase_map(struct spi_nor_erase_map *map,
2838                                           u8 erase_mask, u64 flash_size)
2839{
2840        /* Offset 0 with erase_mask and SNOR_LAST_REGION bit set */
2841        map->uniform_region.offset = (erase_mask & SNOR_ERASE_TYPE_MASK) |
2842                                     SNOR_LAST_REGION;
2843        map->uniform_region.size = flash_size;
2844        map->regions = &map->uniform_region;
2845        map->uniform_erase_type = erase_mask;
2846}
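
/*
 * Illustrative note (editorial addition): for a 16 MiB flash whose sorted
 * Erase Types 0 and 1 can both erase the whole array, the single region
 * ends up as:
 *
 *	map->uniform_region.offset = BIT(0) | BIT(1) | SNOR_LAST_REGION;
 *	map->uniform_region.size   = SZ_16M;
 *
 * i.e. the erase-type bitmask and the region flags live in the low bits
 * of ->offset, while the region's byte offset (0 here) occupies the
 * remaining high bits.
 */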
2847
2848static int
2849spi_nor_post_bfpt_fixups(struct spi_nor *nor,
2850                         const struct sfdp_parameter_header *bfpt_header,
2851                         const struct sfdp_bfpt *bfpt,
2852                         struct spi_nor_flash_parameter *params)
2853{
2854        if (nor->info->fixups && nor->info->fixups->post_bfpt)
2855                return nor->info->fixups->post_bfpt(nor, bfpt_header, bfpt,
2856                                                    params);
2857
2858        return 0;
2859}
2860
2861/**
2862 * spi_nor_parse_bfpt() - read and parse the Basic Flash Parameter Table.
2863 * @nor:                pointer to a 'struct spi_nor'
2864 * @bfpt_header:        pointer to the 'struct sfdp_parameter_header' describing
2865 *                      the Basic Flash Parameter Table length and version
2866 * @params:             pointer to the 'struct spi_nor_flash_parameter' to be
2867 *                      filled
2868 *
2869 * The Basic Flash Parameter Table is the main and only mandatory table as
2870 * defined by the SFDP (JESD216) specification.
2871 * It provides us with the total size (memory density) of the data array and
2872 * the number of address bytes for Fast Read, Page Program and Sector Erase
2873 * commands.
2874 * For Fast READ commands, it also gives the number of mode clock cycles and
2875 * wait states (regrouped in the number of dummy clock cycles) for each
2876 * supported instruction op code.
2877 * For Page Program, the page size is now available since JESD216 rev A, however
2878 * the supported instruction op codes are still not provided.
2879 * For Sector Erase commands, this table stores the supported instruction op
2880 * codes and the associated sector sizes.
2881 * Finally, the Quad Enable Requirements (QER) are also available since JESD216
2882 * rev A. The QER bits encode the manufacturer dependent procedure to be
2883 * executed to set the Quad Enable (QE) bit in some internal register of the
2884 * Quad SPI memory. Indeed the QE bit, when it exists, must be set before
2885 * sending any Quad SPI command to the memory. Actually, setting the QE bit
2886 * tells the memory to reassign its WP# and HOLD#/RESET# pins to functions IO2
2887 * and IO3, hence enabling 4 (Quad) I/O lines.
2888 *
2889 * Return: 0 on success, -errno otherwise.
2890 */
2891static int spi_nor_parse_bfpt(struct spi_nor *nor,
2892                              const struct sfdp_parameter_header *bfpt_header,
2893                              struct spi_nor_flash_parameter *params)
2894{
2895        struct spi_nor_erase_map *map = &nor->erase_map;
2896        struct spi_nor_erase_type *erase_type = map->erase_type;
2897        struct sfdp_bfpt bfpt;
2898        size_t len;
2899        int i, cmd, err;
2900        u32 addr;
2901        u16 half;
2902        u8 erase_mask;
2903
2904        /* JESD216 Basic Flash Parameter Table length is at least 9 DWORDs. */
2905        if (bfpt_header->length < BFPT_DWORD_MAX_JESD216)
2906                return -EINVAL;
2907
2908        /* Read the Basic Flash Parameter Table. */
2909        len = min_t(size_t, sizeof(bfpt),
2910                    bfpt_header->length * sizeof(u32));
2911        addr = SFDP_PARAM_HEADER_PTP(bfpt_header);
2912        memset(&bfpt, 0, sizeof(bfpt));
2913        err = spi_nor_read_sfdp_dma_unsafe(nor,  addr, len, &bfpt);
2914        if (err < 0)
2915                return err;
2916
2917        /* Fix endianness of the BFPT DWORDs. */
2918        for (i = 0; i < BFPT_DWORD_MAX; i++)
2919                bfpt.dwords[i] = le32_to_cpu(bfpt.dwords[i]);
2920
2921        /* Number of address bytes. */
2922        switch (bfpt.dwords[BFPT_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) {
2923        case BFPT_DWORD1_ADDRESS_BYTES_3_ONLY:
2924                nor->addr_width = 3;
2925                break;
2926
2927        case BFPT_DWORD1_ADDRESS_BYTES_4_ONLY:
2928                nor->addr_width = 4;
2929                break;
2930
2931        default:
2932                break;
2933        }
2934
2935        /* Flash Memory Density (in bits). */
2936        params->size = bfpt.dwords[BFPT_DWORD(2)];
2937        if (params->size & BIT(31)) {
2938                params->size &= ~BIT(31);
2939
2940                /*
2941                 * Prevent overflows on params->size. Anyway, a NOR of 2^64
2942                 * bits is unlikely to exist so this error probably means
2943                 * the BFPT we are reading is corrupted/wrong.
2944                 */
2945                if (params->size > 63)
2946                        return -EINVAL;
2947
2948                params->size = 1ULL << params->size;
2949        } else {
2950                params->size++;
2951        }
2952        params->size >>= 3; /* Convert to bytes. */
2953
2954        /* Fast Read settings. */
2955        for (i = 0; i < ARRAY_SIZE(sfdp_bfpt_reads); i++) {
2956                const struct sfdp_bfpt_read *rd = &sfdp_bfpt_reads[i];
2957                struct spi_nor_read_command *read;
2958
2959                if (!(bfpt.dwords[rd->supported_dword] & rd->supported_bit)) {
2960                        params->hwcaps.mask &= ~rd->hwcaps;
2961                        continue;
2962                }
2963
2964                params->hwcaps.mask |= rd->hwcaps;
2965                cmd = spi_nor_hwcaps_read2cmd(rd->hwcaps);
2966                read = &params->reads[cmd];
2967                half = bfpt.dwords[rd->settings_dword] >> rd->settings_shift;
2968                spi_nor_set_read_settings_from_bfpt(read, half, rd->proto);
2969        }
2970
2971        /*
2972         * Sector Erase settings. Reinitialize the uniform erase map using the
2973         * Erase Types defined in the bfpt table.
2974         */
2975        erase_mask = 0;
2976        memset(&nor->erase_map, 0, sizeof(nor->erase_map));
2977        for (i = 0; i < ARRAY_SIZE(sfdp_bfpt_erases); i++) {
2978                const struct sfdp_bfpt_erase *er = &sfdp_bfpt_erases[i];
2979                u32 erasesize;
2980                u8 opcode;
2981
2982                half = bfpt.dwords[er->dword] >> er->shift;
2983                erasesize = half & 0xff;
2984
2985                /* erasesize == 0 means this Erase Type is not supported. */
2986                if (!erasesize)
2987                        continue;
2988
2989                erasesize = 1U << erasesize;
2990                opcode = (half >> 8) & 0xff;
2991                erase_mask |= BIT(i);
2992                spi_nor_set_erase_settings_from_bfpt(&erase_type[i], erasesize,
2993                                                     opcode, i);
2994        }
2995        spi_nor_init_uniform_erase_map(map, erase_mask, params->size);
2996        /*
2997         * Sort all the map's Erase Types in ascending order with the smallest
2998         * erase size being the first member in the erase_type array.
2999         */
3000        sort(erase_type, SNOR_ERASE_TYPE_MAX, sizeof(erase_type[0]),
3001             spi_nor_map_cmp_erase_type, NULL);
3002        /*
3003         * Sort the erase types in the uniform region in order to update the
3004         * uniform_erase_type bitmask. The bitmask will be used later on when
3005         * selecting the uniform erase.
3006         */
3007        spi_nor_regions_sort_erase_types(map);
3008        map->uniform_erase_type = map->uniform_region.offset &
3009                                  SNOR_ERASE_TYPE_MASK;
3010
3011        /* Stop here if not JESD216 rev A or later. */
3012        if (bfpt_header->length < BFPT_DWORD_MAX)
3013                return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt,
3014                                                params);
3015
3016        /* Page size: this field specifies 'N' so the page size = 2^N bytes. */
3017        params->page_size = bfpt.dwords[BFPT_DWORD(11)];
3018        params->page_size &= BFPT_DWORD11_PAGE_SIZE_MASK;
3019        params->page_size >>= BFPT_DWORD11_PAGE_SIZE_SHIFT;
3020        params->page_size = 1U << params->page_size;
3021
3022        /* Quad Enable Requirements. */
3023        switch (bfpt.dwords[BFPT_DWORD(15)] & BFPT_DWORD15_QER_MASK) {
3024        case BFPT_DWORD15_QER_NONE:
3025                params->quad_enable = NULL;
3026                break;
3027
3028        case BFPT_DWORD15_QER_SR2_BIT1_BUGGY:
3029        case BFPT_DWORD15_QER_SR2_BIT1_NO_RD:
3030                params->quad_enable = spansion_no_read_cr_quad_enable;
3031                break;
3032
3033        case BFPT_DWORD15_QER_SR1_BIT6:
3034                params->quad_enable = macronix_quad_enable;
3035                break;
3036
3037        case BFPT_DWORD15_QER_SR2_BIT7:
3038                params->quad_enable = sr2_bit7_quad_enable;
3039                break;
3040
3041        case BFPT_DWORD15_QER_SR2_BIT1:
3042                params->quad_enable = spansion_read_cr_quad_enable;
3043                break;
3044
3045        default:
3046                return -EINVAL;
3047        }
3048
3049        return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt, params);
3050}
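
/*
 * Worked example for the Flash Memory Density decoding in
 * spi_nor_parse_bfpt() above (editorial addition):
 * - BFPT DWORD2 = 0x00ffffff has bit 31 clear, so the density is
 *   0x00ffffff + 1 = 2^24 bits and params->size = 2 MiB after the ">> 3".
 * - BFPT DWORD2 = 0x80000021 has bit 31 set, so the density is
 *   2^0x21 = 2^33 bits and params->size = 1 GiB.
 */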
3051
3052#define SMPT_CMD_ADDRESS_LEN_MASK               GENMASK(23, 22)
3053#define SMPT_CMD_ADDRESS_LEN_0                  (0x0UL << 22)
3054#define SMPT_CMD_ADDRESS_LEN_3                  (0x1UL << 22)
3055#define SMPT_CMD_ADDRESS_LEN_4                  (0x2UL << 22)
3056#define SMPT_CMD_ADDRESS_LEN_USE_CURRENT        (0x3UL << 22)
3057
3058#define SMPT_CMD_READ_DUMMY_MASK                GENMASK(19, 16)
3059#define SMPT_CMD_READ_DUMMY_SHIFT               16
3060#define SMPT_CMD_READ_DUMMY(_cmd) \
3061        (((_cmd) & SMPT_CMD_READ_DUMMY_MASK) >> SMPT_CMD_READ_DUMMY_SHIFT)
3062#define SMPT_CMD_READ_DUMMY_IS_VARIABLE         0xfUL
3063
3064#define SMPT_CMD_READ_DATA_MASK                 GENMASK(31, 24)
3065#define SMPT_CMD_READ_DATA_SHIFT                24
3066#define SMPT_CMD_READ_DATA(_cmd) \
3067        (((_cmd) & SMPT_CMD_READ_DATA_MASK) >> SMPT_CMD_READ_DATA_SHIFT)
3068
3069#define SMPT_CMD_OPCODE_MASK                    GENMASK(15, 8)
3070#define SMPT_CMD_OPCODE_SHIFT                   8
3071#define SMPT_CMD_OPCODE(_cmd) \
3072        (((_cmd) & SMPT_CMD_OPCODE_MASK) >> SMPT_CMD_OPCODE_SHIFT)
3073
3074#define SMPT_MAP_REGION_COUNT_MASK              GENMASK(23, 16)
3075#define SMPT_MAP_REGION_COUNT_SHIFT             16
3076#define SMPT_MAP_REGION_COUNT(_header) \
3077        ((((_header) & SMPT_MAP_REGION_COUNT_MASK) >> \
3078          SMPT_MAP_REGION_COUNT_SHIFT) + 1)
3079
3080#define SMPT_MAP_ID_MASK                        GENMASK(15, 8)
3081#define SMPT_MAP_ID_SHIFT                       8
3082#define SMPT_MAP_ID(_header) \
3083        (((_header) & SMPT_MAP_ID_MASK) >> SMPT_MAP_ID_SHIFT)
3084
3085#define SMPT_MAP_REGION_SIZE_MASK               GENMASK(31, 8)
3086#define SMPT_MAP_REGION_SIZE_SHIFT              8
3087#define SMPT_MAP_REGION_SIZE(_region) \
3088        (((((_region) & SMPT_MAP_REGION_SIZE_MASK) >> \
3089           SMPT_MAP_REGION_SIZE_SHIFT) + 1) * 256)
3090
3091#define SMPT_MAP_REGION_ERASE_TYPE_MASK         GENMASK(3, 0)
3092#define SMPT_MAP_REGION_ERASE_TYPE(_region) \
3093        ((_region) & SMPT_MAP_REGION_ERASE_TYPE_MASK)
3094
3095#define SMPT_DESC_TYPE_MAP                      BIT(1)
3096#define SMPT_DESC_END                           BIT(0)
3097
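
/*
 * Illustrative decoding example (editorial addition, hypothetical value):
 * a map descriptor header DWORD of 0x00030102 has SMPT_DESC_TYPE_MAP set
 * and SMPT_DESC_END clear, SMPT_MAP_ID() extracts configuration ID 0x01
 * from bits 15:8, and SMPT_MAP_REGION_COUNT() yields 0x03 + 1 = 4 regions
 * from bits 23:16.
 */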
3098/**
3099 * spi_nor_smpt_addr_width() - return the address width used in the
3100 *                             configuration detection command.
3101 * @nor:        pointer to a 'struct spi_nor'
3102 * @settings:   configuration detection command descriptor, dword1
3103 */
3104static u8 spi_nor_smpt_addr_width(const struct spi_nor *nor, const u32 settings)
3105{
3106        switch (settings & SMPT_CMD_ADDRESS_LEN_MASK) {
3107        case SMPT_CMD_ADDRESS_LEN_0:
3108                return 0;
3109        case SMPT_CMD_ADDRESS_LEN_3:
3110                return 3;
3111        case SMPT_CMD_ADDRESS_LEN_4:
3112                return 4;
3113        case SMPT_CMD_ADDRESS_LEN_USE_CURRENT:
3114                /* fall through */
3115        default:
3116                return nor->addr_width;
3117        }
3118}
3119
3120/**
3121 * spi_nor_smpt_read_dummy() - return the configuration detection command read
3122 *                             latency, in clock cycles.
3123 * @nor:        pointer to a 'struct spi_nor'
3124 * @settings:   configuration detection command descriptor, dword1
3125 *
3126 * Return: the number of dummy cycles for an SMPT read
3127 */
3128static u8 spi_nor_smpt_read_dummy(const struct spi_nor *nor, const u32 settings)
3129{
3130        u8 read_dummy = SMPT_CMD_READ_DUMMY(settings);
3131
3132        if (read_dummy == SMPT_CMD_READ_DUMMY_IS_VARIABLE)
3133                return nor->read_dummy;
3134        return read_dummy;
3135}
3136
3137/**
3138 * spi_nor_get_map_in_use() - get the configuration map in use
3139 * @nor:        pointer to a 'struct spi_nor'
3140 * @smpt:       pointer to the sector map parameter table
3141 * @smpt_len:   sector map parameter table length
3142 *
3143 * Return: pointer to the map in use, ERR_PTR(-errno) otherwise.
3144 */
3145static const u32 *spi_nor_get_map_in_use(struct spi_nor *nor, const u32 *smpt,
3146                                         u8 smpt_len)
3147{
3148        const u32 *ret;
3149        u8 *buf;
3150        u32 addr;
3151        int err;
3152        u8 i;
3153        u8 addr_width, read_opcode, read_dummy;
3154        u8 read_data_mask, map_id;
3155
3156        /* Use a kmalloc'ed bounce buffer to guarantee it is DMA-able. */
3157        buf = kmalloc(sizeof(*buf), GFP_KERNEL);
3158        if (!buf)
3159                return ERR_PTR(-ENOMEM);
3160
3161        addr_width = nor->addr_width;
3162        read_dummy = nor->read_dummy;
3163        read_opcode = nor->read_opcode;
3164
3165        map_id = 0;
3166        /* Determine if there are any optional Detection Command Descriptors */
3167        for (i = 0; i < smpt_len; i += 2) {
3168                if (smpt[i] & SMPT_DESC_TYPE_MAP)
3169                        break;
3170
3171                read_data_mask = SMPT_CMD_READ_DATA(smpt[i]);
3172                nor->addr_width = spi_nor_smpt_addr_width(nor, smpt[i]);
3173                nor->read_dummy = spi_nor_smpt_read_dummy(nor, smpt[i]);
3174                nor->read_opcode = SMPT_CMD_OPCODE(smpt[i]);
3175                addr = smpt[i + 1];
3176
3177                err = spi_nor_read_raw(nor, addr, 1, buf);
3178                if (err) {
3179                        ret = ERR_PTR(err);
3180                        goto out;
3181                }
3182
3183                /*
3184                 * Build an index value that is used to select the Sector Map
3185                 * Configuration that is currently in use.
3186                 */
3187                map_id = map_id << 1 | !!(*buf & read_data_mask);
3188        }
3189
3190        /*
3191         * If command descriptors are provided, they always precede map
3192         * descriptors in the table. There is no need to start the iteration
3193         * over smpt array all over again.
3194         *
3195         * Find the matching configuration map.
3196         */
3197        ret = ERR_PTR(-EINVAL);
3198        while (i < smpt_len) {
3199                if (SMPT_MAP_ID(smpt[i]) == map_id) {
3200                        ret = smpt + i;
3201                        break;
3202                }
3203
3204                /*
3205                 * If there are no more configuration map descriptors and no
3206                 * configuration ID matched the configuration identifier, the
3207                 * sector address map is unknown.
3208                 */
3209                if (smpt[i] & SMPT_DESC_END)
3210                        break;
3211
3212                /* increment the table index to the next map */
3213                i += SMPT_MAP_REGION_COUNT(smpt[i]) + 1;
3214        }
3215
3216        /* fall through */
3217out:
3218        kfree(buf);
3219        nor->addr_width = addr_width;
3220        nor->read_dummy = read_dummy;
3221        nor->read_opcode = read_opcode;
3222        return ret;
3223}
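
/*
 * Worked example (editorial addition): with two detection command
 * descriptors, the bit sampled by the first command ends up as the most
 * significant bit of map_id. If the first read returns 1 and the second
 * returns 0, map_id = (1 << 1) | 0 = 2, and the map descriptor whose
 * SMPT_MAP_ID() equals 2 is the one returned by spi_nor_get_map_in_use().
 */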
3224
3225/**
3226 * spi_nor_region_check_overlay() - set overlay bit when the region is overlaid
3227 * @region:     pointer to a structure that describes a SPI NOR erase region
3228 * @erase:      pointer to a structure that describes a SPI NOR erase type
3229 * @erase_type: erase type bitmask
3230 */
3231static void
3232spi_nor_region_check_overlay(struct spi_nor_erase_region *region,
3233                             const struct spi_nor_erase_type *erase,
3234                             const u8 erase_type)
3235{
3236        int i;
3237
3238        for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
3239                if (!(erase_type & BIT(i)))
3240                        continue;
3241                if (region->size & erase[i].size_mask) {
3242                        spi_nor_region_mark_overlay(region);
3243                        return;
3244                }
3245        }
3246}
3247
3248/**
3249 * spi_nor_init_non_uniform_erase_map() - initialize the non-uniform erase map
3250 * @nor:        pointer to a 'struct spi_nor'
3251 * @smpt:       pointer to the sector map parameter table
3252 *
3253 * Return: 0 on success, -errno otherwise.
3254 */
3255static int spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
3256                                              const u32 *smpt)
3257{
3258        struct spi_nor_erase_map *map = &nor->erase_map;
3259        struct spi_nor_erase_type *erase = map->erase_type;
3260        struct spi_nor_erase_region *region;
3261        u64 offset;
3262        u32 region_count;
3263        int i, j;
3264        u8 uniform_erase_type, save_uniform_erase_type;
3265        u8 erase_type, regions_erase_type;
3266
3267        region_count = SMPT_MAP_REGION_COUNT(*smpt);
3268        /*
3269         * The regions will be freed when the driver detaches from the
3270         * device.
3271         */
3272        region = devm_kcalloc(nor->dev, region_count, sizeof(*region),
3273                              GFP_KERNEL);
3274        if (!region)
3275                return -ENOMEM;
3276        map->regions = region;
3277
3278        uniform_erase_type = 0xff;
3279        regions_erase_type = 0;
3280        offset = 0;
3281        /* Populate regions. */
3282        for (i = 0; i < region_count; i++) {
3283                j = i + 1; /* index for the region dword */
3284                region[i].size = SMPT_MAP_REGION_SIZE(smpt[j]);
3285                erase_type = SMPT_MAP_REGION_ERASE_TYPE(smpt[j]);
3286                region[i].offset = offset | erase_type;
3287
3288                spi_nor_region_check_overlay(&region[i], erase, erase_type);
3289
3290                /*
3291                 * Save the erase types that are supported in all regions and
3292                 * can erase the entire flash memory.
3293                 */
3294                uniform_erase_type &= erase_type;
3295
3296                /*
3297                 * regions_erase_type mask will indicate all the erase types
3298                 * supported in this configuration map.
3299                 */
3300                regions_erase_type |= erase_type;
3301
3302                offset = (region[i].offset & ~SNOR_ERASE_FLAGS_MASK) +
3303                         region[i].size;
3304        }
3305
3306        save_uniform_erase_type = map->uniform_erase_type;
3307        map->uniform_erase_type = spi_nor_sort_erase_mask(map,
3308                                                          uniform_erase_type);
3309
3310        if (!regions_erase_type) {
3311                /*
3312                 * Roll back to the previous uniform_erase_type mask, SMPT is
3313                 * broken.
3314                 */
3315                map->uniform_erase_type = save_uniform_erase_type;
3316                return -EINVAL;
3317        }
3318
3319        /*
3320         * BFPT advertises all the erase types supported by all the possible
3321         * map configurations. Mask out the erase types that are not supported
3322         * by the current map configuration.
3323         */
3324        for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
3325                if (!(regions_erase_type & BIT(erase[i].idx)))
3326                        spi_nor_set_erase_type(&erase[i], 0, 0xFF);
3327
3328        spi_nor_region_mark_end(&region[i - 1]);
3329
3330        return 0;
3331}
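
/*
 * Illustrative layout (editorial addition, hypothetical values): a map
 * with two regions could be populated as
 *
 *	region[0].offset = 0x000000 | <erase-type bits>   (64 KiB region)
 *	region[1].offset = 0x010000 | <erase-type bits>   (rest of the array)
 *
 * i.e. each region's erase-type bitmask (still numbered as in the SMPT
 * here, re-sorted later by spi_nor_regions_sort_erase_types()) shares
 * ->offset with the region's byte offset, which lives outside
 * SNOR_ERASE_FLAGS_MASK.
 */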
3332
3333/**
3334 * spi_nor_parse_smpt() - parse Sector Map Parameter Table
3335 * @nor:                pointer to a 'struct spi_nor'
3336 * @smpt_header:        sector map parameter table header
3337 *
3338 * This table is optional, but when available, we parse it to identify the
3339 * location and size of sectors within the main data array of the flash memory
3340 * device and to identify which Erase Types are supported by each sector.
3341 *
3342 * Return: 0 on success, -errno otherwise.
3343 */
3344static int spi_nor_parse_smpt(struct spi_nor *nor,
3345                              const struct sfdp_parameter_header *smpt_header)
3346{
3347        const u32 *sector_map;
3348        u32 *smpt;
3349        size_t len;
3350        u32 addr;
3351        int i, ret;
3352
3353        /* Read the Sector Map Parameter Table. */
3354        len = smpt_header->length * sizeof(*smpt);
3355        smpt = kmalloc(len, GFP_KERNEL);
3356        if (!smpt)
3357                return -ENOMEM;
3358
3359        addr = SFDP_PARAM_HEADER_PTP(smpt_header);
3360        ret = spi_nor_read_sfdp(nor, addr, len, smpt);
3361        if (ret)
3362                goto out;
3363
3364        /* Fix endianness of the SMPT DWORDs. */
3365        for (i = 0; i < smpt_header->length; i++)
3366                smpt[i] = le32_to_cpu(smpt[i]);
3367
3368        sector_map = spi_nor_get_map_in_use(nor, smpt, smpt_header->length);
3369        if (IS_ERR(sector_map)) {
3370                ret = PTR_ERR(sector_map);
3371                goto out;
3372        }
3373
3374        ret = spi_nor_init_non_uniform_erase_map(nor, sector_map);
3375        if (ret)
3376                goto out;
3377
3378        spi_nor_regions_sort_erase_types(&nor->erase_map);
3379        /* fall through */
3380out:
3381        kfree(smpt);
3382        return ret;
3383}
3384
3385#define SFDP_4BAIT_DWORD_MAX    2
3386
3387struct sfdp_4bait {
3388        /* The hardware capability. */
3389        u32             hwcaps;
3390
3391        /*
3392         * The <supported_bit> bit in DWORD1 of the 4BAIT tells us whether
3393         * the associated 4-byte address op code is supported.
3394         */
3395        u32             supported_bit;
3396};
3397
3398/**
3399 * spi_nor_parse_4bait() - parse the 4-Byte Address Instruction Table
3400 * @nor:                pointer to a 'struct spi_nor'.
3401 * @param_header:       pointer to the 'struct sfdp_parameter_header' describing
3402 *                      the 4-Byte Address Instruction Table length and version.
3404 * @params:             pointer to the 'struct spi_nor_flash_parameter' to be filled.
3404 *
3405 * Return: 0 on success, -errno otherwise.
3406 */
3407static int spi_nor_parse_4bait(struct spi_nor *nor,
3408                               const struct sfdp_parameter_header *param_header,
3409                               struct spi_nor_flash_parameter *params)
3410{
3411        static const struct sfdp_4bait reads[] = {
3412                { SNOR_HWCAPS_READ,             BIT(0) },
3413                { SNOR_HWCAPS_READ_FAST,        BIT(1) },
3414                { SNOR_HWCAPS_READ_1_1_2,       BIT(2) },
3415                { SNOR_HWCAPS_READ_1_2_2,       BIT(3) },
3416                { SNOR_HWCAPS_READ_1_1_4,       BIT(4) },
3417                { SNOR_HWCAPS_READ_1_4_4,       BIT(5) },
3418                { SNOR_HWCAPS_READ_1_1_1_DTR,   BIT(13) },
3419                { SNOR_HWCAPS_READ_1_2_2_DTR,   BIT(14) },
3420                { SNOR_HWCAPS_READ_1_4_4_DTR,   BIT(15) },
3421        };
3422        static const struct sfdp_4bait programs[] = {
3423                { SNOR_HWCAPS_PP,               BIT(6) },
3424                { SNOR_HWCAPS_PP_1_1_4,         BIT(7) },
3425                { SNOR_HWCAPS_PP_1_4_4,         BIT(8) },
3426        };
3427        static const struct sfdp_4bait erases[SNOR_ERASE_TYPE_MAX] = {
3428                { 0u /* not used */,            BIT(9) },
3429                { 0u /* not used */,            BIT(10) },
3430                { 0u /* not used */,            BIT(11) },
3431                { 0u /* not used */,            BIT(12) },
3432        };
3433        struct spi_nor_pp_command *params_pp = params->page_programs;
3434        struct spi_nor_erase_map *map = &nor->erase_map;
3435        struct spi_nor_erase_type *erase_type = map->erase_type;
3436        u32 *dwords;
3437        size_t len;
3438        u32 addr, discard_hwcaps, read_hwcaps, pp_hwcaps, erase_mask;
3439        int i, ret;
3440
3441        if (param_header->major != SFDP_JESD216_MAJOR ||
3442            param_header->length < SFDP_4BAIT_DWORD_MAX)
3443                return -EINVAL;
3444
3445        /* Read the 4-byte Address Instruction Table. */
3446        len = sizeof(*dwords) * SFDP_4BAIT_DWORD_MAX;
3447
3448        /* Use a kmalloc'ed bounce buffer to guarantee it is DMA-able. */
3449        dwords = kmalloc(len, GFP_KERNEL);
3450        if (!dwords)
3451                return -ENOMEM;
3452
3453        addr = SFDP_PARAM_HEADER_PTP(param_header);
3454        ret = spi_nor_read_sfdp(nor, addr, len, dwords);
3455        if (ret)
3456                goto out;
3457
3458        /* Fix endianness of the 4BAIT DWORDs. */
3459        for (i = 0; i < SFDP_4BAIT_DWORD_MAX; i++)
3460                dwords[i] = le32_to_cpu(dwords[i]);
3461
3462        /*
3463         * Compute the subset of (Fast) Read commands for which the 4-byte
3464         * version is supported.
3465         */
3466        discard_hwcaps = 0;
3467        read_hwcaps = 0;
3468        for (i = 0; i < ARRAY_SIZE(reads); i++) {
3469                const struct sfdp_4bait *read = &reads[i];
3470
3471                discard_hwcaps |= read->hwcaps;
3472                if ((params->hwcaps.mask & read->hwcaps) &&
3473                    (dwords[0] & read->supported_bit))
3474                        read_hwcaps |= read->hwcaps;
3475        }
3476
3477        /*
3478         * Compute the subset of Page Program commands for which the 4-byte
3479         * version is supported.
3480         */
3481        pp_hwcaps = 0;
3482        for (i = 0; i < ARRAY_SIZE(programs); i++) {
3483                const struct sfdp_4bait *program = &programs[i];
3484
3485                /*
3486                 * The 4 Byte Address Instruction (Optional) Table is the only
3487                 * SFDP table that indicates support for Page Program Commands.
3488                 * Bypass the params->hwcaps.mask and consider 4BAIT the biggest
3489                 * authority for specifying Page Program support.
3490                 */
3491                discard_hwcaps |= program->hwcaps;
3492                if (dwords[0] & program->supported_bit)
3493                        pp_hwcaps |= program->hwcaps;
3494        }
3495
3496        /*
3497         * Compute the subset of Sector Erase commands for which the 4-byte
3498         * version is supported.
3499         */
3500        erase_mask = 0;
3501        for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
3502                const struct sfdp_4bait *erase = &erases[i];
3503
3504                if (dwords[0] & erase->supported_bit)
3505                        erase_mask |= BIT(i);
3506        }
3507
3508        /* Replicate the sort done for the map's erase types in BFPT. */
3509        erase_mask = spi_nor_sort_erase_mask(map, erase_mask);
3510
3511        /*
3512         * We need at least one 4-byte op code per read, program and erase
3513         * operation; the .read(), .write() and .erase() hooks share the
3514         * nor->addr_width value.
3515         */
3516        if (!read_hwcaps || !pp_hwcaps || !erase_mask)
3517                goto out;
3518
3519        /*
3520         * Discard all operations from the 4-byte instruction set which are
3521         * not supported by this memory.
3522         */
3523        params->hwcaps.mask &= ~discard_hwcaps;
3524        params->hwcaps.mask |= (read_hwcaps | pp_hwcaps);
3525
3526        /* Use the 4-byte address instruction set. */
3527        for (i = 0; i < SNOR_CMD_READ_MAX; i++) {
3528                struct spi_nor_read_command *read_cmd = &params->reads[i];
3529
3530                read_cmd->opcode = spi_nor_convert_3to4_read(read_cmd->opcode);
3531        }
3532
3533        /* 4BAIT is the only SFDP table that indicates page program support. */
3534        if (pp_hwcaps & SNOR_HWCAPS_PP)
3535                spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP],
3536                                        SPINOR_OP_PP_4B, SNOR_PROTO_1_1_1);
3537        if (pp_hwcaps & SNOR_HWCAPS_PP_1_1_4)
3538                spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP_1_1_4],
3539                                        SPINOR_OP_PP_1_1_4_4B,
3540                                        SNOR_PROTO_1_1_4);
3541        if (pp_hwcaps & SNOR_HWCAPS_PP_1_4_4)
3542                spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP_1_4_4],
3543                                        SPINOR_OP_PP_1_4_4_4B,
3544                                        SNOR_PROTO_1_4_4);
3545
3546        for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
3547                if (erase_mask & BIT(i))
3548                        erase_type[i].opcode = (dwords[1] >>
3549                                                erase_type[i].idx * 8) & 0xFF;
3550                else
3551                        spi_nor_set_erase_type(&erase_type[i], 0u, 0xFF);
3552        }
3553
3554        /*
3555         * We set SNOR_F_HAS_4BAIT in order to skip spi_nor_set_4byte_opcodes()
3556         * later because we already did the conversion to 4-byte opcodes.
3557         * Also, spi_nor_set_4byte_opcodes() implements a legacy quirk for
3558         * the erase size of Spansion memories, which is no longer needed
3559         * with new SFDP-compliant memories.
3560         */
3561        nor->addr_width = 4;
3562        nor->flags |= SNOR_F_4B_OPCODES | SNOR_F_HAS_4BAIT;
3563
3564        /* fall through */
3565out:
3566        kfree(dwords);
3567        return ret;
3568}
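
/*
 * Illustrative example (editorial addition, hypothetical DWORD value):
 * with dwords[1] == 0xffdc5c21, the loop above would pick 0x21, 0x5c and
 * 0xdc as the 4-byte opcodes of BFPT Erase Types 1, 2 and 3 (taken from
 * bits 7:0, 15:8 and 23:16, i.e. erase_type[i].idx * 8), while a fourth,
 * unsupported Erase Type would be disabled instead.
 */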
3569
3570/**
3571 * spi_nor_parse_sfdp() - parse the Serial Flash Discoverable Parameters.
3572 * @nor:                pointer to a 'struct spi_nor'
3573 * @params:             pointer to the 'struct spi_nor_flash_parameter' to be
3574 *                      filled
3575 *
3576 * The Serial Flash Discoverable Parameters are described by the JEDEC JESD216
3577 * specification. This is a standard which tends to be supported by almost all
3578 * (Q)SPI memory manufacturers. These standard tables allow us to learn at
3579 * runtime the main parameters needed to perform basic SPI flash operations such
3580 * as Fast Read, Page Program or Sector Erase commands.
3581 *
3582 * Return: 0 on success, -errno otherwise.
3583 */
3584static int spi_nor_parse_sfdp(struct spi_nor *nor,
3585                              struct spi_nor_flash_parameter *params)
3586{
3587        const struct sfdp_parameter_header *param_header, *bfpt_header;
3588        struct sfdp_parameter_header *param_headers = NULL;
3589        struct sfdp_header header;
3590        struct device *dev = nor->dev;
3591        size_t psize;
3592        int i, err;
3593
3594        /* Get the SFDP header. */
3595        err = spi_nor_read_sfdp_dma_unsafe(nor, 0, sizeof(header), &header);
3596        if (err < 0)
3597                return err;
3598
3599        /* Check the SFDP header version. */
3600        if (le32_to_cpu(header.signature) != SFDP_SIGNATURE ||
3601            header.major != SFDP_JESD216_MAJOR)
3602                return -EINVAL;
3603
3604        /*
3605         * Verify that the first and only mandatory parameter header is a
3606         * Basic Flash Parameter Table header as specified in JESD216.
3607         */
3608        bfpt_header = &header.bfpt_header;
3609        if (SFDP_PARAM_HEADER_ID(bfpt_header) != SFDP_BFPT_ID ||
3610            bfpt_header->major != SFDP_JESD216_MAJOR)
3611                return -EINVAL;
3612
3613        /*
3614         * Allocate memory then read all parameter headers with a single
3615         * Read SFDP command. These parameter headers will actually be parsed
3616         * twice: a first time to get the latest revision of the basic flash
3617         * parameter table, then a second time to handle the supported optional
3618         * tables.
3619         * Hence we read the parameter headers once for all to reduce the
3620         * processing time. Also we use kmalloc() instead of devm_kmalloc()
3621         * because we don't need to keep these parameter headers: the allocated
3622         * memory is always released with kfree() before exiting this function.
3623         */
3624        if (header.nph) {
3625                psize = header.nph * sizeof(*param_headers);
3626
3627                param_headers = kmalloc(psize, GFP_KERNEL);
3628                if (!param_headers)
3629                        return -ENOMEM;
3630
3631                err = spi_nor_read_sfdp(nor, sizeof(header),
3632                                        psize, param_headers);
3633                if (err < 0) {
3634                        dev_err(dev, "failed to read SFDP parameter headers\n");
3635                        goto exit;
3636                }
3637        }
3638
3639        /*
3640         * Check other parameter headers to get the latest revision of
3641         * the basic flash parameter table.
3642         */
3643        for (i = 0; i < header.nph; i++) {
3644                param_header = &param_headers[i];
3645
3646                if (SFDP_PARAM_HEADER_ID(param_header) == SFDP_BFPT_ID &&
3647                    param_header->major == SFDP_JESD216_MAJOR &&
3648                    (param_header->minor > bfpt_header->minor ||
3649                     (param_header->minor == bfpt_header->minor &&
3650                      param_header->length > bfpt_header->length)))
3651                        bfpt_header = param_header;
3652        }
3653
3654        err = spi_nor_parse_bfpt(nor, bfpt_header, params);
3655        if (err)
3656                goto exit;
3657
3658        /* Parse optional parameter tables. */
3659        for (i = 0; i < header.nph; i++) {
3660                param_header = &param_headers[i];
3661
3662                switch (SFDP_PARAM_HEADER_ID(param_header)) {
3663                case SFDP_SECTOR_MAP_ID:
3664                        err = spi_nor_parse_smpt(nor, param_header);
3665                        break;
3666
3667                case SFDP_4BAIT_ID:
3668                        err = spi_nor_parse_4bait(nor, param_header, params);
3669                        break;
3670
3671                default:
3672                        break;
3673                }
3674
3675                if (err) {
3676                        dev_warn(dev, "Failed to parse optional parameter table: %04x\n",
3677                                 SFDP_PARAM_HEADER_ID(param_header));
3678                        /*
3679                         * Let's not drop all information we extracted so far
3680                         * if optional table parsers fail. In case of failing,
3681                         * each optional parser is responsible to roll back to
3682                         * the previously known spi_nor data.
3683                         */
3684                        err = 0;
3685                }
3686        }
3687
3688exit:
3689        kfree(param_headers);
3690        return err;
3691}
3692
3693static int spi_nor_init_params(struct spi_nor *nor,
3694                               struct spi_nor_flash_parameter *params)
3695{
3696        struct spi_nor_erase_map *map = &nor->erase_map;
3697        const struct flash_info *info = nor->info;
3698        u8 i, erase_mask;
3699
3700        /* Set legacy flash parameters as default. */
3701        memset(params, 0, sizeof(*params));
3702
3703        /* Set SPI NOR sizes. */
3704        params->size = (u64)info->sector_size * info->n_sectors;
3705        params->page_size = info->page_size;
3706
3707        /* (Fast) Read settings. */
3708        params->hwcaps.mask |= SNOR_HWCAPS_READ;
3709        spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ],
3710                                  0, 0, SPINOR_OP_READ,
3711                                  SNOR_PROTO_1_1_1);
3712
3713        if (!(info->flags & SPI_NOR_NO_FR)) {
3714                params->hwcaps.mask |= SNOR_HWCAPS_READ_FAST;
3715                spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_FAST],
3716                                          0, 8, SPINOR_OP_READ_FAST,
3717                                          SNOR_PROTO_1_1_1);
3718        }
3719
3720        if (info->flags & SPI_NOR_DUAL_READ) {
3721                params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2;
3722                spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_2],
3723                                          0, 8, SPINOR_OP_READ_1_1_2,
3724                                          SNOR_PROTO_1_1_2);
3725        }
3726
3727        if (info->flags & SPI_NOR_QUAD_READ) {
3728                params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;
3729                spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_4],
3730                                          0, 8, SPINOR_OP_READ_1_1_4,
3731                                          SNOR_PROTO_1_1_4);
3732        }
3733
3734        if (info->flags & SPI_NOR_OCTAL_READ) {
3735                params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_8;
3736                spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_8],
3737                                          0, 8, SPINOR_OP_READ_1_1_8,
3738                                          SNOR_PROTO_1_1_8);
3739        }
3740
3741        /* Page Program settings. */
3742        params->hwcaps.mask |= SNOR_HWCAPS_PP;
3743        spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP],
3744                                SPINOR_OP_PP, SNOR_PROTO_1_1_1);
3745
3746        /*
3747         * Sector Erase settings. Sort Erase Types in ascending order, with the
3748         * smallest erase size starting at BIT(0).
3749         */
3750        erase_mask = 0;
3751        i = 0;
3752        if (info->flags & SECT_4K_PMC) {
3753                erase_mask |= BIT(i);
3754                spi_nor_set_erase_type(&map->erase_type[i], 4096u,
3755                                       SPINOR_OP_BE_4K_PMC);
3756                i++;
3757        } else if (info->flags & SECT_4K) {
3758                erase_mask |= BIT(i);
3759                spi_nor_set_erase_type(&map->erase_type[i], 4096u,
3760                                       SPINOR_OP_BE_4K);
3761                i++;
3762        }
3763        erase_mask |= BIT(i);
3764        spi_nor_set_erase_type(&map->erase_type[i], info->sector_size,
3765                               SPINOR_OP_SE);
3766        spi_nor_init_uniform_erase_map(map, erase_mask, params->size);
3767
3768        /* Select the procedure to set the Quad Enable bit. */
3769        if (params->hwcaps.mask & (SNOR_HWCAPS_READ_QUAD |
3770                                   SNOR_HWCAPS_PP_QUAD)) {
3771                switch (JEDEC_MFR(info)) {
3772                case SNOR_MFR_MACRONIX:
3773                        params->quad_enable = macronix_quad_enable;
3774                        break;
3775
3776                case SNOR_MFR_ST:
3777                case SNOR_MFR_MICRON:
3778                        break;
3779
3780                default:
3781                        /* Kept only for backward compatibility purpose. */
3782                        params->quad_enable = spansion_quad_enable;
3783                        break;
3784                }
3785
3786                /*
3787                 * Some manufacturers, like GigaDevice, may use a different
3788                 * bit to set QE on different memories, so the MFR alone
3789                 * can't indicate the quad_enable method for this case; we
3790                 * need to set it in the flash info list.
3791                 */
3792                if (info->quad_enable)
3793                        params->quad_enable = info->quad_enable;
3794        }
3795
3796        if ((info->flags & (SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)) &&
3797            !(info->flags & SPI_NOR_SKIP_SFDP)) {
3798                struct spi_nor_flash_parameter sfdp_params;
3799                struct spi_nor_erase_map prev_map;
3800
3801                memcpy(&sfdp_params, params, sizeof(sfdp_params));
3802                memcpy(&prev_map, &nor->erase_map, sizeof(prev_map));
3803
3804                if (spi_nor_parse_sfdp(nor, &sfdp_params)) {
3805                        nor->addr_width = 0;
3806                        nor->flags &= ~SNOR_F_4B_OPCODES;
3807                        /* restore previous erase map */
3808                        memcpy(&nor->erase_map, &prev_map,
3809                               sizeof(nor->erase_map));
3810                } else {
3811                        memcpy(params, &sfdp_params, sizeof(*params));
3812                }
3813        }
3814
3815        return 0;
3816}
3817
3818static int spi_nor_select_read(struct spi_nor *nor,
3819                               const struct spi_nor_flash_parameter *params,
3820                               u32 shared_hwcaps)
3821{
3822        int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_READ_MASK) - 1;
3823        const struct spi_nor_read_command *read;
3824
3825        if (best_match < 0)
3826                return -EINVAL;
3827
3828        cmd = spi_nor_hwcaps_read2cmd(BIT(best_match));
3829        if (cmd < 0)
3830                return -EINVAL;
3831
3832        read = &params->reads[cmd];
3833        nor->read_opcode = read->opcode;
3834        nor->read_proto = read->proto;
3835
3836        /*
3837         * In the spi-nor framework, we don't need to distinguish between
3838         * mode clock cycles and wait state clock cycles.
3839         * Indeed, the value of the mode clock cycles is used by a QSPI
3840         * flash memory to know whether it should enter or leave its 0-4-4
3841         * (Continuous Read / XIP) mode.
3842         * eXecution In Place is out of the scope of the mtd sub-system.
3843         * Hence we choose to merge both mode and wait state clock cycles
3844         * into the so called dummy clock cycles.
3845         */
3846        nor->read_dummy = read->num_mode_clocks + read->num_wait_states;
3847        return 0;
3848}
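
/*
 * Worked example (editorial addition): a Fast Read command advertised
 * with 2 mode clocks and 4 wait states ends up with nor->read_dummy = 6
 * dummy clock cycles, regardless of how the flash splits them internally.
 */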
3849
3850static int spi_nor_select_pp(struct spi_nor *nor,
3851                             const struct spi_nor_flash_parameter *params,
3852                             u32 shared_hwcaps)
3853{
3854        int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_PP_MASK) - 1;
3855        const struct spi_nor_pp_command *pp;
3856
3857        if (best_match < 0)
3858                return -EINVAL;
3859
3860        cmd = spi_nor_hwcaps_pp2cmd(BIT(best_match));
3861        if (cmd < 0)
3862                return -EINVAL;
3863
3864        pp = &params->page_programs[cmd];
3865        nor->program_opcode = pp->opcode;
3866        nor->write_proto = pp->proto;
3867        return 0;
3868}
3869
3870/**
3871 * spi_nor_select_uniform_erase() - select optimum uniform erase type
3872 * @map:                the erase map of the SPI NOR
3873 * @wanted_size:        the erase type size to search for. Contains the value of
3874 *                      info->sector_size or of the "small sector" size in case
3875 *                      CONFIG_MTD_SPI_NOR_USE_4K_SECTORS is defined.
3876 *
3877 * Once the optimum uniform sector erase command is found, disable all the
3878 * others.
3879 *
3880 * Return: pointer to erase type on success, NULL otherwise.
3881 */
3882static const struct spi_nor_erase_type *
3883spi_nor_select_uniform_erase(struct spi_nor_erase_map *map,
3884                             const u32 wanted_size)
3885{
3886        const struct spi_nor_erase_type *tested_erase, *erase = NULL;
3887        int i;
3888        u8 uniform_erase_type = map->uniform_erase_type;
3889
3890        for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
3891                if (!(uniform_erase_type & BIT(i)))
3892                        continue;
3893
3894                tested_erase = &map->erase_type[i];
3895
3896                /*
3897                 * If the current erase size is the one, stop here:
3898                 * we have found the right uniform Sector Erase command.
3899                 */
3900                if (tested_erase->size == wanted_size) {
3901                        erase = tested_erase;
3902                        break;
3903                }
3904
3905                /*
3906                 * Otherwise, the current erase size is still a valid candidate.
3907                 * Select the biggest valid candidate.
3908                 */
3909                if (!erase && tested_erase->size)
3910                        erase = tested_erase;
3911                        /* keep iterating to find the wanted_size */
3912        }
3913
3914        if (!erase)
3915                return NULL;
3916
3917        /* Disable all other Sector Erase commands. */
3918        map->uniform_erase_type &= ~SNOR_ERASE_TYPE_MASK;
3919        map->uniform_erase_type |= BIT(erase - map->erase_type);
3920        return erase;
3921}
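
/*
 * Illustrative example (editorial addition): for a map advertising both
 * 4 KiB and 64 KiB uniform erases, wanted_size = 4096 (the
 * CONFIG_MTD_SPI_NOR_USE_4K_SECTORS case) matches the 4 KiB type, which
 * wins over the 64 KiB candidate. map->uniform_erase_type is then reduced
 * to that single bit and nor->erase_opcode/mtd->erasesize are set from it
 * in spi_nor_select_erase() below.
 */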
3922
3923static int spi_nor_select_erase(struct spi_nor *nor, u32 wanted_size)
3924{
3925        struct spi_nor_erase_map *map = &nor->erase_map;
3926        const struct spi_nor_erase_type *erase = NULL;
3927        struct mtd_info *mtd = &nor->mtd;
3928        int i;
3929
3930        /*
3931         * The previous implementation handling Sector Erase commands assumed
3932         * that the SPI flash memory has a uniform layout and then used only one
3933         * of the supported erase sizes for all Sector Erase commands.
3934         * So to be backward compatible, the new implementation also tries to
3935         * manage the SPI flash memory as uniform with a single erase sector
3936         * size, when possible.
3937         */
3938#ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
3939        /* prefer "small sector" erase if possible */
3940        wanted_size = 4096u;
3941#endif
3942
3943        if (spi_nor_has_uniform_erase(nor)) {
3944                erase = spi_nor_select_uniform_erase(map, wanted_size);
3945                if (!erase)
3946                        return -EINVAL;
3947                nor->erase_opcode = erase->opcode;
3948                mtd->erasesize = erase->size;
3949                return 0;
3950        }
3951
3952        /*
3953         * For non-uniform SPI flash memory, set mtd->erasesize to the
3954         * maximum erase sector size. No need to set nor->erase_opcode.
3955         */
3956        for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
3957                if (map->erase_type[i].size) {
3958                        erase = &map->erase_type[i];
3959                        break;
3960                }
3961        }
3962
3963        if (!erase)
3964                return -EINVAL;
3965
3966        mtd->erasesize = erase->size;
3967        return 0;
3968}
3969
3970static int spi_nor_setup(struct spi_nor *nor,
3971                         const struct spi_nor_flash_parameter *params,
3972                         const struct spi_nor_hwcaps *hwcaps)
3973{
3974        u32 ignored_mask, shared_mask;
3975        bool enable_quad_io;
3976        int err;
3977
3978        /*
3979         * Keep only the hardware capabilities supported by both the SPI
3980         * controller and the SPI flash memory.
3981         */
3982        shared_mask = hwcaps->mask & params->hwcaps.mask;
3983
3984        /* SPI n-n-n protocols are not supported yet. */
3985        ignored_mask = (SNOR_HWCAPS_READ_2_2_2 |
3986                        SNOR_HWCAPS_READ_4_4_4 |
3987                        SNOR_HWCAPS_READ_8_8_8 |
3988                        SNOR_HWCAPS_PP_4_4_4 |
3989                        SNOR_HWCAPS_PP_8_8_8);
3990        if (shared_mask & ignored_mask) {
3991                dev_dbg(nor->dev,
3992                        "SPI n-n-n protocols are not supported yet.\n");
3993                shared_mask &= ~ignored_mask;
3994        }
3995
3996        /* Select the (Fast) Read command. */
3997        err = spi_nor_select_read(nor, params, shared_mask);
3998        if (err) {
3999                dev_err(nor->dev,
4000                        "can't select read settings supported by both the SPI controller and memory.\n");
4001                return err;
4002        }
4003
4004        /* Select the Page Program command. */
4005        err = spi_nor_select_pp(nor, params, shared_mask);
4006        if (err) {
4007                dev_err(nor->dev,
4008                        "can't select write settings supported by both the SPI controller and memory.\n");
4009                return err;
4010        }
4011
4012        /* Select the Sector Erase command. */
4013        err = spi_nor_select_erase(nor, nor->info->sector_size);
4014        if (err) {
4015                dev_err(nor->dev,
4016                        "can't select erase settings supported by both the SPI controller and memory.\n");
4017                return err;
4018        }
4019
4020        /* Enable Quad I/O if needed. */
4021        enable_quad_io = (spi_nor_get_protocol_width(nor->read_proto) == 4 ||
4022                          spi_nor_get_protocol_width(nor->write_proto) == 4);
4023        if (enable_quad_io && params->quad_enable)
4024                nor->quad_enable = params->quad_enable;
4025        else
4026                nor->quad_enable = NULL;
4027
4028        return 0;
4029}
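
/*
 * Minimal usage sketch (editorial addition, illustrative only): a SPI
 * controller driver advertises its hardware capabilities and lets
 * spi_nor_scan() intersect them with what the flash reports, which is
 * exactly what spi_nor_setup() above does with the resulting shared mask.
 * The probe function name and the chosen capability set below are made up
 * for the example; the block is kept under "#if 0" because it is not part
 * of this driver.
 */
#if 0
static int example_controller_probe(struct spi_nor *nor)
{
	/* Controller limited to single and 1-1-4 reads, single-line writes. */
	const struct spi_nor_hwcaps hwcaps = {
		.mask = SNOR_HWCAPS_READ |
			SNOR_HWCAPS_READ_FAST |
			SNOR_HWCAPS_READ_1_1_4 |
			SNOR_HWCAPS_PP,
	};

	/* NULL name: rely on JEDEC ID auto-detection. */
	return spi_nor_scan(nor, NULL, &hwcaps);
}
#endif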
4030
4031static int spi_nor_init(struct spi_nor *nor)
4032{
4033        int err;
4034
4035        if (nor->clear_sr_bp) {
4036                if (nor->quad_enable == spansion_quad_enable)
4037                        nor->clear_sr_bp = spi_nor_spansion_clear_sr_bp;
4038
4039                err = nor->clear_sr_bp(nor);
4040                if (err) {
4041                        dev_err(nor->dev,
4042                                "failed to clear block protection bits\n");
4043                        return err;
4044                }
4045        }
4046
4047        if (nor->quad_enable) {
4048                err = nor->quad_enable(nor);
4049                if (err) {
4050                        dev_err(nor->dev, "quad mode not supported\n");
4051                        return err;
4052                }
4053        }
4054
4055        if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES)) {
4056                /*
4057                 * If the RESET# pin isn't hooked up properly, or the system
4058                 * otherwise doesn't perform a reset command in the boot
4059                 * sequence, it's impossible to 100% protect against unexpected
4060                 * reboots (e.g., crashes). Warn the user (or hopefully, system
4061                 * designer) that this is bad.
4062                 */
4063                WARN_ONCE(nor->flags & SNOR_F_BROKEN_RESET,
4064                          "enabling reset hack; may not recover from unexpected reboots\n");
4065                set_4byte(nor, true);
4066        }
4067
4068        return 0;
4069}
4070
4071/* mtd resume handler */
4072static void spi_nor_resume(struct mtd_info *mtd)
4073{
4074        struct spi_nor *nor = mtd_to_spi_nor(mtd);
4075        struct device *dev = nor->dev;
4076        int ret;
4077
4078        /* re-initialize the nor chip */
4079        ret = spi_nor_init(nor);
4080        if (ret)
4081                dev_err(dev, "resume() failed\n");
4082}
4083
4084void spi_nor_restore(struct spi_nor *nor)
4085{
4086        /* restore the addressing mode */
4087        if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES) &&
4088            nor->flags & SNOR_F_BROKEN_RESET)
4089                set_4byte(nor, false);
4090}
4091EXPORT_SYMBOL_GPL(spi_nor_restore);
4092
4093static const struct flash_info *spi_nor_match_id(const char *name)
4094{
4095        const struct flash_info *id = spi_nor_ids;
4096
4097        while (id->name) {
4098                if (!strcmp(name, id->name))
4099                        return id;
4100                id++;
4101        }
4102        return NULL;
4103}
4104
4105int spi_nor_scan(struct spi_nor *nor, const char *name,
4106                 const struct spi_nor_hwcaps *hwcaps)
4107{
4108        struct spi_nor_flash_parameter params;
4109        const struct flash_info *info = NULL;
4110        struct device *dev = nor->dev;
4111        struct mtd_info *mtd = &nor->mtd;
4112        struct device_node *np = spi_nor_get_flash_node(nor);
4113        int ret;
4114        int i;
4115
4116        ret = spi_nor_check(nor);
4117        if (ret)
4118                return ret;
4119
4120        /* Reset SPI protocol for all commands. */
4121        nor->reg_proto = SNOR_PROTO_1_1_1;
4122        nor->read_proto = SNOR_PROTO_1_1_1;
4123        nor->write_proto = SNOR_PROTO_1_1_1;
4124
4125        if (name)
4126                info = spi_nor_match_id(name);
4127        /* Try to auto-detect if chip name wasn't specified or not found */
4128        if (!info)
4129                info = spi_nor_read_id(nor);
4130        if (IS_ERR_OR_NULL(info))
4131                return -ENOENT;
4132
4133        /*
4134         * If caller has specified name of flash model that can normally be
4135         * detected using JEDEC, let's verify it.
4136         */
4137        if (name && info->id_len) {
4138                const struct flash_info *jinfo;
4139
4140                jinfo = spi_nor_read_id(nor);
4141                if (IS_ERR(jinfo)) {
4142                        return PTR_ERR(jinfo);
4143                } else if (jinfo != info) {
4144                        /*
4145                         * JEDEC knows better, so overwrite platform ID. We
4146                         * can't trust partitions any longer, but we'll let
4147                         * mtd apply them anyway, since some partitions may be
4148                         * marked read-only, and we don't want to lose that
4149                         * information, even if it's not 100% accurate.
4150                         */
4151                        dev_warn(dev, "found %s, expected %s\n",
4152                                 jinfo->name, info->name);
4153                        info = jinfo;
4154                }
4155        }
4156
4157        nor->info = info;
4158
4159        mutex_init(&nor->lock);
4160
4161        /*
4162         * Make sure the XSR_RDY flag is set before calling
4163         * spi_nor_wait_till_ready(): Xilinx S3AN chips share their
4164         * manufacturer ID with Atmel SPI NOR chips.
4165         */
4166        if (info->flags & SPI_S3AN)
4167                nor->flags |= SNOR_F_READY_XSR_RDY;
4168
4169        /*
4170         * Atmel, SST, Intel/Numonyx, and other serial NOR chips tend to
4171         * power up with the software protection bits set.
4172         */
4173        if (JEDEC_MFR(nor->info) == SNOR_MFR_ATMEL ||
4174            JEDEC_MFR(nor->info) == SNOR_MFR_INTEL ||
4175            JEDEC_MFR(nor->info) == SNOR_MFR_SST ||
4176            nor->info->flags & SPI_NOR_HAS_LOCK)
4177                nor->clear_sr_bp = spi_nor_clear_sr_bp;
4178
4179        /* Set up the flash parameters; SFDP tables override the defaults when present. */
4180        ret = spi_nor_init_params(nor, &params);
4181        if (ret)
4182                return ret;
4183
4184        if (!mtd->name)
4185                mtd->name = dev_name(dev);
4186        mtd->priv = nor;
4187        mtd->type = MTD_NORFLASH;
4188        mtd->writesize = 1;
4189        mtd->flags = MTD_CAP_NORFLASH;
4190        mtd->size = params.size;
4191        mtd->_erase = spi_nor_erase;
4192        mtd->_read = spi_nor_read;
4193        mtd->_resume = spi_nor_resume;
4194
4195        /* NOR protection support for STMicro/Micron chips and similar */
4196        if (JEDEC_MFR(info) == SNOR_MFR_ST ||
4197            JEDEC_MFR(info) == SNOR_MFR_MICRON ||
4198            info->flags & SPI_NOR_HAS_LOCK) {
4199                nor->flash_lock = stm_lock;
4200                nor->flash_unlock = stm_unlock;
4201                nor->flash_is_locked = stm_is_locked;
4202        }
4203
4204        if (nor->flash_lock && nor->flash_unlock && nor->flash_is_locked) {
4205                mtd->_lock = spi_nor_lock;
4206                mtd->_unlock = spi_nor_unlock;
4207                mtd->_is_locked = spi_nor_is_locked;
4208        }
4209
4210        /* SST NOR chips use AAI word program */
4211        if (info->flags & SST_WRITE)
4212                mtd->_write = sst_write;
4213        else
4214                mtd->_write = spi_nor_write;
4215
4216        if (info->flags & USE_FSR)
4217                nor->flags |= SNOR_F_USE_FSR;
4218        if (info->flags & SPI_NOR_HAS_TB)
4219                nor->flags |= SNOR_F_HAS_SR_TB;
4220        if (info->flags & NO_CHIP_ERASE)
4221                nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
4222        if (info->flags & USE_CLSR)
4223                nor->flags |= SNOR_F_USE_CLSR;
4224
4225        if (info->flags & SPI_NOR_NO_ERASE)
4226                mtd->flags |= MTD_NO_ERASE;
4227
4228        mtd->dev.parent = dev;
4229        nor->page_size = params.page_size;
4230        mtd->writebufsize = nor->page_size;
4231
4232        if (np) {
4233                /* If we were instantiated by DT, use it */
4234                if (of_property_read_bool(np, "m25p,fast-read"))
4235                        params.hwcaps.mask |= SNOR_HWCAPS_READ_FAST;
4236                else
4237                        params.hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST;
4238        } else {
4239                /* If we weren't instantiated by DT, default to fast-read */
4240                params.hwcaps.mask |= SNOR_HWCAPS_READ_FAST;
4241        }
4242
4243        if (of_property_read_bool(np, "broken-flash-reset"))
4244                nor->flags |= SNOR_F_BROKEN_RESET;
4245
4246        /* Some devices cannot do fast-read, no matter what DT tells us */
4247        if (info->flags & SPI_NOR_NO_FR)
4248                params.hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST;
4249
4250        /*
4251         * Configure the SPI memory:
4252         * - select op codes for (Fast) Read, Page Program and Sector Erase.
4253         * - set the number of dummy cycles (mode cycles + wait states).
4254         * - set the SPI protocols for register and memory accesses.
4255         * - set the Quad Enable bit if needed (required by SPI x-y-4 protos).
4256         */
4257        ret = spi_nor_setup(nor, &params, hwcaps);
4258        if (ret)
4259                return ret;
4260
4261        if (nor->addr_width) {
4262                /* already configured from SFDP */
4263        } else if (info->addr_width) {
4264                nor->addr_width = info->addr_width;
4265        } else if (mtd->size > 0x1000000) {
4266                /* enable 4-byte addressing if the device exceeds 16MiB */
4267                nor->addr_width = 4;
4268        } else {
4269                nor->addr_width = 3;
4270        }
4271
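        /*
         * Spansion memories larger than 16 MiB support the dedicated 4-byte
         * address opcode set, so prefer stateless 4-byte opcodes over
         * entering 4-byte address mode.
         */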
4272        if (info->flags & SPI_NOR_4B_OPCODES ||
4273            (JEDEC_MFR(info) == SNOR_MFR_SPANSION && mtd->size > SZ_16M))
4274                nor->flags |= SNOR_F_4B_OPCODES;
4275
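        /*
         * If SFDP provided a 4-Byte Address Instruction Table, the 4-byte
         * opcodes have already been selected from it; don't override them
         * with the generic defaults.
         */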
4276        if (nor->addr_width == 4 && nor->flags & SNOR_F_4B_OPCODES &&
4277            !(nor->flags & SNOR_F_HAS_4BAIT))
4278                spi_nor_set_4byte_opcodes(nor);
4279
4280        if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) {
4281                dev_err(dev, "address width is too large: %u\n",
4282                        nor->addr_width);
4283                return -EINVAL;
4284        }
4285
4286        if (info->flags & SPI_S3AN) {
4287                ret = s3an_nor_scan(nor);
4288                if (ret)
4289                        return ret;
4290        }
4291
4292        /* Send all the required SPI flash commands to initialize the device */
4293        ret = spi_nor_init(nor);
4294        if (ret)
4295                return ret;
4296
4297        dev_info(dev, "%s (%lld Kbytes)\n", info->name,
4298                        (long long)mtd->size >> 10);
4299
4300        dev_dbg(dev,
4301                "mtd .name = %s, .size = 0x%llx (%lldMiB), "
4302                ".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
4303                mtd->name, (long long)mtd->size, (long long)(mtd->size >> 20),
4304                mtd->erasesize, mtd->erasesize / 1024, mtd->numeraseregions);
4305
4306        if (mtd->numeraseregions)
4307                for (i = 0; i < mtd->numeraseregions; i++)
4308                        dev_dbg(dev,
4309                                "mtd.eraseregions[%d] = { .offset = 0x%llx, "
4310                                ".erasesize = 0x%.8x (%uKiB), "
4311                                ".numblocks = %d }\n",
4312                                i, (long long)mtd->eraseregions[i].offset,
4313                                mtd->eraseregions[i].erasesize,
4314                                mtd->eraseregions[i].erasesize / 1024,
4315                                mtd->eraseregions[i].numblocks);
4316        return 0;
4317}
4318EXPORT_SYMBOL_GPL(spi_nor_scan);
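/*
 * Illustrative sketch (not part of the original driver): a minimal probe()
 * built around spi_nor_scan(), in the spirit of m25p80. The example_flash
 * structure and example_probe() name are hypothetical, and the spi_nor
 * read/write/read_reg/write_reg hooks are assumed to be filled in by the
 * controller driver before scanning.
 */
#if 0
static int example_probe(struct spi_device *spi)
{
        const struct spi_nor_hwcaps hwcaps = {
                .mask = SNOR_HWCAPS_READ |
                        SNOR_HWCAPS_READ_FAST |
                        SNOR_HWCAPS_PP,
        };
        struct example_flash *flash;
        int ret;

        flash = devm_kzalloc(&spi->dev, sizeof(*flash), GFP_KERNEL);
        if (!flash)
                return -ENOMEM;

        flash->nor.dev = &spi->dev;
        spi_nor_set_flash_node(&flash->nor, spi->dev.of_node);
        /* ... hook up flash->nor.read/write/read_reg/write_reg here ... */

        /* Identify the flash and populate the embedded mtd_info. */
        ret = spi_nor_scan(&flash->nor, NULL, &hwcaps);
        if (ret)
                return ret;

        return mtd_device_register(&flash->nor.mtd, NULL, 0);
}
#endif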
4319
4320MODULE_LICENSE("GPL v2");
4321MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>");
4322MODULE_AUTHOR("Mike Lavender");
4323MODULE_DESCRIPTION("framework for SPI NOR");
4324