linux/drivers/mtd/chips/cfi_cmdset_0002.c
/*
 * Common Flash Interface support:
 *   AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
 *
 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
 * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com>
 *
 * 2_by_8 routines added by Simon Munton
 *
 * 4_by_16 work by Carolyn J. Smith
 *
 * XIP support hooks by Vitaly Wool (based on code for Intel flash
 * by Nicolas Pitre)
 *
 * 25/09/2008 Christopher Moore: TopBottom fixup for many Macronix with CFI V1.0
 *
 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
 *
 * This code is GPL
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/xip.h>

#define AMD_BOOTLOC_BUG
#define FORCE_WORD_WRITE 0

#define MAX_RETRIES 3

#define SST49LF004B             0x0060
#define SST49LF040B             0x0050
#define SST49LF008A             0x005a
#define AT49BV6416              0x00d6

/*
 * Status Register bit description. Used by flash devices that don't
 * support DQ polling (e.g. HyperFlash)
 */
#define CFI_SR_DRB              BIT(7)
#define CFI_SR_ESB              BIT(5)
#define CFI_SR_PSB              BIT(4)
#define CFI_SR_WBASB            BIT(3)
#define CFI_SR_SLSB             BIT(1)

static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
#if !FORCE_WORD_WRITE
static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
#endif
static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_amdstd_sync (struct mtd_info *);
static int cfi_amdstd_suspend (struct mtd_info *);
static void cfi_amdstd_resume (struct mtd_info *);
static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *);
static int cfi_amdstd_get_fact_prot_info(struct mtd_info *, size_t,
                                         size_t *, struct otp_info *);
static int cfi_amdstd_get_user_prot_info(struct mtd_info *, size_t,
                                         size_t *, struct otp_info *);
static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_read_fact_prot_reg(struct mtd_info *, loff_t, size_t,
                                         size_t *, u_char *);
static int cfi_amdstd_read_user_prot_reg(struct mtd_info *, loff_t, size_t,
                                         size_t *, u_char *);
static int cfi_amdstd_write_user_prot_reg(struct mtd_info *, loff_t, size_t,
                                          size_t *, const u_char *);
static int cfi_amdstd_lock_user_prot_reg(struct mtd_info *, loff_t, size_t);

static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
                                  size_t *retlen, const u_char *buf);

static void cfi_amdstd_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"

static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);

static int cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len);

static struct mtd_chip_driver cfi_amdstd_chipdrv = {
        .probe          = NULL, /* Not usable directly */
        .destroy        = cfi_amdstd_destroy,
        .name           = "cfi_cmdset_0002",
        .module         = THIS_MODULE
};

/*
 * Use status register to poll for Erase/write completion when DQ is not
 * supported. This is indicated by Bit[1:0] of SoftwareFeatures field in
 * CFI Primary Vendor-Specific Extended Query table 1.5
 */
static int cfi_use_status_reg(struct cfi_private *cfi)
{
        struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
        u8 poll_mask = CFI_POLL_STATUS_REG | CFI_POLL_DQ;

        return extp && extp->MinorVersion >= '5' &&
                (extp->SoftwareFeatures & poll_mask) == CFI_POLL_STATUS_REG;
}

static int cfi_check_err_status(struct map_info *map, struct flchip *chip,
                                unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status;

        if (!cfi_use_status_reg(cfi))
                return 0;

        cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi,
                         cfi->device_type, NULL);
        status = map_read(map, adr);

        /* The error bits are invalid while the chip's busy */
        if (!map_word_bitsset(map, status, CMD(CFI_SR_DRB)))
                return 0;

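        /* 0x3a = CFI_SR_ESB | CFI_SR_PSB | CFI_SR_WBASB | CFI_SR_SLSB:
         * any of the error/abort status bits */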
        if (map_word_bitsset(map, status, CMD(0x3a))) {
                unsigned long chipstatus = MERGESTATUS(status);

                if (chipstatus & CFI_SR_ESB)
                        pr_err("%s erase operation failed, status %lx\n",
                               map->name, chipstatus);
                if (chipstatus & CFI_SR_PSB)
                        pr_err("%s program operation failed, status %lx\n",
                               map->name, chipstatus);
                if (chipstatus & CFI_SR_WBASB)
                        pr_err("%s buffer program command aborted, status %lx\n",
                               map->name, chipstatus);
                if (chipstatus & CFI_SR_SLSB)
                        pr_err("%s sector write protected, status %lx\n",
                               map->name, chipstatus);

                /* Erase/Program status bits are set on the operation failure */
                if (chipstatus & (CFI_SR_ESB | CFI_SR_PSB))
                        return 1;
        }
        return 0;
}

/* #define DEBUG_CFI_FEATURES */


#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_amdstd *extp)
{
        const char* erase_suspend[3] = {
                "Not supported", "Read only", "Read/write"
        };
        const char* top_bottom[6] = {
                "No WP", "8x8KiB sectors at top & bottom, no WP",
                "Bottom boot", "Top boot",
                "Uniform, Bottom WP", "Uniform, Top WP"
        };

        printk("  Silicon revision: %d\n", extp->SiliconRevision >> 1);
        printk("  Address sensitive unlock: %s\n",
               (extp->SiliconRevision & 1) ? "Not required" : "Required");

        if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
                printk("  Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
        else
                printk("  Erase Suspend: Unknown value %d\n", extp->EraseSuspend);

        if (extp->BlkProt == 0)
                printk("  Block protection: Not supported\n");
        else
                printk("  Block protection: %d sectors per group\n", extp->BlkProt);


        printk("  Temporary block unprotect: %s\n",
               extp->TmpBlkUnprotect ? "Supported" : "Not supported");
        printk("  Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
        printk("  Number of simultaneous operations: %d\n", extp->SimultaneousOps);
        printk("  Burst mode: %s\n",
               extp->BurstMode ? "Supported" : "Not supported");
        if (extp->PageMode == 0)
                printk("  Page mode: Not supported\n");
        else
                printk("  Page mode: %d word page\n", extp->PageMode << 2);

        printk("  Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
               extp->VppMin >> 4, extp->VppMin & 0xf);
        printk("  Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
               extp->VppMax >> 4, extp->VppMax & 0xf);

        if (extp->TopBottom < ARRAY_SIZE(top_bottom))
                printk("  Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
        else
                printk("  Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
}
#endif

#ifdef AMD_BOOTLOC_BUG
/* Wheee. Bring me the head of someone at AMD. */
static void fixup_amd_bootblock(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
        __u8 major = extp->MajorVersion;
        __u8 minor = extp->MinorVersion;

        if (((major << 8) | minor) < 0x3131) {
                /* CFI version 1.0 => don't trust bootloc */

                pr_debug("%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n",
                        map->name, cfi->mfr, cfi->id);

                /* AFAICS all 29LV400 with a bottom boot block have a device ID
                 * of 0x22BA in 16-bit mode and 0xBA in 8-bit mode.
                 * These were badly detected as they have the 0x80 bit set
                 * so treat them as a special case.
                 */
                if (((cfi->id == 0xBA) || (cfi->id == 0x22BA)) &&

                        /* Macronix added CFI to their 2nd generation
                         * MX29LV400C B/T but AFAICS no other 29LV400 (AMD,
                         * Fujitsu, Spansion, EON, ESI and older Macronix)
                         * has CFI.
                         *
                         * Therefore also check the manufacturer.
                         * This reduces the risk of false detection due to
                         * the 8-bit device ID.
                         */
                        (cfi->mfr == CFI_MFR_MACRONIX)) {
                        pr_debug("%s: Macronix MX29LV400C with bottom boot block"
                                " detected\n", map->name);
                        extp->TopBottom = 2;    /* bottom boot */
                } else
                if (cfi->id & 0x80) {
                        printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
                        extp->TopBottom = 3;    /* top boot */
                } else {
                        extp->TopBottom = 2;    /* bottom boot */
                }

                pr_debug("%s: AMD CFI PRI V%c.%c has no boot block field;"
                        " deduced %s from Device ID\n", map->name, major, minor,
                        extp->TopBottom == 2 ? "bottom" : "top");
        }
}
#endif

#if !FORCE_WORD_WRITE
static void fixup_use_write_buffers(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        if (cfi->mfr == CFI_MFR_AMD && cfi->id == 0x2201)
                return;

        if (cfi->cfiq->BufWriteTimeoutTyp) {
                pr_debug("Using buffer write method\n");
                mtd->_write = cfi_amdstd_write_buffers;
        }
}
#endif /* !FORCE_WORD_WRITE */

/* Atmel chips don't use the same PRI format as AMD chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
        struct cfi_pri_atmel atmel_pri;

        memcpy(&atmel_pri, extp, sizeof(atmel_pri));
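        /* The first five bytes (the "PRI" signature plus the major and minor
         * version) are preserved; the rest of the extended table is rebuilt
         * from the Atmel layout below. */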
        memset((char *)extp + 5, 0, sizeof(*extp) - 5);

        if (atmel_pri.Features & 0x02)
                extp->EraseSuspend = 2;

        /* Some chips got it backwards... */
        if (cfi->id == AT49BV6416) {
                if (atmel_pri.BottomBoot)
                        extp->TopBottom = 3;
                else
                        extp->TopBottom = 2;
        } else {
                if (atmel_pri.BottomBoot)
                        extp->TopBottom = 2;
                else
                        extp->TopBottom = 3;
        }

        /* burst write mode not supported */
        cfi->cfiq->BufWriteTimeoutTyp = 0;
        cfi->cfiq->BufWriteTimeoutMax = 0;
}

static void fixup_use_secsi(struct mtd_info *mtd)
{
        /* Setup for chips with a secsi area */
        mtd->_read_user_prot_reg = cfi_amdstd_secsi_read;
        mtd->_read_fact_prot_reg = cfi_amdstd_secsi_read;
}

static void fixup_use_erase_chip(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        if ((cfi->cfiq->NumEraseRegions == 1) &&
                ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
                mtd->_erase = cfi_amdstd_erase_chip;
        }

}

/*
 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors
 * locked by default.
 */
static void fixup_use_atmel_lock(struct mtd_info *mtd)
{
        mtd->_lock = cfi_atmel_lock;
        mtd->_unlock = cfi_atmel_unlock;
        mtd->flags |= MTD_POWERUP_LOCK;
}

static void fixup_old_sst_eraseregion(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        /*
         * These flashes report two separate eraseblock regions based on the
         * sector_erase-size and block_erase-size, although they both operate on the
         * same memory. This is not allowed according to CFI, so we just pick the
         * sector_erase-size.
         */
        cfi->cfiq->NumEraseRegions = 1;
}

static void fixup_sst39vf(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        fixup_old_sst_eraseregion(mtd);

        cfi->addr_unlock1 = 0x5555;
        cfi->addr_unlock2 = 0x2AAA;
}

static void fixup_sst39vf_rev_b(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        fixup_old_sst_eraseregion(mtd);

        cfi->addr_unlock1 = 0x555;
        cfi->addr_unlock2 = 0x2AA;

        cfi->sector_erase_cmd = CMD(0x50);
}

static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        fixup_sst39vf_rev_b(mtd);

        /*
         * CFI reports 1024 sectors (0x03ff+1) of 64KBytes (0x0100*256) where
         * it should report a size of 8KBytes (0x0020*256).
         */
        cfi->cfiq->EraseRegionInfo[0] = 0x002003ff;
        pr_warn("%s: Bad 38VF640x CFI data; adjusting sector size from 64 to 8KiB\n",
                mtd->name);
}

static void fixup_s29gl064n_sectors(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
                cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
                pr_warn("%s: Bad S29GL064N CFI data; adjust from 64 to 128 sectors\n",
                        mtd->name);
        }
}

static void fixup_s29gl032n_sectors(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
                cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
                pr_warn("%s: Bad S29GL032N CFI data; adjust from 127 to 63 sectors\n",
                        mtd->name);
        }
}
static void fixup_s29ns512p_sectors(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        /*
         * S29NS512P flash uses more than 8 bits to report the number of
         * sectors, which is not permitted by CFI.
         */
        cfi->cfiq->EraseRegionInfo[0] = 0x020001ff;
        pr_warn("%s: Bad S29NS512P CFI data; adjust to 512 sectors\n",
                mtd->name);
}

/* Used to fix CFI-Tables of chips without Extended Query Tables */
static struct cfi_fixup cfi_nopri_fixup_table[] = {
        { CFI_MFR_SST, 0x234a, fixup_sst39vf }, /* SST39VF1602 */
        { CFI_MFR_SST, 0x234b, fixup_sst39vf }, /* SST39VF1601 */
        { CFI_MFR_SST, 0x235a, fixup_sst39vf }, /* SST39VF3202 */
        { CFI_MFR_SST, 0x235b, fixup_sst39vf }, /* SST39VF3201 */
        { CFI_MFR_SST, 0x235c, fixup_sst39vf_rev_b }, /* SST39VF3202B */
        { CFI_MFR_SST, 0x235d, fixup_sst39vf_rev_b }, /* SST39VF3201B */
        { CFI_MFR_SST, 0x236c, fixup_sst39vf_rev_b }, /* SST39VF6402B */
        { CFI_MFR_SST, 0x236d, fixup_sst39vf_rev_b }, /* SST39VF6401B */
        { 0, 0, NULL }
};

static struct cfi_fixup cfi_fixup_table[] = {
        { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
#ifdef AMD_BOOTLOC_BUG
        { CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock },
        { CFI_MFR_AMIC, CFI_ID_ANY, fixup_amd_bootblock },
        { CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock },
#endif
        { CFI_MFR_AMD, 0x0050, fixup_use_secsi },
        { CFI_MFR_AMD, 0x0053, fixup_use_secsi },
        { CFI_MFR_AMD, 0x0055, fixup_use_secsi },
        { CFI_MFR_AMD, 0x0056, fixup_use_secsi },
        { CFI_MFR_AMD, 0x005C, fixup_use_secsi },
        { CFI_MFR_AMD, 0x005F, fixup_use_secsi },
        { CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors },
        { CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors },
        { CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors },
        { CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors },
        { CFI_MFR_AMD, 0x3f00, fixup_s29ns512p_sectors },
        { CFI_MFR_SST, 0x536a, fixup_sst38vf640x_sectorsize }, /* SST38VF6402 */
        { CFI_MFR_SST, 0x536b, fixup_sst38vf640x_sectorsize }, /* SST38VF6401 */
        { CFI_MFR_SST, 0x536c, fixup_sst38vf640x_sectorsize }, /* SST38VF6404 */
        { CFI_MFR_SST, 0x536d, fixup_sst38vf640x_sectorsize }, /* SST38VF6403 */
#if !FORCE_WORD_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
#endif
        { 0, 0, NULL }
};
static struct cfi_fixup jedec_fixup_table[] = {
        { CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock },
        { CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock },
        { CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock },
        { 0, 0, NULL }
};
static struct cfi_fixup fixup_table[] = {
        /* The CFI vendor IDs and the JEDEC vendor IDs appear to be
         * common.  It looks like the device IDs are as well.  This
         * table picks all cases where we know that to be true.
         */
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip },
        { CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock },
        { 0, 0, NULL }
};


static void cfi_fixup_major_minor(struct cfi_private *cfi,
                                  struct cfi_pri_amdstd *extp)
{
        if (cfi->mfr == CFI_MFR_SAMSUNG) {
                if ((extp->MajorVersion == '0' && extp->MinorVersion == '0') ||
                    (extp->MajorVersion == '3' && extp->MinorVersion == '3')) {
                        /*
                         * Samsung K8P2815UQB and K8D6x16UxM chips
                         * report major=0 / minor=0.
                         * K8D3x16UxC chips report major=3 / minor=3.
                         */
                        printk(KERN_NOTICE "  Fixing Samsung's Amd/Fujitsu"
                               " Extended Query version to 1.%c\n",
                               extp->MinorVersion);
                        extp->MajorVersion = '1';
                }
        }

        /*
         * SST 38VF640x chips report major=0xFF / minor=0xFF.
         */
        if (cfi->mfr == CFI_MFR_SST && (cfi->id >> 4) == 0x0536) {
                extp->MajorVersion = '1';
                extp->MinorVersion = '0';
        }
}

static int is_m29ew(struct cfi_private *cfi)
{
        if (cfi->mfr == CFI_MFR_INTEL &&
            ((cfi->device_type == CFI_DEVICETYPE_X8 && (cfi->id & 0xff) == 0x7e) ||
             (cfi->device_type == CFI_DEVICETYPE_X16 && cfi->id == 0x227e)))
                return 1;
        return 0;
}

/*
 * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 20:
 * Some revisions of the M29EW suffer from erase suspend hang ups. In
 * particular, it can occur when the sequence
 * Erase Confirm -> Suspend -> Program -> Resume
 * causes a lockup due to internal timing issues. The consequence is that the
 * erase cannot be resumed without inserting a dummy command after programming
 * and prior to resuming. [...] The work-around is to issue a dummy write cycle
 * that writes an F0 command code before the RESUME command.
 */
static void cfi_fixup_m29ew_erase_suspend(struct map_info *map,
                                          unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        /* before resume, insert a dummy 0xF0 cycle for Micron M29EW devices */
        if (is_m29ew(cfi))
                map_write(map, CMD(0xF0), adr);
}

/*
 * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 22:
 *
 * Some revisions of the M29EW (for example, A1 and A2 step revisions)
 * are affected by a problem that could cause a hang up when an ERASE SUSPEND
 * command is issued after an ERASE RESUME operation without waiting for a
 * minimum delay.  The result is that once the ERASE seems to be completed
 * (no bits are toggling), the contents of the Flash memory block on which
 * the erase was ongoing could be inconsistent with the expected values
 * (typically, the array value is stuck to the 0xC0, 0xC4, 0x80, or 0x84
 * values), causing a consequent failure of the ERASE operation.
 * The occurrence of this issue could be high, especially when file system
 * operations on the Flash are intensive.  As a result, it is recommended
 * that a patch be applied.  Intensive file system operations can cause many
 * calls to the garbage routine to free Flash space (also by erasing physical
 * Flash blocks) and as a result, many consecutive SUSPEND and RESUME
 * commands can occur.  The problem disappears when a delay is inserted after
 * the RESUME command by using the udelay() function available in Linux.
 * The DELAY value must be tuned based on the customer's platform.
 * The maximum value that fixes the problem in all cases is 500us.
 * But, in our experience, a delay of 30 µs to 50 µs is sufficient
 * in most cases.
 * We have chosen 500µs because this latency is acceptable.
 */
static void cfi_fixup_m29ew_delay_after_resume(struct cfi_private *cfi)
{
        /*
         * Resolving the Delay After Resume Issue see Micron TN-13-07
         * Worst case delay must be 500µs but 30-50µs should be ok as well
         */
        if (is_m29ew(cfi))
                cfi_udelay(500);
}

struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct device_node __maybe_unused *np = map->device_node;
        struct mtd_info *mtd;
        int i;

        mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
        if (!mtd)
                return NULL;
        mtd->priv = map;
        mtd->type = MTD_NORFLASH;

        /* Fill in the default mtd operations */
        mtd->_erase   = cfi_amdstd_erase_varsize;
        mtd->_write   = cfi_amdstd_write_words;
        mtd->_read    = cfi_amdstd_read;
        mtd->_sync    = cfi_amdstd_sync;
        mtd->_suspend = cfi_amdstd_suspend;
        mtd->_resume  = cfi_amdstd_resume;
        mtd->_read_user_prot_reg = cfi_amdstd_read_user_prot_reg;
        mtd->_read_fact_prot_reg = cfi_amdstd_read_fact_prot_reg;
        mtd->_get_fact_prot_info = cfi_amdstd_get_fact_prot_info;
        mtd->_get_user_prot_info = cfi_amdstd_get_user_prot_info;
        mtd->_write_user_prot_reg = cfi_amdstd_write_user_prot_reg;
        mtd->_lock_user_prot_reg = cfi_amdstd_lock_user_prot_reg;
        mtd->flags   = MTD_CAP_NORFLASH;
        mtd->name    = map->name;
        mtd->writesize = 1;
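        /* MaxBufWriteSize is the log2 of the per-chip write buffer size in
         * bytes, so the usable buffer size scales with the interleave. */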
        mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;

        pr_debug("MTD %s(): write buffer size %d\n", __func__,
                        mtd->writebufsize);

        mtd->_panic_write = cfi_amdstd_panic_write;
        mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;

        if (cfi->cfi_mode==CFI_MODE_CFI){
                unsigned char bootloc;
                __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
                struct cfi_pri_amdstd *extp;

                extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
                if (extp) {
                        /*
                         * It's a real CFI chip, not one for which the probe
                         * routine faked a CFI structure.
                         */
                        cfi_fixup_major_minor(cfi, extp);

                        /*
                         * Valid primary extension versions are: 1.0, 1.1, 1.2, 1.3, 1.4, 1.5
                         * see: http://cs.ozerki.net/zap/pub/axim-x5/docs/cfi_r20.pdf, page 19
                         *      http://www.spansion.com/Support/AppNotes/cfi_100_20011201.pdf
                         *      http://www.spansion.com/Support/Datasheets/s29ws-p_00_a12_e.pdf
                         *      http://www.spansion.com/Support/Datasheets/S29GL_128S_01GS_00_02_e.pdf
                         */
                        if (extp->MajorVersion != '1' ||
                            (extp->MajorVersion == '1' && (extp->MinorVersion < '0' || extp->MinorVersion > '5'))) {
                                printk(KERN_ERR "  Unknown Amd/Fujitsu Extended Query "
                                       "version %c.%c (%#02x/%#02x).\n",
                                       extp->MajorVersion, extp->MinorVersion,
                                       extp->MajorVersion, extp->MinorVersion);
                                kfree(extp);
                                kfree(mtd);
                                return NULL;
                        }

                        printk(KERN_INFO "  Amd/Fujitsu Extended Query version %c.%c.\n",
                               extp->MajorVersion, extp->MinorVersion);

                        /* Install our own private info structure */
                        cfi->cmdset_priv = extp;

                        /* Apply cfi device specific fixups */
                        cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
                        /* Tell the user about it in lots of lovely detail */
                        cfi_tell_features(extp);
#endif

#ifdef CONFIG_OF
                        if (np && of_property_read_bool(
                                    np, "use-advanced-sector-protection")
                            && extp->BlkProtUnprot == 8) {
                                printk(KERN_INFO "  Advanced Sector Protection (PPB Locking) supported\n");
                                mtd->_lock = cfi_ppb_lock;
                                mtd->_unlock = cfi_ppb_unlock;
                                mtd->_is_locked = cfi_ppb_is_locked;
                        }
#endif

                        bootloc = extp->TopBottom;
                        if ((bootloc < 2) || (bootloc > 5)) {
                                printk(KERN_WARNING "%s: CFI contains unrecognised boot "
                                       "bank location (%d). Assuming bottom.\n",
                                       map->name, bootloc);
                                bootloc = 2;
                        }

                        if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
                                printk(KERN_WARNING "%s: Swapping erase regions for top-boot CFI table.\n", map->name);

                                for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
                                        int j = (cfi->cfiq->NumEraseRegions-1)-i;

                                        swap(cfi->cfiq->EraseRegionInfo[i],
                                             cfi->cfiq->EraseRegionInfo[j]);
                                }
                        }
                        /* Set the default CFI lock/unlock addresses */
                        cfi->addr_unlock1 = 0x555;
                        cfi->addr_unlock2 = 0x2aa;
                }
                cfi_fixup(mtd, cfi_nopri_fixup_table);

                if (!cfi->addr_unlock1 || !cfi->addr_unlock2) {
                        kfree(mtd);
                        return NULL;
                }

        } /* CFI mode */
        else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
                /* Apply jedec specific fixups */
                cfi_fixup(mtd, jedec_fixup_table);
        }
        /* Apply generic fixups */
        cfi_fixup(mtd, fixup_table);

        for (i=0; i< cfi->numchips; i++) {
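                /* CFI encodes typical operation times as log2 values:
                 * 2^WordWriteTimeoutTyp and 2^BufWriteTimeoutTyp are in
                 * microseconds, 2^BlockEraseTimeoutTyp is in milliseconds. */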
                cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
                cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
                cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
                /*
                 * First calculate the maximum timeout from the timeout field
                 * of struct cfi_ident, as probed from the chip's CFI area, if
                 * available. Specify a minimum of 2000us, in case the CFI data
                 * is wrong.
                 */
                if (cfi->cfiq->BufWriteTimeoutTyp &&
                    cfi->cfiq->BufWriteTimeoutMax)
                        cfi->chips[i].buffer_write_time_max =
                                1 << (cfi->cfiq->BufWriteTimeoutTyp +
                                      cfi->cfiq->BufWriteTimeoutMax);
                else
                        cfi->chips[i].buffer_write_time_max = 0;

                cfi->chips[i].buffer_write_time_max =
                        max(cfi->chips[i].buffer_write_time_max, 2000);

                cfi->chips[i].ref_point_counter = 0;
                init_waitqueue_head(&(cfi->chips[i].wq));
        }

        map->fldrv = &cfi_amdstd_chipdrv;

        return cfi_amdstd_setup(mtd);
}
struct mtd_info *cfi_cmdset_0006(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
struct mtd_info *cfi_cmdset_0701(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0002);
EXPORT_SYMBOL_GPL(cfi_cmdset_0006);
EXPORT_SYMBOL_GPL(cfi_cmdset_0701);

static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
        unsigned long offset = 0;
        int i,j;

        printk(KERN_NOTICE "number of %s chips: %d\n",
               (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);
        /* Select the correct geometry setup */
        mtd->size = devsize * cfi->numchips;

        mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
        mtd->eraseregions = kmalloc_array(mtd->numeraseregions,
                                          sizeof(struct mtd_erase_region_info),
                                          GFP_KERNEL);
        if (!mtd->eraseregions)
                goto setup_err;

        for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
                unsigned long ernum, ersize;
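                /* Each EraseRegionInfo word packs the region geometry:
                 * bits 31:16 hold the block size in 256-byte units (so
                 * ((info >> 8) & ~0xff) is the size in bytes) and bits
                 * 15:0 hold the number of blocks minus one. */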
                ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
                ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

                if (mtd->erasesize < ersize) {
                        mtd->erasesize = ersize;
                }
                for (j=0; j<cfi->numchips; j++) {
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
                }
                offset += (ersize * ernum);
        }
        if (offset != devsize) {
                /* Argh */
                printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
                goto setup_err;
        }

        __module_get(THIS_MODULE);
        register_reboot_notifier(&mtd->reboot_notifier);
        return mtd;

 setup_err:
        kfree(mtd->eraseregions);
        kfree(mtd);
        kfree(cfi->cmdset_priv);
        return NULL;
}

/*
 * Return true if the chip is ready.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and is indicated by no toggle bits toggling.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done  (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_ready(struct map_info *map, struct flchip *chip,
                               unsigned long addr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        map_word d, t;

        if (cfi_use_status_reg(cfi)) {
                map_word ready = CMD(CFI_SR_DRB);
                /*
                 * For chips that support status register, check device
                 * ready bit
                 */
                cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi,
                                 cfi->device_type, NULL);
                d = map_read(map, addr);

                return map_word_andequal(map, d, ready, ready);
        }

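        /* While a program or erase is in progress, DQ6 toggles on every
         * read; two identical consecutive reads mean the chip is idle. */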
        d = map_read(map, addr);
        t = map_read(map, addr);

        return map_word_equal(map, d, t);
}

/*
 * Return true if the chip is ready and has the correct value.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and it is indicated by no bits toggling.
 *
 * Errors are indicated by bits toggling or by bits held at the wrong
 * value.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done  (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 *
 */
static int __xipram chip_good(struct map_info *map, struct flchip *chip,
                              unsigned long addr, map_word expected)
{
        struct cfi_private *cfi = map->fldrv_priv;
        map_word oldd, curd;

        if (cfi_use_status_reg(cfi)) {
                map_word ready = CMD(CFI_SR_DRB);

                /*
                 * For chips that support status register, check device
                 * ready bit
                 */
                cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi,
                                 cfi->device_type, NULL);
                curd = map_read(map, addr);

                return map_word_andequal(map, curd, ready, ready);
        }

        oldd = map_read(map, addr);
        curd = map_read(map, addr);

        return  map_word_equal(map, oldd, curd) &&
                map_word_equal(map, curd, expected);
}

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        DECLARE_WAITQUEUE(wait, current);
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long timeo;
        struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;

 resettime:
        timeo = jiffies + HZ;
 retry:
        switch (chip->state) {

        case FL_STATUS:
                for (;;) {
                        if (chip_ready(map, chip, adr))
                                break;

                        if (time_after(jiffies, timeo)) {
                                printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
                                return -EIO;
                        }
                        mutex_unlock(&chip->mutex);
                        cfi_udelay(1);
                        mutex_lock(&chip->mutex);
                        /* Someone else might have been playing with it. */
                        goto retry;
                }
                return 0;

        case FL_READY:
        case FL_CFI_QUERY:
        case FL_JEDEC_QUERY:
                return 0;

        case FL_ERASING:
                if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
                    !(mode == FL_READY || mode == FL_POINT ||
                    (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
                        goto sleep;

                /* Do not allow suspend if the read/write targets the
                 * erase-block currently being erased */
                if ((adr & chip->in_progress_block_mask) ==
                    chip->in_progress_block_addr)
                        goto sleep;

                /* Erase suspend */
                /* It's harmless to issue the Erase-Suspend and Erase-Resume
                 * commands when the erase algorithm isn't in progress. */
                map_write(map, CMD(0xB0), chip->in_progress_block_addr);
                chip->oldstate = FL_ERASING;
                chip->state = FL_ERASE_SUSPENDING;
                chip->erase_suspended = 1;
                for (;;) {
                        if (chip_ready(map, chip, adr))
                                break;

                        if (time_after(jiffies, timeo)) {
                                /* Should have suspended the erase by now.
                                 * Send an Erase-Resume command as either
                                 * there was an error (so leave the erase
                                 * routine to recover from it) or we are
                                 * trying to use the erase-in-progress sector. */
                                put_chip(map, chip, adr);
                                printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
                                return -EIO;
                        }

                        mutex_unlock(&chip->mutex);
                        cfi_udelay(1);
                        mutex_lock(&chip->mutex);
                        /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
                           So we can just loop here. */
                }
                chip->state = FL_READY;
                return 0;

        case FL_XIP_WHILE_ERASING:
                if (mode != FL_READY && mode != FL_POINT &&
                    (!cfip || !(cfip->EraseSuspend&2)))
                        goto sleep;
                chip->oldstate = chip->state;
                chip->state = FL_READY;
                return 0;

        case FL_SHUTDOWN:
                /* The machine is rebooting */
                return -EIO;

        case FL_POINT:
                /* Only if there's no operation suspended... */
                if (mode == FL_READY && chip->oldstate == FL_READY)
                        return 0;
                fallthrough;
        default:
        sleep:
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);
                mutex_unlock(&chip->mutex);
                schedule();
                remove_wait_queue(&chip->wq, &wait);
                mutex_lock(&chip->mutex);
                goto resettime;
        }
}


static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;

        switch(chip->oldstate) {
        case FL_ERASING:
                cfi_fixup_m29ew_erase_suspend(map,
                        chip->in_progress_block_addr);
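                /* Writing the sector-erase command (0x30 by default) to a
                 * suspended block doubles as Erase-Resume on the AMD
                 * command set. */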
                map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr);
                cfi_fixup_m29ew_delay_after_resume(cfi);
                chip->oldstate = FL_READY;
                chip->state = FL_ERASING;
                break;

        case FL_XIP_WHILE_ERASING:
                chip->state = chip->oldstate;
                chip->oldstate = FL_READY;
                break;

        case FL_READY:
        case FL_STATUS:
                break;
        default:
                printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
        }
        wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * Within an XIP-disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions). Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
                        unsigned long adr)
{
        /* TODO: chips with no XIP use should ignore and return */
        (void) map_read(map, adr); /* ensure mmu mapping is up to date */
        local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
                                unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;

        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xf0), adr);
                chip->state = FL_READY;
        }
        (void) map_read(map, adr);
        xip_iprefetch();
        local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function polls for both the given timeout and pending
 * (but still masked) hardware interrupts.  Whenever there is an interrupt
 * pending then the flash erase operation is suspended, array mode restored
 * and interrupts unmasked.  Task scheduling might also happen at that
 * point.  The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the remainder
 * of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
                                unsigned long adr, int usec)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
        map_word status, OK = CMD(0x80);
        unsigned long suspended, start = xip_currtime();
        flstate_t oldstate;

        do {
                cpu_relax();
                if (xip_irqpending() && extp &&
                    ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
                    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
                        /*
                         * Let's suspend the erase operation when supported.
                         * Note that we currently don't try to suspend
                         * interleaved chips if there is already another
                         * operation suspended (imagine what happens
                         * when one chip was already done with the current
                         * operation while another chip suspended it, then
                         * we resume the whole thing at once).  Yes, it
                         * can happen!
                         */
                        map_write(map, CMD(0xb0), adr);
                        usec -= xip_elapsed_since(start);
                        suspended = xip_currtime();
                        do {
                                if (xip_elapsed_since(suspended) > 100000) {
                                        /*
                                         * The chip doesn't want to suspend
                                         * after waiting for 100 msecs.
                                         * This is a critical error but there
                                         * is not much we can do here.
                                         */
                                        return;
                                }
                                status = map_read(map, adr);
                        } while (!map_word_andequal(map, status, OK, OK));

                        /* Suspend succeeded */
                        oldstate = chip->state;
                        if (!map_word_bitsset(map, status, CMD(0x40)))
                                break;
                        chip->state = FL_XIP_WHILE_ERASING;
                        chip->erase_suspended = 1;
                        map_write(map, CMD(0xf0), adr);
                        (void) map_read(map, adr);
                        xip_iprefetch();
                        local_irq_enable();
                        mutex_unlock(&chip->mutex);
                        xip_iprefetch();
                        cond_resched();

                        /*
                         * We're back.  However someone else might have
                         * decided to go write to the chip if we are in
                         * a suspended erase state.  If so let's wait
                         * until it's done.
                         */
                        mutex_lock(&chip->mutex);
                        while (chip->state != FL_XIP_WHILE_ERASING) {
                                DECLARE_WAITQUEUE(wait, current);
                                set_current_state(TASK_UNINTERRUPTIBLE);
                                add_wait_queue(&chip->wq, &wait);
                                mutex_unlock(&chip->mutex);
                                schedule();
                                remove_wait_queue(&chip->wq, &wait);
                                mutex_lock(&chip->mutex);
                        }
                        /* Disallow XIP again */
                        local_irq_disable();

                        /* Correct Erase Suspend Hangups for M29EW */
                        cfi_fixup_m29ew_erase_suspend(map, adr);
                        /* Resume the write or erase operation */
                        map_write(map, cfi->sector_erase_cmd, adr);
                        chip->state = oldstate;
                        start = xip_currtime();
                } else if (usec >= 1000000/HZ) {
                        /*
                         * Try to save on CPU power when waiting delay
                         * is at least a system timer tick period.
                         * No need to be extremely accurate here.
                         */
                        xip_cpu_idle();
                }
                status = map_read(map, adr);
        } while (!map_word_andequal(map, status, OK, OK)
                 && xip_elapsed_since(start) < usec);
}

#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVALIDATE_CACHE_UDELAY.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
        INVALIDATE_CACHED_RANGE(map, from, size)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
        UDELAY(map, chip, adr, usec)

/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit.  For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with the
 * same chip state as it was entered with.  This is why there is no concern
 * about the add_wait_queue() or schedule() calls from within a couple of
 * xip_disable()'d areas of code, like in do_erase_oneblock for example.
 * The queueing and scheduling always happen within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where the flash
 * is in array mode, therefore never executing many cases therein and not
 * causing any problem with XIP.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)

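/*
 * Without XIP the chip mutex is simply dropped around the busy-wait so
 * that other users of the chip can make progress while we delay.
 */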
#define UDELAY(map, chip, adr, usec)  \
do {  \
        mutex_unlock(&chip->mutex);  \
        cfi_udelay(usec);  \
        mutex_lock(&chip->mutex);  \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
do {  \
        mutex_unlock(&chip->mutex);  \
        INVALIDATE_CACHED_RANGE(map, adr, len);  \
        cfi_udelay(usec);  \
        mutex_lock(&chip->mutex);  \
} while (0)

#endif

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
        unsigned long cmd_addr;
        struct cfi_private *cfi = map->fldrv_priv;
        int ret;

        adr += chip->start;

        /* Ensure cmd read/writes are aligned. */
        cmd_addr = adr & ~(map_bankwidth(map)-1);

        mutex_lock(&chip->mutex);
        ret = get_chip(map, chip, cmd_addr, FL_READY);
        if (ret) {
                mutex_unlock(&chip->mutex);
                return ret;
        }

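        /* 0xF0 is the AMD-standard reset command: return the chip to
         * read-array mode before copying the data out. */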
        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xf0), cmd_addr);
                chip->state = FL_READY;
        }

        map_copy_from(map, buf, adr, len);

        put_chip(map, chip, cmd_addr);

        mutex_unlock(&chip->mutex);
        return 0;
}


static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long ofs;
        int chipnum;
        int ret = 0;

        /* ofs: offset within the first chip at which the first read should start */
        chipnum = (from >> cfi->chipshift);
        ofs = from - (chipnum <<  cfi->chipshift);

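        /* Walk the request chip by chip: a transfer that would run past the
         * end of the current chip is clamped at the chip boundary. */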
        while (len) {
                unsigned long thislen;

                if (chipnum >= cfi->numchips)
                        break;

                if ((len + ofs -1) >> cfi->chipshift)
                        thislen = (1<<cfi->chipshift) - ofs;
                else
                        thislen = len;

                ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
                if (ret)
                        break;

                *retlen += thislen;
                len -= thislen;
                buf += thislen;

                ofs = 0;
                chipnum++;
        }
        return ret;
}

typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
                        loff_t adr, size_t len, u_char *buf, size_t grouplen);

static inline void otp_enter(struct map_info *map, struct flchip *chip,
                             loff_t adr, size_t len)
{
        struct cfi_private *cfi = map->fldrv_priv;

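        /* Standard AMD two-cycle unlock (0xAA @ unlock1, 0x55 @ unlock2)
         * followed by 0x88, the Enter SecSi Sector Region command. */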
        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
                         cfi->device_type, NULL);
        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
                         cfi->device_type, NULL);
        cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi,
                         cfi->device_type, NULL);

        INVALIDATE_CACHED_RANGE(map, chip->start + adr, len);
}

static inline void otp_exit(struct map_info *map, struct flchip *chip,
                            loff_t adr, size_t len)
{
        struct cfi_private *cfi = map->fldrv_priv;

1303        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
1304                         cfi->device_type, NULL);
1305        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
1306                         cfi->device_type, NULL);
1307        cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi,
1308                         cfi->device_type, NULL);
1309        cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi,
1310                         cfi->device_type, NULL);
1311
1312        INVALIDATE_CACHED_RANGE(map, chip->start + adr, len);
1313}
1314
1315static inline int do_read_secsi_onechip(struct map_info *map,
1316                                        struct flchip *chip, loff_t adr,
1317                                        size_t len, u_char *buf,
1318                                        size_t grouplen)
1319{
1320        DECLARE_WAITQUEUE(wait, current);
1321
1322 retry:
1323        mutex_lock(&chip->mutex);
1324
1325        if (chip->state != FL_READY) {
1326                set_current_state(TASK_UNINTERRUPTIBLE);
1327                add_wait_queue(&chip->wq, &wait);
1328
1329                mutex_unlock(&chip->mutex);
1330
1331                schedule();
1332                remove_wait_queue(&chip->wq, &wait);
1333
1334                goto retry;
1335        }
1336
1337        adr += chip->start;
1338
1339        chip->state = FL_READY;
1340
1341        otp_enter(map, chip, adr, len);
1342        map_copy_from(map, buf, adr, len);
1343        otp_exit(map, chip, adr, len);
1344
1345        wake_up(&chip->wq);
1346        mutex_unlock(&chip->mutex);
1347
1348        return 0;
1349}
1350
1351static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1352{
1353        struct map_info *map = mtd->priv;
1354        struct cfi_private *cfi = map->fldrv_priv;
1355        unsigned long ofs;
1356        int chipnum;
1357        int ret = 0;
1358
1359        /* ofs: offset within the first chip at which the first read should start */
1360        /* 8 secsi bytes per chip */
1361        chipnum = from >> 3;
1362        ofs = from & 7;
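            /* e.g. from = 10: chip 1, offset 2 within its 8-byte window */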
1363
1364        while (len) {
1365                unsigned long thislen;
1366
1367                if (chipnum >= cfi->numchips)
1368                        break;
1369
1370                if ((len + ofs - 1) >> 3)
1371                        thislen = (1<<3) - ofs;
1372                else
1373                        thislen = len;
1374
1375                ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs,
1376                                            thislen, buf, 0);
1377                if (ret)
1378                        break;
1379
1380                *retlen += thislen;
1381                len -= thislen;
1382                buf += thislen;
1383
1384                ofs = 0;
1385                chipnum++;
1386        }
1387        return ret;
1388}
1389
1390static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1391                                     unsigned long adr, map_word datum,
1392                                     int mode);
1393
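    /*
     * Program a byte range into the OTP region.  Sub-word writes are
     * handled read-modify-write: e.g. with a 2-byte bus width, a
     * 1-byte write at offset 5 reads the word at offset 4, merges the
     * new byte in at the right position, and programs the whole word
     * back in FL_OTP_WRITE mode.
     */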
1394static int do_otp_write(struct map_info *map, struct flchip *chip, loff_t adr,
1395                        size_t len, u_char *buf, size_t grouplen)
1396{
1397        int ret;
1398        while (len) {
1399                unsigned long bus_ofs = adr & ~(map_bankwidth(map)-1);
1400                int gap = adr - bus_ofs;
1401                int n = min_t(int, len, map_bankwidth(map) - gap);
1402                map_word datum = map_word_ff(map);
1403
1404                if (n != map_bankwidth(map)) {
1405                        /* partial write of a word, load old contents */
1406                        otp_enter(map, chip, bus_ofs, map_bankwidth(map));
1407                        datum = map_read(map, bus_ofs);
1408                        otp_exit(map, chip, bus_ofs, map_bankwidth(map));
1409                }
1410
1411                datum = map_word_load_partial(map, datum, buf, gap, n);
1412                ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
1413                if (ret)
1414                        return ret;
1415
1416                adr += n;
1417                buf += n;
1418                len -= n;
1419        }
1420
1421        return 0;
1422}
1423
1424static int do_otp_lock(struct map_info *map, struct flchip *chip, loff_t adr,
1425                       size_t len, u_char *buf, size_t grouplen)
1426{
1427        struct cfi_private *cfi = map->fldrv_priv;
1428        uint8_t lockreg;
1429        unsigned long timeo;
1430        int ret;
1431
1432        /* make sure area matches group boundaries */
1433        if ((adr != 0) || (len != grouplen))
1434                return -EINVAL;
1435
1436        mutex_lock(&chip->mutex);
1437        ret = get_chip(map, chip, chip->start, FL_LOCKING);
1438        if (ret) {
1439                mutex_unlock(&chip->mutex);
1440                return ret;
1441        }
1442        chip->state = FL_LOCKING;
1443
1444        /* Enter lock register command */
1445        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
1446                         cfi->device_type, NULL);
1447        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
1448                         cfi->device_type, NULL);
1449        cfi_send_gen_cmd(0x40, cfi->addr_unlock1, chip->start, map, cfi,
1450                         cfi->device_type, NULL);
1451
1452        /* read lock register */
1453        lockreg = cfi_read_query(map, 0);
1454
1455        /* clear bit 0 to protect the extended memory block */
1456        lockreg &= ~0x01;
1457
1458        /* write lock register: 0xA0 (program) followed by the
1459           new value, both issued at the chip base address */
1460        map_write(map, CMD(0xA0), chip->start);
1461        map_write(map, CMD(lockreg), chip->start);
1462
1463        /* wait for chip to become ready */
1464        timeo = jiffies + msecs_to_jiffies(2);
1465        for (;;) {
1466                if (chip_ready(map, chip, adr))
1467                        break;
1468
1469                if (time_after(jiffies, timeo)) {
1470                        pr_err("Waiting for chip to be ready timed out.\n");
1471                        ret = -EIO;
1472                        break;
1473                }
1474                UDELAY(map, chip, 0, 1);
1475        }
1476
1477        /* exit protection commands */
1478        map_write(map, CMD(0x90), chip->start);
1479        map_write(map, CMD(0x00), chip->start);
1480
1481        chip->state = FL_READY;
1482        put_chip(map, chip, chip->start);
1483        mutex_unlock(&chip->mutex);
1484
1485        return ret;
1486}
1487
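    /*
     * Walk the OTP region(s) of every chip, either reporting their
     * layout (action == NULL fills struct otp_info entries into 'buf')
     * or applying 'action' (read, write or lock) to the byte range
     * starting at 'from'.  'user_regs' selects the user-lockable
     * region instead of the factory-locked one.
     */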
1488static int cfi_amdstd_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
1489                               size_t *retlen, u_char *buf,
1490                               otp_op_t action, int user_regs)
1491{
1492        struct map_info *map = mtd->priv;
1493        struct cfi_private *cfi = map->fldrv_priv;
1494        int ofs_factor = cfi->interleave * cfi->device_type;
1495        unsigned long base;
1496        int chipnum;
1497        struct flchip *chip;
1498        uint8_t otp, lockreg;
1499        int ret;
1500
1501        size_t user_size, factory_size, otpsize;
1502        loff_t user_offset, factory_offset, otpoffset;
1503        int user_locked = 0, otplocked;
1504
1505        *retlen = 0;
1506
1507        for (chipnum = 0; chipnum < cfi->numchips; chipnum++) {
1508                chip = &cfi->chips[chipnum];
1509                factory_size = 0;
1510                user_size = 0;
1511
1512                /* Micron M29EW family */
1513                if (is_m29ew(cfi)) {
1514                        base = chip->start;
1515
1516                        /* check whether secsi area is factory locked
1517                           or user lockable */
1518                        mutex_lock(&chip->mutex);
1519                        ret = get_chip(map, chip, base, FL_CFI_QUERY);
1520                        if (ret) {
1521                                mutex_unlock(&chip->mutex);
1522                                return ret;
1523                        }
1524                        cfi_qry_mode_on(base, map, cfi);
1525                        otp = cfi_read_query(map, base + 0x3 * ofs_factor);
1526                        cfi_qry_mode_off(base, map, cfi);
1527                        put_chip(map, chip, base);
1528                        mutex_unlock(&chip->mutex);
1529
1530                        if (otp & 0x80) {
1531                                /* factory locked */
1532                                factory_offset = 0;
1533                                factory_size = 0x100;
1534                        } else {
1535                                /* customer lockable */
1536                                user_offset = 0;
1537                                user_size = 0x100;
1538
1539                                mutex_lock(&chip->mutex);
1540                                ret = get_chip(map, chip, base, FL_LOCKING);
1541                                if (ret) {
1542                                        mutex_unlock(&chip->mutex);
1543                                        return ret;
1544                                }
1545
1546                                /* Enter lock register command */
1547                                cfi_send_gen_cmd(0xAA, cfi->addr_unlock1,
1548                                                 chip->start, map, cfi,
1549                                                 cfi->device_type, NULL);
1550                                cfi_send_gen_cmd(0x55, cfi->addr_unlock2,
1551                                                 chip->start, map, cfi,
1552                                                 cfi->device_type, NULL);
1553                                cfi_send_gen_cmd(0x40, cfi->addr_unlock1,
1554                                                 chip->start, map, cfi,
1555                                                 cfi->device_type, NULL);
1556                                /* read lock register */
1557                                lockreg = cfi_read_query(map, 0);
1558                                /* exit protection commands */
1559                                map_write(map, CMD(0x90), chip->start);
1560                                map_write(map, CMD(0x00), chip->start);
1561                                put_chip(map, chip, chip->start);
1562                                mutex_unlock(&chip->mutex);
1563
1564                                user_locked = ((lockreg & 0x01) == 0x00);
1565                        }
1566                }
1567
1568                otpsize = user_regs ? user_size : factory_size;
1569                if (!otpsize)
1570                        continue;
1571                otpoffset = user_regs ? user_offset : factory_offset;
1572                otplocked = user_regs ? user_locked : 1;
1573
1574                if (!action) {
1575                        /* return otpinfo */
1576                        struct otp_info *otpinfo;
1577                        if (len < sizeof(*otpinfo))
1578                                return -ENOSPC;
1579                        len -= sizeof(*otpinfo);
1580                        otpinfo = (struct otp_info *)buf;
1581                        otpinfo->start = from;
1582                        otpinfo->length = otpsize;
1583                        otpinfo->locked = otplocked;
1584                        buf += sizeof(*otpinfo);
1585                        *retlen += sizeof(*otpinfo);
1586                        from += otpsize;
1587                } else if ((from < otpsize) && (len > 0)) {
1588                        size_t size;
1589                        size = (len < otpsize - from) ? len : otpsize - from;
1590                        ret = action(map, chip, otpoffset + from, size, buf,
1591                                     otpsize);
1592                        if (ret < 0)
1593                                return ret;
1594
1595                        buf += size;
1596                        len -= size;
1597                        *retlen += size;
1598                        from = 0;
1599                } else {
1600                        from -= otpsize;
1601                }
1602        }
1603        return 0;
1604}
1605
1606static int cfi_amdstd_get_fact_prot_info(struct mtd_info *mtd, size_t len,
1607                                         size_t *retlen, struct otp_info *buf)
1608{
1609        return cfi_amdstd_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
1610                                   NULL, 0);
1611}
1612
1613static int cfi_amdstd_get_user_prot_info(struct mtd_info *mtd, size_t len,
1614                                         size_t *retlen, struct otp_info *buf)
1615{
1616        return cfi_amdstd_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
1617                                   NULL, 1);
1618}
1619
1620static int cfi_amdstd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
1621                                         size_t len, size_t *retlen,
1622                                         u_char *buf)
1623{
1624        return cfi_amdstd_otp_walk(mtd, from, len, retlen,
1625                                   buf, do_read_secsi_onechip, 0);
1626}
1627
1628static int cfi_amdstd_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
1629                                         size_t len, size_t *retlen,
1630                                         u_char *buf)
1631{
1632        return cfi_amdstd_otp_walk(mtd, from, len, retlen,
1633                                   buf, do_read_secsi_onechip, 1);
1634}
1635
1636static int cfi_amdstd_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
1637                                          size_t len, size_t *retlen,
1638                                          const u_char *buf)
1639{
1640        return cfi_amdstd_otp_walk(mtd, from, len, retlen, (u_char *)buf,
1641                                   do_otp_write, 1);
1642}
1643
1644static int cfi_amdstd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
1645                                         size_t len)
1646{
1647        size_t retlen;
1648        return cfi_amdstd_otp_walk(mtd, from, len, &retlen, NULL,
1649                                   do_otp_lock, 1);
1650}
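    /*
     * The *_prot_reg / *_prot_info wrappers above are what the MTD core
     * calls through the user/factory OTP hooks; userspace typically
     * reaches them via the OTP ioctls (OTPSELECT, OTPGETREGIONINFO,
     * OTPLOCK) on the mtd character device.  (General MTD OTP flow,
     * not specific to this driver.)
     */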
1651
1652static int __xipram do_write_oneword_once(struct map_info *map,
1653                                          struct flchip *chip,
1654                                          unsigned long adr, map_word datum,
1655                                          int mode, struct cfi_private *cfi)
1656{
1657        unsigned long timeo;
1658        /*
1659         * We use a 1ms + 1 jiffies generic timeout for writes (most devices
1660         * have a max write time of a few hundred usecs). However, we should
1661         * use the maximum timeout value given by the chip at probe time
1662         * instead.  Unfortunately, struct flchip does not have a field for
1663         * the maximum timeout, only for the typical one, which can be far
1664         * too short depending on the conditions.  The ' + 1' is to avoid
1665         * having a timeout of 0 jiffies if HZ is smaller than 1000.
1666         */
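            /* Example: with HZ=250, HZ/1000 == 0 in integer division,
               so without the '+ 1' the timeout would be 0 jiffies;
               with it, 1 jiffy (4ms). */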
1667        unsigned long uWriteTimeout = (HZ / 1000) + 1;
1668        int ret = 0;
1669
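            /* Single-word program sequence: two-cycle unlock (0xAA then
               0x55 at the device's unlock addresses, typically
               0x555/0x2AA), the 0xA0 program command, then the datum
               itself at the target address. */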
1670        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1671        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1672        cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1673        map_write(map, datum, adr);
1674        chip->state = mode;
1675
1676        INVALIDATE_CACHE_UDELAY(map, chip,
1677                                adr, map_bankwidth(map),
1678                                chip->word_write_time);
1679
1680        /* See comment above for timeout value. */
1681        timeo = jiffies + uWriteTimeout;
1682        for (;;) {
1683                if (chip->state != mode) {
1684                        /* Someone's suspended the write. Sleep */
1685                        DECLARE_WAITQUEUE(wait, current);
1686
1687                        set_current_state(TASK_UNINTERRUPTIBLE);
1688                        add_wait_queue(&chip->wq, &wait);
1689                        mutex_unlock(&chip->mutex);
1690                        schedule();
1691                        remove_wait_queue(&chip->wq, &wait);
1692                        timeo = jiffies + (HZ / 2); /* FIXME */
1693                        mutex_lock(&chip->mutex);
1694                        continue;
1695                }
1696
1697                /*
1698                 * Check time_after() and !chip_good() together so a write
1699                 * that completed while we slept isn't flagged as a timeout.
1700                 */
1701                if (time_after(jiffies, timeo) &&
1702                    !chip_good(map, chip, adr, datum)) {
1703                        xip_enable(map, chip, adr);
1704                        printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
1705                        xip_disable(map, chip, adr);
1706                        ret = -EIO;
1707                        break;
1708                }
1709
1710                if (chip_good(map, chip, adr, datum)) {
1711                        if (cfi_check_err_status(map, chip, adr))
1712                                ret = -EIO;
1713                        break;
1714                }
1715
1716                /* Latency issues. Drop the lock, wait a while and retry */
1717                UDELAY(map, chip, adr, 1);
1718        }
1719
1720        return ret;
1721}
1722
1723static int __xipram do_write_oneword_start(struct map_info *map,
1724                                           struct flchip *chip,
1725                                           unsigned long adr, int mode)
1726{
1727        int ret;
1728
1729        mutex_lock(&chip->mutex);
1730
1731        ret = get_chip(map, chip, adr, mode);
1732        if (ret) {
1733                mutex_unlock(&chip->mutex);
1734                return ret;
1735        }
1736
1737        if (mode == FL_OTP_WRITE)
1738                otp_enter(map, chip, adr, map_bankwidth(map));
1739
1740        return ret;
1741}
1742
1743static void __xipram do_write_oneword_done(struct map_info *map,
1744                                           struct flchip *chip,
1745                                           unsigned long adr, int mode)
1746{
1747        if (mode == FL_OTP_WRITE)
1748                otp_exit(map, chip, adr, map_bankwidth(map));
1749
1750        chip->state = FL_READY;
1751        DISABLE_VPP(map);
1752        put_chip(map, chip, adr);
1753
1754        mutex_unlock(&chip->mutex);
1755}
1756
1757static int __xipram do_write_oneword_retry(struct map_info *map,
1758                                           struct flchip *chip,
1759                                           unsigned long adr, map_word datum,
1760                                           int mode)
1761{
1762        struct cfi_private *cfi = map->fldrv_priv;
1763        int ret = 0;
1764        map_word oldd;
1765        int retry_cnt = 0;
1766
1767        /*
1768         * Check for a NOP for the case when the datum to write is already
1769         * present - it saves time and works around buggy chips that corrupt
1770         * data at other locations when 0xff is written to a location that
1771         * already contains 0xff.
1772         */
1773        oldd = map_read(map, adr);
1774        if (map_word_equal(map, oldd, datum)) {
1775                pr_debug("MTD %s(): NOP\n", __func__);
1776                return ret;
1777        }
1778
1779        XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1780        ENABLE_VPP(map);
1781        xip_disable(map, chip, adr);
1782
1783 retry:
1784        ret = do_write_oneword_once(map, chip, adr, datum, mode, cfi);
1785        if (ret) {
1786                /* reset on all failures. */
1787                map_write(map, CMD(0xF0), chip->start);
1788                /* FIXME - should have reset delay before continuing */
1789
1790                if (++retry_cnt <= MAX_RETRIES) {
1791                        ret = 0;
1792                        goto retry;
1793                }
1794        }
1795        xip_enable(map, chip, adr);
1796
1797        return ret;
1798}
1799
1800static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1801                                     unsigned long adr, map_word datum,
1802                                     int mode)
1803{
1804        int ret;
1805
1806        adr += chip->start;
1807
1808        pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n", __func__, adr,
1809                 datum.x[0]);
1810
1811        ret = do_write_oneword_start(map, chip, adr, mode);
1812        if (ret)
1813                return ret;
1814
1815        ret = do_write_oneword_retry(map, chip, adr, datum, mode);
1816
1817        do_write_oneword_done(map, chip, adr, mode);
1818
1819        return ret;
1820}
1821
1822
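    /*
     * Word-programming write path: an unaligned head and tail are
     * handled by read-modify-write of a full bus word, everything in
     * between is programmed one bus word at a time, crossing chip
     * boundaries as needed.
     */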
1823static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
1824                                  size_t *retlen, const u_char *buf)
1825{
1826        struct map_info *map = mtd->priv;
1827        struct cfi_private *cfi = map->fldrv_priv;
1828        int ret;
1829        int chipnum;
1830        unsigned long ofs, chipstart;
1831        DECLARE_WAITQUEUE(wait, current);
1832
1833        chipnum = to >> cfi->chipshift;
1834        ofs = to - (chipnum << cfi->chipshift);
1835        chipstart = cfi->chips[chipnum].start;
1836
1837        /* If it's not bus-aligned, do the first byte write */
1838        if (ofs & (map_bankwidth(map)-1)) {
1839                unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1840                int i = ofs - bus_ofs;
1841                int n = 0;
1842                map_word tmp_buf;
1843
1844 retry:
1845                mutex_lock(&cfi->chips[chipnum].mutex);
1846
1847                if (cfi->chips[chipnum].state != FL_READY) {
1848                        set_current_state(TASK_UNINTERRUPTIBLE);
1849                        add_wait_queue(&cfi->chips[chipnum].wq, &wait);
1850
1851                        mutex_unlock(&cfi->chips[chipnum].mutex);
1852
1853                        schedule();
1854                        remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
1855                        goto retry;
1856                }
1857
1858                /* Load 'tmp_buf' with old contents of flash */
1859                tmp_buf = map_read(map, bus_ofs+chipstart);
1860
1861                mutex_unlock(&cfi->chips[chipnum].mutex);
1862
1863                /* Number of bytes to copy from buffer */
1864                n = min_t(int, len, map_bankwidth(map)-i);
1865
1866                tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
1867
1868                ret = do_write_oneword(map, &cfi->chips[chipnum],
1869                                       bus_ofs, tmp_buf, FL_WRITING);
1870                if (ret)
1871                        return ret;
1872
1873                ofs += n;
1874                buf += n;
1875                (*retlen) += n;
1876                len -= n;
1877
1878                if (ofs >> cfi->chipshift) {
1879                        chipnum++;
1880                        ofs = 0;
1881                        if (chipnum == cfi->numchips)
1882                                return 0;
1883                }
1884        }
1885
1886        /* We are now aligned, write as much as possible */
1887        while (len >= map_bankwidth(map)) {
1888                map_word datum;
1889
1890                datum = map_word_load(map, buf);
1891
1892                ret = do_write_oneword(map, &cfi->chips[chipnum],
1893                                       ofs, datum, FL_WRITING);
1894                if (ret)
1895                        return ret;
1896
1897                ofs += map_bankwidth(map);
1898                buf += map_bankwidth(map);
1899                (*retlen) += map_bankwidth(map);
1900                len -= map_bankwidth(map);
1901
1902                if (ofs >> cfi->chipshift) {
1903                        chipnum++;
1904                        ofs = 0;
1905                        if (chipnum == cfi->numchips)
1906                                return 0;
1907                        chipstart = cfi->chips[chipnum].start;
1908                }
1909        }
1910
1911        /* Write the trailing bytes if any */
1912        if (len & (map_bankwidth(map)-1)) {
1913                map_word tmp_buf;
1914
1915 retry1:
1916                mutex_lock(&cfi->chips[chipnum].mutex);
1917
1918                if (cfi->chips[chipnum].state != FL_READY) {
1919                        set_current_state(TASK_UNINTERRUPTIBLE);
1920                        add_wait_queue(&cfi->chips[chipnum].wq, &wait);
1921
1922                        mutex_unlock(&cfi->chips[chipnum].mutex);
1923
1924                        schedule();
1925                        remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
1926                        goto retry1;
1927                }
1928
1929                tmp_buf = map_read(map, ofs + chipstart);
1930
1931                mutex_unlock(&cfi->chips[chipnum].mutex);
1932
1933                tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
1934
1935                ret = do_write_oneword(map, &cfi->chips[chipnum],
1936                                       ofs, tmp_buf, FL_WRITING);
1937                if (ret)
1938                        return ret;
1939
1940                (*retlen) += len;
1941        }
1942
1943        return 0;
1944}
1945
1946#if !FORCE_WORD_WRITE
1947static int __xipram do_write_buffer_wait(struct map_info *map,
1948                                         struct flchip *chip, unsigned long adr,
1949                                         map_word datum)
1950{
1951        unsigned long timeo;
1952        unsigned long u_write_timeout;
1953        int ret = 0;
1954
1955        /*
1956         * Timeout is calculated according to CFI data, if available.
1957         * See more comments in cfi_cmdset_0002().
1958         */
1959        u_write_timeout = usecs_to_jiffies(chip->buffer_write_time_max);
1960        timeo = jiffies + u_write_timeout;
1961
1962        for (;;) {
1963                if (chip->state != FL_WRITING) {
1964                        /* Someone's suspended the write. Sleep */
1965                        DECLARE_WAITQUEUE(wait, current);
1966
1967                        set_current_state(TASK_UNINTERRUPTIBLE);
1968                        add_wait_queue(&chip->wq, &wait);
1969                        mutex_unlock(&chip->mutex);
1970                        schedule();
1971                        remove_wait_queue(&chip->wq, &wait);
1972                        timeo = jiffies + (HZ / 2); /* FIXME */
1973                        mutex_lock(&chip->mutex);
1974                        continue;
1975                }
1976
1977                /*
1978                 * Check time_after() and !chip_good() together so a write
1979                 * that completed while we slept isn't flagged as a timeout.
1980                 */
1981                if (time_after(jiffies, timeo) &&
1982                    !chip_good(map, chip, adr, datum)) {
1983                        pr_err("MTD %s(): software timeout, address:0x%.8lx.\n",
1984                               __func__, adr);
1985                        ret = -EIO;
1986                        break;
1987                }
1988
1989                if (chip_good(map, chip, adr, datum)) {
1990                        if (cfi_check_err_status(map, chip, adr))
1991                                ret = -EIO;
1992                        break;
1993                }
1994
1995                /* Latency issues. Drop the lock, wait a while and retry */
1996                UDELAY(map, chip, adr, 1);
1997        }
1998
1999        return ret;
2000}
2001
2002static void __xipram do_write_buffer_reset(struct map_info *map,
2003                                           struct flchip *chip,
2004                                           struct cfi_private *cfi)
2005{
2006        /*
2007         * Recovery from write-buffer programming failures requires
2008         * the write-to-buffer-reset sequence.  Since the last part
2009         * of the sequence also works as a normal reset, we can run
2010         * the same commands regardless of why we are here.
2011         * See e.g.
2012         * http://www.spansion.com/Support/Application%20Notes/MirrorBit_Write_Buffer_Prog_Page_Buffer_Read_AN.pdf
2013         */
2014        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2015                         cfi->device_type, NULL);
2016        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
2017                         cfi->device_type, NULL);
2018        cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, chip->start, map, cfi,
2019                         cfi->device_type, NULL);
2020
2021        /* FIXME - should have reset delay before continuing */
2022}
2023
2024/*
2025 * FIXME: interleaved mode not tested, and probably not supported!
2026 */
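    /*
     * Buffered write command flow: two-cycle unlock, 0x25 (Write to
     * Buffer) at the target sector, a word count of (words - 1), the
     * data words themselves, then 0x29 (Write Buffer Program Confirm)
     * to start programming.
     */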
2027static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
2028                                    unsigned long adr, const u_char *buf,
2029                                    int len)
2030{
2031        struct cfi_private *cfi = map->fldrv_priv;
2032        int ret;
2033        unsigned long cmd_adr;
2034        int z, words;
2035        map_word datum;
2036
2037        adr += chip->start;
2038        cmd_adr = adr;
2039
2040        mutex_lock(&chip->mutex);
2041        ret = get_chip(map, chip, adr, FL_WRITING);
2042        if (ret) {
2043                mutex_unlock(&chip->mutex);
2044                return ret;
2045        }
2046
2047        datum = map_word_load(map, buf);
2048
2049        pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
2050                 __func__, adr, datum.x[0]);
2051
2052        XIP_INVAL_CACHED_RANGE(map, adr, len);
2053        ENABLE_VPP(map);
2054        xip_disable(map, chip, cmd_adr);
2055
2056        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2057        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
2058
2059        /* Write Buffer Load */
2060        map_write(map, CMD(0x25), cmd_adr);
2061
2062        chip->state = FL_WRITING_TO_BUFFER;
2063
2064        /* Write length of data to come */
2065        words = len / map_bankwidth(map);
2066        map_write(map, CMD(words - 1), cmd_adr);
2067        /* Write data */
2068        z = 0;
2069        while (z < words * map_bankwidth(map)) {
2070                datum = map_word_load(map, buf);
2071                map_write(map, datum, adr + z);
2072
2073                z += map_bankwidth(map);
2074                buf += map_bankwidth(map);
2075        }
2076        z -= map_bankwidth(map);
2077
2078        adr += z;
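            /* 'adr' now points at the last word loaded into the buffer;
               completion below is polled against this final address and
               the last datum written */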
2079
2080        /* Write Buffer Program Confirm: GO GO GO */
2081        map_write(map, CMD(0x29), cmd_adr);
2082        chip->state = FL_WRITING;
2083
2084        INVALIDATE_CACHE_UDELAY(map, chip,
2085                                adr, map_bankwidth(map),
2086                                chip->word_write_time);
2087
2088        ret = do_write_buffer_wait(map, chip, adr, datum);
2089        if (ret)
2090                do_write_buffer_reset(map, chip, cfi);
2091
2092        xip_enable(map, chip, adr);
2093
2094        chip->state = FL_READY;
2095        DISABLE_VPP(map);
2096        put_chip(map, chip, adr);
2097        mutex_unlock(&chip->mutex);
2098
2099        return ret;
2100}
2101
2102
2103static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
2104                                    size_t *retlen, const u_char *buf)
2105{
2106        struct map_info *map = mtd->priv;
2107        struct cfi_private *cfi = map->fldrv_priv;
2108        int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
2109        int ret;
2110        int chipnum;
2111        unsigned long ofs;
2112
2113        chipnum = to >> cfi->chipshift;
2114        ofs = to - (chipnum << cfi->chipshift);
2115
2116        /* If it's not bus-aligned, do the first word write */
2117        if (ofs & (map_bankwidth(map)-1)) {
2118                size_t local_len = (-ofs) & (map_bankwidth(map)-1);
2119                if (local_len > len)
2120                        local_len = len;
2121                ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
2122                                             local_len, retlen, buf);
2123                if (ret)
2124                        return ret;
2125                ofs += local_len;
2126                buf += local_len;
2127                len -= local_len;
2128
2129                if (ofs >> cfi->chipshift) {
2130                        chipnum++;
2131                        ofs = 0;
2132                        if (chipnum == cfi->numchips)
2133                                return 0;
2134                }
2135        }
2136
2137        /* Write buffer is worth it only if more than one word to write... */
2138        while (len >= map_bankwidth(map) * 2) {
2139                /* We must not cross write block boundaries */
2140                int size = wbufsize - (ofs & (wbufsize-1));
2141
2142                if (size > len)
2143                        size = len;
2144                if (size % map_bankwidth(map))
2145                        size -= size % map_bankwidth(map);
2146
2147                ret = do_write_buffer(map, &cfi->chips[chipnum],
2148                                      ofs, buf, size);
2149                if (ret)
2150                        return ret;
2151
2152                ofs += size;
2153                buf += size;
2154                (*retlen) += size;
2155                len -= size;
2156
2157                if (ofs >> cfi->chipshift) {
2158                        chipnum++;
2159                        ofs = 0;
2160                        if (chipnum == cfi->numchips)
2161                                return 0;
2162                }
2163        }
2164
2165        if (len) {
2166                size_t retlen_dregs = 0;
2167
2168                ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
2169                                             len, &retlen_dregs, buf);
2170
2171                *retlen += retlen_dregs;
2172                return ret;
2173        }
2174
2175        return 0;
2176}
2177#endif /* !FORCE_WORD_WRITE */
2178
2179/*
2180 * Wait for the flash chip to become ready to write data
2181 *
2182 * This is only called during the panic_write() path. When panic_write()
2183 * is called, the kernel is in the process of a panic, and will soon be
2184 * dead. Therefore we don't take any locks, and attempt to get access
2185 * to the chip as soon as possible.
2186 */
2187static int cfi_amdstd_panic_wait(struct map_info *map, struct flchip *chip,
2188                                 unsigned long adr)
2189{
2190        struct cfi_private *cfi = map->fldrv_priv;
2191        int retries = 10;
2192        int i;
2193
2194        /*
2195         * If the driver thinks the chip is idle, and no toggle bits
2196         * are changing, then the chip is actually idle for sure.
2197         */
2198        if (chip->state == FL_READY && chip_ready(map, chip, adr))
2199                return 0;
2200
2201        /*
2202         * Try several times to reset the chip and then wait for it
2203         * to become idle. The upper limit of a few milliseconds of
2204         * delay isn't a big problem: the kernel is dying anyway. It
2205         * is more important to save the messages.
2206         */
2207        while (retries > 0) {
2208                const unsigned long timeo = (HZ / 1000) + 1;
2209
2210                /* send the reset command */
2211                map_write(map, CMD(0xF0), chip->start);
2212
2213                /* wait for the chip to become ready */
2214                for (i = 0; i < jiffies_to_usecs(timeo); i++) {
2215                        if (chip_ready(map, chip, adr))
2216                                return 0;
2217
2218                        udelay(1);
2219                }
2220
2221                retries--;
2222        }
2223
2224        /* the chip never became ready */
2225        return -EBUSY;
2226}
2227
2228/*
2229 * Write out one word of data to a single flash chip during a kernel panic
2230 *
2231 * This is only called during the panic_write() path. When panic_write()
2232 * is called, the kernel is in the process of a panic, and will soon be
2233 * dead. Therefore we don't take any locks, and attempt to get access
2234 * to the chip as soon as possible.
2235 *
2236 * The implementation of this routine is intentionally similar to
2237 * do_write_oneword(), in order to ease code maintenance.
2238 */
2239static int do_panic_write_oneword(struct map_info *map, struct flchip *chip,
2240                                  unsigned long adr, map_word datum)
2241{
2242        const unsigned long uWriteTimeout = (HZ / 1000) + 1;
2243        struct cfi_private *cfi = map->fldrv_priv;
2244        int retry_cnt = 0;
2245        map_word oldd;
2246        int ret;
2247        int i;
2248
2249        adr += chip->start;
2250
2251        ret = cfi_amdstd_panic_wait(map, chip, adr);
2252        if (ret)
2253                return ret;
2254
2255        pr_debug("MTD %s(): PANIC WRITE 0x%.8lx(0x%.8lx)\n",
2256                        __func__, adr, datum.x[0]);
2257
2258        /*
2259         * Check for a NOP for the case when the datum to write is already
2260         * present - it saves time and works around buggy chips that corrupt
2261         * data at other locations when 0xff is written to a location that
2262         * already contains 0xff.
2263         */
2264        oldd = map_read(map, adr);
2265        if (map_word_equal(map, oldd, datum)) {
2266                pr_debug("MTD %s(): NOP\n", __func__);
2267                goto op_done;
2268        }
2269
2270        ENABLE_VPP(map);
2271
2272retry:
2273        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2274        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
2275        cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2276        map_write(map, datum, adr);
2277
2278        for (i = 0; i < jiffies_to_usecs(uWriteTimeout); i++) {
2279                if (chip_ready(map, chip, adr))
2280                        break;
2281
2282                udelay(1);
2283        }
2284
2285        if (!chip_good(map, chip, adr, datum) ||
2286            cfi_check_err_status(map, chip, adr)) {
2287                /* reset on all failures. */
2288                map_write(map, CMD(0xF0), chip->start);
2289                /* FIXME - should have reset delay before continuing */
2290
2291                if (++retry_cnt <= MAX_RETRIES)
2292                        goto retry;
2293
2294                ret = -EIO;
2295        }
2296
2297op_done:
2298        DISABLE_VPP(map);
2299        return ret;
2300}
2301
2302/*
2303 * Write out some data during a kernel panic
2304 *
2305 * This is used by the mtdoops driver to save the dying messages from a
2306 * kernel which has panic'd.
2307 *
2308 * This routine ignores all of the locking used throughout the rest of the
2309 * driver, in order to ensure that the data gets written out no matter what
2310 * state this driver (and the flash chip itself) was in when the kernel crashed.
2311 *
2312 * The implementation of this routine is intentionally similar to
2313 * cfi_amdstd_write_words(), in order to ease code maintenance.
2314 */
2315static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
2316                                  size_t *retlen, const u_char *buf)
2317{
2318        struct map_info *map = mtd->priv;
2319        struct cfi_private *cfi = map->fldrv_priv;
2320        unsigned long ofs, chipstart;
2321        int ret;
2322        int chipnum;
2323
2324        chipnum = to >> cfi->chipshift;
2325        ofs = to - (chipnum << cfi->chipshift);
2326        chipstart = cfi->chips[chipnum].start;
2327
2328        /* If it's not bus aligned, do the first byte write */
2329        if (ofs & (map_bankwidth(map) - 1)) {
2330                unsigned long bus_ofs = ofs & ~(map_bankwidth(map) - 1);
2331                int i = ofs - bus_ofs;
2332                int n = 0;
2333                map_word tmp_buf;
2334
2335                ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], bus_ofs);
2336                if (ret)
2337                        return ret;
2338
2339                /* Load 'tmp_buf' with old contents of flash */
2340                tmp_buf = map_read(map, bus_ofs + chipstart);
2341
2342                /* Number of bytes to copy from buffer */
2343                n = min_t(int, len, map_bankwidth(map) - i);
2344
2345                tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
2346
2347                ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
2348                                             bus_ofs, tmp_buf);
2349                if (ret)
2350                        return ret;
2351
2352                ofs += n;
2353                buf += n;
2354                (*retlen) += n;
2355                len -= n;
2356
2357                if (ofs >> cfi->chipshift) {
2358                        chipnum++;
2359                        ofs = 0;
2360                        if (chipnum == cfi->numchips)
2361                                return 0;
2362                }
2363        }
2364
2365        /* We are now aligned, write as much as possible */
2366        while (len >= map_bankwidth(map)) {
2367                map_word datum;
2368
2369                datum = map_word_load(map, buf);
2370
2371                ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
2372                                             ofs, datum);
2373                if (ret)
2374                        return ret;
2375
2376                ofs += map_bankwidth(map);
2377                buf += map_bankwidth(map);
2378                (*retlen) += map_bankwidth(map);
2379                len -= map_bankwidth(map);
2380
2381                if (ofs >> cfi->chipshift) {
2382                        chipnum++;
2383                        ofs = 0;
2384                        if (chipnum == cfi->numchips)
2385                                return 0;
2386
2387                        chipstart = cfi->chips[chipnum].start;
2388                }
2389        }
2390
2391        /* Write the trailing bytes if any */
2392        if (len & (map_bankwidth(map) - 1)) {
2393                map_word tmp_buf;
2394
2395                ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], ofs);
2396                if (ret)
2397                        return ret;
2398
2399                tmp_buf = map_read(map, ofs + chipstart);
2400
2401                tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
2402
2403                ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
2404                                             ofs, tmp_buf);
2405                if (ret)
2406                        return ret;
2407
2408                (*retlen) += len;
2409        }
2410
2411        return 0;
2412}
2413
2414
2415/*
2416 * Handle devices with one erase region, that only implement
2417 * the chip erase command.
2418 */
2419static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
2420{
2421        struct cfi_private *cfi = map->fldrv_priv;
2422        unsigned long timeo = jiffies + HZ;
2423        unsigned long adr;
2424        DECLARE_WAITQUEUE(wait, current);
2425        int ret;
2426        int retry_cnt = 0;
2427
2428        adr = cfi->addr_unlock1;
2429
2430        mutex_lock(&chip->mutex);
2431        ret = get_chip(map, chip, adr, FL_ERASING);
2432        if (ret) {
2433                mutex_unlock(&chip->mutex);
2434                return ret;
2435        }
2436
2437        pr_debug("MTD %s(): ERASE 0x%.8lx\n",
2438               __func__, chip->start);
2439
2440        XIP_INVAL_CACHED_RANGE(map, adr, map->size);
2441        ENABLE_VPP(map);
2442        xip_disable(map, chip, adr);
2443
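            /* Chip erase sequence: two-cycle unlock, 0x80 (erase
               setup), a second two-cycle unlock, then 0x10 (chip
               erase), all at the device's unlock addresses. */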
2444 retry:
2445        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2446        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
2447        cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2448        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2449        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
2450        cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2451
2452        chip->state = FL_ERASING;
2453        chip->erase_suspended = 0;
2454        chip->in_progress_block_addr = adr;
2455        chip->in_progress_block_mask = ~(map->size - 1);
2456
2457        INVALIDATE_CACHE_UDELAY(map, chip,
2458                                adr, map->size,
2459                                chip->erase_time*500);
2460
2461        timeo = jiffies + (HZ*20);
2462
2463        for (;;) {
2464                if (chip->state != FL_ERASING) {
2465                        /* Someone's suspended the erase. Sleep */
2466                        set_current_state(TASK_UNINTERRUPTIBLE);
2467                        add_wait_queue(&chip->wq, &wait);
2468                        mutex_unlock(&chip->mutex);
2469                        schedule();
2470                        remove_wait_queue(&chip->wq, &wait);
2471                        mutex_lock(&chip->mutex);
2472                        continue;
2473                }
2474                if (chip->erase_suspended) {
2475                        /* This erase was suspended and resumed.
2476                           Adjust the timeout */
2477                        timeo = jiffies + (HZ*20); /* FIXME */
2478                        chip->erase_suspended = 0;
2479                }
2480
2481                if (chip_good(map, chip, adr, map_word_ff(map))) {
2482                        if (cfi_check_err_status(map, chip, adr))
2483                                ret = -EIO;
2484                        break;
2485                }
2486
2487                if (time_after(jiffies, timeo)) {
2488                        printk(KERN_WARNING "MTD %s(): software timeout\n",
2489                               __func__);
2490                        ret = -EIO;
2491                        break;
2492                }
2493
2494                /* Latency issues. Drop the lock, wait a while and retry */
2495                UDELAY(map, chip, adr, 1000000/HZ);
2496        }
2497        /* Did we succeed? */
2498        if (ret) {
2499                /* reset on all failures. */
2500                map_write(map, CMD(0xF0), chip->start);
2501                /* FIXME - should have reset delay before continuing */
2502
2503                if (++retry_cnt <= MAX_RETRIES) {
2504                        ret = 0;
2505                        goto retry;
2506                }
2507        }
2508
2509        chip->state = FL_READY;
2510        xip_enable(map, chip, adr);
2511        DISABLE_VPP(map);
2512        put_chip(map, chip, adr);
2513        mutex_unlock(&chip->mutex);
2514
2515        return ret;
2516}
2517
2518
2519static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
2520{
2521        struct cfi_private *cfi = map->fldrv_priv;
2522        unsigned long timeo = jiffies + HZ;
2523        DECLARE_WAITQUEUE(wait, current);
2524        int ret;
2525        int retry_cnt = 0;
2526
2527        adr += chip->start;
2528
2529        mutex_lock(&chip->mutex);
2530        ret = get_chip(map, chip, adr, FL_ERASING);
2531        if (ret) {
2532                mutex_unlock(&chip->mutex);
2533                return ret;
2534        }
2535
2536        pr_debug("MTD %s(): ERASE 0x%.8lx\n",
2537                 __func__, adr);
2538
2539        XIP_INVAL_CACHED_RANGE(map, adr, len);
2540        ENABLE_VPP(map);
2541        xip_disable(map, chip, adr);
2542
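            /* Sector erase sequence: two-cycle unlock, 0x80 (erase
               setup), a second two-cycle unlock, then the sector erase
               command (usually 0x30) written to the sector address. */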
2543 retry:
2544        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2545        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
2546        cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2547        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2548        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
2549        map_write(map, cfi->sector_erase_cmd, adr);
2550
2551        chip->state = FL_ERASING;
2552        chip->erase_suspended = 0;
2553        chip->in_progress_block_addr = adr;
2554        chip->in_progress_block_mask = ~(len - 1);
2555
2556        INVALIDATE_CACHE_UDELAY(map, chip,
2557                                adr, len,
2558                                chip->erase_time*500);
2559
2560        timeo = jiffies + (HZ*20);
2561
2562        for (;;) {
2563                if (chip->state != FL_ERASING) {
2564                        /* Someone's suspended the erase. Sleep */
2565                        set_current_state(TASK_UNINTERRUPTIBLE);
2566                        add_wait_queue(&chip->wq, &wait);
2567                        mutex_unlock(&chip->mutex);
2568                        schedule();
2569                        remove_wait_queue(&chip->wq, &wait);
2570                        mutex_lock(&chip->mutex);
2571                        continue;
2572                }
2573                if (chip->erase_suspended) {
2574                        /* This erase was suspended and resumed.
2575                           Adjust the timeout */
2576                        timeo = jiffies + (HZ*20); /* FIXME */
2577                        chip->erase_suspended = 0;
2578                }
2579
2580                if (chip_good(map, chip, adr, map_word_ff(map))) {
2581                        if (cfi_check_err_status(map, chip, adr))
2582                                ret = -EIO;
2583                        break;
2584                }
2585
2586                if (time_after(jiffies, timeo)) {
2587                        printk(KERN_WARNING "MTD %s(): software timeout\n",
2588                               __func__);
2589                        ret = -EIO;
2590                        break;
2591                }
2592
2593                /* Latency issues. Drop the lock, wait a while and retry */
2594                UDELAY(map, chip, adr, 1000000/HZ);
2595        }
2596        /* Did we succeed? */
2597        if (ret) {
2598                /* reset on all failures. */
2599                map_write(map, CMD(0xF0), chip->start);
2600                /* FIXME - should have reset delay before continuing */
2601
2602                if (++retry_cnt <= MAX_RETRIES) {
2603                        ret = 0;
2604                        goto retry;
2605                }
2606        }
2607
2608        chip->state = FL_READY;
2609        xip_enable(map, chip, adr);
2610        DISABLE_VPP(map);
2611        put_chip(map, chip, adr);
2612        mutex_unlock(&chip->mutex);
2613        return ret;
2614}
2615
2616
2617static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
2618{
2619        return cfi_varsize_frob(mtd, do_erase_oneblock, instr->addr,
2620                                instr->len, NULL);
2621}
2622
2623
2624static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
2625{
2626        struct map_info *map = mtd->priv;
2627        struct cfi_private *cfi = map->fldrv_priv;
2628
2629        if (instr->addr != 0)
2630                return -EINVAL;
2631
2632        if (instr->len != mtd->size)
2633                return -EINVAL;
2634
2635        return do_erase_chip(map, &cfi->chips[0]);
2636}
2637
2638static int do_atmel_lock(struct map_info *map, struct flchip *chip,
2639                         unsigned long adr, int len, void *thunk)
2640{
2641        struct cfi_private *cfi = map->fldrv_priv;
2642        int ret;
2643
2644        mutex_lock(&chip->mutex);
2645        ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
2646        if (ret)
2647                goto out_unlock;
2648        chip->state = FL_LOCKING;
2649
2650        pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len);
2651
2652        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2653                         cfi->device_type, NULL);
2654        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
2655                         cfi->device_type, NULL);
2656        cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi,
2657                         cfi->device_type, NULL);
2658        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2659                         cfi->device_type, NULL);
2660        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
2661                         cfi->device_type, NULL);
2662        map_write(map, CMD(0x40), chip->start + adr);
2663
2664        chip->state = FL_READY;
2665        put_chip(map, chip, adr + chip->start);
2666        ret = 0;
2667
2668out_unlock:
2669        mutex_unlock(&chip->mutex);
2670        return ret;
2671}
2672
2673static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
2674                           unsigned long adr, int len, void *thunk)
2675{
2676        struct cfi_private *cfi = map->fldrv_priv;
2677        int ret;
2678
2679        mutex_lock(&chip->mutex);
2680        ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
2681        if (ret)
2682                goto out_unlock;
2683        chip->state = FL_UNLOCKING;
2684
2685        pr_debug("MTD %s(): UNLOCK 0x%08lx len %d\n", __func__, adr, len);
2686
2687        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2688                         cfi->device_type, NULL);
2689        map_write(map, CMD(0x70), chip->start + adr);
2690
2691        chip->state = FL_READY;
2692        put_chip(map, chip, adr + chip->start);
2693        ret = 0;
2694
2695out_unlock:
2696        mutex_unlock(&chip->mutex);
2697        return ret;
2698}
2699
2700static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2701{
2702        return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
2703}
2704
2705static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2706{
2707        return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
2708}
2709
2710/*
2711 * Advanced Sector Protection - PPB (Persistent Protection Bit) locking
2712 */
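    /*
     * A PPB is one non-volatile protection bit per sector.  The command
     * set can set bits individually but can only clear all of them at
     * once, which is why cfi_ppb_unlock() below has to record every
     * sector's lock state and re-lock the others afterwards.
     */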
2713
2714struct ppb_lock {
2715        struct flchip *chip;
2716        unsigned long adr;
2717        int locked;
2718};
2719
2720#define DO_XXLOCK_ONEBLOCK_LOCK         ((void *)1)
2721#define DO_XXLOCK_ONEBLOCK_UNLOCK       ((void *)2)
2722#define DO_XXLOCK_ONEBLOCK_GETLOCK      ((void *)3)

static int __maybe_unused do_ppb_xxlock(struct map_info *map,
                                        struct flchip *chip,
                                        unsigned long adr, int len, void *thunk)
{
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long timeo;
        int ret;

        adr += chip->start;
        mutex_lock(&chip->mutex);
        ret = get_chip(map, chip, adr, FL_LOCKING);
        if (ret) {
                mutex_unlock(&chip->mutex);
                return ret;
        }

        pr_debug("MTD %s(): XXLOCK 0x%08lx len %d\n", __func__, adr, len);

        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
                         cfi->device_type, NULL);
        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
                         cfi->device_type, NULL);
        /* PPB entry command */
        cfi_send_gen_cmd(0xC0, cfi->addr_unlock1, chip->start, map, cfi,
                         cfi->device_type, NULL);

        if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
                chip->state = FL_LOCKING;
                map_write(map, CMD(0xA0), adr);
                map_write(map, CMD(0x00), adr);
        } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
                /*
                 * Unlocking of one specific sector is not supported, so we
                 * have to unlock all sectors of this device instead
                 */
                chip->state = FL_UNLOCKING;
                map_write(map, CMD(0x80), chip->start);
                map_write(map, CMD(0x30), chip->start);
        } else if (thunk == DO_XXLOCK_ONEBLOCK_GETLOCK) {
                chip->state = FL_JEDEC_QUERY;
                /*
                 * The PPB bit reads 0 for a protected sector and 1 for
                 * an unprotected one, so invert it: ret is non-zero
                 * when the sector is locked.
                 */
                ret = !cfi_read_query(map, adr);
        } else
                BUG();

        /*
         * Wait for the operation to complete; unlocking all sectors can
         * take quite a long time
         */
        timeo = jiffies + msecs_to_jiffies(2000);       /* 2s max (un)locking */
        for (;;) {
                if (chip_ready(map, chip, adr))
                        break;

                if (time_after(jiffies, timeo)) {
                        printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
                        ret = -EIO;
                        break;
                }

                UDELAY(map, chip, adr, 1);
        }

        /* Exit the PPB command set */
        map_write(map, CMD(0x90), chip->start);
        map_write(map, CMD(0x00), chip->start);

        chip->state = FL_READY;
        put_chip(map, chip, adr);
        mutex_unlock(&chip->mutex);

        return ret;
}

static int __maybe_unused cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs,
                                       uint64_t len)
{
        return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
                                DO_XXLOCK_ONEBLOCK_LOCK);
}

static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
                                         uint64_t len)
{
        struct mtd_erase_region_info *regions = mtd->eraseregions;
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct ppb_lock *sect;
        unsigned long adr;
        loff_t offset;
        uint64_t length;
        int chipnum;
        int i;
        int sectors;
        int ret;
        int max_sectors;

        /*
         * PPB unlocking always unlocks all sectors of the flash chip.
         * We need to re-lock all previously locked sectors. So let's
         * first check the locking status of all sectors and save
         * it for future use.
         */
        max_sectors = 0;
        for (i = 0; i < mtd->numeraseregions; i++)
                max_sectors += regions[i].numblocks;

        sect = kcalloc(max_sectors, sizeof(struct ppb_lock), GFP_KERNEL);
        if (!sect)
                return -ENOMEM;

        /*
         * This code to walk all sectors is a slightly modified version
         * of the cfi_varsize_frob() code.
         */
        i = 0;
        chipnum = 0;
        adr = 0;
        sectors = 0;
        offset = 0;
        length = mtd->size;

        while (length) {
                int size = regions[i].erasesize;

                /*
                 * Only test sectors that shall not be unlocked. The other
                 * sectors shall be unlocked, so let's keep their locking
                 * status at "unlocked" (locked=0) for the final re-locking.
                 */
                if ((offset < ofs) || (offset >= (ofs + len))) {
                        sect[sectors].chip = &cfi->chips[chipnum];
                        sect[sectors].adr = adr;
                        sect[sectors].locked = do_ppb_xxlock(
                                map, &cfi->chips[chipnum], adr, 0,
                                DO_XXLOCK_ONEBLOCK_GETLOCK);
                }

                adr += size;
                offset += size;
                length -= size;

                if (offset == regions[i].offset + size * regions[i].numblocks)
                        i++;

                if (adr >> cfi->chipshift) {
                        if (offset >= (ofs + len))
                                break;
                        adr = 0;
                        chipnum++;

                        if (chipnum >= cfi->numchips)
                                break;
                }

                sectors++;
                if (sectors >= max_sectors) {
                        printk(KERN_ERR "Only %d sectors supported for PPB locking!\n",
                               max_sectors);
                        kfree(sect);
                        return -EINVAL;
                }
        }

        /* Now unlock the whole chip */
        ret = cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
                               DO_XXLOCK_ONEBLOCK_UNLOCK);
        if (ret) {
                kfree(sect);
                return ret;
        }

        /*
         * PPB unlocking always unlocks all sectors of the flash chip.
         * We need to re-lock all previously locked sectors.
         */
        for (i = 0; i < sectors; i++) {
                if (sect[i].locked)
                        do_ppb_xxlock(map, sect[i].chip, sect[i].adr, 0,
                                      DO_XXLOCK_ONEBLOCK_LOCK);
        }

        kfree(sect);
        return ret;
}

static int __maybe_unused cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs,
                                            uint64_t len)
{
        return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
                                DO_XXLOCK_ONEBLOCK_GETLOCK) ? 1 : 0;
}
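
/*
 * Illustrative sketch (not part of the original driver): how another
 * kernel module might exercise these handlers through the generic MTD
 * API once they are installed as mtd->_lock/_unlock/_is_locked.  The
 * function name is hypothetical.
 */
static int __maybe_unused example_toggle_first_block(struct mtd_info *mtd)
{
        uint64_t len = mtd->erasesize;  /* one (max-size) erase block */
        int ret;

        ret = mtd_is_locked(mtd, 0, len);
        if (ret < 0)
                return ret;             /* query failed */
        if (ret > 0)
                return 0;               /* already locked, nothing to do */

        ret = mtd_lock(mtd, 0, len);    /* dispatches to mtd->_lock */
        if (ret)
                return ret;

        return mtd_unlock(mtd, 0, len); /* and back to unlocked */
}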
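
/*
 * Wait for every chip to become idle and park it in FL_SYNCING so that
 * any in-progress operation completes; then restore the previous state
 * and wake up waiters.  No flash commands are issued here.
 */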
static void cfi_amdstd_sync (struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        int i;
        struct flchip *chip;
        DECLARE_WAITQUEUE(wait, current);

        for (i = 0; i < cfi->numchips; i++) {
                chip = &cfi->chips[i];

        retry:
                mutex_lock(&chip->mutex);

                switch (chip->state) {
                case FL_READY:
                case FL_STATUS:
                case FL_CFI_QUERY:
                case FL_JEDEC_QUERY:
                        chip->oldstate = chip->state;
                        chip->state = FL_SYNCING;
                        /* No need to wake_up() on this state change -
                         * as the whole point is that nobody can do anything
                         * with the chip now anyway.
                         */
                        fallthrough;
                case FL_SYNCING:
                        mutex_unlock(&chip->mutex);
                        break;

                default:
                        /* Not an idle state */
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);

                        mutex_unlock(&chip->mutex);

                        schedule();

                        remove_wait_queue(&chip->wq, &wait);

                        goto retry;
                }
        }

        /* Unlock the chips again */

        for (i--; i >= 0; i--) {
                chip = &cfi->chips[i];

                mutex_lock(&chip->mutex);

                if (chip->state == FL_SYNCING) {
                        chip->state = chip->oldstate;
                        wake_up(&chip->wq);
                }
                mutex_unlock(&chip->mutex);
        }
}


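/*
 * Mark every idle chip FL_PM_SUSPENDED.  If any chip is busy, fail with
 * -EAGAIN and roll the already-suspended chips back to their previous
 * state so the suspend can be retried later.
 */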
static int cfi_amdstd_suspend(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        int i;
        struct flchip *chip;
        int ret = 0;

        for (i = 0; !ret && i < cfi->numchips; i++) {
                chip = &cfi->chips[i];

                mutex_lock(&chip->mutex);

                switch (chip->state) {
                case FL_READY:
                case FL_STATUS:
                case FL_CFI_QUERY:
                case FL_JEDEC_QUERY:
                        chip->oldstate = chip->state;
                        chip->state = FL_PM_SUSPENDED;
                        /* No need to wake_up() on this state change -
                         * as the whole point is that nobody can do anything
                         * with the chip now anyway.
                         */
                        break;
                case FL_PM_SUSPENDED:
                        break;

                default:
                        ret = -EAGAIN;
                        break;
                }
                mutex_unlock(&chip->mutex);
        }

        /* Unlock the chips again */

        if (ret) {
                for (i--; i >= 0; i--) {
                        chip = &cfi->chips[i];

                        mutex_lock(&chip->mutex);

                        if (chip->state == FL_PM_SUSPENDED) {
                                chip->state = chip->oldstate;
                                wake_up(&chip->wq);
                        }
                        mutex_unlock(&chip->mutex);
                }
        }

        return ret;
}


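/*
 * Send the reset command (0xF0) so each chip returns to read array
 * mode, then wake up anyone who blocked while the device was
 * suspended.
 */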
static void cfi_amdstd_resume(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        int i;
        struct flchip *chip;

        for (i = 0; i < cfi->numchips; i++) {

                chip = &cfi->chips[i];

                mutex_lock(&chip->mutex);

                if (chip->state == FL_PM_SUSPENDED) {
                        chip->state = FL_READY;
                        map_write(map, CMD(0xF0), chip->start);
                        wake_up(&chip->wq);
                } else {
                        printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");
                }

                mutex_unlock(&chip->mutex);
        }
}


/*
 * Ensure that the flash device is put back into read array mode before
 * unloading the driver or rebooting.  On some systems, rebooting while
 * the flash is in query/program/erase mode will prevent the CPU from
 * fetching the bootloader code, requiring a hard reset or power cycle.
 */
static int cfi_amdstd_reset(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        int i, ret;
        struct flchip *chip;

        for (i = 0; i < cfi->numchips; i++) {

                chip = &cfi->chips[i];

                mutex_lock(&chip->mutex);

                ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
                if (!ret) {
                        map_write(map, CMD(0xF0), chip->start);
                        chip->state = FL_SHUTDOWN;
                        put_chip(map, chip, chip->start);
                }

                mutex_unlock(&chip->mutex);
        }

        return 0;
}


static int cfi_amdstd_reboot(struct notifier_block *nb, unsigned long val,
                               void *v)
{
        struct mtd_info *mtd;

        mtd = container_of(nb, struct mtd_info, reboot_notifier);
        cfi_amdstd_reset(mtd);
        return NOTIFY_DONE;
}
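
/*
 * The notifier is registered against mtd->reboot_notifier when the
 * driver is set up (and unregistered in cfi_amdstd_destroy() below), so
 * cfi_amdstd_reset() runs before the system reboots.
 */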

static void cfi_amdstd_destroy(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        cfi_amdstd_reset(mtd);
        unregister_reboot_notifier(&mtd->reboot_notifier);
        kfree(cfi->cmdset_priv);
        kfree(cfi->cfiq);
        kfree(cfi);
        kfree(mtd->eraseregions);
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");
MODULE_ALIAS("cfi_cmdset_0006");
MODULE_ALIAS("cfi_cmdset_0701");