/*
 * Common Flash Interface support:
 *   AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
 *
 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
 * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com>
 *
 * 2_by_8 routines added by Simon Munton
 *
 * 4_by_16 work by Carolyn J. Smith
 *
 * XIP support hooks by Vitaly Wool (based on code for Intel flash
 * by Nicolas Pitre)
 *
 * 25/09/2008 Christopher Moore: TopBottom fixup for many Macronix with CFI V1.0
 *
 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
 *
 * This code is GPL
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/xip.h>

#define AMD_BOOTLOC_BUG
#define FORCE_WORD_WRITE 0

#define MAX_WORD_RETRIES 3

#define SST49LF004B             0x0060
#define SST49LF040B             0x0050
#define SST49LF008A             0x005a
#define AT49BV6416              0x00d6
static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_amdstd_sync (struct mtd_info *);
static int cfi_amdstd_suspend (struct mtd_info *);
static void cfi_amdstd_resume (struct mtd_info *);
static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *);
static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);

static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
                                  size_t *retlen, const u_char *buf);

static void cfi_amdstd_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"

static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);

static int cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len);

static struct mtd_chip_driver cfi_amdstd_chipdrv = {
        .probe          = NULL, /* Not usable directly */
        .destroy        = cfi_amdstd_destroy,
        .name           = "cfi_cmdset_0002",
        .module         = THIS_MODULE
};


/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_amdstd *extp)
{
        const char* erase_suspend[3] = {
                "Not supported", "Read only", "Read/write"
        };
        const char* top_bottom[6] = {
                "No WP", "8x8KiB sectors at top & bottom, no WP",
                "Bottom boot", "Top boot",
                "Uniform, Bottom WP", "Uniform, Top WP"
        };

        printk("  Silicon revision: %d\n", extp->SiliconRevision >> 1);
        printk("  Address sensitive unlock: %s\n",
               (extp->SiliconRevision & 1) ? "Not required" : "Required");

        if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
                printk("  Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
        else
                printk("  Erase Suspend: Unknown value %d\n", extp->EraseSuspend);

        if (extp->BlkProt == 0)
                printk("  Block protection: Not supported\n");
        else
                printk("  Block protection: %d sectors per group\n", extp->BlkProt);


        printk("  Temporary block unprotect: %s\n",
               extp->TmpBlkUnprotect ? "Supported" : "Not supported");
        printk("  Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
        printk("  Number of simultaneous operations: %d\n", extp->SimultaneousOps);
        printk("  Burst mode: %s\n",
               extp->BurstMode ? "Supported" : "Not supported");
        if (extp->PageMode == 0)
                printk("  Page mode: Not supported\n");
        else
                printk("  Page mode: %d word page\n", extp->PageMode << 2);

        printk("  Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
               extp->VppMin >> 4, extp->VppMin & 0xf);
        printk("  Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
               extp->VppMax >> 4, extp->VppMax & 0xf);

        if (extp->TopBottom < ARRAY_SIZE(top_bottom))
                printk("  Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
        else
                printk("  Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
}
#endif

#ifdef AMD_BOOTLOC_BUG
/* Wheee. Bring me the head of someone at AMD. */
static void fixup_amd_bootblock(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
        __u8 major = extp->MajorVersion;
        __u8 minor = extp->MinorVersion;

        if (((major << 8) | minor) < 0x3131) {
                /* CFI version 1.0 => don't trust bootloc */

                pr_debug("%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n",
                        map->name, cfi->mfr, cfi->id);

                /* AFAICS all 29LV400 with a bottom boot block have a device ID
                 * of 0x22BA in 16-bit mode and 0xBA in 8-bit mode.
                 * These were badly detected as they have the 0x80 bit set
                 * so treat them as a special case.
                 */
                if (((cfi->id == 0xBA) || (cfi->id == 0x22BA)) &&

                        /* Macronix added CFI to their 2nd generation
                         * MX29LV400C B/T but AFAICS no other 29LV400 (AMD,
                         * Fujitsu, Spansion, EON, ESI and older Macronix)
                         * has CFI.
                         *
                         * Therefore also check the manufacturer.
                         * This reduces the risk of false detection due to
                         * the 8-bit device ID.
                         */
                        (cfi->mfr == CFI_MFR_MACRONIX)) {
                        pr_debug("%s: Macronix MX29LV400C with bottom boot block"
                                " detected\n", map->name);
                        extp->TopBottom = 2;    /* bottom boot */
                } else
                if (cfi->id & 0x80) {
                        printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
                        extp->TopBottom = 3;    /* top boot */
                } else {
                        extp->TopBottom = 2;    /* bottom boot */
                }

                pr_debug("%s: AMD CFI PRI V%c.%c has no boot block field;"
                        " deduced %s from Device ID\n", map->name, major, minor,
                        extp->TopBottom == 2 ? "bottom" : "top");
        }
}
#endif
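
/*
 * Illustrative note (added for clarity, not in the original source):
 * MajorVersion and MinorVersion are ASCII digits, so a V1.1 PRI stores
 * major = '1' = 0x31 and minor = '1' = 0x31.  The comparison above
 * therefore evaluates as:
 *
 *   ('1' << 8) | '0'  ==  0x3130  <  0x3131  => V1.0, bootloc untrusted
 *   ('1' << 8) | '1'  ==  0x3131  >= 0x3131  => V1.1+, bootloc trusted
 */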

static void fixup_use_write_buffers(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        if (cfi->cfiq->BufWriteTimeoutTyp) {
                pr_debug("Using buffer write method\n");
                mtd->_write = cfi_amdstd_write_buffers;
        }
}

/* Atmel chips don't use the same PRI format as AMD chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
        struct cfi_pri_atmel atmel_pri;

        memcpy(&atmel_pri, extp, sizeof(atmel_pri));
        memset((char *)extp + 5, 0, sizeof(*extp) - 5);

        if (atmel_pri.Features & 0x02)
                extp->EraseSuspend = 2;

        /* Some chips got it backwards... */
        if (cfi->id == AT49BV6416) {
                if (atmel_pri.BottomBoot)
                        extp->TopBottom = 3;
                else
                        extp->TopBottom = 2;
        } else {
                if (atmel_pri.BottomBoot)
                        extp->TopBottom = 2;
                else
                        extp->TopBottom = 3;
        }

        /* burst write mode not supported */
        cfi->cfiq->BufWriteTimeoutTyp = 0;
        cfi->cfiq->BufWriteTimeoutMax = 0;
}

static void fixup_use_secsi(struct mtd_info *mtd)
{
        /* Setup for chips with a secsi area */
        mtd->_read_user_prot_reg = cfi_amdstd_secsi_read;
        mtd->_read_fact_prot_reg = cfi_amdstd_secsi_read;
}

static void fixup_use_erase_chip(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        if ((cfi->cfiq->NumEraseRegions == 1) &&
                ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
                mtd->_erase = cfi_amdstd_erase_chip;
        }
}

/*
 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors
 * locked by default.
 */
static void fixup_use_atmel_lock(struct mtd_info *mtd)
{
        mtd->_lock = cfi_atmel_lock;
        mtd->_unlock = cfi_atmel_unlock;
        mtd->flags |= MTD_POWERUP_LOCK;
}

static void fixup_old_sst_eraseregion(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        /*
         * These flashes report two separate eraseblock regions based on the
         * sector_erase-size and block_erase-size, although they both operate on the
         * same memory. This is not allowed according to CFI, so we just pick the
         * sector_erase-size.
         */
        cfi->cfiq->NumEraseRegions = 1;
}

static void fixup_sst39vf(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        fixup_old_sst_eraseregion(mtd);

        cfi->addr_unlock1 = 0x5555;
        cfi->addr_unlock2 = 0x2AAA;
}

static void fixup_sst39vf_rev_b(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        fixup_old_sst_eraseregion(mtd);

        cfi->addr_unlock1 = 0x555;
        cfi->addr_unlock2 = 0x2AA;

        cfi->sector_erase_cmd = CMD(0x50);
}

static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        fixup_sst39vf_rev_b(mtd);

        /*
         * CFI reports 1024 sectors (0x03ff+1) of 64KBytes (0x0100*256) where
         * it should report a size of 8KBytes (0x0020*256).
         */
        cfi->cfiq->EraseRegionInfo[0] = 0x002003ff;
        pr_warning("%s: Bad 38VF640x CFI data; adjusting sector size from 64 to 8KiB\n", mtd->name);
}
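
/*
 * Illustrative note (added for clarity, not in the original source): an
 * EraseRegionInfo word packs the sector size in 256-byte units in bits
 * 31:16 and the sector count minus one in bits 15:0.  For the 0x002003ff
 * value set above:
 *
 *   ersize = ((0x002003ff >> 8) & ~0xff) = 0x2000  ->  8 KiB per sector
 *   ernum  = (0x002003ff & 0xffff) + 1   = 0x400   ->  1024 sectors
 *
 * cfi_amdstd_setup() below decodes the field exactly this way, scaled by
 * the interleave.
 */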

static void fixup_s29gl064n_sectors(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
                cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
                pr_warning("%s: Bad S29GL064N CFI data; adjust from 64 to 128 sectors\n", mtd->name);
        }
}

static void fixup_s29gl032n_sectors(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
                cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
                pr_warning("%s: Bad S29GL032N CFI data; adjust from 127 to 63 sectors\n", mtd->name);
        }
}

static void fixup_s29ns512p_sectors(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        /*
         * S29NS512P flash uses more than 8 bits to report the number of
         * sectors, which is not permitted by CFI.
         */
        cfi->cfiq->EraseRegionInfo[0] = 0x020001ff;
        pr_warning("%s: Bad S29NS512P CFI data; adjust to 512 sectors\n", mtd->name);
}

/* Used to fix CFI-Tables of chips without Extended Query Tables */
static struct cfi_fixup cfi_nopri_fixup_table[] = {
        { CFI_MFR_SST, 0x234a, fixup_sst39vf }, /* SST39VF1602 */
        { CFI_MFR_SST, 0x234b, fixup_sst39vf }, /* SST39VF1601 */
        { CFI_MFR_SST, 0x235a, fixup_sst39vf }, /* SST39VF3202 */
        { CFI_MFR_SST, 0x235b, fixup_sst39vf }, /* SST39VF3201 */
        { CFI_MFR_SST, 0x235c, fixup_sst39vf_rev_b }, /* SST39VF3202B */
        { CFI_MFR_SST, 0x235d, fixup_sst39vf_rev_b }, /* SST39VF3201B */
        { CFI_MFR_SST, 0x236c, fixup_sst39vf_rev_b }, /* SST39VF6402B */
        { CFI_MFR_SST, 0x236d, fixup_sst39vf_rev_b }, /* SST39VF6401B */
        { 0, 0, NULL }
};

static struct cfi_fixup cfi_fixup_table[] = {
        { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
#ifdef AMD_BOOTLOC_BUG
        { CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock },
        { CFI_MFR_AMIC, CFI_ID_ANY, fixup_amd_bootblock },
        { CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock },
#endif
        { CFI_MFR_AMD, 0x0050, fixup_use_secsi },
        { CFI_MFR_AMD, 0x0053, fixup_use_secsi },
        { CFI_MFR_AMD, 0x0055, fixup_use_secsi },
        { CFI_MFR_AMD, 0x0056, fixup_use_secsi },
        { CFI_MFR_AMD, 0x005C, fixup_use_secsi },
        { CFI_MFR_AMD, 0x005F, fixup_use_secsi },
        { CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors },
        { CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors },
        { CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors },
        { CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors },
        { CFI_MFR_AMD, 0x3f00, fixup_s29ns512p_sectors },
        { CFI_MFR_SST, 0x536a, fixup_sst38vf640x_sectorsize }, /* SST38VF6402 */
        { CFI_MFR_SST, 0x536b, fixup_sst38vf640x_sectorsize }, /* SST38VF6401 */
        { CFI_MFR_SST, 0x536c, fixup_sst38vf640x_sectorsize }, /* SST38VF6404 */
        { CFI_MFR_SST, 0x536d, fixup_sst38vf640x_sectorsize }, /* SST38VF6403 */
#if !FORCE_WORD_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
#endif
        { 0, 0, NULL }
};
static struct cfi_fixup jedec_fixup_table[] = {
        { CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock },
        { CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock },
        { CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock },
        { 0, 0, NULL }
};

static struct cfi_fixup fixup_table[] = {
        /* The CFI vendor IDs and the JEDEC vendor IDs appear to be
         * common, and it looks like the device IDs are as well.  This
         * table picks up all the cases where we know that to be true.
         */
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip },
        { CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock },
        { 0, 0, NULL }
};
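
/*
 * How these tables are consumed (sketch, paraphrasing cfi_fixup() from
 * cfi_util.c): entries are tried in order, and a fixup runs when both
 * the manufacturer and device IDs match, with CFI_MFR_ANY / CFI_ID_ANY
 * acting as wildcards.  Roughly:
 *
 *   for (f = fixups; f->fixup; f++)
 *           if ((f->mfr == CFI_MFR_ANY || f->mfr == cfi->mfr) &&
 *               (f->id == CFI_ID_ANY || f->id == cfi->id))
 *                   f->fixup(mtd);
 */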


static void cfi_fixup_major_minor(struct cfi_private *cfi,
                                  struct cfi_pri_amdstd *extp)
{
        if (cfi->mfr == CFI_MFR_SAMSUNG) {
                if ((extp->MajorVersion == '0' && extp->MinorVersion == '0') ||
                    (extp->MajorVersion == '3' && extp->MinorVersion == '3')) {
                        /*
                         * Samsung K8P2815UQB and K8D6x16UxM chips
                         * report major=0 / minor=0.
                         * K8D3x16UxC chips report major=3 / minor=3.
                         */
                        printk(KERN_NOTICE "  Fixing Samsung's Amd/Fujitsu"
                               " Extended Query version to 1.%c\n",
                               extp->MinorVersion);
                        extp->MajorVersion = '1';
                }
        }

        /*
         * SST 38VF640x chips report major=0xFF / minor=0xFF.
         */
        if (cfi->mfr == CFI_MFR_SST && (cfi->id >> 4) == 0x0536) {
                extp->MajorVersion = '1';
                extp->MinorVersion = '0';
        }
}

static int is_m29ew(struct cfi_private *cfi)
{
        if (cfi->mfr == CFI_MFR_INTEL &&
            ((cfi->device_type == CFI_DEVICETYPE_X8 && (cfi->id & 0xff) == 0x7e) ||
             (cfi->device_type == CFI_DEVICETYPE_X16 && cfi->id == 0x227e)))
                return 1;
        return 0;
}

/*
 * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 20:
 * Some revisions of the M29EW suffer from erase suspend hang ups. In
 * particular, it can occur when the sequence
 * Erase Confirm -> Suspend -> Program -> Resume
 * causes a lockup due to internal timing issues. The consequence is that the
 * erase cannot be resumed without inserting a dummy command after programming
 * and prior to resuming. [...] The work-around is to issue a dummy write cycle
 * that writes an F0 command code before the RESUME command.
 */
static void cfi_fixup_m29ew_erase_suspend(struct map_info *map,
                                          unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        /* before resume, insert a dummy 0xF0 cycle for Micron M29EW devices */
        if (is_m29ew(cfi))
                map_write(map, CMD(0xF0), adr);
}

/*
 * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 22:
 *
 * Some revisions of the M29EW (for example, A1 and A2 step revisions)
 * are affected by a problem that could cause a hang up when an ERASE SUSPEND
 * command is issued after an ERASE RESUME operation without waiting for a
 * minimum delay.  The result is that once the ERASE seems to be completed
 * (no bits are toggling), the contents of the Flash memory block on which
 * the erase was ongoing could be inconsistent with the expected values
 * (typically, the array value is stuck to the 0xC0, 0xC4, 0x80, or 0x84
 * values), causing a consequent failure of the ERASE operation.
 * The occurrence of this issue could be high, especially when file system
 * operations on the Flash are intensive.  As a result, it is recommended
 * that a patch be applied.  Intensive file system operations can cause many
 * calls to the garbage routine to free Flash space (also by erasing physical
 * Flash blocks) and as a result, many consecutive SUSPEND and RESUME
 * commands can occur.  The problem disappears when a delay is inserted after
 * the RESUME command by using the udelay() function available in Linux.
 * The DELAY value must be tuned based on the customer's platform.
 * The maximum value that fixes the problem in all cases is 500us.
 * But, in our experience, a delay of 30 µs to 50 µs is sufficient
 * in most cases.
 * We have chosen 500µs because this latency is acceptable.
 */
static void cfi_fixup_m29ew_delay_after_resume(struct cfi_private *cfi)
{
        /*
         * Resolving the Delay After Resume Issue see Micron TN-13-07
         * Worst case delay must be 500µs but 30-50µs should be ok as well
         */
        if (is_m29ew(cfi))
                cfi_udelay(500);
}

struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct device_node __maybe_unused *np = map->device_node;
        struct mtd_info *mtd;
        int i;

        mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
        if (!mtd)
                return NULL;
        mtd->priv = map;
        mtd->type = MTD_NORFLASH;

        /* Fill in the default mtd operations */
        mtd->_erase   = cfi_amdstd_erase_varsize;
        mtd->_write   = cfi_amdstd_write_words;
        mtd->_read    = cfi_amdstd_read;
        mtd->_sync    = cfi_amdstd_sync;
        mtd->_suspend = cfi_amdstd_suspend;
        mtd->_resume  = cfi_amdstd_resume;
        mtd->flags   = MTD_CAP_NORFLASH;
        mtd->name    = map->name;
        mtd->writesize = 1;
        mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;

        pr_debug("MTD %s(): write buffer size %d\n", __func__,
                        mtd->writebufsize);

        mtd->_panic_write = cfi_amdstd_panic_write;
        mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;

        if (cfi->cfi_mode == CFI_MODE_CFI) {
                unsigned char bootloc;
                __u16 adr = primary ? cfi->cfiq->P_ADR : cfi->cfiq->A_ADR;
                struct cfi_pri_amdstd *extp;

                extp = (struct cfi_pri_amdstd *)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
                if (extp) {
                        /*
                         * It's a real CFI chip, not one for which the probe
                         * routine faked a CFI structure.
                         */
                        cfi_fixup_major_minor(cfi, extp);

                        /*
                         * Valid primary extension versions are: 1.0, 1.1, 1.2, 1.3, 1.4, 1.5
                         * see: http://cs.ozerki.net/zap/pub/axim-x5/docs/cfi_r20.pdf, page 19
                         *      http://www.spansion.com/Support/AppNotes/cfi_100_20011201.pdf
                         *      http://www.spansion.com/Support/Datasheets/s29ws-p_00_a12_e.pdf
                         *      http://www.spansion.com/Support/Datasheets/S29GL_128S_01GS_00_02_e.pdf
                         */
                        if (extp->MajorVersion != '1' ||
                            (extp->MajorVersion == '1' && (extp->MinorVersion < '0' || extp->MinorVersion > '5'))) {
                                printk(KERN_ERR "  Unknown Amd/Fujitsu Extended Query "
                                       "version %c.%c (%#02x/%#02x).\n",
                                       extp->MajorVersion, extp->MinorVersion,
                                       extp->MajorVersion, extp->MinorVersion);
                                kfree(extp);
                                kfree(mtd);
                                return NULL;
                        }

                        printk(KERN_INFO "  Amd/Fujitsu Extended Query version %c.%c.\n",
                               extp->MajorVersion, extp->MinorVersion);

                        /* Install our own private info structure */
                        cfi->cmdset_priv = extp;

                        /* Apply cfi device specific fixups */
                        cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
                        /* Tell the user about it in lots of lovely detail */
                        cfi_tell_features(extp);
#endif

#ifdef CONFIG_OF
                        if (np && of_property_read_bool(
                                    np, "use-advanced-sector-protection")
                            && extp->BlkProtUnprot == 8) {
                                printk(KERN_INFO "  Advanced Sector Protection (PPB Locking) supported\n");
                                mtd->_lock = cfi_ppb_lock;
                                mtd->_unlock = cfi_ppb_unlock;
                                mtd->_is_locked = cfi_ppb_is_locked;
                        }
#endif

                        bootloc = extp->TopBottom;
                        if ((bootloc < 2) || (bootloc > 5)) {
                                printk(KERN_WARNING "%s: CFI contains unrecognised boot "
                                       "bank location (%d). Assuming bottom.\n",
                                       map->name, bootloc);
                                bootloc = 2;
                        }

                        if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
                                printk(KERN_WARNING "%s: Swapping erase regions for top-boot CFI table.\n", map->name);

                                for (i = 0; i < cfi->cfiq->NumEraseRegions / 2; i++) {
                                        int j = (cfi->cfiq->NumEraseRegions - 1) - i;
                                        __u32 swap;

                                        swap = cfi->cfiq->EraseRegionInfo[i];
                                        cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
                                        cfi->cfiq->EraseRegionInfo[j] = swap;
                                }
                        }
                        /* Set the default CFI lock/unlock addresses */
                        cfi->addr_unlock1 = 0x555;
                        cfi->addr_unlock2 = 0x2aa;
                }
                cfi_fixup(mtd, cfi_nopri_fixup_table);

                if (!cfi->addr_unlock1 || !cfi->addr_unlock2) {
                        kfree(mtd);
                        return NULL;
                }

        } /* CFI mode */
        else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
                /* Apply jedec specific fixups */
                cfi_fixup(mtd, jedec_fixup_table);
        }
        /* Apply generic fixups */
        cfi_fixup(mtd, fixup_table);

        for (i = 0; i < cfi->numchips; i++) {
                cfi->chips[i].word_write_time = 1 << cfi->cfiq->WordWriteTimeoutTyp;
                cfi->chips[i].buffer_write_time = 1 << cfi->cfiq->BufWriteTimeoutTyp;
                cfi->chips[i].erase_time = 1 << cfi->cfiq->BlockEraseTimeoutTyp;
                cfi->chips[i].ref_point_counter = 0;
                init_waitqueue_head(&(cfi->chips[i].wq));
        }

        map->fldrv = &cfi_amdstd_chipdrv;

        return cfi_amdstd_setup(mtd);
}
struct mtd_info *cfi_cmdset_0006(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
struct mtd_info *cfi_cmdset_0701(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0002);
EXPORT_SYMBOL_GPL(cfi_cmdset_0006);
EXPORT_SYMBOL_GPL(cfi_cmdset_0701);
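
/*
 * Typical use (sketch, not from the original source): a map driver never
 * calls cfi_cmdset_0002() directly.  It probes the bus, and the CFI probe
 * code dispatches here based on the primary vendor command set ID (0x0002,
 * 0x0006 or 0x0701) found in the query data:
 *
 *   struct mtd_info *mtd = do_map_probe("cfi_probe", &my_map);
 *   if (mtd)
 *           mtd_device_register(mtd, NULL, 0);
 */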

static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
        unsigned long offset = 0;
        int i, j;

        printk(KERN_NOTICE "number of %s chips: %d\n",
               (cfi->cfi_mode == CFI_MODE_CFI) ? "CFI" : "JEDEC", cfi->numchips);
        /* Select the correct geometry setup */
        mtd->size = devsize * cfi->numchips;

        mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
        mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
                                    * mtd->numeraseregions, GFP_KERNEL);
        if (!mtd->eraseregions)
                goto setup_err;

        for (i = 0; i < cfi->cfiq->NumEraseRegions; i++) {
                unsigned long ernum, ersize;
                ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
                ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

                if (mtd->erasesize < ersize) {
                        mtd->erasesize = ersize;
                }
                for (j = 0; j < cfi->numchips; j++) {
                        mtd->eraseregions[(j * cfi->cfiq->NumEraseRegions) + i].offset = (j * devsize) + offset;
                        mtd->eraseregions[(j * cfi->cfiq->NumEraseRegions) + i].erasesize = ersize;
                        mtd->eraseregions[(j * cfi->cfiq->NumEraseRegions) + i].numblocks = ernum;
                }
                offset += (ersize * ernum);
        }
        if (offset != devsize) {
                /* Argh */
                printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
                goto setup_err;
        }

        __module_get(THIS_MODULE);
        register_reboot_notifier(&mtd->reboot_notifier);
        return mtd;

 setup_err:
        kfree(mtd->eraseregions);
        kfree(mtd);
        kfree(cfi->cmdset_priv);
        kfree(cfi->cfiq);
        return NULL;
}
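
/*
 * Worked example (illustrative, with assumed numbers): a single 8 MiB
 * chip (DevSize = 23) interleaved two-wide on the bus gives
 *
 *   devsize = (1 << 23) * 2 = 16 MiB of address space,
 *
 * and a uniform region of 128 x 64 KiB sectors becomes 128 blocks of
 * ersize = 64 KiB * 2 = 128 KiB, so offset sums to devsize as checked
 * above.  Erase geometry is always scaled by the interleave before it
 * is exposed through mtd->eraseregions.
 */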

/*
 * Return true if the chip is ready.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and is indicated by no toggle bits toggling.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_ready(struct map_info *map, unsigned long addr)
{
        map_word d, t;

        d = map_read(map, addr);
        t = map_read(map, addr);

        return map_word_equal(map, d, t);
}

/*
 * Return true if the chip is ready and has the correct value.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and is indicated by no bits toggling.
 *
 * Errors are indicated by toggling bits, or by bits held at the wrong
 * value.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_good(struct map_info *map, unsigned long addr, map_word expected)
{
        map_word oldd, curd;

        oldd = map_read(map, addr);
        curd = map_read(map, addr);

        return  map_word_equal(map, oldd, curd) &&
                map_word_equal(map, curd, expected);
}
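
/*
 * Background note (added for clarity, not in the original source): while
 * an embedded program/erase algorithm is running, these chips toggle
 * status bits (DQ6, and DQ2 during erase) on every read of a busy sector.
 * That is why both helpers above simply compare two back-to-back reads:
 * equal reads mean the toggling has stopped and real array data is back
 * on the bus.
 */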

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        DECLARE_WAITQUEUE(wait, current);
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long timeo;
        struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;

 resettime:
        timeo = jiffies + HZ;
 retry:
        switch (chip->state) {

        case FL_STATUS:
                for (;;) {
                        if (chip_ready(map, adr))
                                break;

                        if (time_after(jiffies, timeo)) {
                                printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
                                return -EIO;
                        }
                        mutex_unlock(&chip->mutex);
                        cfi_udelay(1);
                        mutex_lock(&chip->mutex);
                        /* Someone else might have been playing with it. */
                        goto retry;
                }

        case FL_READY:
        case FL_CFI_QUERY:
        case FL_JEDEC_QUERY:
                return 0;

        case FL_ERASING:
                if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
                    !(mode == FL_READY || mode == FL_POINT ||
                    (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
                        goto sleep;

                /* We could check to see if we're trying to access the sector
                 * that is currently being erased. However, no user will try
                 * anything like that so we just wait for the timeout. */

                /* Erase suspend */
                /* It's harmless to issue the Erase-Suspend and Erase-Resume
                 * commands when the erase algorithm isn't in progress. */
                map_write(map, CMD(0xB0), chip->in_progress_block_addr);
                chip->oldstate = FL_ERASING;
                chip->state = FL_ERASE_SUSPENDING;
                chip->erase_suspended = 1;
                for (;;) {
                        if (chip_ready(map, adr))
                                break;

                        if (time_after(jiffies, timeo)) {
                                /* Should have suspended the erase by now.
                                 * Send an Erase-Resume command as either
                                 * there was an error (so leave the erase
                                 * routine to recover from it) or we are
                                 * trying to use the erase-in-progress sector. */
                                put_chip(map, chip, adr);
                                printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
                                return -EIO;
                        }

                        mutex_unlock(&chip->mutex);
                        cfi_udelay(1);
                        mutex_lock(&chip->mutex);
                        /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
                           So we can just loop here. */
                }
                chip->state = FL_READY;
                return 0;

        case FL_XIP_WHILE_ERASING:
                if (mode != FL_READY && mode != FL_POINT &&
                    (!cfip || !(cfip->EraseSuspend & 2)))
                        goto sleep;
                chip->oldstate = chip->state;
                chip->state = FL_READY;
                return 0;

        case FL_SHUTDOWN:
                /* The machine is rebooting */
                return -EIO;

        case FL_POINT:
                /* Only if there's no operation suspended... */
                if (mode == FL_READY && chip->oldstate == FL_READY)
                        return 0;

        default:
        sleep:
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);
                mutex_unlock(&chip->mutex);
                schedule();
                remove_wait_queue(&chip->wq, &wait);
                mutex_lock(&chip->mutex);
                goto resettime;
        }
}
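
/*
 * Canonical calling pattern (sketch, mirroring the read/write paths
 * below): every flash operation brackets its chip access like this,
 * with get_chip() possibly suspending an in-progress erase to let the
 * access through:
 *
 *   mutex_lock(&chip->mutex);
 *   ret = get_chip(map, chip, adr, FL_WRITING);
 *   if (ret) {
 *           mutex_unlock(&chip->mutex);
 *           return ret;
 *   }
 *   ... issue the operation ...
 *   put_chip(map, chip, adr);    // resumes a suspended erase if needed
 *   mutex_unlock(&chip->mutex);
 */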


static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;

        switch (chip->oldstate) {
        case FL_ERASING:
                cfi_fixup_m29ew_erase_suspend(map,
                        chip->in_progress_block_addr);
                map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr);
                cfi_fixup_m29ew_delay_after_resume(cfi);
                chip->oldstate = FL_READY;
                chip->state = FL_ERASING;
                break;

        case FL_XIP_WHILE_ERASING:
                chip->state = chip->oldstate;
                chip->oldstate = FL_READY;
                break;

        case FL_READY:
        case FL_STATUS:
                break;
        default:
                printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
        }
        wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions). Also, configuring MTD CFI
 * support for a single buswidth and a single interleave is recommended.
 */
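
/*
 * Typical XIP bracket (sketch of the pattern used by the paths below):
 *
 *   xip_disable(map, chip, adr);       // mask IRQs, flash leaves array mode
 *   ... issue command cycles ...
 *   UDELAY(map, chip, adr, usec);      // polls; may suspend an erase
 *   xip_enable(map, chip, adr);        // back to array mode, IRQs back on
 *
 * Everything executed between the two calls must live in __xipram.
 */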

static void xip_disable(struct map_info *map, struct flchip *chip,
                        unsigned long adr)
{
        /* TODO: chips with no XIP use should ignore and return */
        (void) map_read(map, adr); /* ensure mmu mapping is up to date */
        local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
                                unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;

        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xf0), adr);
                chip->state = FL_READY;
        }
        (void) map_read(map, adr);
        xip_iprefetch();
        local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function is polling for both the given timeout and pending
 * (but still masked) hardware interrupts.  Whenever there is an interrupt
 * pending then the flash erase operation is suspended, array mode restored
 * and interrupts unmasked.  Task scheduling might also happen at that
 * point.  The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the
 * remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
                                unsigned long adr, int usec)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
        map_word status, OK = CMD(0x80);
        unsigned long suspended, start = xip_currtime();
        flstate_t oldstate;

        do {
                cpu_relax();
                if (xip_irqpending() && extp &&
                    ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
                    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
                        /*
                         * Let's suspend the erase operation when supported.
                         * Note that we currently don't try to suspend
                         * interleaved chips if there is already another
                         * operation suspended (imagine what happens
                         * when one chip was already done with the current
                         * operation while another chip suspended it, then
                         * we resume the whole thing at once).  Yes, it
                         * can happen!
                         */
                        map_write(map, CMD(0xb0), adr);
                        usec -= xip_elapsed_since(start);
                        suspended = xip_currtime();
                        do {
                                if (xip_elapsed_since(suspended) > 100000) {
                                        /*
                                         * The chip doesn't want to suspend
                                         * after waiting for 100 msecs.
                                         * This is a critical error but there
                                         * is not much we can do here.
                                         */
                                        return;
                                }
                                status = map_read(map, adr);
                        } while (!map_word_andequal(map, status, OK, OK));

                        /* Suspend succeeded */
                        oldstate = chip->state;
                        if (!map_word_bitsset(map, status, CMD(0x40)))
                                break;
                        chip->state = FL_XIP_WHILE_ERASING;
                        chip->erase_suspended = 1;
                        map_write(map, CMD(0xf0), adr);
                        (void) map_read(map, adr);
                        xip_iprefetch();
                        local_irq_enable();
                        mutex_unlock(&chip->mutex);
                        xip_iprefetch();
                        cond_resched();

                        /*
                         * We're back.  However someone else might have
                         * decided to go write to the chip if we are in
                         * a suspended erase state.  If so let's wait
                         * until it's done.
                         */
                        mutex_lock(&chip->mutex);
                        while (chip->state != FL_XIP_WHILE_ERASING) {
                                DECLARE_WAITQUEUE(wait, current);
                                set_current_state(TASK_UNINTERRUPTIBLE);
                                add_wait_queue(&chip->wq, &wait);
                                mutex_unlock(&chip->mutex);
                                schedule();
                                remove_wait_queue(&chip->wq, &wait);
                                mutex_lock(&chip->mutex);
                        }
                        /* Disallow XIP again */
                        local_irq_disable();

                        /* Correct Erase Suspend Hangups for M29EW */
                        cfi_fixup_m29ew_erase_suspend(map, adr);
                        /* Resume the write or erase operation */
                        map_write(map, cfi->sector_erase_cmd, adr);
                        chip->state = oldstate;
                        start = xip_currtime();
                } else if (usec >= 1000000/HZ) {
                        /*
                         * Try to save on CPU power when waiting delay
                         * is at least a system timer tick period.
                         * No need to be extremely accurate here.
                         */
                        xip_cpu_idle();
                }
                status = map_read(map, adr);
        } while (!map_word_andequal(map, status, OK, OK)
                 && xip_elapsed_since(start) < usec);
}

#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVALIDATE_CACHE_UDELAY.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
        INVALIDATE_CACHED_RANGE(map, from, size)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
        UDELAY(map, chip, adr, usec)

/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit.  For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with the
 * same chip state as it was entered with.  This is why there is no care for
 * the presence of add_wait_queue() or schedule() calls from within a couple
 * of xip_disable()'d areas of code, like in do_erase_oneblock for example.
 * The queueing and scheduling are always happening within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where the flash is
 * in array mode, therefore never executing many cases therein and not
 * causing any problem with XIP.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)

#define UDELAY(map, chip, adr, usec)  \
do {  \
        mutex_unlock(&chip->mutex);  \
        cfi_udelay(usec);  \
        mutex_lock(&chip->mutex);  \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
do {  \
        mutex_unlock(&chip->mutex);  \
        INVALIDATE_CACHED_RANGE(map, adr, len);  \
        cfi_udelay(usec);  \
        mutex_lock(&chip->mutex);  \
} while (0)

#endif

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
        unsigned long cmd_addr;
        struct cfi_private *cfi = map->fldrv_priv;
        int ret;

        adr += chip->start;

        /* Ensure cmd read/writes are aligned. */
        cmd_addr = adr & ~(map_bankwidth(map)-1);

        mutex_lock(&chip->mutex);
        ret = get_chip(map, chip, cmd_addr, FL_READY);
        if (ret) {
                mutex_unlock(&chip->mutex);
                return ret;
        }

        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xf0), cmd_addr);
                chip->state = FL_READY;
        }

        map_copy_from(map, buf, adr, len);

        put_chip(map, chip, cmd_addr);

        mutex_unlock(&chip->mutex);
        return 0;
}


static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long ofs;
        int chipnum;
        int ret = 0;

        /* ofs: offset within the first chip at which the first read should start */
        chipnum = (from >> cfi->chipshift);
        ofs = from - (chipnum << cfi->chipshift);

        while (len) {
                unsigned long thislen;

                if (chipnum >= cfi->numchips)
                        break;

                if ((len + ofs - 1) >> cfi->chipshift)
                        thislen = (1 << cfi->chipshift) - ofs;
                else
                        thislen = len;

                ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
                if (ret)
                        break;

                *retlen += thislen;
                len -= thislen;
                buf += thislen;

                ofs = 0;
                chipnum++;
        }
        return ret;
}


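/*
 * Note (added for clarity, not in the original source): the "SecSi"
 * (Secured Silicon, a.k.a. OTP) sector is a small hidden region outside
 * the normal array.  The unlock cycles followed by 0x88 below switch the
 * address space into that region, and the 0x90/0x00 pair at the end
 * exits it again; regular array data is not readable in between.
 */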
static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
        DECLARE_WAITQUEUE(wait, current);
        unsigned long timeo = jiffies + HZ;
        struct cfi_private *cfi = map->fldrv_priv;

 retry:
        mutex_lock(&chip->mutex);

        if (chip->state != FL_READY) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);

                mutex_unlock(&chip->mutex);

                schedule();
                remove_wait_queue(&chip->wq, &wait);
                timeo = jiffies + HZ;

                goto retry;
        }

        adr += chip->start;

        chip->state = FL_READY;

        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

        map_copy_from(map, buf, adr, len);

        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

        wake_up(&chip->wq);
        mutex_unlock(&chip->mutex);

        return 0;
}

static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long ofs;
        int chipnum;
        int ret = 0;

        /* ofs: offset within the first chip at which the first read should start */
        /* 8 secsi bytes per chip */
        chipnum = from >> 3;
        ofs = from & 7;

        while (len) {
                unsigned long thislen;

                if (chipnum >= cfi->numchips)
                        break;

                if ((len + ofs - 1) >> 3)
                        thislen = (1 << 3) - ofs;
                else
                        thislen = len;

                ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
                if (ret)
                        break;

                *retlen += thislen;
                len -= thislen;
                buf += thislen;

                ofs = 0;
                chipnum++;
        }
        return ret;
}


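/*
 * Reference (added for clarity, not in the original source): the word
 * program operation below follows the standard AMD/JEDEC three-cycle
 * unlock plus data write, e.g. on an x16 chip:
 *
 *   cycle 1:  write 0xAA to 0x555        (unlock 1)
 *   cycle 2:  write 0x55 to 0x2AA        (unlock 2)
 *   cycle 3:  write 0xA0 to 0x555        (Program command)
 *   cycle 4:  write the datum to the target address
 *
 * after which the status is polled until the embedded algorithm is done.
 */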
1223static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum)
1224{
1225        struct cfi_private *cfi = map->fldrv_priv;
1226        unsigned long timeo = jiffies + HZ;
1227        /*
1228         * We use a 1ms + 1 jiffies generic timeout for writes (most devices
1229         * have a max write time of a few hundreds usec). However, we should
1230         * use the maximum timeout value given by the chip at probe time
1231         * instead.  Unfortunately, struct flchip does have a field for
1232         * maximum timeout, only for typical which can be far too short
1233         * depending of the conditions.  The ' + 1' is to avoid having a
1234         * timeout of 0 jiffies if HZ is smaller than 1000.
1235         */
1236        unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
1237        int ret = 0;
1238        map_word oldd;
1239        int retry_cnt = 0;
1240
1241        adr += chip->start;
1242
1243        mutex_lock(&chip->mutex);
1244        ret = get_chip(map, chip, adr, FL_WRITING);
1245        if (ret) {
1246                mutex_unlock(&chip->mutex);
1247                return ret;
1248        }
1249
1250        pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
1251               __func__, adr, datum.x[0] );
1252
1253        /*
1254         * Check for a NOP for the case when the datum to write is already
1255         * present - it saves time and works around buggy chips that corrupt
1256         * data at other locations when 0xff is written to a location that
1257         * already contains 0xff.
1258         */
1259        oldd = map_read(map, adr);
1260        if (map_word_equal(map, oldd, datum)) {
1261                pr_debug("MTD %s(): NOP\n",
1262                       __func__);
1263                goto op_done;
1264        }
1265
1266        XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1267        ENABLE_VPP(map);
1268        xip_disable(map, chip, adr);
1269 retry:
1270        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1271        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1272        cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1273        map_write(map, datum, adr);
1274        chip->state = FL_WRITING;
1275
1276        INVALIDATE_CACHE_UDELAY(map, chip,
1277                                adr, map_bankwidth(map),
1278                                chip->word_write_time);
1279
1280        /* See comment above for timeout value. */
1281        timeo = jiffies + uWriteTimeout;
1282        for (;;) {
1283                if (chip->state != FL_WRITING) {
1284                        /* Someone's suspended the write. Sleep */
1285                        DECLARE_WAITQUEUE(wait, current);
1286
1287                        set_current_state(TASK_UNINTERRUPTIBLE);
1288                        add_wait_queue(&chip->wq, &wait);
1289                        mutex_unlock(&chip->mutex);
1290                        schedule();
1291                        remove_wait_queue(&chip->wq, &wait);
1292                        timeo = jiffies + (HZ / 2); /* FIXME */
1293                        mutex_lock(&chip->mutex);
1294                        continue;
1295                }
1296
1297                if (time_after(jiffies, timeo) && !chip_ready(map, adr)){
1298                        xip_enable(map, chip, adr);
1299                        printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
1300                        xip_disable(map, chip, adr);
1301                        break;
1302                }
1303
1304                if (chip_ready(map, adr))
1305                        break;
1306
1307                /* Latency issues. Drop the lock, wait a while and retry */
1308                UDELAY(map, chip, adr, 1);
1309        }
1310        /* Did we succeed? */
1311        if (!chip_good(map, adr, datum)) {
1312                /* reset on all failures. */
1313                map_write(map, CMD(0xF0), chip->start);
1314                /* FIXME - should have reset delay before continuing */
1315
1316                if (++retry_cnt <= MAX_WORD_RETRIES)
1317                        goto retry;
1318
1319                ret = -EIO;
1320        }
1321        xip_enable(map, chip, adr);
1322 op_done:
1323        chip->state = FL_READY;
1324        DISABLE_VPP(map);
1325        put_chip(map, chip, adr);
1326        mutex_unlock(&chip->mutex);
1327
1328        return ret;
1329}
1330
1331
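/*
 * Example (a sketch, not driver code): callers never invoke
 * do_write_oneword() directly.  A write arrives through the MTD core,
 * e.g.
 *
 *	size_t retlen;
 *	u8 data[2] = { 0xde, 0xad };
 *	err = mtd_write(mtd, ofs, sizeof(data), &retlen, data);
 *
 * (to a previously erased region), and cfi_amdstd_write_words() below
 * splits the request into bus-width-aligned single-word programs.
 */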
1332static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
1333                                  size_t *retlen, const u_char *buf)
1334{
1335        struct map_info *map = mtd->priv;
1336        struct cfi_private *cfi = map->fldrv_priv;
1337        int ret = 0;
1338        int chipnum;
1339        unsigned long ofs, chipstart;
1340        DECLARE_WAITQUEUE(wait, current);
1341
1342        chipnum = to >> cfi->chipshift;
1343        ofs = to - (chipnum << cfi->chipshift);
1344        chipstart = cfi->chips[chipnum].start;
1345
1346        /* If it's not bus-aligned, do the first byte write */
1347        if (ofs & (map_bankwidth(map)-1)) {
1348                unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1349                int i = ofs - bus_ofs;
1350                int n = 0;
1351                map_word tmp_buf;
1352
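                /*
                 * Example: with a 4-byte bus and ofs = 0x1002, bus_ofs is
                 * 0x1000, i = 2 and n = min(len, 2); the old 4-byte word is
                 * read back, the new bytes are spliced in at offset 2, and
                 * the whole word is programmed in one go.
                 */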
1353 retry:
1354                mutex_lock(&cfi->chips[chipnum].mutex);
1355
1356                if (cfi->chips[chipnum].state != FL_READY) {
1357                        set_current_state(TASK_UNINTERRUPTIBLE);
1358                        add_wait_queue(&cfi->chips[chipnum].wq, &wait);
1359
1360                        mutex_unlock(&cfi->chips[chipnum].mutex);
1361
1362                        schedule();
1363                        remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
1364                        goto retry;
1365                }
1366
1367                /* Load 'tmp_buf' with old contents of flash */
1368                tmp_buf = map_read(map, bus_ofs+chipstart);
1369
1370                mutex_unlock(&cfi->chips[chipnum].mutex);
1371
1372                /* Number of bytes to copy from buffer */
1373                n = min_t(int, len, map_bankwidth(map)-i);
1374
1375                tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
1376
1377                ret = do_write_oneword(map, &cfi->chips[chipnum],
1378                                       bus_ofs, tmp_buf);
1379                if (ret)
1380                        return ret;
1381
1382                ofs += n;
1383                buf += n;
1384                (*retlen) += n;
1385                len -= n;
1386
1387                if (ofs >> cfi->chipshift) {
1388                        chipnum++;
1389                        ofs = 0;
1390                        if (chipnum == cfi->numchips)
1391                                return 0;
1392                }
1393        }
1394
1395        /* We are now aligned, write as much as possible */
1396        while (len >= map_bankwidth(map)) {
1397                map_word datum;
1398
1399                datum = map_word_load(map, buf);
1400
1401                ret = do_write_oneword(map, &cfi->chips[chipnum],
1402                                       ofs, datum);
1403                if (ret)
1404                        return ret;
1405
1406                ofs += map_bankwidth(map);
1407                buf += map_bankwidth(map);
1408                (*retlen) += map_bankwidth(map);
1409                len -= map_bankwidth(map);
1410
1411                if (ofs >> cfi->chipshift) {
1412                        chipnum++;
1413                        ofs = 0;
1414                        if (chipnum == cfi->numchips)
1415                                return 0;
1416                        chipstart = cfi->chips[chipnum].start;
1417                }
1418        }
1419
1420        /* Write the trailing bytes if any */
1421        if (len & (map_bankwidth(map)-1)) {
1422                map_word tmp_buf;
1423
1424 retry1:
1425                mutex_lock(&cfi->chips[chipnum].mutex);
1426
1427                if (cfi->chips[chipnum].state != FL_READY) {
1428                        set_current_state(TASK_UNINTERRUPTIBLE);
1429                        add_wait_queue(&cfi->chips[chipnum].wq, &wait);
1430
1431                        mutex_unlock(&cfi->chips[chipnum].mutex);
1432
1433                        schedule();
1434                        remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
1435                        goto retry1;
1436                }
1437
1438                tmp_buf = map_read(map, ofs + chipstart);
1439
1440                mutex_unlock(&cfi->chips[chipnum].mutex);
1441
1442                tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
1443
1444                ret = do_write_oneword(map, &cfi->chips[chipnum],
1445                                ofs, tmp_buf);
1446                if (ret)
1447                        return ret;
1448
1449                (*retlen) += len;
1450        }
1451
1452        return 0;
1453}
1454
1455
1456/*
1457 * FIXME: interleaved mode not tested, and probably not supported!
1458 */
1459static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1460                                    unsigned long adr, const u_char *buf,
1461                                    int len)
1462{
1463        struct cfi_private *cfi = map->fldrv_priv;
1464        unsigned long timeo = jiffies + HZ;
1465        /* see comments in do_write_oneword() regarding uWriteTimeout. */
1466        unsigned long uWriteTimeout = (HZ / 1000) + 1;
1467        int ret = -EIO;
1468        unsigned long cmd_adr;
1469        int z, words;
1470        map_word datum;
1471
1472        adr += chip->start;
1473        cmd_adr = adr;
1474
1475        mutex_lock(&chip->mutex);
1476        ret = get_chip(map, chip, adr, FL_WRITING);
1477        if (ret) {
1478                mutex_unlock(&chip->mutex);
1479                return ret;
1480        }
1481
1482        datum = map_word_load(map, buf);
1483
1484        pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
1485               __func__, adr, datum.x[0]);
1486
1487        XIP_INVAL_CACHED_RANGE(map, adr, len);
1488        ENABLE_VPP(map);
1489        xip_disable(map, chip, cmd_adr);
1490
1491        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1492        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1493
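        /*
         * Buffered-program sequence per the 0002 command set: unlock, 0x25
         * (Write to Buffer) at the sector address, one cycle carrying the
         * word count minus one, 'words' data cycles, and finally 0x29
         * (Program Buffer to Flash) to start the actual programming.
         */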
1494        /* Write Buffer Load */
1495        map_write(map, CMD(0x25), cmd_adr);
1496
1497        chip->state = FL_WRITING_TO_BUFFER;
1498
1499        /* Write length of data to come */
1500        words = len / map_bankwidth(map);
1501        map_write(map, CMD(words - 1), cmd_adr);
1502        /* Write data */
1503        z = 0;
1504        while (z < words * map_bankwidth(map)) {
1505                datum = map_word_load(map, buf);
1506                map_write(map, datum, adr + z);
1507
1508                z += map_bankwidth(map);
1509                buf += map_bankwidth(map);
1510        }
1511        z -= map_bankwidth(map);
1512
1513        adr += z;
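        /*
         * Note: 'adr' now points at the last word loaded into the buffer.
         * Status polling below is done there because, per the vendor
         * datasheets, DQ7/toggle data is only valid at an address that
         * was actually written.
         */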
1514
1515        /* Write Buffer Program Confirm: GO GO GO */
1516        map_write(map, CMD(0x29), cmd_adr);
1517        chip->state = FL_WRITING;
1518
1519        INVALIDATE_CACHE_UDELAY(map, chip,
1520                                adr, map_bankwidth(map),
1521                                chip->word_write_time);
1522
1523        timeo = jiffies + uWriteTimeout;
1524
1525        for (;;) {
1526                if (chip->state != FL_WRITING) {
1527                        /* Someone's suspended the write. Sleep */
1528                        DECLARE_WAITQUEUE(wait, current);
1529
1530                        set_current_state(TASK_UNINTERRUPTIBLE);
1531                        add_wait_queue(&chip->wq, &wait);
1532                        mutex_unlock(&chip->mutex);
1533                        schedule();
1534                        remove_wait_queue(&chip->wq, &wait);
1535                        timeo = jiffies + (HZ / 2); /* FIXME */
1536                        mutex_lock(&chip->mutex);
1537                        continue;
1538                }
1539
1540                if (time_after(jiffies, timeo) && !chip_ready(map, adr))
1541                        break;
1542
1543                if (chip_ready(map, adr)) {
1544                        xip_enable(map, chip, adr);
1545                        goto op_done;
1546                }
1547
1548                /* Latency issues. Drop the lock, wait a while and retry */
1549                UDELAY(map, chip, adr, 1);
1550        }
1551
1552        /*
1553         * Recovery from write-buffer programming failures requires
1554         * the write-to-buffer-reset sequence.  Since the last part
1555         * of the sequence also works as a normal reset, we can run
1556         * the same commands regardless of why we are here.
1557         * See e.g.
1558         * http://www.spansion.com/Support/Application%20Notes/MirrorBit_Write_Buffer_Prog_Page_Buffer_Read_AN.pdf
1559         */
1560        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
1561                         cfi->device_type, NULL);
1562        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
1563                         cfi->device_type, NULL);
1564        cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, chip->start, map, cfi,
1565                         cfi->device_type, NULL);
1566        xip_enable(map, chip, adr);
1567        /* FIXME - should have reset delay before continuing */
1568
1569        printk(KERN_WARNING "MTD %s(): software timeout, address:0x%.8lx.\n",
1570               __func__, adr);
1571
1572        ret = -EIO;
1573 op_done:
1574        chip->state = FL_READY;
1575        DISABLE_VPP(map);
1576        put_chip(map, chip, adr);
1577        mutex_unlock(&chip->mutex);
1578
1579        return ret;
1580}
1581
1582
1583static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
1584                                    size_t *retlen, const u_char *buf)
1585{
1586        struct map_info *map = mtd->priv;
1587        struct cfi_private *cfi = map->fldrv_priv;
1588        int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
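        /*
         * MaxBufWriteSize is log2 of the per-chip write-buffer size, so a
         * 32-byte buffer reports 5 and, with no interleave, wbufsize is
         * 1 << 5 = 32 bytes.
         */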
1589        int ret = 0;
1590        int chipnum;
1591        unsigned long ofs;
1592
1593        chipnum = to >> cfi->chipshift;
1594        ofs = to - (chipnum << cfi->chipshift);
1595
1596        /* If it's not bus-aligned, do the first word write */
1597        if (ofs & (map_bankwidth(map)-1)) {
1598                size_t local_len = (-ofs) & (map_bankwidth(map)-1);
1599                if (local_len > len)
1600                        local_len = len;
1601                ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
1602                                             local_len, retlen, buf);
1603                if (ret)
1604                        return ret;
1605                ofs += local_len;
1606                buf += local_len;
1607                len -= local_len;
1608
1609                if (ofs >> cfi->chipshift) {
1610                        chipnum++;
1611                        ofs = 0;
1612                        if (chipnum == cfi->numchips)
1613                                return 0;
1614                }
1615        }
1616
1617        /* Write buffer is worth it only if more than one word to write... */
1618        while (len >= map_bankwidth(map) * 2) {
1619                /* We must not cross write block boundaries */
1620                int size = wbufsize - (ofs & (wbufsize-1));
1621
1622                if (size > len)
1623                        size = len;
1624                if (size % map_bankwidth(map))
1625                        size -= size % map_bankwidth(map);
1626
1627                ret = do_write_buffer(map, &cfi->chips[chipnum],
1628                                      ofs, buf, size);
1629                if (ret)
1630                        return ret;
1631
1632                ofs += size;
1633                buf += size;
1634                (*retlen) += size;
1635                len -= size;
1636
1637                if (ofs >> cfi->chipshift) {
1638                        chipnum++;
1639                        ofs = 0;
1640                        if (chipnum == cfi->numchips)
1641                                return 0;
1642                }
1643        }
1644
1645        if (len) {
1646                size_t retlen_dregs = 0;
1647
1648                ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
1649                                             len, &retlen_dregs, buf);
1650
1651                *retlen += retlen_dregs;
1652                return ret;
1653        }
1654
1655        return 0;
1656}
1657
1658/*
1659 * Wait for the flash chip to become ready to write data
1660 *
1661 * This is only called during the panic_write() path. When panic_write()
1662 * is called, the kernel is in the process of a panic, and will soon be
1663 * dead. Therefore we don't take any locks, and attempt to get access
1664 * to the chip as soon as possible.
1665 */
1666static int cfi_amdstd_panic_wait(struct map_info *map, struct flchip *chip,
1667                                 unsigned long adr)
1668{
1669        struct cfi_private *cfi = map->fldrv_priv;
1670        int retries = 10;
1671        int i;
1672
1673        /*
1674         * If the driver thinks the chip is idle, and no toggle bits
1675         * are changing, then the chip is actually idle for sure.
1676         */
1677        if (chip->state == FL_READY && chip_ready(map, adr))
1678                return 0;
1679
1680        /*
1681         * Try several times to reset the chip and then wait for it
1682         * to become idle. The upper limit of a few milliseconds of
1683         * delay isn't a big problem: the kernel is dying anyway. It
1684         * is more important to save the messages.
1685         */
1686        while (retries > 0) {
1687                const unsigned long timeo = (HZ / 1000) + 1;
1688
1689                /* send the reset command */
1690                map_write(map, CMD(0xF0), chip->start);
1691
1692                /* wait for the chip to become ready */
1693                for (i = 0; i < jiffies_to_usecs(timeo); i++) {
1694                        if (chip_ready(map, adr))
1695                                return 0;
1696
1697                        udelay(1);
1698                }
1699        }
1700
1701        /* the chip never became ready */
1702        return -EBUSY;
1703}
1704
1705/*
1706 * Write out one word of data to a single flash chip during a kernel panic
1707 *
1708 * This is only called during the panic_write() path. When panic_write()
1709 * is called, the kernel is in the process of a panic, and will soon be
1710 * dead. Therefore we don't take any locks, and attempt to get access
1711 * to the chip as soon as possible.
1712 *
1713 * The implementation of this routine is intentionally similar to
1714 * do_write_oneword(), in order to ease code maintenance.
1715 */
1716static int do_panic_write_oneword(struct map_info *map, struct flchip *chip,
1717                                  unsigned long adr, map_word datum)
1718{
1719        const unsigned long uWriteTimeout = (HZ / 1000) + 1;
1720        struct cfi_private *cfi = map->fldrv_priv;
1721        int retry_cnt = 0;
1722        map_word oldd;
1723        int ret = 0;
1724        int i;
1725
1726        adr += chip->start;
1727
1728        ret = cfi_amdstd_panic_wait(map, chip, adr);
1729        if (ret)
1730                return ret;
1731
1732        pr_debug("MTD %s(): PANIC WRITE 0x%.8lx(0x%.8lx)\n",
1733                        __func__, adr, datum.x[0]);
1734
1735        /*
1736         * Check for a NOP for the case when the datum to write is already
1737         * present - it saves time and works around buggy chips that corrupt
1738         * data at other locations when 0xff is written to a location that
1739         * already contains 0xff.
1740         */
1741        oldd = map_read(map, adr);
1742        if (map_word_equal(map, oldd, datum)) {
1743                pr_debug("MTD %s(): NOP\n", __func__);
1744                goto op_done;
1745        }
1746
1747        ENABLE_VPP(map);
1748
1749retry:
1750        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1751        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1752        cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1753        map_write(map, datum, adr);
1754
1755        for (i = 0; i < jiffies_to_usecs(uWriteTimeout); i++) {
1756                if (chip_ready(map, adr))
1757                        break;
1758
1759                udelay(1);
1760        }
1761
1762        if (!chip_good(map, adr, datum)) {
1763                /* reset on all failures. */
1764                map_write(map, CMD(0xF0), chip->start);
1765                /* FIXME - should have reset delay before continuing */
1766
1767                if (++retry_cnt <= MAX_WORD_RETRIES)
1768                        goto retry;
1769
1770                ret = -EIO;
1771        }
1772
1773op_done:
1774        DISABLE_VPP(map);
1775        return ret;
1776}
1777
1778/*
1779 * Write out some data during a kernel panic
1780 *
1781 * This is used by the mtdoops driver to save the dying messages from a
1782 * kernel which has panic'd.
1783 *
1784 * This routine ignores all of the locking used throughout the rest of the
1785 * driver, in order to ensure that the data gets written out no matter what
1786 * state this driver (and the flash chip itself) was in when the kernel crashed.
1787 *
1788 * The implementation of this routine is intentionally similar to
1789 * cfi_amdstd_write_words(), in order to ease code maintenance.
1790 */
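/*
 * Example (sketch): mtdoops reaches this handler through the MTD core,
 * roughly
 *
 *	mtd_panic_write(mtd, ofs, len, &retlen, oops_buf);
 *
 * which dispatches here via the mtd->_panic_write hook filled in at
 * probe time.
 */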
1791static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
1792                                  size_t *retlen, const u_char *buf)
1793{
1794        struct map_info *map = mtd->priv;
1795        struct cfi_private *cfi = map->fldrv_priv;
1796        unsigned long ofs, chipstart;
1797        int ret = 0;
1798        int chipnum;
1799
1800        chipnum = to >> cfi->chipshift;
1801        ofs = to - (chipnum << cfi->chipshift);
1802        chipstart = cfi->chips[chipnum].start;
1803
1804        /* If it's not bus aligned, do the first byte write */
1805        if (ofs & (map_bankwidth(map) - 1)) {
1806                unsigned long bus_ofs = ofs & ~(map_bankwidth(map) - 1);
1807                int i = ofs - bus_ofs;
1808                int n = 0;
1809                map_word tmp_buf;
1810
1811                ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], bus_ofs);
1812                if (ret)
1813                        return ret;
1814
1815                /* Load 'tmp_buf' with old contents of flash */
1816                tmp_buf = map_read(map, bus_ofs + chipstart);
1817
1818                /* Number of bytes to copy from buffer */
1819                n = min_t(int, len, map_bankwidth(map) - i);
1820
1821                tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
1822
1823                ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
1824                                             bus_ofs, tmp_buf);
1825                if (ret)
1826                        return ret;
1827
1828                ofs += n;
1829                buf += n;
1830                (*retlen) += n;
1831                len -= n;
1832
1833                if (ofs >> cfi->chipshift) {
1834                        chipnum++;
1835                        ofs = 0;
1836                        if (chipnum == cfi->numchips)
1837                                return 0;
1838                }
1839        }
1840
1841        /* We are now aligned, write as much as possible */
1842        while (len >= map_bankwidth(map)) {
1843                map_word datum;
1844
1845                datum = map_word_load(map, buf);
1846
1847                ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
1848                                             ofs, datum);
1849                if (ret)
1850                        return ret;
1851
1852                ofs += map_bankwidth(map);
1853                buf += map_bankwidth(map);
1854                (*retlen) += map_bankwidth(map);
1855                len -= map_bankwidth(map);
1856
1857                if (ofs >> cfi->chipshift) {
1858                        chipnum++;
1859                        ofs = 0;
1860                        if (chipnum == cfi->numchips)
1861                                return 0;
1862
1863                        chipstart = cfi->chips[chipnum].start;
1864                }
1865        }
1866
1867        /* Write the trailing bytes if any */
1868        if (len & (map_bankwidth(map) - 1)) {
1869                map_word tmp_buf;
1870
1871                ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], ofs);
1872                if (ret)
1873                        return ret;
1874
1875                tmp_buf = map_read(map, ofs + chipstart);
1876
1877                tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
1878
1879                ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
1880                                             ofs, tmp_buf);
1881                if (ret)
1882                        return ret;
1883
1884                (*retlen) += len;
1885        }
1886
1887        return 0;
1888}
1889
1890
1891/*
1892 * Handle devices with one erase region, that only implement
1893 * the chip erase command.
1894 */
1895static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
1896{
1897        struct cfi_private *cfi = map->fldrv_priv;
1898        unsigned long timeo = jiffies + HZ;
1899        unsigned long adr;
1900        DECLARE_WAITQUEUE(wait, current);
1901        int ret = 0;
1902
1903        adr = cfi->addr_unlock1;
1904
1905        mutex_lock(&chip->mutex);
1906        ret = get_chip(map, chip, adr, FL_WRITING);
1907        if (ret) {
1908                mutex_unlock(&chip->mutex);
1909                return ret;
1910        }
1911
1912        pr_debug("MTD %s(): ERASE 0x%.8lx\n",
1913               __func__, chip->start);
1914
1915        XIP_INVAL_CACHED_RANGE(map, adr, map->size);
1916        ENABLE_VPP(map);
1917        xip_disable(map, chip, adr);
1918
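        /*
         * Six-cycle Chip Erase sequence: two unlock writes, 0x80 (erase
         * setup), two more unlock writes, then 0x10 (chip erase) to the
         * unlock address.
         */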
1919        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1920        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1921        cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1922        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1923        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1924        cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1925
1926        chip->state = FL_ERASING;
1927        chip->erase_suspended = 0;
1928        chip->in_progress_block_addr = adr;
1929
1930        INVALIDATE_CACHE_UDELAY(map, chip,
1931                                adr, map->size,
1932                                chip->erase_time*500);
1933
1934        timeo = jiffies + (HZ*20);
1935
1936        for (;;) {
1937                if (chip->state != FL_ERASING) {
1938                        /* Someone's suspended the erase. Sleep */
1939                        set_current_state(TASK_UNINTERRUPTIBLE);
1940                        add_wait_queue(&chip->wq, &wait);
1941                        mutex_unlock(&chip->mutex);
1942                        schedule();
1943                        remove_wait_queue(&chip->wq, &wait);
1944                        mutex_lock(&chip->mutex);
1945                        continue;
1946                }
1947                if (chip->erase_suspended) {
1948                        /* This erase was suspended and resumed.
1949                           Adjust the timeout */
1950                        timeo = jiffies + (HZ*20); /* FIXME */
1951                        chip->erase_suspended = 0;
1952                }
1953
1954                if (chip_ready(map, adr))
1955                        break;
1956
1957                if (time_after(jiffies, timeo)) {
1958                        printk(KERN_WARNING "MTD %s(): software timeout\n",
1959                                __func__);
1960                        break;
1961                }
1962
1963                /* Latency issues. Drop the lock, wait a while and retry */
1964                UDELAY(map, chip, adr, 1000000/HZ);
1965        }
1966        /* Did we succeed? */
1967        if (!chip_good(map, adr, map_word_ff(map))) {
1968                /* reset on all failures. */
1969                map_write(map, CMD(0xF0), chip->start);
1970                /* FIXME - should have reset delay before continuing */
1971
1972                ret = -EIO;
1973        }
1974
1975        chip->state = FL_READY;
1976        xip_enable(map, chip, adr);
1977        DISABLE_VPP(map);
1978        put_chip(map, chip, adr);
1979        mutex_unlock(&chip->mutex);
1980
1981        return ret;
1982}
1983
1984
1985static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
1986{
1987        struct cfi_private *cfi = map->fldrv_priv;
1988        unsigned long timeo = jiffies + HZ;
1989        DECLARE_WAITQUEUE(wait, current);
1990        int ret = 0;
1991
1992        adr += chip->start;
1993
1994        mutex_lock(&chip->mutex);
1995        ret = get_chip(map, chip, adr, FL_ERASING);
1996        if (ret) {
1997                mutex_unlock(&chip->mutex);
1998                return ret;
1999        }
2000
2001        pr_debug("MTD %s(): ERASE 0x%.8lx\n",
2002               __func__, adr);
2003
2004        XIP_INVAL_CACHED_RANGE(map, adr, len);
2005        ENABLE_VPP(map);
2006        xip_disable(map, chip, adr);
2007
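        /*
         * Sector Erase differs from Chip Erase only in the final cycle:
         * the erase command (cfi->sector_erase_cmd, usually 0x30) goes to
         * the sector address itself rather than the unlock address.
         */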
2008        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2009        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
2010        cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2011        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2012        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
2013        map_write(map, cfi->sector_erase_cmd, adr);
2014
2015        chip->state = FL_ERASING;
2016        chip->erase_suspended = 0;
2017        chip->in_progress_block_addr = adr;
2018
2019        INVALIDATE_CACHE_UDELAY(map, chip,
2020                                adr, len,
2021                                chip->erase_time*500);
2022
2023        timeo = jiffies + (HZ*20);
2024
2025        for (;;) {
2026                if (chip->state != FL_ERASING) {
2027                        /* Someone's suspended the erase. Sleep */
2028                        set_current_state(TASK_UNINTERRUPTIBLE);
2029                        add_wait_queue(&chip->wq, &wait);
2030                        mutex_unlock(&chip->mutex);
2031                        schedule();
2032                        remove_wait_queue(&chip->wq, &wait);
2033                        mutex_lock(&chip->mutex);
2034                        continue;
2035                }
2036                if (chip->erase_suspended) {
2037                        /* This erase was suspended and resumed.
2038                           Adjust the timeout */
2039                        timeo = jiffies + (HZ*20); /* FIXME */
2040                        chip->erase_suspended = 0;
2041                }
2042
2043                if (chip_ready(map, adr)) {
2044                        xip_enable(map, chip, adr);
2045                        break;
2046                }
2047
2048                if (time_after(jiffies, timeo)) {
2049                        xip_enable(map, chip, adr);
2050                        printk(KERN_WARNING "MTD %s(): software timeout\n",
2051                                __func__);
2052                        break;
2053                }
2054
2055                /* Latency issues. Drop the lock, wait a while and retry */
2056                UDELAY(map, chip, adr, 1000000/HZ);
2057        }
2058        /* Did we succeed? */
2059        if (!chip_good(map, adr, map_word_ff(map))) {
2060                /* reset on all failures. */
2061                map_write(map, CMD(0xF0), chip->start);
2062                /* FIXME - should have reset delay before continuing */
2063
2064                ret = -EIO;
2065        }
2066
2067        chip->state = FL_READY;
2068        DISABLE_VPP(map);
2069        put_chip(map, chip, adr);
2070        mutex_unlock(&chip->mutex);
2071        return ret;
2072}
2073
2074
2075static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
2076{
2077        unsigned long ofs, len;
2078        int ret;
2079
2080        ofs = instr->addr;
2081        len = instr->len;
2082
2083        ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
2084        if (ret)
2085                return ret;
2086
2087        instr->state = MTD_ERASE_DONE;
2088        mtd_erase_callback(instr);
2089
2090        return 0;
2091}
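/*
 * Example (a sketch, assuming a single-block erase from kernel context):
 *
 *	struct erase_info ei = {
 *		.mtd  = mtd,
 *		.addr = 0,
 *		.len  = mtd->erasesize,
 *	};
 *	err = mtd_erase(mtd, &ei);
 *
 * mtd_erase() routes to cfi_amdstd_erase_varsize() or, for parts that
 * only implement the chip erase command, cfi_amdstd_erase_chip().
 */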
2092
2093
2094static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
2095{
2096        struct map_info *map = mtd->priv;
2097        struct cfi_private *cfi = map->fldrv_priv;
2098        int ret = 0;
2099
2100        if (instr->addr != 0)
2101                return -EINVAL;
2102
2103        if (instr->len != mtd->size)
2104                return -EINVAL;
2105
2106        ret = do_erase_chip(map, &cfi->chips[0]);
2107        if (ret)
2108                return ret;
2109
2110        instr->state = MTD_ERASE_DONE;
2111        mtd_erase_callback(instr);
2112
2113        return 0;
2114}
2115
2116static int do_atmel_lock(struct map_info *map, struct flchip *chip,
2117                         unsigned long adr, int len, void *thunk)
2118{
2119        struct cfi_private *cfi = map->fldrv_priv;
2120        int ret;
2121
2122        mutex_lock(&chip->mutex);
2123        ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
2124        if (ret)
2125                goto out_unlock;
2126        chip->state = FL_LOCKING;
2127
2128        pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len);
2129
2130        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2131                         cfi->device_type, NULL);
2132        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
2133                         cfi->device_type, NULL);
2134        cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi,
2135                         cfi->device_type, NULL);
2136        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2137                         cfi->device_type, NULL);
2138        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
2139                         cfi->device_type, NULL);
2140        map_write(map, CMD(0x40), chip->start + adr);
2141
2142        chip->state = FL_READY;
2143        put_chip(map, chip, adr + chip->start);
2144        ret = 0;
2145
2146out_unlock:
2147        mutex_unlock(&chip->mutex);
2148        return ret;
2149}
2150
2151static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
2152                           unsigned long adr, int len, void *thunk)
2153{
2154        struct cfi_private *cfi = map->fldrv_priv;
2155        int ret;
2156
2157        mutex_lock(&chip->mutex);
2158        ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
2159        if (ret)
2160                goto out_unlock;
2161        chip->state = FL_UNLOCKING;
2162
2163        pr_debug("MTD %s(): UNLOCK 0x%08lx len %d\n", __func__, adr, len);
2164
2165        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2166                         cfi->device_type, NULL);
2167        map_write(map, CMD(0x70), adr);
2168
2169        chip->state = FL_READY;
2170        put_chip(map, chip, adr + chip->start);
2171        ret = 0;
2172
2173out_unlock:
2174        mutex_unlock(&chip->mutex);
2175        return ret;
2176}
2177
2178static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2179{
2180        return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
2181}
2182
2183static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2184{
2185        return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
2186}
2187
2188/*
2189 * Advanced Sector Protection - PPB (Persistent Protection Bit) locking
2190 */
2191
2192struct ppb_lock {
2193        struct flchip *chip;
2194        loff_t offset;
2195        int locked;
2196};
2197
2198#define MAX_SECTORS                     512
2199
2200#define DO_XXLOCK_ONEBLOCK_LOCK         ((void *)1)
2201#define DO_XXLOCK_ONEBLOCK_UNLOCK       ((void *)2)
2202#define DO_XXLOCK_ONEBLOCK_GETLOCK      ((void *)3)
2203
2204static int __maybe_unused do_ppb_xxlock(struct map_info *map,
2205                                        struct flchip *chip,
2206                                        unsigned long adr, int len, void *thunk)
2207{
2208        struct cfi_private *cfi = map->fldrv_priv;
2209        unsigned long timeo;
2210        int ret;
2211
2212        mutex_lock(&chip->mutex);
2213        ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
2214        if (ret) {
2215                mutex_unlock(&chip->mutex);
2216                return ret;
2217        }
2218
2219        pr_debug("MTD %s(): XXLOCK 0x%08lx len %d\n", __func__, adr, len);
2220
2221        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2222                         cfi->device_type, NULL);
2223        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
2224                         cfi->device_type, NULL);
2225        /* PPB entry command */
2226        cfi_send_gen_cmd(0xC0, cfi->addr_unlock1, chip->start, map, cfi,
2227                         cfi->device_type, NULL);
2228
2229        if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
2230                chip->state = FL_LOCKING;
2231                map_write(map, CMD(0xA0), chip->start + adr);
2232                map_write(map, CMD(0x00), chip->start + adr);
2233        } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
2234                /*
2235                 * Unlocking of one specific sector is not supported, so we
2236                 * have to unlock all sectors of this device instead
2237                 */
2238                chip->state = FL_UNLOCKING;
2239                map_write(map, CMD(0x80), chip->start);
2240                map_write(map, CMD(0x30), chip->start);
2241        } else if (thunk == DO_XXLOCK_ONEBLOCK_GETLOCK) {
2242                chip->state = FL_JEDEC_QUERY;
2243                /* The chip reads back 0 for locked, 1 for unlocked; invert so ret is 1 when locked */
2244                ret = !cfi_read_query(map, adr);
2245        } else
2246                BUG();
2247
2248        /*
2249         * Wait for some time, as unlocking all sectors can take quite long
2250         */
2251        timeo = jiffies + msecs_to_jiffies(2000);       /* 2s max (un)locking */
2252        for (;;) {
2253                if (chip_ready(map, adr))
2254                        break;
2255
2256                if (time_after(jiffies, timeo)) {
2257                        printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
2258                        ret = -EIO;
2259                        break;
2260                }
2261
2262                UDELAY(map, chip, adr, 1);
2263        }
2264
2265        /* Exit the PPB command set (0x90/0x00 command-set exit) */
2266        map_write(map, CMD(0x90), chip->start);
2267        map_write(map, CMD(0x00), chip->start);
2268
2269        chip->state = FL_READY;
2270        put_chip(map, chip, adr + chip->start);
2271        mutex_unlock(&chip->mutex);
2272
2273        return ret;
2274}
2275
2276static int __maybe_unused cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs,
2277                                       uint64_t len)
2278{
2279        return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
2280                                DO_XXLOCK_ONEBLOCK_LOCK);
2281}
2282
2283static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
2284                                         uint64_t len)
2285{
2286        struct mtd_erase_region_info *regions = mtd->eraseregions;
2287        struct map_info *map = mtd->priv;
2288        struct cfi_private *cfi = map->fldrv_priv;
2289        struct ppb_lock *sect;
2290        unsigned long adr;
2291        loff_t offset;
2292        uint64_t length;
2293        int chipnum;
2294        int i;
2295        int sectors;
2296        int ret;
2297
2298        /*
2299         * PPB unlocking always unlocks all sectors of the flash chip.
2300         * We need to re-lock all previously locked sectors. So let's
2301         * first check the locking status of all sectors and save
2302         * it for later use.
2303         */
2304        sect = kcalloc(MAX_SECTORS, sizeof(struct ppb_lock), GFP_KERNEL);
2305        if (!sect)
2306                return -ENOMEM;
2307
2308        /*
2309         * This code to walk all sectors is a slightly modified version
2310         * of the cfi_varsize_frob() code.
2311         */
2312        i = 0;
2313        chipnum = 0;
2314        adr = 0;
2315        sectors = 0;
2316        offset = 0;
2317        length = mtd->size;
2318
2319        while (length) {
2320                int size = regions[i].erasesize;
2321
2322                /*
2323                 * Only test sectors that shall not be unlocked. The other
2324                 * sectors shall be unlocked, so let's keep their locking
2325                 * status at "unlocked" (locked=0) for the final re-locking.
2326                 */
2327                if ((adr < ofs) || (adr >= (ofs + len))) {
2328                        sect[sectors].chip = &cfi->chips[chipnum];
2329                        sect[sectors].offset = offset;
2330                        sect[sectors].locked = do_ppb_xxlock(
2331                                map, &cfi->chips[chipnum], adr, 0,
2332                                DO_XXLOCK_ONEBLOCK_GETLOCK);
2333                }
2334
2335                adr += size;
2336                offset += size;
2337                length -= size;
2338
2339                if (offset == regions[i].offset + size * regions[i].numblocks)
2340                        i++;
2341
2342                if (adr >> cfi->chipshift) {
2343                        adr = 0;
2344                        chipnum++;
2345
2346                        if (chipnum >= cfi->numchips)
2347                                break;
2348                }
2349
2350                sectors++;
2351                if (sectors >= MAX_SECTORS) {
2352                        printk(KERN_ERR "Only %d sectors for PPB locking supported!\n",
2353                               MAX_SECTORS);
2354                        kfree(sect);
2355                        return -EINVAL;
2356                }
2357        }
2358
2359        /* Now unlock the whole chip */
2360        ret = cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
2361                               DO_XXLOCK_ONEBLOCK_UNLOCK);
2362        if (ret) {
2363                kfree(sect);
2364                return ret;
2365        }
2366
2367        /*
2368         * PPB unlocking always unlocks all sectors of the flash chip.
2369         * We need to re-lock all previously locked sectors.
2370         */
2371        for (i = 0; i < sectors; i++) {
2372                if (sect[i].locked)
2373                        do_ppb_xxlock(map, sect[i].chip, sect[i].offset, 0,
2374                                      DO_XXLOCK_ONEBLOCK_LOCK);
2375        }
2376
2377        kfree(sect);
2378        return ret;
2379}
2380
2381static int __maybe_unused cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs,
2382                                            uint64_t len)
2383{
2384        return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
2385                                DO_XXLOCK_ONEBLOCK_GETLOCK) ? 1 : 0;
2386}
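/*
 * Example (sketch): when the probe wires these up as mtd->_lock,
 * mtd->_unlock and mtd->_is_locked, a command such as
 * "flash_unlock /dev/mtd0" from mtd-utils ends up calling
 *
 *	mtd_unlock(mtd, ofs, len);
 *
 * which for a PPB-capable part runs the unlock-all-then-relock
 * procedure in cfi_ppb_unlock() above.
 */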
2387
2388static void cfi_amdstd_sync (struct mtd_info *mtd)
2389{
2390        struct map_info *map = mtd->priv;
2391        struct cfi_private *cfi = map->fldrv_priv;
2392        int i;
2393        struct flchip *chip;
2394        int ret = 0;
2395        DECLARE_WAITQUEUE(wait, current);
2396
2397        for (i = 0; !ret && i < cfi->numchips; i++) {
2398                chip = &cfi->chips[i];
2399
2400        retry:
2401                mutex_lock(&chip->mutex);
2402
2403                switch (chip->state) {
2404                case FL_READY:
2405                case FL_STATUS:
2406                case FL_CFI_QUERY:
2407                case FL_JEDEC_QUERY:
2408                        chip->oldstate = chip->state;
2409                        chip->state = FL_SYNCING;
2410                        /* No need to wake_up() on this state change -
2411                         * as the whole point is that nobody can do anything
2412                         * with the chip now anyway.  Deliberately fall
2413                         * through to the FL_SYNCING case. */
2414                case FL_SYNCING:
2415                        mutex_unlock(&chip->mutex);
2416                        break;
2417
2418                default:
2419                        /* Not an idle state */
2420                        set_current_state(TASK_UNINTERRUPTIBLE);
2421                        add_wait_queue(&chip->wq, &wait);
2422
2423                        mutex_unlock(&chip->mutex);
2424
2425                        schedule();
2426
2427                        remove_wait_queue(&chip->wq, &wait);
2428
2429                        goto retry;
2430                }
2431        }
2432
2433        /* Unlock the chips again */
2434
2435        for (i--; i >= 0; i--) {
2436                chip = &cfi->chips[i];
2437
2438                mutex_lock(&chip->mutex);
2439
2440                if (chip->state == FL_SYNCING) {
2441                        chip->state = chip->oldstate;
2442                        wake_up(&chip->wq);
2443                }
2444                mutex_unlock(&chip->mutex);
2445        }
2446}
2447
2448
2449static int cfi_amdstd_suspend(struct mtd_info *mtd)
2450{
2451        struct map_info *map = mtd->priv;
2452        struct cfi_private *cfi = map->fldrv_priv;
2453        int i;
2454        struct flchip *chip;
2455        int ret = 0;
2456
2457        for (i = 0; !ret && i < cfi->numchips; i++) {
2458                chip = &cfi->chips[i];
2459
2460                mutex_lock(&chip->mutex);
2461
2462                switch (chip->state) {
2463                case FL_READY:
2464                case FL_STATUS:
2465                case FL_CFI_QUERY:
2466                case FL_JEDEC_QUERY:
2467                        chip->oldstate = chip->state;
2468                        chip->state = FL_PM_SUSPENDED;
2469                        /* No need to wake_up() on this state change -
2470                         * as the whole point is that nobody can do anything
2471                         * with the chip now anyway.  Deliberately fall
2472                         * through to the FL_PM_SUSPENDED case. */
2473                case FL_PM_SUSPENDED:
2474                        break;
2475
2476                default:
2477                        ret = -EAGAIN;
2478                        break;
2479                }
2480                mutex_unlock(&chip->mutex);
2481        }
2482
2483        /* Unlock the chips again */
2484
2485        if (ret) {
2486                for (i--; i >= 0; i--) {
2487                        chip = &cfi->chips[i];
2488
2489                        mutex_lock(&chip->mutex);
2490
2491                        if (chip->state == FL_PM_SUSPENDED) {
2492                                chip->state = chip->oldstate;
2493                                wake_up(&chip->wq);
2494                        }
2495                        mutex_unlock(&chip->mutex);
2496                }
2497        }
2498
2499        return ret;
2500}
2501
2502
2503static void cfi_amdstd_resume(struct mtd_info *mtd)
2504{
2505        struct map_info *map = mtd->priv;
2506        struct cfi_private *cfi = map->fldrv_priv;
2507        int i;
2508        struct flchip *chip;
2509
2510        for (i = 0; i < cfi->numchips; i++) {
2511
2512                chip = &cfi->chips[i];
2513
2514                mutex_lock(&chip->mutex);
2515
2516                if (chip->state == FL_PM_SUSPENDED) {
2517                        chip->state = FL_READY;
2518                        map_write(map, CMD(0xF0), chip->start);
2519                        wake_up(&chip->wq);
2520                } else {
2521                        printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");
2522                }
2523
2524                mutex_unlock(&chip->mutex);
2525        }
2526}
2527
2528
2529/*
2530 * Ensure that the flash device is put back into read array mode before
2531 * unloading the driver or rebooting.  On some systems, rebooting while
2532 * the flash is in query/program/erase mode will prevent the CPU from
2533 * fetching the bootloader code, requiring a hard reset or power cycle.
2534 */
2535static int cfi_amdstd_reset(struct mtd_info *mtd)
2536{
2537        struct map_info *map = mtd->priv;
2538        struct cfi_private *cfi = map->fldrv_priv;
2539        int i, ret;
2540        struct flchip *chip;
2541
2542        for (i = 0; i < cfi->numchips; i++) {
2543
2544                chip = &cfi->chips[i];
2545
2546                mutex_lock(&chip->mutex);
2547
2548                ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
2549                if (!ret) {
2550                        map_write(map, CMD(0xF0), chip->start);
2551                        chip->state = FL_SHUTDOWN;
2552                        put_chip(map, chip, chip->start);
2553                }
2554
2555                mutex_unlock(&chip->mutex);
2556        }
2557
2558        return 0;
2559}
2560
2561
2562static int cfi_amdstd_reboot(struct notifier_block *nb, unsigned long val,
2563                               void *v)
2564{
2565        struct mtd_info *mtd;
2566
2567        mtd = container_of(nb, struct mtd_info, reboot_notifier);
2568        cfi_amdstd_reset(mtd);
2569        return NOTIFY_DONE;
2570}
2571
2572
2573static void cfi_amdstd_destroy(struct mtd_info *mtd)
2574{
2575        struct map_info *map = mtd->priv;
2576        struct cfi_private *cfi = map->fldrv_priv;
2577
2578        cfi_amdstd_reset(mtd);
2579        unregister_reboot_notifier(&mtd->reboot_notifier);
2580        kfree(cfi->cmdset_priv);
2581        kfree(cfi->cfiq);
2582        kfree(cfi);
2583        kfree(mtd->eraseregions);
2584}
2585
2586MODULE_LICENSE("GPL");
2587MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
2588MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");
2589MODULE_ALIAS("cfi_cmdset_0006");
2590MODULE_ALIAS("cfi_cmdset_0701");
2591