linux/drivers/mtd/chips/cfi_cmdset_0002.c
/*
 * Common Flash Interface support:
 *   AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
 *
 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
 * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com>
 *
 * 2_by_8 routines added by Simon Munton
 *
 * 4_by_16 work by Carolyn J. Smith
 *
 * XIP support hooks by Vitaly Wool (based on code for Intel flash
 * by Nicolas Pitre)
 *
 * 25/09/2008 Christopher Moore: TopBottom fixup for many Macronix with CFI V1.0
 *
 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
 *
 * This code is GPL
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/xip.h>

#define AMD_BOOTLOC_BUG
#define FORCE_WORD_WRITE 0

#define MAX_WORD_RETRIES 3

#define MANUFACTURER_AMD        0x0001
#define MANUFACTURER_ATMEL      0x001F
#define MANUFACTURER_MACRONIX   0x00C2
#define MANUFACTURER_SST        0x00BF
#define SST49LF004B             0x0060
#define SST49LF040B             0x0050
#define SST49LF008A             0x005a
#define AT49BV6416              0x00d6

static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_amdstd_sync (struct mtd_info *);
static int cfi_amdstd_suspend (struct mtd_info *);
static void cfi_amdstd_resume (struct mtd_info *);
static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);

static void cfi_amdstd_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"

static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);

static struct mtd_chip_driver cfi_amdstd_chipdrv = {
        .probe          = NULL, /* Not usable directly */
        .destroy        = cfi_amdstd_destroy,
        .name           = "cfi_cmdset_0002",
        .module         = THIS_MODULE
};


/* #define DEBUG_CFI_FEATURES */


#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_amdstd *extp)
{
        const char* erase_suspend[3] = {
                "Not supported", "Read only", "Read/write"
        };
        const char* top_bottom[6] = {
                "No WP", "8x8KiB sectors at top & bottom, no WP",
                "Bottom boot", "Top boot",
                "Uniform, Bottom WP", "Uniform, Top WP"
        };

        printk("  Silicon revision: %d\n", extp->SiliconRevision >> 1);
        printk("  Address sensitive unlock: %s\n",
               (extp->SiliconRevision & 1) ? "Not required" : "Required");

        if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
                printk("  Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
        else
                printk("  Erase Suspend: Unknown value %d\n", extp->EraseSuspend);

        if (extp->BlkProt == 0)
                printk("  Block protection: Not supported\n");
        else
                printk("  Block protection: %d sectors per group\n", extp->BlkProt);


        printk("  Temporary block unprotect: %s\n",
               extp->TmpBlkUnprotect ? "Supported" : "Not supported");
        printk("  Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
        printk("  Number of simultaneous operations: %d\n", extp->SimultaneousOps);
        printk("  Burst mode: %s\n",
               extp->BurstMode ? "Supported" : "Not supported");
        if (extp->PageMode == 0)
                printk("  Page mode: Not supported\n");
        else
                printk("  Page mode: %d word page\n", extp->PageMode << 2);

        printk("  Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
               extp->VppMin >> 4, extp->VppMin & 0xf);
        printk("  Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
               extp->VppMax >> 4, extp->VppMax & 0xf);

        if (extp->TopBottom < ARRAY_SIZE(top_bottom))
                printk("  Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
        else
                printk("  Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
}
#endif

#ifdef AMD_BOOTLOC_BUG
/* Wheee. Bring me the head of someone at AMD. */
static void fixup_amd_bootblock(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
        __u8 major = extp->MajorVersion;
        __u8 minor = extp->MinorVersion;

        if (((major << 8) | minor) < 0x3131) {
                /* CFI version 1.0 => don't trust bootloc */

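                /* MajorVersion/MinorVersion are ASCII digits, so the
                 * comparison above is against 0x3131, i.e. "11": any
                 * extended query table older than version 1.1 takes
                 * this fixup path.
                 */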
                DEBUG(MTD_DEBUG_LEVEL1,
                        "%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n",
                        map->name, cfi->mfr, cfi->id);

                /* AFAICS all 29LV400 with a bottom boot block have a device ID
                 * of 0x22BA in 16-bit mode and 0xBA in 8-bit mode.
                 * These were badly detected as they have the 0x80 bit set
                 * so treat them as a special case.
                 */
                if (((cfi->id == 0xBA) || (cfi->id == 0x22BA)) &&

                        /* Macronix added CFI to their 2nd generation
                         * MX29LV400C B/T but AFAICS no other 29LV400 (AMD,
                         * Fujitsu, Spansion, EON, ESI and older Macronix)
                         * has CFI.
                         *
                         * Therefore also check the manufacturer.
                         * This reduces the risk of false detection due to
                         * the 8-bit device ID.
                         */
                        (cfi->mfr == MANUFACTURER_MACRONIX)) {
                        DEBUG(MTD_DEBUG_LEVEL1,
                                "%s: Macronix MX29LV400C with bottom boot block"
                                " detected\n", map->name);
                        extp->TopBottom = 2;    /* bottom boot */
                } else if (cfi->id & 0x80) {
                        printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
                        extp->TopBottom = 3;    /* top boot */
                } else {
                        extp->TopBottom = 2;    /* bottom boot */
                }

                DEBUG(MTD_DEBUG_LEVEL1,
                        "%s: AMD CFI PRI V%c.%c has no boot block field;"
                        " deduced %s from Device ID\n", map->name, major, minor,
                        extp->TopBottom == 2 ? "bottom" : "top");
        }
}
#endif

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        if (cfi->cfiq->BufWriteTimeoutTyp) {
                DEBUG(MTD_DEBUG_LEVEL1, "Using buffer write method\n" );
                mtd->write = cfi_amdstd_write_buffers;
        }
}

/* Atmel chips don't use the same PRI format as AMD chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
        struct cfi_pri_atmel atmel_pri;

        memcpy(&atmel_pri, extp, sizeof(atmel_pri));
        memset((char *)extp + 5, 0, sizeof(*extp) - 5);

        if (atmel_pri.Features & 0x02)
                extp->EraseSuspend = 2;

        /* Some chips got it backwards... */
        if (cfi->id == AT49BV6416) {
                if (atmel_pri.BottomBoot)
                        extp->TopBottom = 3;
                else
                        extp->TopBottom = 2;
        } else {
                if (atmel_pri.BottomBoot)
                        extp->TopBottom = 2;
                else
                        extp->TopBottom = 3;
        }

        /* burst write mode not supported */
        cfi->cfiq->BufWriteTimeoutTyp = 0;
        cfi->cfiq->BufWriteTimeoutMax = 0;
}

static void fixup_use_secsi(struct mtd_info *mtd, void *param)
{
        /* Setup for chips with a secsi area */
        mtd->read_user_prot_reg = cfi_amdstd_secsi_read;
        mtd->read_fact_prot_reg = cfi_amdstd_secsi_read;
}

static void fixup_use_erase_chip(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        if ((cfi->cfiq->NumEraseRegions == 1) &&
                ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
                mtd->erase = cfi_amdstd_erase_chip;
        }

}

/*
 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors
 * locked by default.
 */
static void fixup_use_atmel_lock(struct mtd_info *mtd, void *param)
{
        mtd->lock = cfi_atmel_lock;
        mtd->unlock = cfi_atmel_unlock;
        mtd->flags |= MTD_POWERUP_LOCK;
}

static void fixup_s29gl064n_sectors(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
                cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
                pr_warning("%s: Bad S29GL064N CFI data, adjust from 64 to 128 sectors\n", mtd->name);
        }
}

static void fixup_s29gl032n_sectors(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
                cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
                pr_warning("%s: Bad S29GL032N CFI data, adjust from 127 to 63 sectors\n", mtd->name);
        }
}

static struct cfi_fixup cfi_fixup_table[] = {
        { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
#ifdef AMD_BOOTLOC_BUG
        { CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL },
        { MANUFACTURER_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock, NULL },
#endif
        { CFI_MFR_AMD, 0x0050, fixup_use_secsi, NULL, },
        { CFI_MFR_AMD, 0x0053, fixup_use_secsi, NULL, },
        { CFI_MFR_AMD, 0x0055, fixup_use_secsi, NULL, },
        { CFI_MFR_AMD, 0x0056, fixup_use_secsi, NULL, },
        { CFI_MFR_AMD, 0x005C, fixup_use_secsi, NULL, },
        { CFI_MFR_AMD, 0x005F, fixup_use_secsi, NULL, },
        { CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors, NULL, },
        { CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors, NULL, },
        { CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors, NULL, },
        { CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors, NULL, },
#if !FORCE_WORD_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL, },
#endif
        { 0, 0, NULL, NULL }
};
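
/*
 * Note that cfi_fixup() (see cfi_util.c) walks a table like the one above
 * and runs every entry whose mfr/id pair matches the probed chip, with
 * CFI_MFR_ANY and CFI_ID_ANY acting as wildcards, so the order of the
 * entries matters when several of them match the same chip.
 */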
static struct cfi_fixup jedec_fixup_table[] = {
        { MANUFACTURER_SST, SST49LF004B, fixup_use_fwh_lock, NULL, },
        { MANUFACTURER_SST, SST49LF040B, fixup_use_fwh_lock, NULL, },
        { MANUFACTURER_SST, SST49LF008A, fixup_use_fwh_lock, NULL, },
        { 0, 0, NULL, NULL }
};

static struct cfi_fixup fixup_table[] = {
        /* The CFI vendor IDs and the JEDEC vendor IDs appear to be
         * common, and the same seems to hold for the device IDs.
         * This table catches all the cases where we know that is
         * the case.
         */
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip, NULL },
        { CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock, NULL },
        { 0, 0, NULL, NULL }
};


static void cfi_fixup_major_minor(struct cfi_private *cfi,
                                  struct cfi_pri_amdstd *extp)
{
        if (cfi->mfr == CFI_MFR_SAMSUNG && cfi->id == 0x257e &&
            extp->MajorVersion == '0')
                extp->MajorVersion = '1';
}

struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct mtd_info *mtd;
        int i;

        mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
        if (!mtd) {
                printk(KERN_WARNING "Failed to allocate memory for MTD device\n");
                return NULL;
        }
        mtd->priv = map;
        mtd->type = MTD_NORFLASH;

        /* Fill in the default mtd operations */
        mtd->erase   = cfi_amdstd_erase_varsize;
        mtd->write   = cfi_amdstd_write_words;
        mtd->read    = cfi_amdstd_read;
        mtd->sync    = cfi_amdstd_sync;
        mtd->suspend = cfi_amdstd_suspend;
        mtd->resume  = cfi_amdstd_resume;
        mtd->flags   = MTD_CAP_NORFLASH;
        mtd->name    = map->name;
        mtd->writesize = 1;

        if (cfi->cfi_mode==CFI_MODE_CFI){
                unsigned char bootloc;
                /*
                 * It's a real CFI chip, not one for which the probe
                 * routine faked a CFI structure. So we read the feature
                 * table from it.
                 */
                __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
                struct cfi_pri_amdstd *extp;

                extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
                if (!extp) {
                        kfree(mtd);
                        return NULL;
                }

                cfi_fixup_major_minor(cfi, extp);

                if (extp->MajorVersion != '1' ||
                    (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
                        printk(KERN_ERR "  Unknown Amd/Fujitsu Extended Query "
                               "version %c.%c.\n",  extp->MajorVersion,
                               extp->MinorVersion);
                        kfree(extp);
                        kfree(mtd);
                        return NULL;
                }

                /* Install our own private info structure */
                cfi->cmdset_priv = extp;

                /* Apply cfi device specific fixups */
                cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
                /* Tell the user about it in lots of lovely detail */
                cfi_tell_features(extp);
#endif

                bootloc = extp->TopBottom;
                if ((bootloc != 2) && (bootloc != 3)) {
                        printk(KERN_WARNING "%s: CFI does not contain boot "
                               "bank location. Assuming bottom.\n", map->name);
                        bootloc = 2;
                }

                if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
                        printk(KERN_WARNING "%s: Swapping erase regions for broken CFI table.\n", map->name);

                        for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
                                int j = (cfi->cfiq->NumEraseRegions-1)-i;
                                __u32 swap;

                                swap = cfi->cfiq->EraseRegionInfo[i];
                                cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
                                cfi->cfiq->EraseRegionInfo[j] = swap;
                        }
                }
                /* Set the default CFI lock/unlock addresses */
                cfi->addr_unlock1 = 0x555;
                cfi->addr_unlock2 = 0x2aa;

        } /* CFI mode */
        else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
                /* Apply jedec specific fixups */
                cfi_fixup(mtd, jedec_fixup_table);
        }
        /* Apply generic fixups */
        cfi_fixup(mtd, fixup_table);

        for (i=0; i< cfi->numchips; i++) {
                cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
                cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
                cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
                cfi->chips[i].ref_point_counter = 0;
                init_waitqueue_head(&(cfi->chips[i].wq));
        }

        map->fldrv = &cfi_amdstd_chipdrv;

        return cfi_amdstd_setup(mtd);
}
EXPORT_SYMBOL_GPL(cfi_cmdset_0002);
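
/*
 * A minimal usage sketch (names from the generic mtd map layer): board map
 * drivers do not call cfi_cmdset_0002() directly, it is reached through the
 * chip probe:
 *
 *      struct mtd_info *mtd = do_map_probe("cfi_probe", map);
 *      if (mtd)
 *              add_mtd_device(mtd);
 *
 * cfi_probe() reads the CFI query table, sees primary vendor command set
 * ID 0x0002 and dispatches here via the mtd chip driver interface.
 */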

static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
        unsigned long offset = 0;
        int i,j;

        printk(KERN_NOTICE "number of %s chips: %d\n",
               (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);
        /* Select the correct geometry setup */
        mtd->size = devsize * cfi->numchips;

        mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
        mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
                                    * mtd->numeraseregions, GFP_KERNEL);
        if (!mtd->eraseregions) {
                printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n");
                goto setup_err;
        }

        for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
                unsigned long ernum, ersize;
                ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
                ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

                if (mtd->erasesize < ersize) {
                        mtd->erasesize = ersize;
                }
                for (j=0; j<cfi->numchips; j++) {
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
                }
                offset += (ersize * ernum);
        }
        if (offset != devsize) {
                /* Argh */
                printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
                goto setup_err;
        }
#if 0
        // debug
        for (i=0; i<mtd->numeraseregions;i++){
                printk("%d: offset=0x%x,size=0x%x,blocks=%d\n",
                       i,mtd->eraseregions[i].offset,
                       mtd->eraseregions[i].erasesize,
                       mtd->eraseregions[i].numblocks);
        }
#endif

        /* FIXME: erase-suspend-program is broken.  See
           http://lists.infradead.org/pipermail/linux-mtd/2003-December/009001.html */
        printk(KERN_NOTICE "cfi_cmdset_0002: Disabling erase-suspend-program due to code brokenness.\n");

        __module_get(THIS_MODULE);
        return mtd;

 setup_err:
        if(mtd) {
                kfree(mtd->eraseregions);
                kfree(mtd);
        }
        kfree(cfi->cmdset_priv);
        kfree(cfi->cfiq);
        return NULL;
}
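
/*
 * Worked example of the EraseRegionInfo decoding in cfi_amdstd_setup()
 * (CFI encodes, per region, the block count minus one in bits 0-15 and the
 * block size in units of 256 bytes in bits 16-31).  For a hypothetical
 * region word of 0x0100003f on a non-interleaved map:
 *
 *      ernum  = (0x0100003f & 0xffff) + 1       = 64 blocks
 *      ersize = ((0x0100003f >> 8) & ~0xff) * 1 = 0x10000 = 64KiB
 *
 * i.e. a 4MiB region of 64KiB sectors.
 */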

/*
 * Return true if the chip is ready.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and is indicated by no toggle bits toggling.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_ready(struct map_info *map, unsigned long addr)
{
        map_word d, t;

        d = map_read(map, addr);
        t = map_read(map, addr);

        return map_word_equal(map, d, t);
}
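
/*
 * A hedged sketch of how the write/erase paths below use chip_ready():
 * poll until two successive reads match (DQ6 stopped toggling), giving up
 * after a software timeout.  The real loops additionally drop chip->mutex
 * around the delay and handle suspension:
 *
 *      timeo = jiffies + HZ;
 *      while (!chip_ready(map, adr)) {
 *              if (time_after(jiffies, timeo))
 *                      return -EIO;
 *              cfi_udelay(1);
 *      }
 */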

/*
 * Return true if the chip is ready and has the correct value.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and is indicated by no toggle bits toggling.
 *
 * Errors are indicated by bits toggling, or by bits held at the wrong
 * value.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 *
 */
static int __xipram chip_good(struct map_info *map, unsigned long addr, map_word expected)
{
        map_word oldd, curd;

        oldd = map_read(map, addr);
        curd = map_read(map, addr);

        return  map_word_equal(map, oldd, curd) &&
                map_word_equal(map, curd, expected);
}

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        DECLARE_WAITQUEUE(wait, current);
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long timeo;
        struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;

 resettime:
        timeo = jiffies + HZ;
 retry:
        switch (chip->state) {

        case FL_STATUS:
                for (;;) {
                        if (chip_ready(map, adr))
                                break;

                        if (time_after(jiffies, timeo)) {
                                printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
                                spin_unlock(chip->mutex);
                                return -EIO;
                        }
                        spin_unlock(chip->mutex);
                        cfi_udelay(1);
                        spin_lock(chip->mutex);
                        /* Someone else might have been playing with it. */
                        goto retry;
                }

        case FL_READY:
        case FL_CFI_QUERY:
        case FL_JEDEC_QUERY:
                return 0;

        case FL_ERASING:
                if (mode == FL_WRITING) /* FIXME: Erase-suspend-program appears broken. */
                        goto sleep;

                if (!(   mode == FL_READY
                      || mode == FL_POINT
                      || !cfip
                      || (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))
                      || (mode == FL_WRITING && (cfip->EraseSuspend & 0x1)
                    )))
                        goto sleep;

                /* We could check to see if we're trying to access the sector
                 * that is currently being erased. However, no user will try
                 * anything like that so we just wait for the timeout. */

                /* Erase suspend */
                /* It's harmless to issue the Erase-Suspend and Erase-Resume
                 * commands when the erase algorithm isn't in progress. */
                map_write(map, CMD(0xB0), chip->in_progress_block_addr);
                chip->oldstate = FL_ERASING;
                chip->state = FL_ERASE_SUSPENDING;
                chip->erase_suspended = 1;
                for (;;) {
                        if (chip_ready(map, adr))
                                break;

                        if (time_after(jiffies, timeo)) {
                                /* Should have suspended the erase by now.
                                 * Send an Erase-Resume command as either
                                 * there was an error (so leave the erase
                                 * routine to recover from it) or we are
                                 * trying to use the erase-in-progress
                                 * sector. */
                                map_write(map, CMD(0x30), chip->in_progress_block_addr);
                                chip->state = FL_ERASING;
                                chip->oldstate = FL_READY;
                                printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
                                return -EIO;
                        }

                        spin_unlock(chip->mutex);
                        cfi_udelay(1);
                        spin_lock(chip->mutex);
                        /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
                           So we can just loop here. */
                }
                chip->state = FL_READY;
                return 0;

        case FL_XIP_WHILE_ERASING:
                if (mode != FL_READY && mode != FL_POINT &&
                    (!cfip || !(cfip->EraseSuspend&2)))
                        goto sleep;
                chip->oldstate = chip->state;
                chip->state = FL_READY;
                return 0;

        case FL_POINT:
                /* Only if there's no operation suspended... */
                if (mode == FL_READY && chip->oldstate == FL_READY)
                        return 0;

        default:
        sleep:
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);
                spin_unlock(chip->mutex);
                schedule();
                remove_wait_queue(&chip->wq, &wait);
                spin_lock(chip->mutex);
                goto resettime;
        }
}
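
/*
 * Every operation in this driver wraps its command cycles in the same
 * arbitration pattern (sketch only; see do_write_oneword() for a real
 * instance):
 *
 *      spin_lock(chip->mutex);
 *      ret = get_chip(map, chip, adr, FL_WRITING);
 *      if (ret) {
 *              spin_unlock(chip->mutex);
 *              return ret;
 *      }
 *      ... issue unlock/command cycles, poll for completion ...
 *      put_chip(map, chip, adr);       -- resumes a suspended erase
 *      spin_unlock(chip->mutex);
 */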


static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;

        switch(chip->oldstate) {
        case FL_ERASING:
                chip->state = chip->oldstate;
                map_write(map, CMD(0x30), chip->in_progress_block_addr);
                chip->oldstate = FL_READY;
                chip->state = FL_ERASING;
                break;

        case FL_XIP_WHILE_ERASING:
                chip->state = chip->oldstate;
                chip->oldstate = FL_READY;
                break;

        case FL_READY:
        case FL_STATUS:
                /* We should really make set_vpp() count, rather than doing this */
                DISABLE_VPP(map);
                break;
        default:
                printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
        }
        wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
                        unsigned long adr)
{
        /* TODO: chips with no XIP use should ignore and return */
        (void) map_read(map, adr); /* ensure mmu mapping is up to date */
        local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
                                unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;

        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xf0), adr);
                chip->state = FL_READY;
        }
        (void) map_read(map, adr);
        xip_iprefetch();
        local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function polls for both the given timeout and pending
 * (but still masked) hardware interrupts.  Whenever there is an interrupt
 * pending then the flash erase operation is suspended, array mode restored
 * and interrupts unmasked.  Task scheduling might also happen at that
 * point.  The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the
 * remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
                                unsigned long adr, int usec)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
        map_word status, OK = CMD(0x80);
        unsigned long suspended, start = xip_currtime();
        flstate_t oldstate;

        do {
                cpu_relax();
                if (xip_irqpending() && extp &&
                    ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
                    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
                        /*
                         * Let's suspend the erase operation when supported.
                         * Note that we currently don't try to suspend
                         * interleaved chips if there is already another
                         * operation suspended (imagine what happens
                         * when one chip was already done with the current
                         * operation while another chip suspended it, then
                         * we resume the whole thing at once).  Yes, it
                         * can happen!
                         */
                        map_write(map, CMD(0xb0), adr);
                        usec -= xip_elapsed_since(start);
                        suspended = xip_currtime();
                        do {
                                if (xip_elapsed_since(suspended) > 100000) {
                                        /*
                                         * The chip doesn't want to suspend
                                         * after waiting for 100 msecs.
                                         * This is a critical error but there
                                         * is not much we can do here.
                                         */
                                        return;
                                }
                                status = map_read(map, adr);
                        } while (!map_word_andequal(map, status, OK, OK));

                        /* Suspend succeeded */
                        oldstate = chip->state;
                        if (!map_word_bitsset(map, status, CMD(0x40)))
                                break;
                        chip->state = FL_XIP_WHILE_ERASING;
                        chip->erase_suspended = 1;
                        map_write(map, CMD(0xf0), adr);
                        (void) map_read(map, adr);
                        xip_iprefetch();
                        local_irq_enable();
                        spin_unlock(chip->mutex);
                        xip_iprefetch();
                        cond_resched();

                        /*
                         * We're back.  However someone else might have
                         * decided to go write to the chip if we are in
                         * a suspended erase state.  If so let's wait
                         * until it's done.
                         */
                        spin_lock(chip->mutex);
                        while (chip->state != FL_XIP_WHILE_ERASING) {
                                DECLARE_WAITQUEUE(wait, current);
                                set_current_state(TASK_UNINTERRUPTIBLE);
                                add_wait_queue(&chip->wq, &wait);
                                spin_unlock(chip->mutex);
                                schedule();
                                remove_wait_queue(&chip->wq, &wait);
                                spin_lock(chip->mutex);
                        }
                        /* Disallow XIP again */
                        local_irq_disable();

                        /* Resume the write or erase operation */
                        map_write(map, CMD(0x30), adr);
                        chip->state = oldstate;
                        start = xip_currtime();
                } else if (usec >= 1000000/HZ) {
                        /*
                         * Try to save on CPU power when waiting delay
                         * is at least a system timer tick period.
                         * No need to be extremely accurate here.
                         */
                        xip_cpu_idle();
                }
                status = map_read(map, adr);
        } while (!map_word_andequal(map, status, OK, OK)
                 && xip_elapsed_since(start) < usec);
}

#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVALIDATE_CACHE_UDELAY.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
        INVALIDATE_CACHED_RANGE(map, from, size)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
        UDELAY(map, chip, adr, usec)

/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit.  For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with the
 * same chip state as it was entered with.  This is why there is no care for
 * the presence of add_wait_queue() or schedule() calls from within a couple
 * of xip_disable()'d areas of code, like in do_erase_oneblock for example.
 * The queueing and scheduling are always happening within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where the flash is
 * in array mode, therefore never executing many cases therein and not
 * causing any problem with XIP.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)

#define UDELAY(map, chip, adr, usec)  \
do {  \
        spin_unlock(chip->mutex);  \
        cfi_udelay(usec);  \
        spin_lock(chip->mutex);  \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
do {  \
        spin_unlock(chip->mutex);  \
        INVALIDATE_CACHED_RANGE(map, adr, len);  \
        cfi_udelay(usec);  \
        spin_lock(chip->mutex);  \
} while (0)

#endif

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
        unsigned long cmd_addr;
        struct cfi_private *cfi = map->fldrv_priv;
        int ret;

        adr += chip->start;

        /* Ensure cmd read/writes are aligned. */
        cmd_addr = adr & ~(map_bankwidth(map)-1);

        spin_lock(chip->mutex);
        ret = get_chip(map, chip, cmd_addr, FL_READY);
        if (ret) {
                spin_unlock(chip->mutex);
                return ret;
        }

        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xf0), cmd_addr);
                chip->state = FL_READY;
        }

        map_copy_from(map, buf, adr, len);

        put_chip(map, chip, cmd_addr);

        spin_unlock(chip->mutex);
        return 0;
}


static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long ofs;
        int chipnum;
        int ret = 0;

        /* ofs: offset within the first chip that the first read should start */

        chipnum = (from >> cfi->chipshift);
        ofs = from - (chipnum <<  cfi->chipshift);


        *retlen = 0;

        while (len) {
                unsigned long thislen;

                if (chipnum >= cfi->numchips)
                        break;

                if ((len + ofs -1) >> cfi->chipshift)
                        thislen = (1<<cfi->chipshift) - ofs;
                else
                        thislen = len;

                ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
                if (ret)
                        break;

                *retlen += thislen;
                len -= thislen;
                buf += thislen;

                ofs = 0;
                chipnum++;
        }
        return ret;
}


static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
        DECLARE_WAITQUEUE(wait, current);
        unsigned long timeo = jiffies + HZ;
        struct cfi_private *cfi = map->fldrv_priv;

 retry:
        spin_lock(chip->mutex);

        if (chip->state != FL_READY){
#if 0
                printk(KERN_DEBUG "Waiting for chip to read, status = %d\n", chip->state);
#endif
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);

                spin_unlock(chip->mutex);

                schedule();
                remove_wait_queue(&chip->wq, &wait);
#if 0
                if(signal_pending(current))
                        return -EINTR;
#endif
                timeo = jiffies + HZ;

                goto retry;
        }

        adr += chip->start;

        chip->state = FL_READY;

        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

        map_copy_from(map, buf, adr, len);

        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

        wake_up(&chip->wq);
        spin_unlock(chip->mutex);

        return 0;
}
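
/*
 * For reference, the sequences above are the SecSi (security silicon)
 * sector entry command (the two unlock cycles followed by 0x88) and the
 * exit command (unlock cycles, 0x90, then 0x00).  Between the two, plain
 * reads return SecSi data instead of normal array data, hence the
 * unadorned map_copy_from() in the middle.
 */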

static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long ofs;
        int chipnum;
        int ret = 0;


        /* ofs: offset within the first chip that the first read should start */

        /* 8 secsi bytes per chip */
        chipnum=from>>3;
        ofs=from & 7;


        *retlen = 0;

        while (len) {
                unsigned long thislen;

                if (chipnum >= cfi->numchips)
                        break;

                if ((len + ofs -1) >> 3)
                        thislen = (1<<3) - ofs;
                else
                        thislen = len;

                ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
                if (ret)
                        break;

                *retlen += thislen;
                len -= thislen;
                buf += thislen;

                ofs = 0;
                chipnum++;
        }
        return ret;
}


static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum)
{
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long timeo = jiffies + HZ;
        /*
         * We use a 1ms + 1 jiffies generic timeout for writes (most devices
         * have a max write time of a few hundred usecs). However, we should
         * use the maximum timeout value given by the chip at probe time
         * instead.  Unfortunately, struct flchip doesn't have a field for
         * the maximum timeout, only for the typical one, which can be far
         * too short depending on the conditions.  The ' + 1' is to avoid
         * having a timeout of 0 jiffies if HZ is smaller than 1000.
         */
        unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
        int ret = 0;
        map_word oldd;
        int retry_cnt = 0;

        adr += chip->start;

        spin_lock(chip->mutex);
        ret = get_chip(map, chip, adr, FL_WRITING);
        if (ret) {
                spin_unlock(chip->mutex);
                return ret;
        }

        DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
               __func__, adr, datum.x[0] );

        /*
         * Check for a NOP for the case when the datum to write is already
         * present - it saves time and works around buggy chips that corrupt
         * data at other locations when 0xff is written to a location that
         * already contains 0xff.
         */
        oldd = map_read(map, adr);
        if (map_word_equal(map, oldd, datum)) {
                DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): NOP\n",
                       __func__);
                goto op_done;
        }

        XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
        ENABLE_VPP(map);
        xip_disable(map, chip, adr);
 retry:
        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
        map_write(map, datum, adr);
        chip->state = FL_WRITING;

        INVALIDATE_CACHE_UDELAY(map, chip,
                                adr, map_bankwidth(map),
                                chip->word_write_time);

        /* See comment above for timeout value. */
        timeo = jiffies + uWriteTimeout;
        for (;;) {
                if (chip->state != FL_WRITING) {
                        /* Someone's suspended the write. Sleep */
                        DECLARE_WAITQUEUE(wait, current);

                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        spin_unlock(chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        timeo = jiffies + (HZ / 2); /* FIXME */
                        spin_lock(chip->mutex);
                        continue;
                }

                if (time_after(jiffies, timeo) && !chip_ready(map, adr)){
                        xip_enable(map, chip, adr);
                        printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
                        xip_disable(map, chip, adr);
                        break;
                }

                if (chip_ready(map, adr))
                        break;

                /* Latency issues. Drop the lock, wait a while and retry */
                UDELAY(map, chip, adr, 1);
        }
        /* Did we succeed? */
        if (!chip_good(map, adr, datum)) {
                /* reset on all failures. */
                map_write( map, CMD(0xF0), chip->start );
                /* FIXME - should have reset delay before continuing */

                if (++retry_cnt <= MAX_WORD_RETRIES)
                        goto retry;

                ret = -EIO;
        }
        xip_enable(map, chip, adr);
 op_done:
        chip->state = FL_READY;
        put_chip(map, chip, adr);
        spin_unlock(chip->mutex);

        return ret;
}
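
/*
 * For a single (non-interleaved) chip, the program sequence issued by
 * do_write_oneword() is, in bus cycles (addr_unlock1/addr_unlock2 are the
 * 0x555/0x2aa addresses set up in cfi_cmdset_0002()):
 *
 *      write 0xAA -> addr_unlock1
 *      write 0x55 -> addr_unlock2
 *      write 0xA0 -> addr_unlock1
 *      write <datum> -> <word address>
 *
 * after which the embedded program algorithm runs and completion is
 * detected by chip_ready()/chip_good() above.
 */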


static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
                                  size_t *retlen, const u_char *buf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        int ret = 0;
        int chipnum;
        unsigned long ofs, chipstart;
        DECLARE_WAITQUEUE(wait, current);

        *retlen = 0;
        if (!len)
                return 0;

        chipnum = to >> cfi->chipshift;
        ofs = to  - (chipnum << cfi->chipshift);
        chipstart = cfi->chips[chipnum].start;

        /* If it's not bus-aligned, do the first byte write */
        if (ofs & (map_bankwidth(map)-1)) {
                unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
                int i = ofs - bus_ofs;
                int n = 0;
                map_word tmp_buf;

 retry:
                spin_lock(cfi->chips[chipnum].mutex);

                if (cfi->chips[chipnum].state != FL_READY) {
#if 0
                        printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
#endif
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&cfi->chips[chipnum].wq, &wait);

                        spin_unlock(cfi->chips[chipnum].mutex);

                        schedule();
                        remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
#if 0
                        if(signal_pending(current))
                                return -EINTR;
#endif
                        goto retry;
                }

                /* Load 'tmp_buf' with old contents of flash */
                tmp_buf = map_read(map, bus_ofs+chipstart);

                spin_unlock(cfi->chips[chipnum].mutex);

                /* Number of bytes to copy from buffer */
                n = min_t(int, len, map_bankwidth(map)-i);

                tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);

                ret = do_write_oneword(map, &cfi->chips[chipnum],
                                       bus_ofs, tmp_buf);
                if (ret)
                        return ret;

                ofs += n;
                buf += n;
                (*retlen) += n;
                len -= n;

                if (ofs >> cfi->chipshift) {
                        chipnum ++;
                        ofs = 0;
                        if (chipnum == cfi->numchips)
                                return 0;
                }
        }

        /* We are now aligned, write as much as possible */
        while(len >= map_bankwidth(map)) {
                map_word datum;

                datum = map_word_load(map, buf);

                ret = do_write_oneword(map, &cfi->chips[chipnum],
                                       ofs, datum);
                if (ret)
                        return ret;

                ofs += map_bankwidth(map);
                buf += map_bankwidth(map);
                (*retlen) += map_bankwidth(map);
                len -= map_bankwidth(map);

                if (ofs >> cfi->chipshift) {
                        chipnum ++;
                        ofs = 0;
                        if (chipnum == cfi->numchips)
                                return 0;
                        chipstart = cfi->chips[chipnum].start;
                }
        }

        /* Write the trailing bytes if any */
        if (len & (map_bankwidth(map)-1)) {
                map_word tmp_buf;

 retry1:
                spin_lock(cfi->chips[chipnum].mutex);

                if (cfi->chips[chipnum].state != FL_READY) {
#if 0
                        printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
#endif
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&cfi->chips[chipnum].wq, &wait);

                        spin_unlock(cfi->chips[chipnum].mutex);

                        schedule();
                        remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
#if 0
                        if(signal_pending(current))
                                return -EINTR;
#endif
                        goto retry1;
                }

                tmp_buf = map_read(map, ofs + chipstart);

                spin_unlock(cfi->chips[chipnum].mutex);

                tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);

                ret = do_write_oneword(map, &cfi->chips[chipnum],
                                ofs, tmp_buf);
                if (ret)
                        return ret;

                (*retlen) += len;
        }

        return 0;
}
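
/*
 * A worked example of the splitting above, assuming a bankwidth of 2 and a
 * write of 6 bytes to offset 1: cfi_amdstd_write_words() does a
 * read-modify-write of the word at offset 0 (one byte used), two aligned
 * word writes at offsets 2 and 4, then a final read-modify-write of the
 * word at offset 6 for the trailing byte.
 */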


/*
 * FIXME: interleaved mode not tested, and probably not supported!
 */
static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
                                    unsigned long adr, const u_char *buf,
                                    int len)
{
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long timeo = jiffies + HZ;
        /* see comments in do_write_oneword() regarding uWriteTimeout. */
        unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
1316        int ret = -EIO;
1317        unsigned long cmd_adr;
1318        int z, words;
1319        map_word datum;
1320
1321        adr += chip->start;
1322        cmd_adr = adr;
1323
1324        spin_lock(chip->mutex);
1325        ret = get_chip(map, chip, adr, FL_WRITING);
1326        if (ret) {
1327                spin_unlock(chip->mutex);
1328                return ret;
1329        }
1330
1331        datum = map_word_load(map, buf);
1332
1333        DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
1334               __func__, adr, datum.x[0] );
1335
1336        XIP_INVAL_CACHED_RANGE(map, adr, len);
1337        ENABLE_VPP(map);
1338        xip_disable(map, chip, cmd_adr);
1339
1340        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1341        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1342        //cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1343
1344        /* Write Buffer Load */
1345        map_write(map, CMD(0x25), cmd_adr);
1346
1347        chip->state = FL_WRITING_TO_BUFFER;
1348
1349        /* Write length of data to come */
1350        words = len / map_bankwidth(map);
1351        map_write(map, CMD(words - 1), cmd_adr);
1352        /* Write data */
1353        z = 0;
1354        while(z < words * map_bankwidth(map)) {
1355                datum = map_word_load(map, buf);
1356                map_write(map, datum, adr + z);
1357
1358                z += map_bankwidth(map);
1359                buf += map_bankwidth(map);
1360        }
1361        z -= map_bankwidth(map);
1362
1363        adr += z;
1364
1365        /* Write Buffer Program Confirm: GO GO GO */
1366        map_write(map, CMD(0x29), cmd_adr);
1367        chip->state = FL_WRITING;
1368
1369        INVALIDATE_CACHE_UDELAY(map, chip,
1370                                adr, map_bankwidth(map),
1371                                chip->word_write_time);
1372
1373        timeo = jiffies + uWriteTimeout;
1374
1375        for (;;) {
1376                if (chip->state != FL_WRITING) {
1377                        /* Someone's suspended the write. Sleep */
1378                        DECLARE_WAITQUEUE(wait, current);
1379
1380                        set_current_state(TASK_UNINTERRUPTIBLE);
1381                        add_wait_queue(&chip->wq, &wait);
1382                        spin_unlock(chip->mutex);
1383                        schedule();
1384                        remove_wait_queue(&chip->wq, &wait);
1385                        timeo = jiffies + (HZ / 2); /* FIXME */
1386                        spin_lock(chip->mutex);
1387                        continue;
1388                }
1389
                    /* On timeout, recheck with chip_good() (a data compare)
                       rather than chip_ready(): toggle-bit polling can stop
                       toggling even though the last word never programmed. */
1390                if (time_after(jiffies, timeo) && !chip_good(map, adr, datum))
1391                        break;
1392
1393                if (chip_good(map, adr, datum)) {
1394                        xip_enable(map, chip, adr);
1395                        goto op_done;
1396                }
1397
1398                /* Latency issues. Drop the lock, wait a while and retry */
1399                UDELAY(map, chip, adr, 1);
1400        }
1401
            /* Reset on all failures.  Per the AMD/Spansion datasheets, an
             * aborted write-buffer operation requires the full
             * write-to-buffer-reset sequence (two unlock cycles followed
             * by F0); a bare F0 is not sufficient here.
             */
            cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
                             cfi->device_type, NULL);
            cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
                             cfi->device_type, NULL);
            cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, chip->start, map, cfi,
                             cfi->device_type, NULL);
1404        xip_enable(map, chip, adr);
1405        /* FIXME - should have reset delay before continuing */
1406
1407        printk(KERN_WARNING "MTD %s(): software timeout\n",
1408               __func__ );
1409
1410        ret = -EIO;
1411 op_done:
1412        chip->state = FL_READY;
1413        put_chip(map, chip, adr);
1414        spin_unlock(chip->mutex);
1415
1416        return ret;
1417}
1418
1419
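    /*
     * Top-level buffered write: an unaligned head and any tail shorter
     * than two bus words go through cfi_amdstd_write_words(); everything
     * in between is cut at write-buffer boundaries and handed to
     * do_write_buffer() one chunk at a time.
     */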
1420static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
1421                                    size_t *retlen, const u_char *buf)
1422{
1423        struct map_info *map = mtd->priv;
1424        struct cfi_private *cfi = map->fldrv_priv;
1425        int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1426        int ret = 0;
1427        int chipnum;
1428        unsigned long ofs;
1429
1430        *retlen = 0;
1431        if (!len)
1432                return 0;
1433
1434        chipnum = to >> cfi->chipshift;
1435        ofs = to  - (chipnum << cfi->chipshift);
1436
1437        /* If it's not bus-aligned, do the first word write */
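        /* e.g. bankwidth 4, ofs 0x4002: (-ofs) & 3 == 2 bytes to go
           before the next aligned word. */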
1438        if (ofs & (map_bankwidth(map)-1)) {
1439                size_t local_len = (-ofs)&(map_bankwidth(map)-1);
1440                if (local_len > len)
1441                        local_len = len;
1442                ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
1443                                             local_len, retlen, buf);
1444                if (ret)
1445                        return ret;
1446                ofs += local_len;
1447                buf += local_len;
1448                len -= local_len;
1449
1450                if (ofs >> cfi->chipshift) {
1451                        chipnum ++;
1452                        ofs = 0;
1453                        if (chipnum == cfi->numchips)
1454                                return 0;
1455                }
1456        }
1457
1458        /* Write buffer is worth it only if more than one word to write... */
1459        while (len >= map_bankwidth(map) * 2) {
1460                /* We must not cross write block boundaries */
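                /* e.g. wbufsize 32, ofs 0x101c: 32 - (0x101c & 31) == 4
                   bytes left before the boundary. */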
1461                int size = wbufsize - (ofs & (wbufsize-1));
1462
1463                if (size > len)
1464                        size = len;
1465                if (size % map_bankwidth(map))
1466                        size -= size % map_bankwidth(map);
1467
1468                ret = do_write_buffer(map, &cfi->chips[chipnum],
1469                                      ofs, buf, size);
1470                if (ret)
1471                        return ret;
1472
1473                ofs += size;
1474                buf += size;
1475                (*retlen) += size;
1476                len -= size;
1477
1478                if (ofs >> cfi->chipshift) {
1479                        chipnum ++;
1480                        ofs = 0;
1481                        if (chipnum == cfi->numchips)
1482                                return 0;
1483                }
1484        }
1485
1486        if (len) {
1487                size_t retlen_dregs = 0;
1488
1489                ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
1490                                             len, &retlen_dregs, buf);
1491
1492                *retlen += retlen_dregs;
1493                return ret;
1494        }
1495
1496        return 0;
1497}
1498
1499
1500/*
1501 * Handle devices with one erase region, that only implement
1502 * the chip erase command.
1503 */
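    /*
     * Chip erase is the six-cycle AMD sequence: unlock, 0x80 (erase
     * setup), a second unlock, then 0x10 (chip erase).
     */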
1504static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
1505{
1506        struct cfi_private *cfi = map->fldrv_priv;
1507        unsigned long timeo = jiffies + HZ;
1508        unsigned long adr;
1509        DECLARE_WAITQUEUE(wait, current);
1510        int ret = 0;
1511
1512        adr = cfi->addr_unlock1;
1513
1514        spin_lock(chip->mutex);
1515        ret = get_chip(map, chip, adr, FL_WRITING);
1516        if (ret) {
1517                spin_unlock(chip->mutex);
1518                return ret;
1519        }
1520
1521        DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
1522               __func__, chip->start );
1523
1524        XIP_INVAL_CACHED_RANGE(map, adr, map->size);
1525        ENABLE_VPP(map);
1526        xip_disable(map, chip, adr);
1527
1528        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1529        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1530        cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1531        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1532        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1533        cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1534
1535        chip->state = FL_ERASING;
1536        chip->erase_suspended = 0;
1537        chip->in_progress_block_addr = adr;
1538
1539        INVALIDATE_CACHE_UDELAY(map, chip,
1540                                adr, map->size,
1541                                chip->erase_time*500);
1542
1543        timeo = jiffies + (HZ*20);
1544
1545        for (;;) {
1546                if (chip->state != FL_ERASING) {
1547                        /* Someone's suspended the erase. Sleep */
1548                        set_current_state(TASK_UNINTERRUPTIBLE);
1549                        add_wait_queue(&chip->wq, &wait);
1550                        spin_unlock(chip->mutex);
1551                        schedule();
1552                        remove_wait_queue(&chip->wq, &wait);
1553                        spin_lock(chip->mutex);
1554                        continue;
1555                }
1556                if (chip->erase_suspended) {
1557                        /* This erase was suspended and resumed.
1558                           Adjust the timeout */
1559                        timeo = jiffies + (HZ*20); /* FIXME */
1560                        chip->erase_suspended = 0;
1561                }
1562
1563                if (chip_ready(map, adr))
1564                        break;
1565
1566                if (time_after(jiffies, timeo)) {
1567                        printk(KERN_WARNING "MTD %s(): software timeout\n",
1568                                __func__ );
1569                        break;
1570                }
1571
1572                /* Latency issues. Drop the lock, wait a while and retry */
1573                UDELAY(map, chip, adr, 1000000/HZ);
1574        }
1575        /* Did we succeed? */
1576        if (!chip_good(map, adr, map_word_ff(map))) {
1577                /* reset on all failures. */
1578                map_write( map, CMD(0xF0), chip->start );
1579                /* FIXME - should have reset delay before continuing */
1580
1581                ret = -EIO;
1582        }
1583
1584        chip->state = FL_READY;
1585        xip_enable(map, chip, adr);
1586        put_chip(map, chip, adr);
1587        spin_unlock(chip->mutex);
1588
1589        return ret;
1590}
1591
1592
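    /*
     * Sector erase uses the same six-cycle preamble as chip erase, but
     * the final cycle writes 0x30 to an address inside the sector
     * rather than 0x10 to the unlock address.
     */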
1593static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
1594{
1595        struct cfi_private *cfi = map->fldrv_priv;
1596        unsigned long timeo = jiffies + HZ;
1597        DECLARE_WAITQUEUE(wait, current);
1598        int ret = 0;
1599
1600        adr += chip->start;
1601
1602        spin_lock(chip->mutex);
1603        ret = get_chip(map, chip, adr, FL_ERASING);
1604        if (ret) {
1605                spin_unlock(chip->mutex);
1606                return ret;
1607        }
1608
1609        DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
1610               __func__, adr );
1611
1612        XIP_INVAL_CACHED_RANGE(map, adr, len);
1613        ENABLE_VPP(map);
1614        xip_disable(map, chip, adr);
1615
1616        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1617        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1618        cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1619        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1620        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1621        map_write(map, CMD(0x30), adr);
1622
1623        chip->state = FL_ERASING;
1624        chip->erase_suspended = 0;
1625        chip->in_progress_block_addr = adr;
1626
1627        INVALIDATE_CACHE_UDELAY(map, chip,
1628                                adr, len,
1629                                chip->erase_time*500);
1630
1631        timeo = jiffies + (HZ*20);
1632
1633        for (;;) {
1634                if (chip->state != FL_ERASING) {
1635                        /* Someone's suspended the erase. Sleep */
1636                        set_current_state(TASK_UNINTERRUPTIBLE);
1637                        add_wait_queue(&chip->wq, &wait);
1638                        spin_unlock(chip->mutex);
1639                        schedule();
1640                        remove_wait_queue(&chip->wq, &wait);
1641                        spin_lock(chip->mutex);
1642                        continue;
1643                }
1644                if (chip->erase_suspended) {
1645                        /* This erase was suspended and resumed.
1646                           Adjust the timeout */
1647                        timeo = jiffies + (HZ*20); /* FIXME */
1648                        chip->erase_suspended = 0;
1649                }
1650
1651                if (chip_ready(map, adr)) {
1652                        xip_enable(map, chip, adr);
1653                        break;
1654                }
1655
1656                if (time_after(jiffies, timeo)) {
1657                        xip_enable(map, chip, adr);
1658                        printk(KERN_WARNING "MTD %s(): software timeout\n",
1659                                __func__ );
1660                        break;
1661                }
1662
1663                /* Latency issues. Drop the lock, wait a while and retry */
1664                UDELAY(map, chip, adr, 1000000/HZ);
1665        }
1666        /* Did we succeed? */
1667        if (!chip_good(map, adr, map_word_ff(map))) {
1668                /* reset on all failures. */
1669                map_write( map, CMD(0xF0), chip->start );
1670                /* FIXME - should have reset delay before continuing */
1671
1672                ret = -EIO;
1673        }
1674
1675        chip->state = FL_READY;
1676        put_chip(map, chip, adr);
1677        spin_unlock(chip->mutex);
1678        return ret;
1679}
1680
1681
1682static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1683{
1684        unsigned long ofs, len;
1685        int ret;
1686
1687        ofs = instr->addr;
1688        len = instr->len;
1689
1690        ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1691        if (ret)
1692                return ret;
1693
1694        instr->state = MTD_ERASE_DONE;
1695        mtd_erase_callback(instr);
1696
1697        return 0;
1698}
1699
1700
1701static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
1702{
1703        struct map_info *map = mtd->priv;
1704        struct cfi_private *cfi = map->fldrv_priv;
1705        int ret = 0;
1706
1707        if (instr->addr != 0)
1708                return -EINVAL;
1709
1710        if (instr->len != mtd->size)
1711                return -EINVAL;
1712
1713        ret = do_erase_chip(map, &cfi->chips[0]);
1714        if (ret)
1715                return ret;
1716
1717        instr->state = MTD_ERASE_DONE;
1718        mtd_erase_callback(instr);
1719
1720        return 0;
1721}
1722
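    /*
     * Atmel AT49BV6416 sector "softlock": locking reuses the erase
     * preamble and then writes 0x40 to the sector; unlocking is a single
     * unlock cycle followed by 0x70 to the sector.
     */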
1723static int do_atmel_lock(struct map_info *map, struct flchip *chip,
1724                         unsigned long adr, int len, void *thunk)
1725{
1726        struct cfi_private *cfi = map->fldrv_priv;
1727        int ret;
1728
1729        spin_lock(chip->mutex);
1730        ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
1731        if (ret)
1732                goto out_unlock;
1733        chip->state = FL_LOCKING;
1734
1735        DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): LOCK 0x%08lx len %d\n",
1736              __func__, adr, len);
1737
1738        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
1739                         cfi->device_type, NULL);
1740        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
1741                         cfi->device_type, NULL);
1742        cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi,
1743                         cfi->device_type, NULL);
1744        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
1745                         cfi->device_type, NULL);
1746        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
1747                         cfi->device_type, NULL);
1748        map_write(map, CMD(0x40), chip->start + adr);
1749
1750        chip->state = FL_READY;
1751        put_chip(map, chip, adr + chip->start);
1752        ret = 0;
1753
1754out_unlock:
1755        spin_unlock(chip->mutex);
1756        return ret;
1757}
1758
1759static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
1760                           unsigned long adr, int len, void *thunk)
1761{
1762        struct cfi_private *cfi = map->fldrv_priv;
1763        int ret;
1764
1765        spin_lock(chip->mutex);
1766        ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
1767        if (ret)
1768                goto out_unlock;
1769        chip->state = FL_UNLOCKING;
1770
1771        DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): UNLOCK 0x%08lx len %d\n",
1772              __func__, adr, len);
1773
1774        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
1775                         cfi->device_type, NULL);
1776        map_write(map, CMD(0x70), adr);
1777
1778        chip->state = FL_READY;
1779        put_chip(map, chip, adr + chip->start);
1780        ret = 0;
1781
1782out_unlock:
1783        spin_unlock(chip->mutex);
1784        return ret;
1785}
1786
1787static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1788{
1789        return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
1790}
1791
1792static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1793{
1794        return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
1795}
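    /*
     * These are wired up as mtd->lock/mtd->unlock by the AT49BV6416
     * fixup earlier in this file, so MEMLOCK/MEMUNLOCK requests reach
     * them through the generic MTD layer.
     */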
1796
1797
1798static void cfi_amdstd_sync (struct mtd_info *mtd)
1799{
1800        struct map_info *map = mtd->priv;
1801        struct cfi_private *cfi = map->fldrv_priv;
1802        int i;
1803        struct flchip *chip;
1804        int ret = 0;
1805        DECLARE_WAITQUEUE(wait, current);
1806
1807        for (i=0; !ret && i<cfi->numchips; i++) {
1808                chip = &cfi->chips[i];
1809
1810        retry:
1811                spin_lock(chip->mutex);
1812
1813                switch(chip->state) {
1814                case FL_READY:
1815                case FL_STATUS:
1816                case FL_CFI_QUERY:
1817                case FL_JEDEC_QUERY:
1818                        chip->oldstate = chip->state;
1819                        chip->state = FL_SYNCING;
1820                        /* No need to wake_up() on this state change -
1821                         * as the whole point is that nobody can do anything
1822                         * with the chip now anyway.
1823                         */
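                            /* Fall through - FL_SYNCING counts as idle here. */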
1824                case FL_SYNCING:
1825                        spin_unlock(chip->mutex);
1826                        break;
1827
1828                default:
1829                        /* Not an idle state */
1830                        set_current_state(TASK_UNINTERRUPTIBLE);
1831                        add_wait_queue(&chip->wq, &wait);
1832
1833                        spin_unlock(chip->mutex);
1834
1835                        schedule();
1836
1837                        remove_wait_queue(&chip->wq, &wait);
1838
1839                        goto retry;
1840                }
1841        }
1842
1843        /* Unlock the chips again */
1844
1845        for (i--; i >=0; i--) {
1846                chip = &cfi->chips[i];
1847
1848                spin_lock(chip->mutex);
1849
1850                if (chip->state == FL_SYNCING) {
1851                        chip->state = chip->oldstate;
1852                        wake_up(&chip->wq);
1853                }
1854                spin_unlock(chip->mutex);
1855        }
1856}
1857
1858
1859static int cfi_amdstd_suspend(struct mtd_info *mtd)
1860{
1861        struct map_info *map = mtd->priv;
1862        struct cfi_private *cfi = map->fldrv_priv;
1863        int i;
1864        struct flchip *chip;
1865        int ret = 0;
1866
1867        for (i=0; !ret && i<cfi->numchips; i++) {
1868                chip = &cfi->chips[i];
1869
1870                spin_lock(chip->mutex);
1871
1872                switch(chip->state) {
1873                case FL_READY:
1874                case FL_STATUS:
1875                case FL_CFI_QUERY:
1876                case FL_JEDEC_QUERY:
1877                        chip->oldstate = chip->state;
1878                        chip->state = FL_PM_SUSPENDED;
1879                        /* No need to wake_up() on this state change -
1880                         * as the whole point is that nobody can do anything
1881                         * with the chip now anyway.
1882                         */
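                            /* Fall through - already suspended is fine. */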
1883                case FL_PM_SUSPENDED:
1884                        break;
1885
1886                default:
1887                        ret = -EAGAIN;
1888                        break;
1889                }
1890                spin_unlock(chip->mutex);
1891        }
1892
1893        /* Unlock the chips again */
1894
1895        if (ret) {
1896                for (i--; i >=0; i--) {
1897                        chip = &cfi->chips[i];
1898
1899                        spin_lock(chip->mutex);
1900
1901                        if (chip->state == FL_PM_SUSPENDED) {
1902                                chip->state = chip->oldstate;
1903                                wake_up(&chip->wq);
1904                        }
1905                        spin_unlock(chip->mutex);
1906                }
1907        }
1908
1909        return ret;
1910}
1911
1912
1913static void cfi_amdstd_resume(struct mtd_info *mtd)
1914{
1915        struct map_info *map = mtd->priv;
1916        struct cfi_private *cfi = map->fldrv_priv;
1917        int i;
1918        struct flchip *chip;
1919
1920        for (i=0; i<cfi->numchips; i++) {
1921
1922                chip = &cfi->chips[i];
1923
1924                spin_lock(chip->mutex);
1925
1926                if (chip->state == FL_PM_SUSPENDED) {
1927                        chip->state = FL_READY;
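                            /* F0 (reset) puts the chip back into read-array
                               mode; after suspend/resume we can't assume it
                               is still there. */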
1928                        map_write(map, CMD(0xF0), chip->start);
1929                        wake_up(&chip->wq);
1930                }
1931                else
1932                        printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");
1933
1934                spin_unlock(chip->mutex);
1935        }
1936}
1937
1938static void cfi_amdstd_destroy(struct mtd_info *mtd)
1939{
1940        struct map_info *map = mtd->priv;
1941        struct cfi_private *cfi = map->fldrv_priv;
1942
1943        kfree(cfi->cmdset_priv);
1944        kfree(cfi->cfiq);
1945        kfree(cfi);
1946        kfree(mtd->eraseregions);
1947}
1948
1949MODULE_LICENSE("GPL");
1950MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
1951MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");
1952