linux/drivers/mtd/chips/cfi_cmdset_0002.c
/*
 * Common Flash Interface support:
 *   AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
 *
 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
 * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com>
 *
 * 2_by_8 routines added by Simon Munton
 *
 * 4_by_16 work by Carolyn J. Smith
 *
 * XIP support hooks by Vitaly Wool (based on code for Intel flash
 * by Nicolas Pitre)
 *
 * 25/09/2008 Christopher Moore: TopBottom fixup for many Macronix with CFI V1.0
 *
 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
 *
 * This code is GPL
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/xip.h>

#define AMD_BOOTLOC_BUG
#define FORCE_WORD_WRITE 0

#define MAX_WORD_RETRIES 3

#define SST49LF004B             0x0060
#define SST49LF040B             0x0050
#define SST49LF008A             0x005a
#define AT49BV6416              0x00d6

static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_amdstd_sync (struct mtd_info *);
static int cfi_amdstd_suspend (struct mtd_info *);
static void cfi_amdstd_resume (struct mtd_info *);
static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *);
static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);

static void cfi_amdstd_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"

static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);

static struct mtd_chip_driver cfi_amdstd_chipdrv = {
        .probe          = NULL, /* Not usable directly */
        .destroy        = cfi_amdstd_destroy,
        .name           = "cfi_cmdset_0002",
        .module         = THIS_MODULE
};


/* #define DEBUG_CFI_FEATURES */


#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_amdstd *extp)
{
        const char* erase_suspend[3] = {
                "Not supported", "Read only", "Read/write"
        };
        const char* top_bottom[6] = {
                "No WP", "8x8KiB sectors at top & bottom, no WP",
                "Bottom boot", "Top boot",
                "Uniform, Bottom WP", "Uniform, Top WP"
        };

        printk("  Silicon revision: %d\n", extp->SiliconRevision >> 1);
        printk("  Address sensitive unlock: %s\n",
               (extp->SiliconRevision & 1) ? "Not required" : "Required");

        if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
                printk("  Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
        else
                printk("  Erase Suspend: Unknown value %d\n", extp->EraseSuspend);

        if (extp->BlkProt == 0)
                printk("  Block protection: Not supported\n");
        else
                printk("  Block protection: %d sectors per group\n", extp->BlkProt);


        printk("  Temporary block unprotect: %s\n",
               extp->TmpBlkUnprotect ? "Supported" : "Not supported");
        printk("  Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
        printk("  Number of simultaneous operations: %d\n", extp->SimultaneousOps);
        printk("  Burst mode: %s\n",
               extp->BurstMode ? "Supported" : "Not supported");
        if (extp->PageMode == 0)
                printk("  Page mode: Not supported\n");
        else
                printk("  Page mode: %d word page\n", extp->PageMode << 2);

        printk("  Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
               extp->VppMin >> 4, extp->VppMin & 0xf);
        printk("  Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
               extp->VppMax >> 4, extp->VppMax & 0xf);

        if (extp->TopBottom < ARRAY_SIZE(top_bottom))
                printk("  Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
        else
                printk("  Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
}
#endif

#ifdef AMD_BOOTLOC_BUG
/* Wheee. Bring me the head of someone at AMD. */
static void fixup_amd_bootblock(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
        __u8 major = extp->MajorVersion;
        __u8 minor = extp->MinorVersion;

        if (((major << 8) | minor) < 0x3131) {
                /* CFI version 1.0 => don't trust bootloc */

                DEBUG(MTD_DEBUG_LEVEL1,
                        "%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n",
                        map->name, cfi->mfr, cfi->id);

                /* AFAICS all 29LV400 with a bottom boot block have a device ID
                 * of 0x22BA in 16-bit mode and 0xBA in 8-bit mode.
                 * These were badly detected as they have the 0x80 bit set
                 * so treat them as a special case.
                 */
                if (((cfi->id == 0xBA) || (cfi->id == 0x22BA)) &&

                        /* Macronix added CFI to their 2nd generation
                         * MX29LV400C B/T but AFAICS no other 29LV400 (AMD,
                         * Fujitsu, Spansion, EON, ESI and older Macronix)
                         * has CFI.
                         *
                         * Therefore also check the manufacturer.
                         * This reduces the risk of false detection due to
                         * the 8-bit device ID.
                         */
                        (cfi->mfr == CFI_MFR_MACRONIX)) {
                        DEBUG(MTD_DEBUG_LEVEL1,
                                "%s: Macronix MX29LV400C with bottom boot block"
                                " detected\n", map->name);
                        extp->TopBottom = 2;    /* bottom boot */
                } else
                if (cfi->id & 0x80) {
                        printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
                        extp->TopBottom = 3;    /* top boot */
                } else {
                        extp->TopBottom = 2;    /* bottom boot */
                }

                DEBUG(MTD_DEBUG_LEVEL1,
                        "%s: AMD CFI PRI V%c.%c has no boot block field;"
                        " deduced %s from Device ID\n", map->name, major, minor,
                        extp->TopBottom == 2 ? "bottom" : "top");
        }
}
#endif

static void fixup_use_write_buffers(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        if (cfi->cfiq->BufWriteTimeoutTyp) {
                DEBUG(MTD_DEBUG_LEVEL1, "Using buffer write method\n" );
                mtd->write = cfi_amdstd_write_buffers;
        }
}

/* Atmel chips don't use the same PRI format as AMD chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
        struct cfi_pri_atmel atmel_pri;

        memcpy(&atmel_pri, extp, sizeof(atmel_pri));
        memset((char *)extp + 5, 0, sizeof(*extp) - 5);

        if (atmel_pri.Features & 0x02)
                extp->EraseSuspend = 2;

        /* Some chips got it backwards... */
        if (cfi->id == AT49BV6416) {
                if (atmel_pri.BottomBoot)
                        extp->TopBottom = 3;
                else
                        extp->TopBottom = 2;
        } else {
                if (atmel_pri.BottomBoot)
                        extp->TopBottom = 2;
                else
                        extp->TopBottom = 3;
        }

        /* burst write mode not supported */
        cfi->cfiq->BufWriteTimeoutTyp = 0;
        cfi->cfiq->BufWriteTimeoutMax = 0;
}

static void fixup_use_secsi(struct mtd_info *mtd)
{
        /* Setup for chips with a secsi area */
        mtd->read_user_prot_reg = cfi_amdstd_secsi_read;
        mtd->read_fact_prot_reg = cfi_amdstd_secsi_read;
}

static void fixup_use_erase_chip(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        if ((cfi->cfiq->NumEraseRegions == 1) &&
                ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
                mtd->erase = cfi_amdstd_erase_chip;
        }

}

/*
 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors
 * locked by default.
 */
static void fixup_use_atmel_lock(struct mtd_info *mtd)
{
        mtd->lock = cfi_atmel_lock;
        mtd->unlock = cfi_atmel_unlock;
        mtd->flags |= MTD_POWERUP_LOCK;
}

static void fixup_old_sst_eraseregion(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        /*
         * These flashes report two separate eraseblock regions based on the
         * sector_erase-size and block_erase-size, although they both operate on the
         * same memory. This is not allowed according to CFI, so we just pick the
         * sector_erase-size.
         */
        cfi->cfiq->NumEraseRegions = 1;
}

static void fixup_sst39vf(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        fixup_old_sst_eraseregion(mtd);

        cfi->addr_unlock1 = 0x5555;
        cfi->addr_unlock2 = 0x2AAA;
}

static void fixup_sst39vf_rev_b(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        fixup_old_sst_eraseregion(mtd);

        cfi->addr_unlock1 = 0x555;
        cfi->addr_unlock2 = 0x2AA;

        cfi->sector_erase_cmd = CMD(0x50);
}

static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        fixup_sst39vf_rev_b(mtd);

        /*
         * CFI reports 1024 sectors (0x03ff+1) of 64KBytes (0x0100*256) where
         * it should report a size of 8KBytes (0x0020*256).
         */
        cfi->cfiq->EraseRegionInfo[0] = 0x002003ff;
        pr_warning("%s: Bad 38VF640x CFI data; adjusting sector size from 64 to 8KiB\n", mtd->name);
}

static void fixup_s29gl064n_sectors(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
                cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
                pr_warning("%s: Bad S29GL064N CFI data, adjust from 64 to 128 sectors\n", mtd->name);
        }
}

static void fixup_s29gl032n_sectors(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
                cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
                pr_warning("%s: Bad S29GL032N CFI data, adjust from 127 to 63 sectors\n", mtd->name);
        }
}
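
/*
 * A worked example of the EraseRegionInfo encoding that the fixups above
 * patch, following the decoding done in cfi_amdstd_setup() below: the low
 * 16 bits hold the number of sectors minus one, the high bits hold the
 * sector size in units of 256 bytes (times the interleave):
 *
 *      uint32_t info   = 0x002003ff;
 *      unsigned ernum  = (info & 0xffff) + 1;    // 0x0400 = 1024 sectors
 *      unsigned ersize = (info >> 8) & ~0xff;    // 0x2000 = 8KiB each
 *
 * So 0x002003ff describes 1024 sectors of 8KiB, which is exactly the value
 * fixup_sst38vf640x_sectorsize() installs.
 */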

/* Used to fix CFI-Tables of chips without Extended Query Tables */
static struct cfi_fixup cfi_nopri_fixup_table[] = {
        { CFI_MFR_SST, 0x234a, fixup_sst39vf }, /* SST39VF1602 */
        { CFI_MFR_SST, 0x234b, fixup_sst39vf }, /* SST39VF1601 */
        { CFI_MFR_SST, 0x235a, fixup_sst39vf }, /* SST39VF3202 */
        { CFI_MFR_SST, 0x235b, fixup_sst39vf }, /* SST39VF3201 */
        { CFI_MFR_SST, 0x235c, fixup_sst39vf_rev_b }, /* SST39VF3202B */
        { CFI_MFR_SST, 0x235d, fixup_sst39vf_rev_b }, /* SST39VF3201B */
        { CFI_MFR_SST, 0x236c, fixup_sst39vf_rev_b }, /* SST39VF6402B */
        { CFI_MFR_SST, 0x236d, fixup_sst39vf_rev_b }, /* SST39VF6401B */
        { 0, 0, NULL }
};

static struct cfi_fixup cfi_fixup_table[] = {
        { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
#ifdef AMD_BOOTLOC_BUG
        { CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock },
        { CFI_MFR_AMIC, CFI_ID_ANY, fixup_amd_bootblock },
        { CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock },
#endif
        { CFI_MFR_AMD, 0x0050, fixup_use_secsi },
        { CFI_MFR_AMD, 0x0053, fixup_use_secsi },
        { CFI_MFR_AMD, 0x0055, fixup_use_secsi },
        { CFI_MFR_AMD, 0x0056, fixup_use_secsi },
        { CFI_MFR_AMD, 0x005C, fixup_use_secsi },
        { CFI_MFR_AMD, 0x005F, fixup_use_secsi },
        { CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors },
        { CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors },
        { CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors },
        { CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors },
        { CFI_MFR_SST, 0x536a, fixup_sst38vf640x_sectorsize }, /* SST38VF6402 */
        { CFI_MFR_SST, 0x536b, fixup_sst38vf640x_sectorsize }, /* SST38VF6401 */
        { CFI_MFR_SST, 0x536c, fixup_sst38vf640x_sectorsize }, /* SST38VF6404 */
        { CFI_MFR_SST, 0x536d, fixup_sst38vf640x_sectorsize }, /* SST38VF6403 */
#if !FORCE_WORD_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
#endif
        { 0, 0, NULL }
};
static struct cfi_fixup jedec_fixup_table[] = {
        { CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock },
        { CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock },
        { CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock },
        { 0, 0, NULL }
};

static struct cfi_fixup fixup_table[] = {
        /* The CFI vendor IDs and the JEDEC vendor IDs appear to be
         * common, and it looks like the device IDs are as well.  This
         * table picks up all the cases where we know that to be true.
         */
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip },
        { CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock },
        { 0, 0, NULL }
};
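
/*
 * Each fixup table is walked by cfi_fixup(): every { manufacturer, device,
 * handler } entry whose IDs match the probed chip (CFI_MFR_ANY and
 * CFI_ID_ANY act as wildcards) has its handler called with the mtd_info.
 * A sketch of a new entry, with a hypothetical device ID and handler name,
 * placed before the terminating { 0, 0, NULL } sentinel:
 *
 *      { CFI_MFR_AMD, 0x1234, fixup_my_quirky_chip },
 */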


static void cfi_fixup_major_minor(struct cfi_private *cfi,
                                  struct cfi_pri_amdstd *extp)
{
        if (cfi->mfr == CFI_MFR_SAMSUNG) {
                if ((extp->MajorVersion == '0' && extp->MinorVersion == '0') ||
                    (extp->MajorVersion == '3' && extp->MinorVersion == '3')) {
                        /*
                         * Samsung K8P2815UQB and K8D6x16UxM chips
                         * report major=0 / minor=0.
                         * K8D3x16UxC chips report major=3 / minor=3.
                         */
                        printk(KERN_NOTICE "  Fixing Samsung's Amd/Fujitsu"
                               " Extended Query version to 1.%c\n",
                               extp->MinorVersion);
                        extp->MajorVersion = '1';
                }
        }

        /*
         * SST 38VF640x chips report major=0xFF / minor=0xFF.
         */
        if (cfi->mfr == CFI_MFR_SST && (cfi->id >> 4) == 0x0536) {
                extp->MajorVersion = '1';
                extp->MinorVersion = '0';
        }
}

struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct mtd_info *mtd;
        int i;

        mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
        if (!mtd) {
                printk(KERN_WARNING "Failed to allocate memory for MTD device\n");
                return NULL;
        }
        mtd->priv = map;
        mtd->type = MTD_NORFLASH;

        /* Fill in the default mtd operations */
        mtd->erase   = cfi_amdstd_erase_varsize;
        mtd->write   = cfi_amdstd_write_words;
        mtd->read    = cfi_amdstd_read;
        mtd->sync    = cfi_amdstd_sync;
        mtd->suspend = cfi_amdstd_suspend;
        mtd->resume  = cfi_amdstd_resume;
        mtd->flags   = MTD_CAP_NORFLASH;
        mtd->name    = map->name;
        mtd->writesize = 1;
        mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;

        DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): write buffer size %d\n",
                __func__, mtd->writebufsize);

        mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;

        if (cfi->cfi_mode==CFI_MODE_CFI){
                unsigned char bootloc;
                __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
                struct cfi_pri_amdstd *extp;

                extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
                if (extp) {
                        /*
                         * It's a real CFI chip, not one for which the probe
                         * routine faked a CFI structure.
                         */
                        cfi_fixup_major_minor(cfi, extp);

                        /*
                         * Valid primary extension versions are: 1.0, 1.1, 1.2, 1.3, 1.4, 1.5
                         * see: http://cs.ozerki.net/zap/pub/axim-x5/docs/cfi_r20.pdf, page 19
                         *      http://www.spansion.com/Support/AppNotes/cfi_100_20011201.pdf
                         *      http://www.spansion.com/Support/Datasheets/s29ws-p_00_a12_e.pdf
                         *      http://www.spansion.com/Support/Datasheets/S29GL_128S_01GS_00_02_e.pdf
                         */
                        if (extp->MajorVersion != '1' ||
                            (extp->MajorVersion == '1' && (extp->MinorVersion < '0' || extp->MinorVersion > '5'))) {
                                printk(KERN_ERR "  Unknown Amd/Fujitsu Extended Query "
                                       "version %c.%c (%#02x/%#02x).\n",
                                       extp->MajorVersion, extp->MinorVersion,
                                       extp->MajorVersion, extp->MinorVersion);
                                kfree(extp);
                                kfree(mtd);
                                return NULL;
                        }

                        printk(KERN_INFO "  Amd/Fujitsu Extended Query version %c.%c.\n",
                               extp->MajorVersion, extp->MinorVersion);

                        /* Install our own private info structure */
                        cfi->cmdset_priv = extp;

                        /* Apply cfi device specific fixups */
                        cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
                        /* Tell the user about it in lots of lovely detail */
                        cfi_tell_features(extp);
#endif

                        bootloc = extp->TopBottom;
                        if ((bootloc < 2) || (bootloc > 5)) {
                                printk(KERN_WARNING "%s: CFI contains unrecognised boot "
                                       "bank location (%d). Assuming bottom.\n",
                                       map->name, bootloc);
                                bootloc = 2;
                        }

                        if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
                                printk(KERN_WARNING "%s: Swapping erase regions for top-boot CFI table.\n", map->name);

                                for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
                                        int j = (cfi->cfiq->NumEraseRegions-1)-i;
                                        __u32 swap;

                                        swap = cfi->cfiq->EraseRegionInfo[i];
                                        cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
                                        cfi->cfiq->EraseRegionInfo[j] = swap;
                                }
                        }
                        /* Set the default CFI lock/unlock addresses */
                        cfi->addr_unlock1 = 0x555;
                        cfi->addr_unlock2 = 0x2aa;
                }
                cfi_fixup(mtd, cfi_nopri_fixup_table);

                if (!cfi->addr_unlock1 || !cfi->addr_unlock2) {
                        kfree(mtd);
                        return NULL;
                }

        } /* CFI mode */
        else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
                /* Apply jedec specific fixups */
                cfi_fixup(mtd, jedec_fixup_table);
        }
        /* Apply generic fixups */
        cfi_fixup(mtd, fixup_table);

        for (i=0; i< cfi->numchips; i++) {
                cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
                cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
                cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
                cfi->chips[i].ref_point_counter = 0;
                init_waitqueue_head(&(cfi->chips[i].wq));
        }

        map->fldrv = &cfi_amdstd_chipdrv;

        return cfi_amdstd_setup(mtd);
}
struct mtd_info *cfi_cmdset_0006(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
struct mtd_info *cfi_cmdset_0701(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0002);
EXPORT_SYMBOL_GPL(cfi_cmdset_0006);
EXPORT_SYMBOL_GPL(cfi_cmdset_0701);

static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
        unsigned long offset = 0;
        int i,j;

        printk(KERN_NOTICE "number of %s chips: %d\n",
               (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);
        /* Select the correct geometry setup */
        mtd->size = devsize * cfi->numchips;

        mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
        mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
                                    * mtd->numeraseregions, GFP_KERNEL);
        if (!mtd->eraseregions) {
                printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n");
                goto setup_err;
        }

        for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
                unsigned long ernum, ersize;
                ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
                ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

                if (mtd->erasesize < ersize) {
                        mtd->erasesize = ersize;
                }
                for (j=0; j<cfi->numchips; j++) {
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
                }
                offset += (ersize * ernum);
        }
        if (offset != devsize) {
                /* Argh */
                printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
                goto setup_err;
        }

        __module_get(THIS_MODULE);
        register_reboot_notifier(&mtd->reboot_notifier);
        return mtd;

 setup_err:
        kfree(mtd->eraseregions);
        kfree(mtd);
        kfree(cfi->cmdset_priv);
        kfree(cfi->cfiq);
        return NULL;
}

/*
 * Return true if the chip is ready.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and is indicated by no toggle bits toggling.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_ready(struct map_info *map, unsigned long addr)
{
        map_word d, t;

        d = map_read(map, addr);
        t = map_read(map, addr);

        return map_word_equal(map, d, t);
}
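
/*
 * A minimal polling sketch built on chip_ready(), assuming the caller
 * already holds chip->mutex and has issued a program or erase command.
 * The real loops below (see do_write_oneword()) additionally handle
 * suspension and use chip_good() for the final data verification:
 *
 *      timeo = jiffies + uWriteTimeout;
 *      for (;;) {
 *              if (chip_ready(map, adr))
 *                      break;                  // toggle bits stopped
 *              if (time_after(jiffies, timeo))
 *                      return -EIO;            // software timeout
 *              UDELAY(map, chip, adr, 1);      // drop lock, wait, retry
 *      }
 */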

/*
 * Return true if the chip is ready and has the correct value.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and it is indicated by no bits toggling.
 *
 * Errors are indicated by bits toggling, or by bits held at the wrong
 * value.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 *
 */
static int __xipram chip_good(struct map_info *map, unsigned long addr, map_word expected)
{
        map_word oldd, curd;

        oldd = map_read(map, addr);
        curd = map_read(map, addr);

        return  map_word_equal(map, oldd, curd) &&
                map_word_equal(map, curd, expected);
}

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        DECLARE_WAITQUEUE(wait, current);
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long timeo;
        struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;

 resettime:
        timeo = jiffies + HZ;
 retry:
        switch (chip->state) {

        case FL_STATUS:
                for (;;) {
                        if (chip_ready(map, adr))
                                break;

                        if (time_after(jiffies, timeo)) {
                                printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
                                return -EIO;
                        }
                        mutex_unlock(&chip->mutex);
                        cfi_udelay(1);
                        mutex_lock(&chip->mutex);
                        /* Someone else might have been playing with it. */
                        goto retry;
                }

        case FL_READY:
        case FL_CFI_QUERY:
        case FL_JEDEC_QUERY:
                return 0;

        case FL_ERASING:
                if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
                    !(mode == FL_READY || mode == FL_POINT ||
                    (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
                        goto sleep;

                /* We could check to see if we're trying to access the sector
                 * that is currently being erased. However, no user will try
                 * anything like that so we just wait for the timeout. */

                /* Erase suspend */
                /* It's harmless to issue the Erase-Suspend and Erase-Resume
                 * commands when the erase algorithm isn't in progress. */
                map_write(map, CMD(0xB0), chip->in_progress_block_addr);
                chip->oldstate = FL_ERASING;
                chip->state = FL_ERASE_SUSPENDING;
                chip->erase_suspended = 1;
                for (;;) {
                        if (chip_ready(map, adr))
                                break;

                        if (time_after(jiffies, timeo)) {
                                /* Should have suspended the erase by now.
                                 * Send an Erase-Resume command as either
                                 * there was an error (so leave the erase
                                 * routine to recover from it) or we are
                                 * trying to use the erase-in-progress sector. */
                                put_chip(map, chip, adr);
                                printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
                                return -EIO;
                        }

                        mutex_unlock(&chip->mutex);
                        cfi_udelay(1);
                        mutex_lock(&chip->mutex);
                        /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
                           So we can just loop here. */
                }
                chip->state = FL_READY;
                return 0;

        case FL_XIP_WHILE_ERASING:
                if (mode != FL_READY && mode != FL_POINT &&
                    (!cfip || !(cfip->EraseSuspend&2)))
                        goto sleep;
                chip->oldstate = chip->state;
                chip->state = FL_READY;
                return 0;

        case FL_SHUTDOWN:
                /* The machine is rebooting */
                return -EIO;

        case FL_POINT:
                /* Only if there's no operation suspended... */
                if (mode == FL_READY && chip->oldstate == FL_READY)
                        return 0;

        default:
        sleep:
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);
                mutex_unlock(&chip->mutex);
                schedule();
                remove_wait_queue(&chip->wq, &wait);
                mutex_lock(&chip->mutex);
                goto resettime;
        }
}


static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;

        switch(chip->oldstate) {
        case FL_ERASING:
                map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr);
                chip->oldstate = FL_READY;
                chip->state = FL_ERASING;
                break;

        case FL_XIP_WHILE_ERASING:
                chip->state = chip->oldstate;
                chip->oldstate = FL_READY;
                break;

        case FL_READY:
        case FL_STATUS:
                /* We should really make set_vpp() count, rather than doing this */
                DISABLE_VPP(map);
                break;
        default:
                printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
        }
        wake_up(&chip->wq);
}
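
/*
 * The canonical bracket around any flash access, as used by the read,
 * write and erase paths below (a sketch, error handling elided):
 *
 *      mutex_lock(&chip->mutex);
 *      ret = get_chip(map, chip, adr, FL_READY);  // wait/suspend as needed
 *      if (ret) {
 *              mutex_unlock(&chip->mutex);
 *              return ret;
 *      }
 *      ... access the flash ...
 *      put_chip(map, chip, adr);                  // resume a suspended erase
 *      mutex_unlock(&chip->mutex);
 */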

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
                        unsigned long adr)
{
        /* TODO: chips with no XIP use should ignore and return */
        (void) map_read(map, adr); /* ensure mmu mapping is up to date */
        local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
                                unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;

        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xf0), adr);
                chip->state = FL_READY;
        }
        (void) map_read(map, adr);
        xip_iprefetch();
        local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function polls for both the given timeout and pending
 * (but still masked) hardware interrupts.  Whenever an interrupt is
 * pending, the flash erase operation is suspended, array mode restored
 * and interrupts unmasked.  Task scheduling might also happen at that
 * point.  The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the
 * remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
                                unsigned long adr, int usec)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
        map_word status, OK = CMD(0x80);
        unsigned long suspended, start = xip_currtime();
        flstate_t oldstate;

        do {
                cpu_relax();
                if (xip_irqpending() && extp &&
                    ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
                    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
                        /*
                         * Let's suspend the erase operation when supported.
                         * Note that we currently don't try to suspend
                         * interleaved chips if there is already another
                         * operation suspended (imagine what happens
                         * when one chip was already done with the current
                         * operation while another chip suspended it, then
                         * we resume the whole thing at once).  Yes, it
                         * can happen!
                         */
                        map_write(map, CMD(0xb0), adr);
                        usec -= xip_elapsed_since(start);
                        suspended = xip_currtime();
                        do {
                                if (xip_elapsed_since(suspended) > 100000) {
                                        /*
                                         * The chip doesn't want to suspend
                                         * after waiting for 100 msecs.
                                         * This is a critical error but there
                                         * is not much we can do here.
                                         */
                                        return;
                                }
                                status = map_read(map, adr);
                        } while (!map_word_andequal(map, status, OK, OK));

                        /* Suspend succeeded */
                        oldstate = chip->state;
                        if (!map_word_bitsset(map, status, CMD(0x40)))
                                break;
                        chip->state = FL_XIP_WHILE_ERASING;
                        chip->erase_suspended = 1;
                        map_write(map, CMD(0xf0), adr);
                        (void) map_read(map, adr);
                        xip_iprefetch();
                        local_irq_enable();
                        mutex_unlock(&chip->mutex);
                        xip_iprefetch();
                        cond_resched();

                        /*
                         * We're back.  However someone else might have
                         * decided to go write to the chip if we are in
                         * a suspended erase state.  If so let's wait
                         * until it's done.
                         */
                        mutex_lock(&chip->mutex);
                        while (chip->state != FL_XIP_WHILE_ERASING) {
                                DECLARE_WAITQUEUE(wait, current);
                                set_current_state(TASK_UNINTERRUPTIBLE);
                                add_wait_queue(&chip->wq, &wait);
                                mutex_unlock(&chip->mutex);
                                schedule();
                                remove_wait_queue(&chip->wq, &wait);
                                mutex_lock(&chip->mutex);
                        }
                        /* Disallow XIP again */
                        local_irq_disable();

                        /* Resume the write or erase operation */
                        map_write(map, cfi->sector_erase_cmd, adr);
                        chip->state = oldstate;
                        start = xip_currtime();
                } else if (usec >= 1000000/HZ) {
                        /*
                         * Try to save on CPU power when waiting delay
                         * is at least a system timer tick period.
                         * No need to be extremely accurate here.
                         */
                        xip_cpu_idle();
                }
                status = map_read(map, adr);
        } while (!map_word_andequal(map, status, OK, OK)
                 && xip_elapsed_since(start) < usec);
}

#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVALIDATE_CACHE_UDELAY.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
        INVALIDATE_CACHED_RANGE(map, from, size)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
        UDELAY(map, chip, adr, usec)

/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit.  For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with the
 * same chip state as it was entered with.  This is why there is no concern
 * about the add_wait_queue() or schedule() calls appearing within a couple
 * of xip_disable()'d areas of code, like in do_erase_oneblock for example.
 * The queueing and scheduling always happen within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*), where the flash is
 * in array mode, so many of the cases therein are never executed and XIP
 * is not disturbed.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)

#define UDELAY(map, chip, adr, usec)  \
do {  \
        mutex_unlock(&chip->mutex);  \
        cfi_udelay(usec);  \
        mutex_lock(&chip->mutex);  \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
do {  \
        mutex_unlock(&chip->mutex);  \
        INVALIDATE_CACHED_RANGE(map, adr, len);  \
        cfi_udelay(usec);  \
        mutex_lock(&chip->mutex);  \
} while (0)

#endif
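
/*
 * Note that both variants of UDELAY() and INVALIDATE_CACHE_UDELAY() expect
 * to be entered with chip->mutex held: the non-XIP versions drop the mutex
 * for the duration of the delay so that other users of the chip can make
 * progress, then reacquire it.  A typical call site (a sketch mirroring
 * do_write_oneword() below):
 *
 *      map_write(map, datum, adr);
 *      chip->state = FL_WRITING;
 *      INVALIDATE_CACHE_UDELAY(map, chip, adr, map_bankwidth(map),
 *                              chip->word_write_time);
 */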

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
        unsigned long cmd_addr;
        struct cfi_private *cfi = map->fldrv_priv;
        int ret;

        adr += chip->start;

        /* Ensure cmd read/writes are aligned. */
        cmd_addr = adr & ~(map_bankwidth(map)-1);

        mutex_lock(&chip->mutex);
        ret = get_chip(map, chip, cmd_addr, FL_READY);
        if (ret) {
                mutex_unlock(&chip->mutex);
                return ret;
        }

        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xf0), cmd_addr);
                chip->state = FL_READY;
        }

        map_copy_from(map, buf, adr, len);

        put_chip(map, chip, cmd_addr);

        mutex_unlock(&chip->mutex);
        return 0;
}


static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long ofs;
        int chipnum;
        int ret = 0;

        /* ofs: offset within the first chip that the first read should start */

        chipnum = (from >> cfi->chipshift);
        ofs = from - (chipnum <<  cfi->chipshift);


        *retlen = 0;

        while (len) {
                unsigned long thislen;

                if (chipnum >= cfi->numchips)
                        break;

                if ((len + ofs -1) >> cfi->chipshift)
                        thislen = (1<<cfi->chipshift) - ofs;
                else
                        thislen = len;

                ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
                if (ret)
                        break;

                *retlen += thislen;
                len -= thislen;
                buf += thislen;

                ofs = 0;
                chipnum++;
        }
        return ret;
}

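/*
 * Reading the SecSi (security silicon) region: the chip is switched into
 * SecSi mode with the unlock cycle plus command 0x88, read like ordinary
 * memory, then returned to array mode with the unlock cycle plus 0x90,
 * 0x00 (the exit sequence for this mode).
 */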
static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
        DECLARE_WAITQUEUE(wait, current);
        unsigned long timeo = jiffies + HZ;
        struct cfi_private *cfi = map->fldrv_priv;

 retry:
        mutex_lock(&chip->mutex);

        if (chip->state != FL_READY){
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);

                mutex_unlock(&chip->mutex);

                schedule();
                remove_wait_queue(&chip->wq, &wait);
                timeo = jiffies + HZ;

                goto retry;
        }

        adr += chip->start;

        chip->state = FL_READY;

        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

        map_copy_from(map, buf, adr, len);

        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

        wake_up(&chip->wq);
        mutex_unlock(&chip->mutex);

        return 0;
}

static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long ofs;
        int chipnum;
        int ret = 0;


        /* ofs: offset within the first chip that the first read should start */

        /* 8 secsi bytes per chip */
        chipnum=from>>3;
        ofs=from & 7;


        *retlen = 0;

        while (len) {
                unsigned long thislen;

                if (chipnum >= cfi->numchips)
                        break;

                if ((len + ofs -1) >> 3)
                        thislen = (1<<3) - ofs;
                else
                        thislen = len;

                ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
                if (ret)
                        break;

                *retlen += thislen;
                len -= thislen;
                buf += thislen;

                ofs = 0;
                chipnum++;
        }
        return ret;
}


static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum)
{
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long timeo = jiffies + HZ;
        /*
         * We use a 1ms + 1 jiffies generic timeout for writes (most devices
         * have a max write time of a few hundred usecs). However, we should
         * use the maximum timeout value given by the chip at probe time
         * instead.  Unfortunately, struct flchip does not have a field for
         * the maximum timeout, only for the typical one, which can be far
         * too short depending on the conditions.  The ' + 1' is to avoid
         * having a timeout of 0 jiffies if HZ is smaller than 1000.
         */
        unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
        int ret = 0;
        map_word oldd;
        int retry_cnt = 0;

        adr += chip->start;

        mutex_lock(&chip->mutex);
        ret = get_chip(map, chip, adr, FL_WRITING);
        if (ret) {
                mutex_unlock(&chip->mutex);
                return ret;
        }

        DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
               __func__, adr, datum.x[0] );

        /*
         * Check for a NOP for the case when the datum to write is already
         * present - it saves time and works around buggy chips that corrupt
         * data at other locations when 0xff is written to a location that
         * already contains 0xff.
         */
        oldd = map_read(map, adr);
        if (map_word_equal(map, oldd, datum)) {
                DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): NOP\n",
                       __func__);
                goto op_done;
        }

        XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
        ENABLE_VPP(map);
        xip_disable(map, chip, adr);
 retry:
        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
        map_write(map, datum, adr);
        chip->state = FL_WRITING;

        INVALIDATE_CACHE_UDELAY(map, chip,
                                adr, map_bankwidth(map),
                                chip->word_write_time);

        /* See comment above for timeout value. */
        timeo = jiffies + uWriteTimeout;
        for (;;) {
                if (chip->state != FL_WRITING) {
                        /* Someone's suspended the write. Sleep */
                        DECLARE_WAITQUEUE(wait, current);

                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        mutex_unlock(&chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        timeo = jiffies + (HZ / 2); /* FIXME */
                        mutex_lock(&chip->mutex);
                        continue;
                }

                if (time_after(jiffies, timeo) && !chip_ready(map, adr)){
                        xip_enable(map, chip, adr);
                        printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
                        xip_disable(map, chip, adr);
                        break;
                }

                if (chip_ready(map, adr))
                        break;

                /* Latency issues. Drop the lock, wait a while and retry */
                UDELAY(map, chip, adr, 1);
        }
        /* Did we succeed? */
        if (!chip_good(map, adr, datum)) {
                /* reset on all failures. */
                map_write( map, CMD(0xF0), chip->start );
                /* FIXME - should have reset delay before continuing */

                if (++retry_cnt <= MAX_WORD_RETRIES)
                        goto retry;

                ret = -EIO;
        }
        xip_enable(map, chip, adr);
 op_done:
        chip->state = FL_READY;
        put_chip(map, chip, adr);
        mutex_unlock(&chip->mutex);

        return ret;
}
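
/*
 * On a failed word program, the chip is sent a reset (0xF0) and the whole
 * unlock + program sequence above is retried, bounded by MAX_WORD_RETRIES;
 * only after the retries are exhausted does the write path report -EIO to
 * the caller.
 */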


static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
                                  size_t *retlen, const u_char *buf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        int ret = 0;
        int chipnum;
        unsigned long ofs, chipstart;
        DECLARE_WAITQUEUE(wait, current);

        *retlen = 0;
        if (!len)
                return 0;

        chipnum = to >> cfi->chipshift;
        ofs = to  - (chipnum << cfi->chipshift);
        chipstart = cfi->chips[chipnum].start;

        /* If it's not bus-aligned, do the first byte write */
        if (ofs & (map_bankwidth(map)-1)) {
                unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
                int i = ofs - bus_ofs;
                int n = 0;
                map_word tmp_buf;

 retry:
                mutex_lock(&cfi->chips[chipnum].mutex);

                if (cfi->chips[chipnum].state != FL_READY) {
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&cfi->chips[chipnum].wq, &wait);

                        mutex_unlock(&cfi->chips[chipnum].mutex);

                        schedule();
                        remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
                        goto retry;
                }

                /* Load 'tmp_buf' with old contents of flash */
                tmp_buf = map_read(map, bus_ofs+chipstart);

                mutex_unlock(&cfi->chips[chipnum].mutex);

                /* Number of bytes to copy from buffer */
                n = min_t(int, len, map_bankwidth(map)-i);

                tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);

                ret = do_write_oneword(map, &cfi->chips[chipnum],
                                       bus_ofs, tmp_buf);
                if (ret)
                        return ret;

                ofs += n;
                buf += n;
                (*retlen) += n;
                len -= n;

                if (ofs >> cfi->chipshift) {
                        chipnum ++;
                        ofs = 0;
                        if (chipnum == cfi->numchips)
                                return 0;
                }
        }

        /* We are now aligned, write as much as possible */
        while(len >= map_bankwidth(map)) {
                map_word datum;

                datum = map_word_load(map, buf);

                ret = do_write_oneword(map, &cfi->chips[chipnum],
                                       ofs, datum);
                if (ret)
                        return ret;

                ofs += map_bankwidth(map);
                buf += map_bankwidth(map);
                (*retlen) += map_bankwidth(map);
                len -= map_bankwidth(map);

                if (ofs >> cfi->chipshift) {
                        chipnum ++;
                        ofs = 0;
                        if (chipnum == cfi->numchips)
                                return 0;
                        chipstart = cfi->chips[chipnum].start;
                }
        }

        /* Write the trailing bytes if any */
        if (len & (map_bankwidth(map)-1)) {
                map_word tmp_buf;

 retry1:
                mutex_lock(&cfi->chips[chipnum].mutex);

                if (cfi->chips[chipnum].state != FL_READY) {
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&cfi->chips[chipnum].wq, &wait);

                        mutex_unlock(&cfi->chips[chipnum].mutex);

                        schedule();
                        remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
                        goto retry1;
                }

                tmp_buf = map_read(map, ofs + chipstart);

                mutex_unlock(&cfi->chips[chipnum].mutex);

                tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);

                ret = do_write_oneword(map, &cfi->chips[chipnum],
                                ofs, tmp_buf);
                if (ret)
                        return ret;

                (*retlen) += len;
        }

        return 0;
}


/*
 * FIXME: interleaved mode not tested, and probably not supported!
 */
static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
                                    unsigned long adr, const u_char *buf,
                                    int len)
{
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long timeo = jiffies + HZ;
1384        /* see comments in do_write_oneword() regarding uWriteTimeo. */
1385        unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
1386        int ret = -EIO;
1387        unsigned long cmd_adr;
1388        int z, words;
1389        map_word datum;
1390
1391        adr += chip->start;
1392        cmd_adr = adr;
1393
1394        mutex_lock(&chip->mutex);
1395        ret = get_chip(map, chip, adr, FL_WRITING);
1396        if (ret) {
1397                mutex_unlock(&chip->mutex);
1398                return ret;
1399        }
1400
1401        datum = map_word_load(map, buf);
1402
1403        DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
1404               __func__, adr, datum.x[0] );
1405
1406        XIP_INVAL_CACHED_RANGE(map, adr, len);
1407        ENABLE_VPP(map);
1408        xip_disable(map, chip, cmd_adr);
1409
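        /* Standard two-cycle unlock sequence precedes the buffer-load command */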
        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);

        /* Write Buffer Load */
        map_write(map, CMD(0x25), cmd_adr);

        chip->state = FL_WRITING_TO_BUFFER;

        /* Write length of data to come, encoded as the word count minus one */
        words = len / map_bankwidth(map);
        map_write(map, CMD(words - 1), cmd_adr);
        /* Write data */
        z = 0;
        while (z < words * map_bankwidth(map)) {
                datum = map_word_load(map, buf);
                map_write(map, datum, adr + z);

                z += map_bankwidth(map);
                buf += map_bankwidth(map);
        }
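        /* Leave adr pointing at the last word written; completion is polled there */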
        z -= map_bankwidth(map);

        adr += z;

        /* Write Buffer Program Confirm: GO GO GO */
        map_write(map, CMD(0x29), cmd_adr);
        chip->state = FL_WRITING;

        INVALIDATE_CACHE_UDELAY(map, chip,
                                adr, map_bankwidth(map),
                                chip->word_write_time);

        timeo = jiffies + uWriteTimeout;

        for (;;) {
                if (chip->state != FL_WRITING) {
                        /* Someone's suspended the write. Sleep */
                        DECLARE_WAITQUEUE(wait, current);

                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        mutex_unlock(&chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        timeo = jiffies + (HZ / 2); /* FIXME */
                        mutex_lock(&chip->mutex);
                        continue;
                }

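                /*
                 * Re-check readiness after the deadline so that being
                 * scheduled away past the timeout isn't reported as a
                 * failure when the write actually completed.
                 */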
                if (time_after(jiffies, timeo) && !chip_ready(map, adr))
                        break;

                if (chip_ready(map, adr)) {
                        xip_enable(map, chip, adr);
                        goto op_done;
                }

                /* Latency issues. Drop the lock, wait a while and retry */
                UDELAY(map, chip, adr, 1);
        }

        /* reset on all failures. */
        map_write(map, CMD(0xF0), chip->start);
        xip_enable(map, chip, adr);
        /* FIXME - should have reset delay before continuing */

        printk(KERN_WARNING "MTD %s(): software timeout\n",
               __func__);

        ret = -EIO;
 op_done:
        chip->state = FL_READY;
        put_chip(map, chip, adr);
        mutex_unlock(&chip->mutex);

        return ret;
}


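/*
 * Write out a whole request: program the unaligned head and the sub-word
 * tail with single word writes, and everything in between with buffered
 * writes that never cross a write-buffer boundary.
 */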
static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
                                    size_t *retlen, const u_char *buf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
        int ret = 0;
        int chipnum;
        unsigned long ofs;

        *retlen = 0;
        if (!len)
                return 0;

        chipnum = to >> cfi->chipshift;
        ofs = to - (chipnum << cfi->chipshift);

        /* If it's not bus-aligned, write the unaligned head first */
        if (ofs & (map_bankwidth(map)-1)) {
                size_t local_len = (-ofs) & (map_bankwidth(map)-1);
                if (local_len > len)
                        local_len = len;
                ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
                                             local_len, retlen, buf);
                if (ret)
                        return ret;
                ofs += local_len;
                buf += local_len;
                len -= local_len;

                if (ofs >> cfi->chipshift) {
                        chipnum++;
                        ofs = 0;
                        if (chipnum == cfi->numchips)
                                return 0;
                }
        }

        /* The write buffer is worth it only if there's more than one word to write */
        while (len >= map_bankwidth(map) * 2) {
                /* We must not cross write block boundaries */
                int size = wbufsize - (ofs & (wbufsize-1));

                if (size > len)
                        size = len;
                if (size % map_bankwidth(map))
                        size -= size % map_bankwidth(map);

                ret = do_write_buffer(map, &cfi->chips[chipnum],
                                      ofs, buf, size);
                if (ret)
                        return ret;

                ofs += size;
                buf += size;
                (*retlen) += size;
                len -= size;

                if (ofs >> cfi->chipshift) {
                        chipnum++;
                        ofs = 0;
                        if (chipnum == cfi->numchips)
                                return 0;
                }
        }

        if (len) {
                size_t retlen_dregs = 0;

                ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
                                             len, &retlen_dregs, buf);

                *retlen += retlen_dregs;
                return ret;
        }

        return 0;
}


/*
 * Handle devices with one erase region, that only implement
 * the chip erase command.
 */
static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
{
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long timeo = jiffies + HZ;
        unsigned long adr;
        DECLARE_WAITQUEUE(wait, current);
        int ret = 0;

        adr = cfi->addr_unlock1;

        mutex_lock(&chip->mutex);
        ret = get_chip(map, chip, adr, FL_WRITING);
        if (ret) {
                mutex_unlock(&chip->mutex);
                return ret;
        }

        DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
              __func__, chip->start);

        XIP_INVAL_CACHED_RANGE(map, adr, map->size);
        ENABLE_VPP(map);
        xip_disable(map, chip, adr);

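        /*
         * Chip erase is a six-cycle JEDEC sequence: unlock, erase setup
         * (0x80), unlock again, then the chip erase command (0x10).
         */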
        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

        chip->state = FL_ERASING;
        chip->erase_suspended = 0;
        chip->in_progress_block_addr = adr;

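        /* Let the erase run for roughly half its typical time before polling */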
        INVALIDATE_CACHE_UDELAY(map, chip,
                                adr, map->size,
                                chip->erase_time*500);

        timeo = jiffies + (HZ*20);

        for (;;) {
                if (chip->state != FL_ERASING) {
                        /* Someone's suspended the erase. Sleep */
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        mutex_unlock(&chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        mutex_lock(&chip->mutex);
                        continue;
                }
                if (chip->erase_suspended) {
                        /* This erase was suspended and resumed.
                           Adjust the timeout */
                        timeo = jiffies + (HZ*20); /* FIXME */
                        chip->erase_suspended = 0;
                }

                if (chip_ready(map, adr))
                        break;

                if (time_after(jiffies, timeo)) {
                        printk(KERN_WARNING "MTD %s(): software timeout\n",
                               __func__);
                        break;
                }

                /* Latency issues. Drop the lock, wait a while and retry */
                UDELAY(map, chip, adr, 1000000/HZ);
        }
        /* Did we succeed? */
        if (!chip_good(map, adr, map_word_ff(map))) {
                /* reset on all failures. */
                map_write(map, CMD(0xF0), chip->start);
                /* FIXME - should have reset delay before continuing */

                ret = -EIO;
        }

        chip->state = FL_READY;
        xip_enable(map, chip, adr);
        put_chip(map, chip, adr);
        mutex_unlock(&chip->mutex);

        return ret;
}


static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
{
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long timeo = jiffies + HZ;
        DECLARE_WAITQUEUE(wait, current);
        int ret = 0;

        adr += chip->start;

        mutex_lock(&chip->mutex);
        ret = get_chip(map, chip, adr, FL_ERASING);
        if (ret) {
                mutex_unlock(&chip->mutex);
                return ret;
        }

        DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
              __func__, adr);

        XIP_INVAL_CACHED_RANGE(map, adr, len);
        ENABLE_VPP(map);
        xip_disable(map, chip, adr);

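        /*
         * Same six-cycle sequence as chip erase, except the final cycle
         * writes the sector erase command to the target sector's address.
         */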
        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
        map_write(map, cfi->sector_erase_cmd, adr);

        chip->state = FL_ERASING;
        chip->erase_suspended = 0;
        chip->in_progress_block_addr = adr;

        INVALIDATE_CACHE_UDELAY(map, chip,
                                adr, len,
                                chip->erase_time*500);

        timeo = jiffies + (HZ*20);

        for (;;) {
                if (chip->state != FL_ERASING) {
                        /* Someone's suspended the erase. Sleep */
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        mutex_unlock(&chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        mutex_lock(&chip->mutex);
                        continue;
                }
                if (chip->erase_suspended) {
                        /* This erase was suspended and resumed.
                           Adjust the timeout */
                        timeo = jiffies + (HZ*20); /* FIXME */
                        chip->erase_suspended = 0;
                }

                if (chip_ready(map, adr)) {
                        xip_enable(map, chip, adr);
                        break;
                }

                if (time_after(jiffies, timeo)) {
                        xip_enable(map, chip, adr);
                        printk(KERN_WARNING "MTD %s(): software timeout\n",
                               __func__);
                        break;
                }

                /* Latency issues. Drop the lock, wait a while and retry */
                UDELAY(map, chip, adr, 1000000/HZ);
        }
        /* Did we succeed? */
        if (!chip_good(map, adr, map_word_ff(map))) {
                /* reset on all failures. */
                map_write(map, CMD(0xF0), chip->start);
                /* FIXME - should have reset delay before continuing */

                ret = -EIO;
        }

        chip->state = FL_READY;
        put_chip(map, chip, adr);
        mutex_unlock(&chip->mutex);
        return ret;
}


static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
        unsigned long ofs, len;
        int ret;

        ofs = instr->addr;
        len = instr->len;

        ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
        if (ret)
                return ret;

        instr->state = MTD_ERASE_DONE;
        mtd_erase_callback(instr);

        return 0;
}


static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        int ret = 0;

        if (instr->addr != 0)
                return -EINVAL;

        if (instr->len != mtd->size)
                return -EINVAL;

        ret = do_erase_chip(map, &cfi->chips[0]);
        if (ret)
                return ret;

        instr->state = MTD_ERASE_DONE;
        mtd_erase_callback(instr);

        return 0;
}

static int do_atmel_lock(struct map_info *map, struct flchip *chip,
                         unsigned long adr, int len, void *thunk)
{
        struct cfi_private *cfi = map->fldrv_priv;
        int ret;

        mutex_lock(&chip->mutex);
        ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
        if (ret)
                goto out_unlock;
        chip->state = FL_LOCKING;

        DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): LOCK 0x%08lx len %d\n",
              __func__, adr, len);

        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
                         cfi->device_type, NULL);
        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
                         cfi->device_type, NULL);
        cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi,
                         cfi->device_type, NULL);
        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
                         cfi->device_type, NULL);
        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
                         cfi->device_type, NULL);
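        /* Final cycle: write the sector lock command to the sector itself */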
        map_write(map, CMD(0x40), chip->start + adr);

        chip->state = FL_READY;
        put_chip(map, chip, adr + chip->start);
        ret = 0;

out_unlock:
        mutex_unlock(&chip->mutex);
        return ret;
}

static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
                           unsigned long adr, int len, void *thunk)
{
        struct cfi_private *cfi = map->fldrv_priv;
        int ret;

        mutex_lock(&chip->mutex);
        ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
        if (ret)
                goto out_unlock;
        chip->state = FL_UNLOCKING;

        DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): UNLOCK 0x%08lx len %d\n",
              __func__, adr, len);

        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
                         cfi->device_type, NULL);
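        /* A single unlock cycle, then the sector unlock command at the sector */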
        map_write(map, CMD(0x70), adr);

        chip->state = FL_READY;
        put_chip(map, chip, adr + chip->start);
        ret = 0;

out_unlock:
        mutex_unlock(&chip->mutex);
        return ret;
}

static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
        return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
}

static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
        return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
}


static void cfi_amdstd_sync(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        int i;
        struct flchip *chip;
        int ret = 0;
        DECLARE_WAITQUEUE(wait, current);

        for (i = 0; !ret && i < cfi->numchips; i++) {
                chip = &cfi->chips[i];

        retry:
                mutex_lock(&chip->mutex);

                switch (chip->state) {
                case FL_READY:
                case FL_STATUS:
                case FL_CFI_QUERY:
                case FL_JEDEC_QUERY:
                        chip->oldstate = chip->state;
                        chip->state = FL_SYNCING;
                        /* No need to wake_up() on this state change -
                         * as the whole point is that nobody can do anything
                         * with the chip now anyway.
                         */
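                        /* fall through */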
                case FL_SYNCING:
                        mutex_unlock(&chip->mutex);
                        break;

                default:
                        /* Not an idle state */
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);

                        mutex_unlock(&chip->mutex);

                        schedule();

                        remove_wait_queue(&chip->wq, &wait);

                        goto retry;
                }
        }

        /* Unlock the chips again */

        for (i--; i >= 0; i--) {
                chip = &cfi->chips[i];

                mutex_lock(&chip->mutex);

                if (chip->state == FL_SYNCING) {
                        chip->state = chip->oldstate;
                        wake_up(&chip->wq);
                }
                mutex_unlock(&chip->mutex);
        }
}


static int cfi_amdstd_suspend(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        int i;
        struct flchip *chip;
        int ret = 0;

        for (i = 0; !ret && i < cfi->numchips; i++) {
                chip = &cfi->chips[i];

                mutex_lock(&chip->mutex);

                switch (chip->state) {
                case FL_READY:
                case FL_STATUS:
                case FL_CFI_QUERY:
                case FL_JEDEC_QUERY:
                        chip->oldstate = chip->state;
                        chip->state = FL_PM_SUSPENDED;
                        /* No need to wake_up() on this state change -
                         * as the whole point is that nobody can do anything
                         * with the chip now anyway.
                         */
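                        /* fall through */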
                case FL_PM_SUSPENDED:
                        break;

                default:
                        ret = -EAGAIN;
                        break;
                }
                mutex_unlock(&chip->mutex);
        }

        /* Unlock the chips again */

        if (ret) {
                for (i--; i >= 0; i--) {
                        chip = &cfi->chips[i];

                        mutex_lock(&chip->mutex);

                        if (chip->state == FL_PM_SUSPENDED) {
                                chip->state = chip->oldstate;
                                wake_up(&chip->wq);
                        }
                        mutex_unlock(&chip->mutex);
                }
        }

        return ret;
}


static void cfi_amdstd_resume(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        int i;
        struct flchip *chip;

        for (i = 0; i < cfi->numchips; i++) {

                chip = &cfi->chips[i];

                mutex_lock(&chip->mutex);

                if (chip->state == FL_PM_SUSPENDED) {
                        chip->state = FL_READY;
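                        /* Drop back into read array mode on the way out of suspend */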
                        map_write(map, CMD(0xF0), chip->start);
                        wake_up(&chip->wq);
                } else
                        printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");

                mutex_unlock(&chip->mutex);
        }
}


/*
 * Ensure that the flash device is put back into read array mode before
 * unloading the driver or rebooting.  On some systems, rebooting while
 * the flash is in query/program/erase mode will prevent the CPU from
 * fetching the bootloader code, requiring a hard reset or power cycle.
 */
static int cfi_amdstd_reset(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        int i, ret;
        struct flchip *chip;

        for (i = 0; i < cfi->numchips; i++) {

                chip = &cfi->chips[i];

                mutex_lock(&chip->mutex);

                ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
                if (!ret) {
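                        /* 0xF0 is the read/reset command: return to read array mode */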
                        map_write(map, CMD(0xF0), chip->start);
                        chip->state = FL_SHUTDOWN;
                        put_chip(map, chip, chip->start);
                }

                mutex_unlock(&chip->mutex);
        }

        return 0;
}


static int cfi_amdstd_reboot(struct notifier_block *nb, unsigned long val,
                             void *v)
{
        struct mtd_info *mtd;

        mtd = container_of(nb, struct mtd_info, reboot_notifier);
        cfi_amdstd_reset(mtd);
        return NOTIFY_DONE;
}


static void cfi_amdstd_destroy(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        cfi_amdstd_reset(mtd);
        unregister_reboot_notifier(&mtd->reboot_notifier);
        kfree(cfi->cmdset_priv);
        kfree(cfi->cfiq);
        kfree(cfi);
        kfree(mtd->eraseregions);
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");
MODULE_ALIAS("cfi_cmdset_0006");
MODULE_ALIAS("cfi_cmdset_0701");
