linux/drivers/mtd/chips/cfi_cmdset_0002.c
/*
 * Common Flash Interface support:
 *   AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
 *
 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
 * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com>
 *
 * 2_by_8 routines added by Simon Munton
 *
 * 4_by_16 work by Carolyn J. Smith
 *
 * XIP support hooks by Vitaly Wool (based on code for Intel flash
 * by Nicolas Pitre)
 *
 * 25/09/2008 Christopher Moore: TopBottom fixup for many Macronix with CFI V1.0
 *
 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
 *
 * This code is GPL
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/xip.h>

#define AMD_BOOTLOC_BUG
#define FORCE_WORD_WRITE 0

#define MAX_WORD_RETRIES 3

#define SST49LF004B             0x0060
#define SST49LF040B             0x0050
#define SST49LF008A             0x005a
#define AT49BV6416              0x00d6

static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_amdstd_sync (struct mtd_info *);
static int cfi_amdstd_suspend (struct mtd_info *);
static void cfi_amdstd_resume (struct mtd_info *);
static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *);
static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);

static void cfi_amdstd_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"

static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);

static struct mtd_chip_driver cfi_amdstd_chipdrv = {
        .probe          = NULL, /* Not usable directly */
        .destroy        = cfi_amdstd_destroy,
        .name           = "cfi_cmdset_0002",
        .module         = THIS_MODULE
};


/* #define DEBUG_CFI_FEATURES */


#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_amdstd *extp)
{
        const char* erase_suspend[3] = {
                "Not supported", "Read only", "Read/write"
        };
        const char* top_bottom[6] = {
                "No WP", "8x8KiB sectors at top & bottom, no WP",
                "Bottom boot", "Top boot",
                "Uniform, Bottom WP", "Uniform, Top WP"
        };

        printk("  Silicon revision: %d\n", extp->SiliconRevision >> 1);
        printk("  Address sensitive unlock: %s\n",
               (extp->SiliconRevision & 1) ? "Not required" : "Required");

        if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
                printk("  Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
        else
                printk("  Erase Suspend: Unknown value %d\n", extp->EraseSuspend);

        if (extp->BlkProt == 0)
                printk("  Block protection: Not supported\n");
        else
                printk("  Block protection: %d sectors per group\n", extp->BlkProt);


        printk("  Temporary block unprotect: %s\n",
               extp->TmpBlkUnprotect ? "Supported" : "Not supported");
        printk("  Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
        printk("  Number of simultaneous operations: %d\n", extp->SimultaneousOps);
        printk("  Burst mode: %s\n",
               extp->BurstMode ? "Supported" : "Not supported");
        if (extp->PageMode == 0)
                printk("  Page mode: Not supported\n");
        else
                printk("  Page mode: %d word page\n", extp->PageMode << 2);

        printk("  Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
               extp->VppMin >> 4, extp->VppMin & 0xf);
        printk("  Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
               extp->VppMax >> 4, extp->VppMax & 0xf);

        if (extp->TopBottom < ARRAY_SIZE(top_bottom))
                printk("  Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
        else
                printk("  Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
}
#endif

#ifdef AMD_BOOTLOC_BUG
/* Wheee. Bring me the head of someone at AMD. */
static void fixup_amd_bootblock(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
        __u8 major = extp->MajorVersion;
        __u8 minor = extp->MinorVersion;

        if (((major << 8) | minor) < 0x3131) {
                /* CFI version 1.0 => don't trust bootloc */

                DEBUG(MTD_DEBUG_LEVEL1,
                        "%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n",
                        map->name, cfi->mfr, cfi->id);

                /* AFAICS all 29LV400 with a bottom boot block have a device ID
                 * of 0x22BA in 16-bit mode and 0xBA in 8-bit mode.
                 * These were badly detected as they have the 0x80 bit set
                 * so treat them as a special case.
                 */
                if (((cfi->id == 0xBA) || (cfi->id == 0x22BA)) &&

                        /* Macronix added CFI to their 2nd generation
                         * MX29LV400C B/T but AFAICS no other 29LV400 (AMD,
                         * Fujitsu, Spansion, EON, ESI and older Macronix)
                         * has CFI.
                         *
                         * Therefore also check the manufacturer.
                         * This reduces the risk of false detection due to
                         * the 8-bit device ID.
                         */
                        (cfi->mfr == CFI_MFR_MACRONIX)) {
                        DEBUG(MTD_DEBUG_LEVEL1,
                                "%s: Macronix MX29LV400C with bottom boot block"
                                " detected\n", map->name);
                        extp->TopBottom = 2;    /* bottom boot */
                } else
                if (cfi->id & 0x80) {
                        printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
                        extp->TopBottom = 3;    /* top boot */
                } else {
                        extp->TopBottom = 2;    /* bottom boot */
                }

                DEBUG(MTD_DEBUG_LEVEL1,
                        "%s: AMD CFI PRI V%c.%c has no boot block field;"
                        " deduced %s from Device ID\n", map->name, major, minor,
                        extp->TopBottom == 2 ? "bottom" : "top");
        }
}
#endif
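
/*
 * For illustration (not part of the driver): the extended query version
 * bytes compared above are ASCII digits, so ((major << 8) | minor) < 0x3131
 * means "older than V1.1".  E.g. a V1.0 table gives
 *
 *      ('1' << 8) | '0' = (0x31 << 8) | 0x30 = 0x3130 < 0x3131
 *
 * so its TopBottom field is not trusted, while a V1.3 table gives 0x3133
 * and is.
 */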

static void fixup_use_write_buffers(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        if (cfi->cfiq->BufWriteTimeoutTyp) {
                DEBUG(MTD_DEBUG_LEVEL1, "Using buffer write method\n" );
                mtd->write = cfi_amdstd_write_buffers;
        }
}

/* Atmel chips don't use the same PRI format as AMD chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
        struct cfi_pri_atmel atmel_pri;

        memcpy(&atmel_pri, extp, sizeof(atmel_pri));
        memset((char *)extp + 5, 0, sizeof(*extp) - 5);

        if (atmel_pri.Features & 0x02)
                extp->EraseSuspend = 2;

        /* Some chips got it backwards... */
        if (cfi->id == AT49BV6416) {
                if (atmel_pri.BottomBoot)
                        extp->TopBottom = 3;
                else
                        extp->TopBottom = 2;
        } else {
                if (atmel_pri.BottomBoot)
                        extp->TopBottom = 2;
                else
                        extp->TopBottom = 3;
        }

        /* burst write mode not supported */
        cfi->cfiq->BufWriteTimeoutTyp = 0;
        cfi->cfiq->BufWriteTimeoutMax = 0;
}

static void fixup_use_secsi(struct mtd_info *mtd)
{
        /* Setup for chips with a secsi area */
        mtd->read_user_prot_reg = cfi_amdstd_secsi_read;
        mtd->read_fact_prot_reg = cfi_amdstd_secsi_read;
}

static void fixup_use_erase_chip(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        if ((cfi->cfiq->NumEraseRegions == 1) &&
                ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
                mtd->erase = cfi_amdstd_erase_chip;
        }

}

/*
 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors
 * locked by default.
 */
static void fixup_use_atmel_lock(struct mtd_info *mtd)
{
        mtd->lock = cfi_atmel_lock;
        mtd->unlock = cfi_atmel_unlock;
        mtd->flags |= MTD_POWERUP_LOCK;
}

static void fixup_old_sst_eraseregion(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        /*
         * These flashes report two separate eraseblock regions based on the
         * sector_erase-size and block_erase-size, although they both operate on the
         * same memory. This is not allowed according to CFI, so we just pick the
         * sector_erase-size.
         */
        cfi->cfiq->NumEraseRegions = 1;
}

static void fixup_sst39vf(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        fixup_old_sst_eraseregion(mtd);

        cfi->addr_unlock1 = 0x5555;
        cfi->addr_unlock2 = 0x2AAA;
}

static void fixup_sst39vf_rev_b(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        fixup_old_sst_eraseregion(mtd);

        cfi->addr_unlock1 = 0x555;
        cfi->addr_unlock2 = 0x2AA;

        cfi->sector_erase_cmd = CMD(0x50);
}

static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        fixup_sst39vf_rev_b(mtd);

        /*
         * CFI reports 1024 sectors (0x03ff+1) of 64KBytes (0x0100*256) where
         * it should report a size of 8KBytes (0x0020*256).
         */
        cfi->cfiq->EraseRegionInfo[0] = 0x002003ff;
        pr_warning("%s: Bad 38VF640x CFI data; adjusting sector size from 64 to 8KiB\n", mtd->name);
}
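
/*
 * For illustration (a worked decode, not driver logic): each 32-bit
 * EraseRegionInfo word packs a region's geometry as
 *
 *      bits 31..16: sector size in units of 256 bytes
 *      bits 15..0 : number of sectors minus one
 *
 * so the corrected value 0x002003ff above decodes to 0x03ff+1 = 1024
 * sectors of 0x0020 * 256 = 8KiB each, whereas the bogus CFI data
 * (0x0100 in the size field) claimed 64KiB sectors.
 */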

static void fixup_s29gl064n_sectors(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
                cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
                pr_warning("%s: Bad S29GL064N CFI data, adjust from 64 to 128 sectors\n", mtd->name);
        }
}

static void fixup_s29gl032n_sectors(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
                cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
                pr_warning("%s: Bad S29GL032N CFI data, adjust from 127 to 63 sectors\n", mtd->name);
        }
}

/* Used to fix CFI-Tables of chips without Extended Query Tables */
static struct cfi_fixup cfi_nopri_fixup_table[] = {
        { CFI_MFR_SST, 0x234a, fixup_sst39vf }, /* SST39VF1602 */
        { CFI_MFR_SST, 0x234b, fixup_sst39vf }, /* SST39VF1601 */
        { CFI_MFR_SST, 0x235a, fixup_sst39vf }, /* SST39VF3202 */
        { CFI_MFR_SST, 0x235b, fixup_sst39vf }, /* SST39VF3201 */
        { CFI_MFR_SST, 0x235c, fixup_sst39vf_rev_b }, /* SST39VF3202B */
        { CFI_MFR_SST, 0x235d, fixup_sst39vf_rev_b }, /* SST39VF3201B */
        { CFI_MFR_SST, 0x236c, fixup_sst39vf_rev_b }, /* SST39VF6402B */
        { CFI_MFR_SST, 0x236d, fixup_sst39vf_rev_b }, /* SST39VF6401B */
        { 0, 0, NULL }
};

static struct cfi_fixup cfi_fixup_table[] = {
        { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
#ifdef AMD_BOOTLOC_BUG
        { CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock },
        { CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock },
#endif
        { CFI_MFR_AMD, 0x0050, fixup_use_secsi },
        { CFI_MFR_AMD, 0x0053, fixup_use_secsi },
        { CFI_MFR_AMD, 0x0055, fixup_use_secsi },
        { CFI_MFR_AMD, 0x0056, fixup_use_secsi },
        { CFI_MFR_AMD, 0x005C, fixup_use_secsi },
        { CFI_MFR_AMD, 0x005F, fixup_use_secsi },
        { CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors },
        { CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors },
        { CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors },
        { CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors },
        { CFI_MFR_SST, 0x536a, fixup_sst38vf640x_sectorsize }, /* SST38VF6402 */
        { CFI_MFR_SST, 0x536b, fixup_sst38vf640x_sectorsize }, /* SST38VF6401 */
        { CFI_MFR_SST, 0x536c, fixup_sst38vf640x_sectorsize }, /* SST38VF6404 */
        { CFI_MFR_SST, 0x536d, fixup_sst38vf640x_sectorsize }, /* SST38VF6403 */
#if !FORCE_WORD_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
#endif
        { 0, 0, NULL }
};
static struct cfi_fixup jedec_fixup_table[] = {
        { CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock },
        { CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock },
        { CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock },
        { 0, 0, NULL }
};

static struct cfi_fixup fixup_table[] = {
        /* The CFI vendor IDs and the JEDEC vendor IDs appear
         * to be common.  It is likely the device IDs are as
         * well.  This table picks all the cases where we
         * know that is true.
         */
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip },
        { CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock },
        { 0, 0, NULL }
};


static void cfi_fixup_major_minor(struct cfi_private *cfi,
                                  struct cfi_pri_amdstd *extp)
{
        if (cfi->mfr == CFI_MFR_SAMSUNG) {
                if ((extp->MajorVersion == '0' && extp->MinorVersion == '0') ||
                    (extp->MajorVersion == '3' && extp->MinorVersion == '3')) {
                        /*
                         * Samsung K8P2815UQB and K8D6x16UxM chips
                         * report major=0 / minor=0.
                         * K8D3x16UxC chips report major=3 / minor=3.
                         */
                        printk(KERN_NOTICE "  Fixing Samsung's Amd/Fujitsu"
                               " Extended Query version to 1.%c\n",
                               extp->MinorVersion);
                        extp->MajorVersion = '1';
                }
        }

        /*
         * SST 38VF640x chips report major=0xFF / minor=0xFF.
         */
        if (cfi->mfr == CFI_MFR_SST && (cfi->id >> 4) == 0x0536) {
                extp->MajorVersion = '1';
                extp->MinorVersion = '0';
        }
}

struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct mtd_info *mtd;
        int i;

        mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
        if (!mtd) {
                printk(KERN_WARNING "Failed to allocate memory for MTD device\n");
                return NULL;
        }
        mtd->priv = map;
        mtd->type = MTD_NORFLASH;

        /* Fill in the default mtd operations */
        mtd->erase   = cfi_amdstd_erase_varsize;
        mtd->write   = cfi_amdstd_write_words;
        mtd->read    = cfi_amdstd_read;
        mtd->sync    = cfi_amdstd_sync;
        mtd->suspend = cfi_amdstd_suspend;
        mtd->resume  = cfi_amdstd_resume;
        mtd->flags   = MTD_CAP_NORFLASH;
        mtd->name    = map->name;
        mtd->writesize = 1;
        mtd->writebufsize = 1 << cfi->cfiq->MaxBufWriteSize;

        DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): write buffer size %d\n",
                __func__, mtd->writebufsize);

        mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;

        if (cfi->cfi_mode==CFI_MODE_CFI){
                unsigned char bootloc;
                __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
                struct cfi_pri_amdstd *extp;

                extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
                if (extp) {
                        /*
                         * It's a real CFI chip, not one for which the probe
                         * routine faked a CFI structure.
                         */
                        cfi_fixup_major_minor(cfi, extp);

                        /*
                         * Valid primary extension versions are: 1.0, 1.1, 1.2, 1.3, 1.4
                         * see: http://cs.ozerki.net/zap/pub/axim-x5/docs/cfi_r20.pdf, page 19
                         *      http://www.spansion.com/Support/AppNotes/cfi_100_20011201.pdf
                         *      http://www.spansion.com/Support/Datasheets/s29ws-p_00_a12_e.pdf
                         */
                        if (extp->MajorVersion != '1' ||
                            (extp->MajorVersion == '1' && (extp->MinorVersion < '0' || extp->MinorVersion > '4'))) {
                                printk(KERN_ERR "  Unknown Amd/Fujitsu Extended Query "
                                       "version %c.%c (%#02x/%#02x).\n",
                                       extp->MajorVersion, extp->MinorVersion,
                                       extp->MajorVersion, extp->MinorVersion);
                                kfree(extp);
                                kfree(mtd);
                                return NULL;
                        }

                        printk(KERN_INFO "  Amd/Fujitsu Extended Query version %c.%c.\n",
                               extp->MajorVersion, extp->MinorVersion);

                        /* Install our own private info structure */
                        cfi->cmdset_priv = extp;

                        /* Apply cfi device specific fixups */
                        cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
                        /* Tell the user about it in lots of lovely detail */
                        cfi_tell_features(extp);
#endif

                        bootloc = extp->TopBottom;
                        if ((bootloc < 2) || (bootloc > 5)) {
                                printk(KERN_WARNING "%s: CFI contains unrecognised boot "
                                       "bank location (%d). Assuming bottom.\n",
                                       map->name, bootloc);
                                bootloc = 2;
                        }

                        if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
                                printk(KERN_WARNING "%s: Swapping erase regions for top-boot CFI table.\n", map->name);

                                for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
                                        int j = (cfi->cfiq->NumEraseRegions-1)-i;
                                        __u32 swap;

                                        swap = cfi->cfiq->EraseRegionInfo[i];
                                        cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
                                        cfi->cfiq->EraseRegionInfo[j] = swap;
                                }
                        }
                        /* Set the default CFI lock/unlock addresses */
                        cfi->addr_unlock1 = 0x555;
                        cfi->addr_unlock2 = 0x2aa;
                }
                cfi_fixup(mtd, cfi_nopri_fixup_table);

                if (!cfi->addr_unlock1 || !cfi->addr_unlock2) {
                        kfree(mtd);
                        return NULL;
                }

        } /* CFI mode */
        else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
                /* Apply jedec specific fixups */
                cfi_fixup(mtd, jedec_fixup_table);
        }
        /* Apply generic fixups */
        cfi_fixup(mtd, fixup_table);

        for (i=0; i< cfi->numchips; i++) {
                cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
                cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
                cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
                cfi->chips[i].ref_point_counter = 0;
                init_waitqueue_head(&(cfi->chips[i].wq));
        }

        map->fldrv = &cfi_amdstd_chipdrv;

        return cfi_amdstd_setup(mtd);
}
struct mtd_info *cfi_cmdset_0006(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
struct mtd_info *cfi_cmdset_0701(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0002);
EXPORT_SYMBOL_GPL(cfi_cmdset_0006);
EXPORT_SYMBOL_GPL(cfi_cmdset_0701);

static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
        unsigned long offset = 0;
        int i,j;

        printk(KERN_NOTICE "number of %s chips: %d\n",
               (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);
        /* Select the correct geometry setup */
        mtd->size = devsize * cfi->numchips;

        mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
        mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
                                    * mtd->numeraseregions, GFP_KERNEL);
        if (!mtd->eraseregions) {
                printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n");
                goto setup_err;
        }

        for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
                unsigned long ernum, ersize;
                ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
                ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

                if (mtd->erasesize < ersize) {
                        mtd->erasesize = ersize;
                }
                for (j=0; j<cfi->numchips; j++) {
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
                }
                offset += (ersize * ernum);
        }
        if (offset != devsize) {
                /* Argh */
                printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
                goto setup_err;
        }

        __module_get(THIS_MODULE);
        register_reboot_notifier(&mtd->reboot_notifier);
        return mtd;

 setup_err:
        kfree(mtd->eraseregions);
        kfree(mtd);
        kfree(cfi->cmdset_priv);
        kfree(cfi->cfiq);
        return NULL;
}
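
/*
 * For illustration (hypothetical values, not driver code): the geometry
 * loop above unpacks each EraseRegionInfo word.  A non-interleaved chip
 * reporting 0x0100003e would yield
 *
 *      ersize = ((0x0100003e >> 8) & ~0xff) * 1 = 0x10000 (64KiB)
 *      ernum  = (0x0100003e & 0xffff) + 1      = 63 sectors
 *
 * i.e. a 63 x 64KiB region; with two interleaved chips the effective
 * sector size doubles while the sector count stays the same.
 */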

/*
 * Return true if the chip is ready.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and is indicated by no toggle bits toggling.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_ready(struct map_info *map, unsigned long addr)
{
        map_word d, t;

        d = map_read(map, addr);
        t = map_read(map, addr);

        return map_word_equal(map, d, t);
}

/*
 * Return true if the chip is ready and has the correct value.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and it is indicated by no bits toggling.
 *
 * Errors are indicated by toggling bits, or by bits held at the wrong
 * value.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 *
 */
static int __xipram chip_good(struct map_info *map, unsigned long addr, map_word expected)
{
        map_word oldd, curd;

        oldd = map_read(map, addr);
        curd = map_read(map, addr);

        return  map_word_equal(map, oldd, curd) &&
                map_word_equal(map, curd, expected);
}
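
/*
 * For illustration (not driver code): while an embedded program/erase
 * algorithm is running, these chips toggle status bits (DQ6, and DQ2
 * during erase) on every read, so two back-to-back reads of the same
 * address differ:
 *
 *      busy: 1st read 0x40, 2nd read 0x00  ->  chip_ready() == 0
 *      done: 1st read 0xff, 2nd read 0xff  ->  chip_ready() == 1
 *
 * which is why both helpers above simply compare two successive reads
 * instead of decoding the status bits.
 */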

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        DECLARE_WAITQUEUE(wait, current);
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long timeo;
        struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;

 resettime:
        timeo = jiffies + HZ;
 retry:
        switch (chip->state) {

        case FL_STATUS:
                for (;;) {
                        if (chip_ready(map, adr))
                                break;

                        if (time_after(jiffies, timeo)) {
                                printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
                                return -EIO;
                        }
                        mutex_unlock(&chip->mutex);
                        cfi_udelay(1);
                        mutex_lock(&chip->mutex);
                        /* Someone else might have been playing with it. */
                        goto retry;
                }

        case FL_READY:
        case FL_CFI_QUERY:
        case FL_JEDEC_QUERY:
                return 0;

        case FL_ERASING:
                if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
                    !(mode == FL_READY || mode == FL_POINT ||
                    (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
                        goto sleep;

                /* We could check to see if we're trying to access the sector
                 * that is currently being erased. However, no user will try
                 * anything like that so we just wait for the timeout. */

                /* Erase suspend */
                /* It's harmless to issue the Erase-Suspend and Erase-Resume
                 * commands when the erase algorithm isn't in progress. */
                map_write(map, CMD(0xB0), chip->in_progress_block_addr);
                chip->oldstate = FL_ERASING;
                chip->state = FL_ERASE_SUSPENDING;
                chip->erase_suspended = 1;
                for (;;) {
                        if (chip_ready(map, adr))
                                break;

                        if (time_after(jiffies, timeo)) {
                                /* Should have suspended the erase by now.
                                 * Send an Erase-Resume command as either
                                 * there was an error (so leave the erase
                                 * routine to recover from it) or we are
                                 * trying to use the erase-in-progress sector. */
                                map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr);
                                chip->state = FL_ERASING;
                                chip->oldstate = FL_READY;
                                printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
                                return -EIO;
                        }

                        mutex_unlock(&chip->mutex);
                        cfi_udelay(1);
                        mutex_lock(&chip->mutex);
                        /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
                           So we can just loop here. */
                }
                chip->state = FL_READY;
                return 0;

        case FL_XIP_WHILE_ERASING:
                if (mode != FL_READY && mode != FL_POINT &&
                    (!cfip || !(cfip->EraseSuspend&2)))
                        goto sleep;
                chip->oldstate = chip->state;
                chip->state = FL_READY;
                return 0;

        case FL_SHUTDOWN:
                /* The machine is rebooting */
                return -EIO;

        case FL_POINT:
                /* Only if there's no operation suspended... */
                if (mode == FL_READY && chip->oldstate == FL_READY)
                        return 0;

        default:
        sleep:
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);
                mutex_unlock(&chip->mutex);
                schedule();
                remove_wait_queue(&chip->wq, &wait);
                mutex_lock(&chip->mutex);
                goto resettime;
        }
}


static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;

        switch(chip->oldstate) {
        case FL_ERASING:
                chip->state = chip->oldstate;
                map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr);
                chip->oldstate = FL_READY;
                chip->state = FL_ERASING;
                break;

        case FL_XIP_WHILE_ERASING:
                chip->state = chip->oldstate;
                chip->oldstate = FL_READY;
                break;

        case FL_READY:
        case FL_STATUS:
                /* We should really make set_vpp() count, rather than doing this */
                DISABLE_VPP(map);
                break;
        default:
                printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
        }
        wake_up(&chip->wq);
}
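
/*
 * For illustration (a usage sketch, mirroring do_write_oneword() below):
 * get_chip()/put_chip() bracket every flash access under chip->mutex,
 * with get_chip() suspending an erase in progress when the chip supports
 * it and put_chip() resuming it afterwards:
 *
 *      mutex_lock(&chip->mutex);
 *      ret = get_chip(map, chip, adr, FL_WRITING);  <- may erase-suspend
 *      ... issue command, poll chip_ready() ...
 *      put_chip(map, chip, adr);                    <- may erase-resume
 *      mutex_unlock(&chip->mutex);
 */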

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * Within an XIP-disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions). Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
                        unsigned long adr)
{
        /* TODO: chips with no XIP use should ignore and return */
        (void) map_read(map, adr); /* ensure mmu mapping is up to date */
        local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
                                unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;

        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xf0), adr);
                chip->state = FL_READY;
        }
        (void) map_read(map, adr);
        xip_iprefetch();
        local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function is polling for both the given timeout and pending
 * (but still masked) hardware interrupts.  Whenever there is an interrupt
 * pending then the flash erase operation is suspended, array mode restored
 * and interrupts unmasked.  Task scheduling might also happen at that
 * point.  The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the remainder
 * of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
                                unsigned long adr, int usec)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
        map_word status, OK = CMD(0x80);
        unsigned long suspended, start = xip_currtime();
        flstate_t oldstate;

        do {
                cpu_relax();
                if (xip_irqpending() && extp &&
                    ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
                    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
                        /*
                         * Let's suspend the erase operation when supported.
                         * Note that we currently don't try to suspend
                         * interleaved chips if there is already another
                         * operation suspended (imagine what happens
                         * when one chip was already done with the current
                         * operation while another chip suspended it, then
                         * we resume the whole thing at once).  Yes, it
                         * can happen!
                         */
                        map_write(map, CMD(0xb0), adr);
                        usec -= xip_elapsed_since(start);
                        suspended = xip_currtime();
                        do {
                                if (xip_elapsed_since(suspended) > 100000) {
                                        /*
                                         * The chip doesn't want to suspend
                                         * after waiting for 100 msecs.
                                         * This is a critical error but there
                                         * is not much we can do here.
                                         */
                                        return;
                                }
                                status = map_read(map, adr);
                        } while (!map_word_andequal(map, status, OK, OK));

                        /* Suspend succeeded */
                        oldstate = chip->state;
                        if (!map_word_bitsset(map, status, CMD(0x40)))
                                break;
                        chip->state = FL_XIP_WHILE_ERASING;
                        chip->erase_suspended = 1;
                        map_write(map, CMD(0xf0), adr);
                        (void) map_read(map, adr);
                        xip_iprefetch();
                        local_irq_enable();
                        mutex_unlock(&chip->mutex);
                        xip_iprefetch();
                        cond_resched();

                        /*
                         * We're back.  However someone else might have
                         * decided to go write to the chip if we are in
                         * a suspended erase state.  If so let's wait
                         * until it's done.
                         */
                        mutex_lock(&chip->mutex);
                        while (chip->state != FL_XIP_WHILE_ERASING) {
                                DECLARE_WAITQUEUE(wait, current);
                                set_current_state(TASK_UNINTERRUPTIBLE);
                                add_wait_queue(&chip->wq, &wait);
                                mutex_unlock(&chip->mutex);
                                schedule();
                                remove_wait_queue(&chip->wq, &wait);
                                mutex_lock(&chip->mutex);
                        }
                        /* Disallow XIP again */
                        local_irq_disable();

                        /* Resume the write or erase operation */
                        map_write(map, cfi->sector_erase_cmd, adr);
                        chip->state = oldstate;
                        start = xip_currtime();
                } else if (usec >= 1000000/HZ) {
                        /*
                         * Try to save on CPU power when waiting delay
                         * is at least a system timer tick period.
                         * No need to be extremely accurate here.
                         */
                        xip_cpu_idle();
                }
                status = map_read(map, adr);
        } while (!map_word_andequal(map, status, OK, OK)
                 && xip_elapsed_since(start) < usec);
}

#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * an XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVALIDATE_CACHE_UDELAY.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
        INVALIDATE_CACHED_RANGE(map, from, size)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
        UDELAY(map, chip, adr, usec)

/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit.  For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with the
 * same chip state as it was entered with.  This is why no special care is
 * taken over the add_wait_queue() or schedule() calls within a couple of
 * xip_disable()'d areas of code, like in do_erase_oneblock for example.
 * The queueing and scheduling always happen within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*), i.e. with the flash
 * in array mode, therefore never executing many cases therein and not
 * causing any problem with XIP.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)

#define UDELAY(map, chip, adr, usec)  \
do {  \
        mutex_unlock(&chip->mutex);  \
        cfi_udelay(usec);  \
        mutex_lock(&chip->mutex);  \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
do {  \
        mutex_unlock(&chip->mutex);  \
        INVALIDATE_CACHED_RANGE(map, adr, len);  \
        cfi_udelay(usec);  \
        mutex_lock(&chip->mutex);  \
} while (0)

#endif

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
        unsigned long cmd_addr;
        struct cfi_private *cfi = map->fldrv_priv;
        int ret;

        adr += chip->start;

        /* Ensure cmd read/writes are aligned. */
        cmd_addr = adr & ~(map_bankwidth(map)-1);

        mutex_lock(&chip->mutex);
        ret = get_chip(map, chip, cmd_addr, FL_READY);
        if (ret) {
                mutex_unlock(&chip->mutex);
                return ret;
        }

        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xf0), cmd_addr);
                chip->state = FL_READY;
        }

        map_copy_from(map, buf, adr, len);

        put_chip(map, chip, cmd_addr);

        mutex_unlock(&chip->mutex);
        return 0;
}


static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long ofs;
        int chipnum;
        int ret = 0;

        /* ofs: offset within the first chip that the first read should start */

        chipnum = (from >> cfi->chipshift);
        ofs = from - (chipnum <<  cfi->chipshift);


        *retlen = 0;

        while (len) {
                unsigned long thislen;

                if (chipnum >= cfi->numchips)
                        break;

                if ((len + ofs -1) >> cfi->chipshift)
                        thislen = (1<<cfi->chipshift) - ofs;
                else
                        thislen = len;

                ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
                if (ret)
                        break;

                *retlen += thislen;
                len -= thislen;
                buf += thislen;

                ofs = 0;
                chipnum++;
        }
        return ret;
}
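
/*
 * For illustration (hypothetical numbers): with two 4MiB chips
 * (cfi->chipshift == 22), a read of len 0x3000 at from = 0x3ff000 is
 * split by the loop above into
 *
 *      chip 0: ofs 0x3ff000, thislen 0x1000  (up to the end of chip 0)
 *      chip 1: ofs 0x000000, thislen 0x2000  (the remainder)
 */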


static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
        DECLARE_WAITQUEUE(wait, current);
        unsigned long timeo = jiffies + HZ;
        struct cfi_private *cfi = map->fldrv_priv;

 retry:
        mutex_lock(&chip->mutex);

        if (chip->state != FL_READY){
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);

                mutex_unlock(&chip->mutex);

                schedule();
                remove_wait_queue(&chip->wq, &wait);
                timeo = jiffies + HZ;

                goto retry;
        }

        adr += chip->start;

        chip->state = FL_READY;

        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

        map_copy_from(map, buf, adr, len);

        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

        wake_up(&chip->wq);
        mutex_unlock(&chip->mutex);

        return 0;
}
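
/*
 * For reference (as issued above): the SecSi (security silicon) region
 * is mapped in with the 0xAA/0x55/0x88 unlock sequence and unmapped
 * again with 0xAA/0x55/0x90 followed by 0x00.  While it is mapped in,
 * reads at the overlaid addresses return SecSi data instead of array
 * data, which is why the chip is held exclusively across the
 * map_copy_from().
 */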

static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long ofs;
        int chipnum;
        int ret = 0;


        /* ofs: offset within the first chip that the first read should start */

        /* 8 secsi bytes per chip */
        chipnum=from>>3;
        ofs=from & 7;


        *retlen = 0;

        while (len) {
                unsigned long thislen;

                if (chipnum >= cfi->numchips)
                        break;

                if ((len + ofs -1) >> 3)
                        thislen = (1<<3) - ofs;
                else
                        thislen = len;

                ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
                if (ret)
                        break;

                *retlen += thislen;
                len -= thislen;
                buf += thislen;

                ofs = 0;
                chipnum++;
        }
        return ret;
}


static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum)
{
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long timeo = jiffies + HZ;
        /*
         * We use a 1ms + 1 jiffies generic timeout for writes (most devices
         * have a max write time of a few hundreds usec). However, we should
         * use the maximum timeout value given by the chip at probe time
         * instead.  Unfortunately, struct flchip does not have a field for
         * the maximum timeout, only for the typical one, which can be far
         * too short depending on the conditions.  The ' + 1' is to avoid
         * having a timeout of 0 jiffies if HZ is smaller than 1000.
         */
        unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
        int ret = 0;
        map_word oldd;
        int retry_cnt = 0;

        adr += chip->start;

        mutex_lock(&chip->mutex);
        ret = get_chip(map, chip, adr, FL_WRITING);
        if (ret) {
                mutex_unlock(&chip->mutex);
                return ret;
        }

        DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
               __func__, adr, datum.x[0] );

        /*
         * Check for a NOP for the case when the datum to write is already
         * present - it saves time and works around buggy chips that corrupt
         * data at other locations when 0xff is written to a location that
         * already contains 0xff.
         */
        oldd = map_read(map, adr);
        if (map_word_equal(map, oldd, datum)) {
                DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): NOP\n",
                       __func__);
                goto op_done;
        }

        XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
        ENABLE_VPP(map);
        xip_disable(map, chip, adr);
 retry:
        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
        map_write(map, datum, adr);
        chip->state = FL_WRITING;

        INVALIDATE_CACHE_UDELAY(map, chip,
                                adr, map_bankwidth(map),
                                chip->word_write_time);

        /* See comment above for timeout value. */
        timeo = jiffies + uWriteTimeout;
        for (;;) {
                if (chip->state != FL_WRITING) {
                        /* Someone's suspended the write. Sleep */
                        DECLARE_WAITQUEUE(wait, current);

                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        mutex_unlock(&chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        timeo = jiffies + (HZ / 2); /* FIXME */
                        mutex_lock(&chip->mutex);
                        continue;
                }

                if (time_after(jiffies, timeo) && !chip_ready(map, adr)){
                        xip_enable(map, chip, adr);
                        printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
                        xip_disable(map, chip, adr);
                        break;
                }

                if (chip_ready(map, adr))
                        break;

                /* Latency issues. Drop the lock, wait a while and retry */
                UDELAY(map, chip, adr, 1);
        }
        /* Did we succeed? */
        if (!chip_good(map, adr, datum)) {
                /* reset on all failures. */
                map_write( map, CMD(0xF0), chip->start );
                /* FIXME - should have reset delay before continuing */

                if (++retry_cnt <= MAX_WORD_RETRIES)
                        goto retry;

                ret = -EIO;
        }
        xip_enable(map, chip, adr);
 op_done:
        chip->state = FL_READY;
        put_chip(map, chip, adr);
        mutex_unlock(&chip->mutex);

        return ret;
}
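
/*
 * For reference (the standard AMD/Fujitsu command set, as issued above):
 * a single word program is the classic four-cycle unlock + program
 * sequence
 *
 *      cycle 1: addr 0x555, data 0xAA   (unlock 1)
 *      cycle 2: addr 0x2AA, data 0x55   (unlock 2)
 *      cycle 3: addr 0x555, data 0xA0   (program setup)
 *      cycle 4: addr PA,    data PD     (program address, program data)
 *
 * cfi_send_gen_cmd() scales the unlock addresses for the bus width and
 * interleave, and the SST fixups above override them with 0x5555/0x2AAA
 * where required.
 */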


static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
                                  size_t *retlen, const u_char *buf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        int ret = 0;
        int chipnum;
        unsigned long ofs, chipstart;
        DECLARE_WAITQUEUE(wait, current);

        *retlen = 0;
        if (!len)
                return 0;

        chipnum = to >> cfi->chipshift;
        ofs = to  - (chipnum << cfi->chipshift);
        chipstart = cfi->chips[chipnum].start;

        /* If it's not bus-aligned, do the first byte write */
        if (ofs & (map_bankwidth(map)-1)) {
                unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
                int i = ofs - bus_ofs;
                int n = 0;
                map_word tmp_buf;

 retry:
                mutex_lock(&cfi->chips[chipnum].mutex);

                if (cfi->chips[chipnum].state != FL_READY) {
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&cfi->chips[chipnum].wq, &wait);

                        mutex_unlock(&cfi->chips[chipnum].mutex);

                        schedule();
                        remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
                        goto retry;
                }

                /* Load 'tmp_buf' with old contents of flash */
                tmp_buf = map_read(map, bus_ofs+chipstart);

                mutex_unlock(&cfi->chips[chipnum].mutex);

                /* Number of bytes to copy from buffer */
                n = min_t(int, len, map_bankwidth(map)-i);

                tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);

                ret = do_write_oneword(map, &cfi->chips[chipnum],
                                       bus_ofs, tmp_buf);
                if (ret)
                        return ret;

                ofs += n;
                buf += n;
                (*retlen) += n;
                len -= n;

                if (ofs >> cfi->chipshift) {
                        chipnum ++;
                        ofs = 0;
                        if (chipnum == cfi->numchips)
                                return 0;
                }
        }

        /* We are now aligned, write as much as possible */
        while(len >= map_bankwidth(map)) {
                map_word datum;

                datum = map_word_load(map, buf);

                ret = do_write_oneword(map, &cfi->chips[chipnum],
                                       ofs, datum);
                if (ret)
                        return ret;

                ofs += map_bankwidth(map);
                buf += map_bankwidth(map);
                (*retlen) += map_bankwidth(map);
                len -= map_bankwidth(map);

                if (ofs >> cfi->chipshift) {
                        chipnum ++;
                        ofs = 0;
                        if (chipnum == cfi->numchips)
                                return 0;
                        chipstart = cfi->chips[chipnum].start;
                }
        }

        /* Write the trailing bytes if any */
        if (len & (map_bankwidth(map)-1)) {
                map_word tmp_buf;

 retry1:
                mutex_lock(&cfi->chips[chipnum].mutex);

                if (cfi->chips[chipnum].state != FL_READY) {
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&cfi->chips[chipnum].wq, &wait);

                        mutex_unlock(&cfi->chips[chipnum].mutex);

                        schedule();
                        remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
                        goto retry1;
                }

                tmp_buf = map_read(map, ofs + chipstart);

                mutex_unlock(&cfi->chips[chipnum].mutex);

                tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);

                ret = do_write_oneword(map, &cfi->chips[chipnum],
                                ofs, tmp_buf);
                if (ret)
                        return ret;

                (*retlen) += len;
        }

        return 0;
}
1374
1375
1376/*
1377 * FIXME: interleaved mode not tested, and probably not supported!
1378 */
1379static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1380                                    unsigned long adr, const u_char *buf,
1381                                    int len)
1382{
1383        struct cfi_private *cfi = map->fldrv_priv;
1384        unsigned long timeo = jiffies + HZ;
1385        /* see comments in do_write_oneword() regarding uWriteTimeout. */
1386        unsigned long uWriteTimeout = (HZ / 1000) + 1;
1387        int ret = -EIO;
1388        unsigned long cmd_adr;
1389        int z, words;
1390        map_word datum;
1391
1392        adr += chip->start;
1393        cmd_adr = adr;
1394
1395        mutex_lock(&chip->mutex);
1396        ret = get_chip(map, chip, adr, FL_WRITING);
1397        if (ret) {
1398                mutex_unlock(&chip->mutex);
1399                return ret;
1400        }
1401
1402        datum = map_word_load(map, buf);
1403
1404        DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
1405               __func__, adr, datum.x[0]);
1406
1407        XIP_INVAL_CACHED_RANGE(map, adr, len);
1408        ENABLE_VPP(map);
1409        xip_disable(map, chip, cmd_adr);
1410
1411        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1412        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1413
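        /*
         * Write-to-buffer sequence: after the two unlock cycles, 0x25 is
         * written at the sector address, then the word count minus one,
         * the data words themselves, and finally 0x29 to start programming.
         */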
1414        /* Write Buffer Load */
1415        map_write(map, CMD(0x25), cmd_adr);
1416
1417        chip->state = FL_WRITING_TO_BUFFER;
1418
1419        /* Write the word count, encoded as (number of words - 1) */
1420        words = len / map_bankwidth(map);
1421        map_write(map, CMD(words - 1), cmd_adr);
1422        /* Write data */
1423        z = 0;
1424        while (z < words * map_bankwidth(map)) {
1425                datum = map_word_load(map, buf);
1426                map_write(map, datum, adr + z);
1427
1428                z += map_bankwidth(map);
1429                buf += map_bankwidth(map);
1430        }
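        /*
         * Leave 'adr' pointing at the last word loaded into the buffer;
         * completion is polled at that address below.
         */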
1431        z -= map_bankwidth(map);
1432
1433        adr += z;
1434
1435        /* Write Buffer Program Confirm: GO GO GO */
1436        map_write(map, CMD(0x29), cmd_adr);
1437        chip->state = FL_WRITING;
1438
1439        INVALIDATE_CACHE_UDELAY(map, chip,
1440                                adr, map_bankwidth(map),
1441                                chip->word_write_time);
1442
1443        timeo = jiffies + uWriteTimeout;
1444
1445        for (;;) {
1446                if (chip->state != FL_WRITING) {
1447                        /* Someone's suspended the write. Sleep */
1448                        DECLARE_WAITQUEUE(wait, current);
1449
1450                        set_current_state(TASK_UNINTERRUPTIBLE);
1451                        add_wait_queue(&chip->wq, &wait);
1452                        mutex_unlock(&chip->mutex);
1453                        schedule();
1454                        remove_wait_queue(&chip->wq, &wait);
1455                        timeo = jiffies + (HZ / 2); /* FIXME */
1456                        mutex_lock(&chip->mutex);
1457                        continue;
1458                }
1459
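                    /*
                     * Only time out if the chip still isn't ready, so that
                     * an operation finishing right at the deadline is still
                     * caught by the chip_ready() test below.
                     */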
1460                if (time_after(jiffies, timeo) && !chip_ready(map, adr))
1461                        break;
1462
1463                if (chip_ready(map, adr)) {
1464                        xip_enable(map, chip, adr);
1465                        goto op_done;
1466                }
1467
1468                /* Latency issues. Drop the lock, wait a while and retry */
1469                UDELAY(map, chip, adr, 1);
1470        }
1471
1472        /* reset on all failures. */
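        /*
         * Note: many chips define a dedicated write-buffer-abort reset
         * sequence (0xAA/0x55/0xF0) for a failed buffer program; a bare
         * 0xF0 may not clear the abort condition on such parts.
         */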
1473        map_write(map, CMD(0xF0), chip->start);
1474        xip_enable(map, chip, adr);
1475        /* FIXME - should have reset delay before continuing */
1476
1477        printk(KERN_WARNING "MTD %s(): software timeout\n",
1478               __func__);
1479
1480        ret = -EIO;
1481 op_done:
1482        chip->state = FL_READY;
1483        put_chip(map, chip, adr);
1484        mutex_unlock(&chip->mutex);
1485
1486        return ret;
1487}
1488
1489
1490static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
1491                                    size_t *retlen, const u_char *buf)
1492{
1493        struct map_info *map = mtd->priv;
1494        struct cfi_private *cfi = map->fldrv_priv;
1495        int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1496        int ret = 0;
1497        int chipnum;
1498        unsigned long ofs;
1499
1500        *retlen = 0;
1501        if (!len)
1502                return 0;
1503
1504        chipnum = to >> cfi->chipshift;
1505        ofs = to - (chipnum << cfi->chipshift);
1506
1507        /* If it's not bus-aligned, hand the leading bytes to the word-write path */
1508        if (ofs & (map_bankwidth(map)-1)) {
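                    /* (-ofs) & (bankwidth-1) = bytes up to the next aligned address */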
1509                size_t local_len = (-ofs) & (map_bankwidth(map)-1);
1510                if (local_len > len)
1511                        local_len = len;
1512                ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
1513                                             local_len, retlen, buf);
1514                if (ret)
1515                        return ret;
1516                ofs += local_len;
1517                buf += local_len;
1518                len -= local_len;
1519
1520                if (ofs >> cfi->chipshift) {
1521                        chipnum++;
1522                        ofs = 0;
1523                        if (chipnum == cfi->numchips)
1524                                return 0;
1525                }
1526        }
1527
1528        /* Write buffer is worth it only if more than one word to write... */
1529        while (len >= map_bankwidth(map) * 2) {
1530                /* We must not cross write block boundaries */
1531                int size = wbufsize - (ofs & (wbufsize-1));
1532
1533                if (size > len)
1534                        size = len;
1535                if (size % map_bankwidth(map))
1536                        size -= size % map_bankwidth(map);
1537
1538                ret = do_write_buffer(map, &cfi->chips[chipnum],
1539                                      ofs, buf, size);
1540                if (ret)
1541                        return ret;
1542
1543                ofs += size;
1544                buf += size;
1545                (*retlen) += size;
1546                len -= size;
1547
1548                if (ofs >> cfi->chipshift) {
1549                        chipnum++;
1550                        ofs = 0;
1551                        if (chipnum == cfi->numchips)
1552                                return 0;
1553                }
1554        }
1555
1556        if (len) {
1557                size_t retlen_dregs = 0;
1558
1559                ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
1560                                             len, &retlen_dregs, buf);
1561
1562                *retlen += retlen_dregs;
1563                return ret;
1564        }
1565
1566        return 0;
1567}
1568
1569
1570/*
1571 * Handle devices with one erase region that only implement
1572 * the chip erase command.
1573 */
1574static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
1575{
1576        struct cfi_private *cfi = map->fldrv_priv;
1577        unsigned long timeo = jiffies + HZ;
1578        unsigned long adr;
1579        DECLARE_WAITQUEUE(wait, current);
1580        int ret = 0;
1581
1582        adr = cfi->addr_unlock1;
1583
1584        mutex_lock(&chip->mutex);
1585        ret = get_chip(map, chip, adr, FL_WRITING);
1586        if (ret) {
1587                mutex_unlock(&chip->mutex);
1588                return ret;
1589        }
1590
1591        DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
1592               __func__, chip->start);
1593
1594        XIP_INVAL_CACHED_RANGE(map, adr, map->size);
1595        ENABLE_VPP(map);
1596        xip_disable(map, chip, adr);
1597
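        /*
         * Six-cycle chip erase: two unlock cycles, 0x80 (erase setup),
         * two more unlock cycles, then 0x10 for a full chip erase.
         */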
1598        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1599        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1600        cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1601        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1602        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1603        cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1604
1605        chip->state = FL_ERASING;
1606        chip->erase_suspended = 0;
1607        chip->in_progress_block_addr = adr;
1608
1609        INVALIDATE_CACHE_UDELAY(map, chip,
1610                                adr, map->size,
1611                                chip->erase_time*500);
1612
1613        timeo = jiffies + (HZ*20);
1614
1615        for (;;) {
1616                if (chip->state != FL_ERASING) {
1617                        /* Someone's suspended the erase. Sleep */
1618                        set_current_state(TASK_UNINTERRUPTIBLE);
1619                        add_wait_queue(&chip->wq, &wait);
1620                        mutex_unlock(&chip->mutex);
1621                        schedule();
1622                        remove_wait_queue(&chip->wq, &wait);
1623                        mutex_lock(&chip->mutex);
1624                        continue;
1625                }
1626                if (chip->erase_suspended) {
1627                        /* This erase was suspended and resumed.
1628                           Adjust the timeout */
1629                        timeo = jiffies + (HZ*20); /* FIXME */
1630                        chip->erase_suspended = 0;
1631                }
1632
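                    /*
                     * chip_ready() compares two successive reads; while the
                     * erase is in progress DQ6 toggles between reads, so two
                     * equal reads mean the operation has finished.
                     */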
1633                if (chip_ready(map, adr))
1634                        break;
1635
1636                if (time_after(jiffies, timeo)) {
1637                        printk(KERN_WARNING "MTD %s(): software timeout\n",
1638                                __func__);
1639                        break;
1640                }
1641
1642                /* Latency issues. Drop the lock, wait a while and retry */
1643                UDELAY(map, chip, adr, 1000000/HZ);
1644        }
1645        /* Did we succeed? */
1646        if (!chip_good(map, adr, map_word_ff(map))) {
1647                /* reset on all failures. */
1648                map_write(map, CMD(0xF0), chip->start);
1649                /* FIXME - should have reset delay before continuing */
1650
1651                ret = -EIO;
1652        }
1653
1654        chip->state = FL_READY;
1655        xip_enable(map, chip, adr);
1656        put_chip(map, chip, adr);
1657        mutex_unlock(&chip->mutex);
1658
1659        return ret;
1660}
1661
1662
1663static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
1664{
1665        struct cfi_private *cfi = map->fldrv_priv;
1666        unsigned long timeo = jiffies + HZ;
1667        DECLARE_WAITQUEUE(wait, current);
1668        int ret = 0;
1669
1670        adr += chip->start;
1671
1672        mutex_lock(&chip->mutex);
1673        ret = get_chip(map, chip, adr, FL_ERASING);
1674        if (ret) {
1675                mutex_unlock(&chip->mutex);
1676                return ret;
1677        }
1678
1679        DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
1680               __func__, adr);
1681
1682        XIP_INVAL_CACHED_RANGE(map, adr, len);
1683        ENABLE_VPP(map);
1684        xip_disable(map, chip, adr);
1685
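        /*
         * Same six-cycle preamble as chip erase, but the final cycle is
         * the sector erase command (usually 0x30) written to an address
         * within the sector to be erased.
         */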
1686        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1687        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1688        cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1689        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1690        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1691        map_write(map, cfi->sector_erase_cmd, adr);
1692
1693        chip->state = FL_ERASING;
1694        chip->erase_suspended = 0;
1695        chip->in_progress_block_addr = adr;
1696
1697        INVALIDATE_CACHE_UDELAY(map, chip,
1698                                adr, len,
1699                                chip->erase_time*500);
1700
1701        timeo = jiffies + (HZ*20);
1702
1703        for (;;) {
1704                if (chip->state != FL_ERASING) {
1705                        /* Someone's suspended the erase. Sleep */
1706                        set_current_state(TASK_UNINTERRUPTIBLE);
1707                        add_wait_queue(&chip->wq, &wait);
1708                        mutex_unlock(&chip->mutex);
1709                        schedule();
1710                        remove_wait_queue(&chip->wq, &wait);
1711                        mutex_lock(&chip->mutex);
1712                        continue;
1713                }
1714                if (chip->erase_suspended) {
1715                        /* This erase was suspended and resumed.
1716                           Adjust the timeout */
1717                        timeo = jiffies + (HZ*20); /* FIXME */
1718                        chip->erase_suspended = 0;
1719                }
1720
1721                if (chip_ready(map, adr)) {
1722                        xip_enable(map, chip, adr);
1723                        break;
1724                }
1725
1726                if (time_after(jiffies, timeo)) {
1727                        xip_enable(map, chip, adr);
1728                        printk(KERN_WARNING "MTD %s(): software timeout\n",
1729                                __func__);
1730                        break;
1731                }
1732
1733                /* Latency issues. Drop the lock, wait a while and retry */
1734                UDELAY(map, chip, adr, 1000000/HZ);
1735        }
1736        /* Did we succeed? */
1737        if (!chip_good(map, adr, map_word_ff(map))) {
1738                /* reset on all failures. */
1739                map_write(map, CMD(0xF0), chip->start);
1740                /* FIXME - should have reset delay before continuing */
1741
1742                ret = -EIO;
1743        }
1744
1745        chip->state = FL_READY;
1746        put_chip(map, chip, adr);
1747        mutex_unlock(&chip->mutex);
1748        return ret;
1749}
1750
1751
1752static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1753{
1754        unsigned long ofs, len;
1755        int ret;
1756
1757        ofs = instr->addr;
1758        len = instr->len;
1759
1760        ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1761        if (ret)
1762                return ret;
1763
1764        instr->state = MTD_ERASE_DONE;
1765        mtd_erase_callback(instr);
1766
1767        return 0;
1768}
1769
1770
1771static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
1772{
1773        struct map_info *map = mtd->priv;
1774        struct cfi_private *cfi = map->fldrv_priv;
1775        int ret = 0;
1776
1777        if (instr->addr != 0)
1778                return -EINVAL;
1779
1780        if (instr->len != mtd->size)
1781                return -EINVAL;
1782
1783        ret = do_erase_chip(map, &cfi->chips[0]);
1784        if (ret)
1785                return ret;
1786
1787        instr->state = MTD_ERASE_DONE;
1788        mtd_erase_callback(instr);
1789
1790        return 0;
1791}
1792
1793static int do_atmel_lock(struct map_info *map, struct flchip *chip,
1794                         unsigned long adr, int len, void *thunk)
1795{
1796        struct cfi_private *cfi = map->fldrv_priv;
1797        int ret;
1798
1799        mutex_lock(&chip->mutex);
1800        ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
1801        if (ret)
1802                goto out_unlock;
1803        chip->state = FL_LOCKING;
1804
1805        DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): LOCK 0x%08lx len %d\n",
1806              __func__, adr, len);
1807
1808        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
1809                         cfi->device_type, NULL);
1810        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
1811                         cfi->device_type, NULL);
1812        cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi,
1813                         cfi->device_type, NULL);
1814        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
1815                         cfi->device_type, NULL);
1816        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
1817                         cfi->device_type, NULL);
1818        map_write(map, CMD(0x40), chip->start + adr);
1819
1820        chip->state = FL_READY;
1821        put_chip(map, chip, adr + chip->start);
1822        ret = 0;
1823
1824out_unlock:
1825        mutex_unlock(&chip->mutex);
1826        return ret;
1827}
1828
1829static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
1830                           unsigned long adr, int len, void *thunk)
1831{
1832        struct cfi_private *cfi = map->fldrv_priv;
1833        int ret;
1834
1835        mutex_lock(&chip->mutex);
1836        ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
1837        if (ret)
1838                goto out_unlock;
1839        chip->state = FL_UNLOCKING;
1840
1841        DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): UNLOCK 0x%08lx len %d\n",
1842              __func__, adr, len);
1843
1844        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
1845                         cfi->device_type, NULL);
1846        map_write(map, CMD(0x70), chip->start + adr);
1847
1848        chip->state = FL_READY;
1849        put_chip(map, chip, adr + chip->start);
1850        ret = 0;
1851
1852out_unlock:
1853        mutex_unlock(&chip->mutex);
1854        return ret;
1855}
1856
1857static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1858{
1859        return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
1860}
1861
1862static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1863{
1864        return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
1865}
1866
1867
1868static void cfi_amdstd_sync (struct mtd_info *mtd)
1869{
1870        struct map_info *map = mtd->priv;
1871        struct cfi_private *cfi = map->fldrv_priv;
1872        int i;
1873        struct flchip *chip;
1874        int ret = 0;
1875        DECLARE_WAITQUEUE(wait, current);
1876
1877        for (i = 0; !ret && i < cfi->numchips; i++) {
1878                chip = &cfi->chips[i];
1879
1880        retry:
1881                mutex_lock(&chip->mutex);
1882
1883                switch (chip->state) {
1884                case FL_READY:
1885                case FL_STATUS:
1886                case FL_CFI_QUERY:
1887                case FL_JEDEC_QUERY:
1888                        chip->oldstate = chip->state;
1889                        chip->state = FL_SYNCING;
1890                        /* No need to wake_up() on this state change -
1891                         * as the whole point is that nobody can do anything
1892                         * with the chip now anyway.
1893                         */
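                            /* fall through */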
1894                case FL_SYNCING:
1895                        mutex_unlock(&chip->mutex);
1896                        break;
1897
1898                default:
1899                        /* Not an idle state */
1900                        set_current_state(TASK_UNINTERRUPTIBLE);
1901                        add_wait_queue(&chip->wq, &wait);
1902
1903                        mutex_unlock(&chip->mutex);
1904
1905                        schedule();
1906
1907                        remove_wait_queue(&chip->wq, &wait);
1908
1909                        goto retry;
1910                }
1911        }
1912
1913        /* Unlock the chips again */
1914
1915        for (i--; i >= 0; i--) {
1916                chip = &cfi->chips[i];
1917
1918                mutex_lock(&chip->mutex);
1919
1920                if (chip->state == FL_SYNCING) {
1921                        chip->state = chip->oldstate;
1922                        wake_up(&chip->wq);
1923                }
1924                mutex_unlock(&chip->mutex);
1925        }
1926}
1927
1928
1929static int cfi_amdstd_suspend(struct mtd_info *mtd)
1930{
1931        struct map_info *map = mtd->priv;
1932        struct cfi_private *cfi = map->fldrv_priv;
1933        int i;
1934        struct flchip *chip;
1935        int ret = 0;
1936
1937        for (i = 0; !ret && i < cfi->numchips; i++) {
1938                chip = &cfi->chips[i];
1939
1940                mutex_lock(&chip->mutex);
1941
1942                switch (chip->state) {
1943                case FL_READY:
1944                case FL_STATUS:
1945                case FL_CFI_QUERY:
1946                case FL_JEDEC_QUERY:
1947                        chip->oldstate = chip->state;
1948                        chip->state = FL_PM_SUSPENDED;
1949                        /* No need to wake_up() on this state change -
1950                         * as the whole point is that nobody can do anything
1951                         * with the chip now anyway.
1952                         */
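                            /* fall through */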
1953                case FL_PM_SUSPENDED:
1954                        break;
1955
1956                default:
1957                        ret = -EAGAIN;
1958                        break;
1959                }
1960                mutex_unlock(&chip->mutex);
1961        }
1962
1963        /* Unlock the chips again */
1964
1965        if (ret) {
1966                for (i--; i >= 0; i--) {
1967                        chip = &cfi->chips[i];
1968
1969                        mutex_lock(&chip->mutex);
1970
1971                        if (chip->state == FL_PM_SUSPENDED) {
1972                                chip->state = chip->oldstate;
1973                                wake_up(&chip->wq);
1974                        }
1975                        mutex_unlock(&chip->mutex);
1976                }
1977        }
1978
1979        return ret;
1980}
1981
1982
1983static void cfi_amdstd_resume(struct mtd_info *mtd)
1984{
1985        struct map_info *map = mtd->priv;
1986        struct cfi_private *cfi = map->fldrv_priv;
1987        int i;
1988        struct flchip *chip;
1989
1990        for (i = 0; i < cfi->numchips; i++) {
1991
1992                chip = &cfi->chips[i];
1993
1994                mutex_lock(&chip->mutex);
1995
1996                if (chip->state == FL_PM_SUSPENDED) {
1997                        chip->state = FL_READY;
1998                        map_write(map, CMD(0xF0), chip->start);
1999                        wake_up(&chip->wq);
2000                }
2001                else
2002                        printk(KERN_ERR "MTD: chip not in PM_SUSPENDED state upon resume()\n");
2003
2004                mutex_unlock(&chip->mutex);
2005        }
2006}
2007
2008
2009/*
2010 * Ensure that the flash device is put back into read array mode before
2011 * unloading the driver or rebooting.  On some systems, rebooting while
2012 * the flash is in query/program/erase mode will prevent the CPU from
2013 * fetching the bootloader code, requiring a hard reset or power cycle.
2014 */
2015static int cfi_amdstd_reset(struct mtd_info *mtd)
2016{
2017        struct map_info *map = mtd->priv;
2018        struct cfi_private *cfi = map->fldrv_priv;
2019        int i, ret;
2020        struct flchip *chip;
2021
2022        for (i = 0; i < cfi->numchips; i++) {
2023
2024                chip = &cfi->chips[i];
2025
2026                mutex_lock(&chip->mutex);
2027
2028                ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
2029                if (!ret) {
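                            /* 0xF0 (read/reset) puts the chip back into read array mode */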
2030                        map_write(map, CMD(0xF0), chip->start);
2031                        chip->state = FL_SHUTDOWN;
2032                        put_chip(map, chip, chip->start);
2033                }
2034
2035                mutex_unlock(&chip->mutex);
2036        }
2037
2038        return 0;
2039}
2040
2041
2042static int cfi_amdstd_reboot(struct notifier_block *nb, unsigned long val,
2043                               void *v)
2044{
2045        struct mtd_info *mtd;
2046
2047        mtd = container_of(nb, struct mtd_info, reboot_notifier);
2048        cfi_amdstd_reset(mtd);
2049        return NOTIFY_DONE;
2050}
2051
2052
2053static void cfi_amdstd_destroy(struct mtd_info *mtd)
2054{
2055        struct map_info *map = mtd->priv;
2056        struct cfi_private *cfi = map->fldrv_priv;
2057
2058        cfi_amdstd_reset(mtd);
2059        unregister_reboot_notifier(&mtd->reboot_notifier);
2060        kfree(cfi->cmdset_priv);
2061        kfree(cfi->cfiq);
2062        kfree(cfi);
2063        kfree(mtd->eraseregions);
2064}
2065
2066MODULE_LICENSE("GPL");
2067MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
2068MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");
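    /*
     * Some chips advertise primary command-set IDs 0x0006 or 0x0701 but
     * implement the 0x0002 command set; the aliases below let modprobe
     * resolve those IDs to this driver.
     */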
2069MODULE_ALIAS("cfi_cmdset_0006");
2070MODULE_ALIAS("cfi_cmdset_0701");
2071