linux/drivers/mtd/chips/cfi_cmdset_0002.c
/*
 * Common Flash Interface support:
 *   AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
 *
 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
 * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com>
 *
 * 2_by_8 routines added by Simon Munton
 *
 * 4_by_16 work by Carolyn J. Smith
 *
 * XIP support hooks by Vitaly Wool (based on code for Intel flash
 * by Nicolas Pitre)
 *
 * 25/09/2008 Christopher Moore: TopBottom fixup for many Macronix with CFI V1.0
 *
 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
 *
 * This code is GPL
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/xip.h>

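/*
 * AMD_BOOTLOC_BUG enables the fixup_amd_bootblock() workaround below.
 * Setting FORCE_WORD_WRITE to 1 removes the write-buffer fixup from
 * cfi_fixup_table so that all programming uses single-word writes.
 */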
#define AMD_BOOTLOC_BUG
#define FORCE_WORD_WRITE 0

#define MAX_WORD_RETRIES 3

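/* JEDEC device IDs of parts given special handling in the fixup tables. */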
#define SST49LF004B             0x0060
#define SST49LF040B             0x0050
#define SST49LF008A             0x005a
#define AT49BV6416              0x00d6

static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_amdstd_sync (struct mtd_info *);
static int cfi_amdstd_suspend (struct mtd_info *);
static void cfi_amdstd_resume (struct mtd_info *);
static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *);
static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);

static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
                                  size_t *retlen, const u_char *buf);

static void cfi_amdstd_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
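/*
 * fwh_lock.h is included here, rather than with the headers above, since
 * the fixup_use_fwh_lock() it provides appears to depend on the
 * get_chip()/put_chip() prototypes just declared.
 */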
#include "fwh_lock.h"

static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);

static struct mtd_chip_driver cfi_amdstd_chipdrv = {
        .probe          = NULL, /* Not usable directly */
        .destroy        = cfi_amdstd_destroy,
        .name           = "cfi_cmdset_0002",
        .module         = THIS_MODULE
};


/* #define DEBUG_CFI_FEATURES */


#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_amdstd *extp)
{
        const char* erase_suspend[3] = {
                "Not supported", "Read only", "Read/write"
        };
        const char* top_bottom[6] = {
                "No WP", "8x8KiB sectors at top & bottom, no WP",
                "Bottom boot", "Top boot",
                "Uniform, Bottom WP", "Uniform, Top WP"
        };

        printk("  Silicon revision: %d\n", extp->SiliconRevision >> 1);
        printk("  Address sensitive unlock: %s\n",
               (extp->SiliconRevision & 1) ? "Not required" : "Required");

        if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
                printk("  Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
        else
                printk("  Erase Suspend: Unknown value %d\n", extp->EraseSuspend);

        if (extp->BlkProt == 0)
                printk("  Block protection: Not supported\n");
        else
                printk("  Block protection: %d sectors per group\n", extp->BlkProt);


        printk("  Temporary block unprotect: %s\n",
               extp->TmpBlkUnprotect ? "Supported" : "Not supported");
        printk("  Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
        printk("  Number of simultaneous operations: %d\n", extp->SimultaneousOps);
        printk("  Burst mode: %s\n",
               extp->BurstMode ? "Supported" : "Not supported");
        if (extp->PageMode == 0)
                printk("  Page mode: Not supported\n");
        else
                printk("  Page mode: %d word page\n", extp->PageMode << 2);

        printk("  Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
               extp->VppMin >> 4, extp->VppMin & 0xf);
        printk("  Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
               extp->VppMax >> 4, extp->VppMax & 0xf);

        if (extp->TopBottom < ARRAY_SIZE(top_bottom))
                printk("  Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
        else
                printk("  Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
}
#endif

#ifdef AMD_BOOTLOC_BUG
/* Wheee. Bring me the head of someone at AMD. */
static void fixup_amd_bootblock(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
        __u8 major = extp->MajorVersion;
        __u8 minor = extp->MinorVersion;

        if (((major << 8) | minor) < 0x3131) {
                /* CFI version 1.0 => don't trust bootloc */

                pr_debug("%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n",
                        map->name, cfi->mfr, cfi->id);

                /* AFAICS all 29LV400 with a bottom boot block have a device ID
                 * of 0x22BA in 16-bit mode and 0xBA in 8-bit mode.
                 * These were badly detected as they have the 0x80 bit set
                 * so treat them as a special case.
                 */
                if (((cfi->id == 0xBA) || (cfi->id == 0x22BA)) &&

                        /* Macronix added CFI to their 2nd generation
                         * MX29LV400C B/T but AFAICS no other 29LV400 (AMD,
                         * Fujitsu, Spansion, EON, ESI and older Macronix)
                         * has CFI.
                         *
                         * Therefore also check the manufacturer.
                         * This reduces the risk of false detection due to
                         * the 8-bit device ID.
                         */
                        (cfi->mfr == CFI_MFR_MACRONIX)) {
                        pr_debug("%s: Macronix MX29LV400C with bottom boot block"
                                " detected\n", map->name);
                        extp->TopBottom = 2;    /* bottom boot */
                } else
                if (cfi->id & 0x80) {
                        printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
                        extp->TopBottom = 3;    /* top boot */
                } else {
                        extp->TopBottom = 2;    /* bottom boot */
                }

                pr_debug("%s: AMD CFI PRI V%c.%c has no boot block field;"
                        " deduced %s from Device ID\n", map->name, major, minor,
                        extp->TopBottom == 2 ? "bottom" : "top");
        }
}
#endif

static void fixup_use_write_buffers(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        if (cfi->cfiq->BufWriteTimeoutTyp) {
                pr_debug("Using buffer write method\n");
                mtd->_write = cfi_amdstd_write_buffers;
        }
}

/* Atmel chips don't use the same PRI format as AMD chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
        struct cfi_pri_atmel atmel_pri;

        memcpy(&atmel_pri, extp, sizeof(atmel_pri));
        memset((char *)extp + 5, 0, sizeof(*extp) - 5);

        if (atmel_pri.Features & 0x02)
                extp->EraseSuspend = 2;

        /* Some chips got it backwards... */
        if (cfi->id == AT49BV6416) {
                if (atmel_pri.BottomBoot)
                        extp->TopBottom = 3;
                else
                        extp->TopBottom = 2;
        } else {
                if (atmel_pri.BottomBoot)
                        extp->TopBottom = 2;
                else
                        extp->TopBottom = 3;
        }

        /* burst write mode not supported */
        cfi->cfiq->BufWriteTimeoutTyp = 0;
        cfi->cfiq->BufWriteTimeoutMax = 0;
}

static void fixup_use_secsi(struct mtd_info *mtd)
{
        /* Setup for chips with a secsi area */
        mtd->_read_user_prot_reg = cfi_amdstd_secsi_read;
        mtd->_read_fact_prot_reg = cfi_amdstd_secsi_read;
}

static void fixup_use_erase_chip(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        if ((cfi->cfiq->NumEraseRegions == 1) &&
                ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
                mtd->_erase = cfi_amdstd_erase_chip;
        }
}

/*
 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors
 * locked by default.
 */
static void fixup_use_atmel_lock(struct mtd_info *mtd)
{
        mtd->_lock = cfi_atmel_lock;
        mtd->_unlock = cfi_atmel_unlock;
        mtd->flags |= MTD_POWERUP_LOCK;
}

static void fixup_old_sst_eraseregion(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        /*
         * These flashes report two separate eraseblock regions based on the
         * sector_erase-size and block_erase-size, although they both operate on the
         * same memory. This is not allowed according to CFI, so we just pick the
         * sector_erase-size.
         */
        cfi->cfiq->NumEraseRegions = 1;
}

static void fixup_sst39vf(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        fixup_old_sst_eraseregion(mtd);

        cfi->addr_unlock1 = 0x5555;
        cfi->addr_unlock2 = 0x2AAA;
}

static void fixup_sst39vf_rev_b(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        fixup_old_sst_eraseregion(mtd);

        cfi->addr_unlock1 = 0x555;
        cfi->addr_unlock2 = 0x2AA;

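        /* Revision-B parts use 0x50 as the sector-erase command in place
         * of the usual 0x30. */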
        cfi->sector_erase_cmd = CMD(0x50);
}

static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        fixup_sst39vf_rev_b(mtd);

        /*
         * CFI reports 1024 sectors (0x03ff+1) of 64KBytes (0x0100*256) where
         * it should report a size of 8KBytes (0x0020*256).
         */
        cfi->cfiq->EraseRegionInfo[0] = 0x002003ff;
        pr_warning("%s: Bad 38VF640x CFI data; adjusting sector size from 64 to 8KiB\n", mtd->name);
}

static void fixup_s29gl064n_sectors(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
                cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
                pr_warning("%s: Bad S29GL064N CFI data; adjust from 64 to 128 sectors\n", mtd->name);
        }
}

static void fixup_s29gl032n_sectors(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
                cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
                pr_warning("%s: Bad S29GL032N CFI data; adjust from 127 to 63 sectors\n", mtd->name);
        }
}

static void fixup_s29ns512p_sectors(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        /*
         * S29NS512P flash uses more than 8 bits to report the number of
         * sectors, which is not permitted by CFI.
         */
        cfi->cfiq->EraseRegionInfo[0] = 0x020001ff;
        pr_warning("%s: Bad S29NS512P CFI data; adjust to 512 sectors\n", mtd->name);
}

/* Used to fix CFI-Tables of chips without Extended Query Tables */
static struct cfi_fixup cfi_nopri_fixup_table[] = {
        { CFI_MFR_SST, 0x234a, fixup_sst39vf }, /* SST39VF1602 */
        { CFI_MFR_SST, 0x234b, fixup_sst39vf }, /* SST39VF1601 */
        { CFI_MFR_SST, 0x235a, fixup_sst39vf }, /* SST39VF3202 */
        { CFI_MFR_SST, 0x235b, fixup_sst39vf }, /* SST39VF3201 */
        { CFI_MFR_SST, 0x235c, fixup_sst39vf_rev_b }, /* SST39VF3202B */
        { CFI_MFR_SST, 0x235d, fixup_sst39vf_rev_b }, /* SST39VF3201B */
        { CFI_MFR_SST, 0x236c, fixup_sst39vf_rev_b }, /* SST39VF6402B */
        { CFI_MFR_SST, 0x236d, fixup_sst39vf_rev_b }, /* SST39VF6401B */
        { 0, 0, NULL }
};

static struct cfi_fixup cfi_fixup_table[] = {
        { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
#ifdef AMD_BOOTLOC_BUG
        { CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock },
        { CFI_MFR_AMIC, CFI_ID_ANY, fixup_amd_bootblock },
        { CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock },
#endif
        { CFI_MFR_AMD, 0x0050, fixup_use_secsi },
        { CFI_MFR_AMD, 0x0053, fixup_use_secsi },
        { CFI_MFR_AMD, 0x0055, fixup_use_secsi },
        { CFI_MFR_AMD, 0x0056, fixup_use_secsi },
        { CFI_MFR_AMD, 0x005C, fixup_use_secsi },
        { CFI_MFR_AMD, 0x005F, fixup_use_secsi },
        { CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors },
        { CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors },
        { CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors },
        { CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors },
        { CFI_MFR_AMD, 0x3f00, fixup_s29ns512p_sectors },
        { CFI_MFR_SST, 0x536a, fixup_sst38vf640x_sectorsize }, /* SST38VF6402 */
        { CFI_MFR_SST, 0x536b, fixup_sst38vf640x_sectorsize }, /* SST38VF6401 */
        { CFI_MFR_SST, 0x536c, fixup_sst38vf640x_sectorsize }, /* SST38VF6404 */
        { CFI_MFR_SST, 0x536d, fixup_sst38vf640x_sectorsize }, /* SST38VF6403 */
#if !FORCE_WORD_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
#endif
        { 0, 0, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
        { CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock },
        { CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock },
        { CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock },
        { 0, 0, NULL }
};

static struct cfi_fixup fixup_table[] = {
        /* The CFI vendor IDs and the JEDEC vendor IDs appear to be
         * common.  It is likely that the device IDs are as well.  This
         * table picks up all cases where we know that to be so.
         */
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip },
        { CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock },
        { 0, 0, NULL }
};

static void cfi_fixup_major_minor(struct cfi_private *cfi,
                                  struct cfi_pri_amdstd *extp)
{
        if (cfi->mfr == CFI_MFR_SAMSUNG) {
                if ((extp->MajorVersion == '0' && extp->MinorVersion == '0') ||
                    (extp->MajorVersion == '3' && extp->MinorVersion == '3')) {
                        /*
                         * Samsung K8P2815UQB and K8D6x16UxM chips
                         * report major=0 / minor=0.
                         * K8D3x16UxC chips report major=3 / minor=3.
                         */
                        printk(KERN_NOTICE "  Fixing Samsung's Amd/Fujitsu"
                               " Extended Query version to 1.%c\n",
                               extp->MinorVersion);
                        extp->MajorVersion = '1';
                }
        }

        /*
         * SST 38VF640x chips report major=0xFF / minor=0xFF.
         */
        if (cfi->mfr == CFI_MFR_SST && (cfi->id >> 4) == 0x0536) {
                extp->MajorVersion = '1';
                extp->MinorVersion = '0';
        }
}

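/*
 * cfi_cmdset_0002() is the probe entry point: it allocates the mtd_info,
 * reads and fixes up the Amd/Fujitsu primary extension table, and hands
 * the result to cfi_amdstd_setup().
 */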
struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct mtd_info *mtd;
        int i;

        mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
        if (!mtd) {
                printk(KERN_WARNING "Failed to allocate memory for MTD device\n");
                return NULL;
        }
        mtd->priv = map;
        mtd->type = MTD_NORFLASH;

        /* Fill in the default mtd operations */
        mtd->_erase   = cfi_amdstd_erase_varsize;
        mtd->_write   = cfi_amdstd_write_words;
        mtd->_read    = cfi_amdstd_read;
        mtd->_sync    = cfi_amdstd_sync;
        mtd->_suspend = cfi_amdstd_suspend;
        mtd->_resume  = cfi_amdstd_resume;
        mtd->flags   = MTD_CAP_NORFLASH;
        mtd->name    = map->name;
        mtd->writesize = 1;
        mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;

        pr_debug("MTD %s(): write buffer size %d\n", __func__,
                        mtd->writebufsize);

        mtd->_panic_write = cfi_amdstd_panic_write;
        mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;

        if (cfi->cfi_mode==CFI_MODE_CFI){
                unsigned char bootloc;
                __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
                struct cfi_pri_amdstd *extp;

                extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
                if (extp) {
                        /*
                         * It's a real CFI chip, not one for which the probe
                         * routine faked a CFI structure.
                         */
                        cfi_fixup_major_minor(cfi, extp);

                        /*
                         * Valid primary extension versions are: 1.0, 1.1, 1.2, 1.3, 1.4, 1.5
                         * see: http://cs.ozerki.net/zap/pub/axim-x5/docs/cfi_r20.pdf, page 19
                         *      http://www.spansion.com/Support/AppNotes/cfi_100_20011201.pdf
                         *      http://www.spansion.com/Support/Datasheets/s29ws-p_00_a12_e.pdf
                         *      http://www.spansion.com/Support/Datasheets/S29GL_128S_01GS_00_02_e.pdf
                         */
                        if (extp->MajorVersion != '1' ||
                            (extp->MajorVersion == '1' && (extp->MinorVersion < '0' || extp->MinorVersion > '5'))) {
                                printk(KERN_ERR "  Unknown Amd/Fujitsu Extended Query "
                                       "version %c.%c (%#02x/%#02x).\n",
                                       extp->MajorVersion, extp->MinorVersion,
                                       extp->MajorVersion, extp->MinorVersion);
                                kfree(extp);
                                kfree(mtd);
                                return NULL;
                        }

                        printk(KERN_INFO "  Amd/Fujitsu Extended Query version %c.%c.\n",
                               extp->MajorVersion, extp->MinorVersion);

                        /* Install our own private info structure */
                        cfi->cmdset_priv = extp;

                        /* Apply cfi device specific fixups */
                        cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
                        /* Tell the user about it in lots of lovely detail */
                        cfi_tell_features(extp);
#endif

                        bootloc = extp->TopBottom;
                        if ((bootloc < 2) || (bootloc > 5)) {
                                printk(KERN_WARNING "%s: CFI contains unrecognised boot "
                                       "bank location (%d). Assuming bottom.\n",
                                       map->name, bootloc);
                                bootloc = 2;
                        }

                        if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
                                printk(KERN_WARNING "%s: Swapping erase regions for top-boot CFI table.\n", map->name);

                                for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
                                        int j = (cfi->cfiq->NumEraseRegions-1)-i;
                                        __u32 swap;

                                        swap = cfi->cfiq->EraseRegionInfo[i];
                                        cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
                                        cfi->cfiq->EraseRegionInfo[j] = swap;
                                }
                        }
                        /* Set the default CFI lock/unlock addresses */
                        cfi->addr_unlock1 = 0x555;
                        cfi->addr_unlock2 = 0x2aa;
                }
                cfi_fixup(mtd, cfi_nopri_fixup_table);

                if (!cfi->addr_unlock1 || !cfi->addr_unlock2) {
                        kfree(mtd);
                        return NULL;
                }

        } /* CFI mode */
        else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
                /* Apply jedec specific fixups */
                cfi_fixup(mtd, jedec_fixup_table);
        }
        /* Apply generic fixups */
        cfi_fixup(mtd, fixup_table);

        for (i=0; i< cfi->numchips; i++) {
                cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
                cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
                cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
                cfi->chips[i].ref_point_counter = 0;
                init_waitqueue_head(&(cfi->chips[i].wq));
        }

        map->fldrv = &cfi_amdstd_chipdrv;

        return cfi_amdstd_setup(mtd);
}

struct mtd_info *cfi_cmdset_0006(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
struct mtd_info *cfi_cmdset_0701(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0002);
EXPORT_SYMBOL_GPL(cfi_cmdset_0006);
EXPORT_SYMBOL_GPL(cfi_cmdset_0701);

static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
        unsigned long offset = 0;
        int i,j;

        printk(KERN_NOTICE "number of %s chips: %d\n",
               (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);
        /* Select the correct geometry setup */
        mtd->size = devsize * cfi->numchips;

        mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
        mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
                                    * mtd->numeraseregions, GFP_KERNEL);
        if (!mtd->eraseregions) {
                printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n");
                goto setup_err;
        }

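        /*
         * Per the CFI spec, each EraseRegionInfo word encodes the region's
         * block size / 256 in bits 31:16 and (number of blocks - 1) in
         * bits 15:0.
         */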
        for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
                unsigned long ernum, ersize;
                ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
                ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

                if (mtd->erasesize < ersize) {
                        mtd->erasesize = ersize;
                }
                for (j=0; j<cfi->numchips; j++) {
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
                }
                offset += (ersize * ernum);
        }
        if (offset != devsize) {
                /* Argh */
                printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
                goto setup_err;
        }

        __module_get(THIS_MODULE);
        register_reboot_notifier(&mtd->reboot_notifier);
        return mtd;

 setup_err:
        kfree(mtd->eraseregions);
        kfree(mtd);
        kfree(cfi->cmdset_priv);
        kfree(cfi->cfiq);
        return NULL;
}

/*
 * Return true if the chip is ready.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and is indicated by no toggle bits toggling.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_ready(struct map_info *map, unsigned long addr)
{
        map_word d, t;

        d = map_read(map, addr);
        t = map_read(map, addr);

        return map_word_equal(map, d, t);
}

/*
 * Return true if the chip is ready and has the correct value.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and is indicated by no bits toggling.
 *
 * Errors are indicated by toggling bits, or by bits held at the wrong
 * value.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_good(struct map_info *map, unsigned long addr, map_word expected)
{
        map_word oldd, curd;

        oldd = map_read(map, addr);
        curd = map_read(map, addr);

        return  map_word_equal(map, oldd, curd) &&
                map_word_equal(map, curd, expected);
}

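/*
 * get_chip() obtains exclusive use of @chip for an operation of type
 * @mode, suspending an in-progress erase if the chip supports it, or
 * sleeping on chip->wq until the chip becomes available.  Call with
 * chip->mutex held; put_chip() undoes it.
 */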
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        DECLARE_WAITQUEUE(wait, current);
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long timeo;
        struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;

 resettime:
        timeo = jiffies + HZ;
 retry:
        switch (chip->state) {

        case FL_STATUS:
                for (;;) {
                        if (chip_ready(map, adr))
                                break;

                        if (time_after(jiffies, timeo)) {
                                printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
                                return -EIO;
                        }
                        mutex_unlock(&chip->mutex);
                        cfi_udelay(1);
                        mutex_lock(&chip->mutex);
                        /* Someone else might have been playing with it. */
                        goto retry;
                }
                /* fall through */

        case FL_READY:
        case FL_CFI_QUERY:
        case FL_JEDEC_QUERY:
                return 0;

        case FL_ERASING:
                if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
                    !(mode == FL_READY || mode == FL_POINT ||
                    (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
                        goto sleep;

                /* We could check to see if we're trying to access the sector
                 * that is currently being erased. However, no user will try
                 * anything like that so we just wait for the timeout. */

                /* Erase suspend */
                /* It's harmless to issue the Erase-Suspend and Erase-Resume
                 * commands when the erase algorithm isn't in progress. */
                map_write(map, CMD(0xB0), chip->in_progress_block_addr);
                chip->oldstate = FL_ERASING;
                chip->state = FL_ERASE_SUSPENDING;
                chip->erase_suspended = 1;
                for (;;) {
                        if (chip_ready(map, adr))
                                break;

                        if (time_after(jiffies, timeo)) {
                                /* Should have suspended the erase by now.
                                 * Send an Erase-Resume command as either
                                 * there was an error (so leave the erase
                                 * routine to recover from it) or we are
                                 * trying to use the erase-in-progress sector. */
                                put_chip(map, chip, adr);
                                printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
                                return -EIO;
                        }

                        mutex_unlock(&chip->mutex);
                        cfi_udelay(1);
                        mutex_lock(&chip->mutex);
                        /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
                           So we can just loop here. */
                }
                chip->state = FL_READY;
                return 0;

        case FL_XIP_WHILE_ERASING:
                if (mode != FL_READY && mode != FL_POINT &&
                    (!cfip || !(cfip->EraseSuspend&2)))
                        goto sleep;
                chip->oldstate = chip->state;
                chip->state = FL_READY;
                return 0;

        case FL_SHUTDOWN:
                /* The machine is rebooting */
                return -EIO;

        case FL_POINT:
                /* Only if there's no operation suspended... */
                if (mode == FL_READY && chip->oldstate == FL_READY)
                        return 0;
                /* fall through */

        default:
        sleep:
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);
                mutex_unlock(&chip->mutex);
                schedule();
                remove_wait_queue(&chip->wq, &wait);
                mutex_lock(&chip->mutex);
                goto resettime;
        }
}

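/*
 * put_chip() reverses get_chip(): it resumes a suspended erase by
 * re-issuing the sector-erase (resume) command and wakes up anyone
 * waiting on chip->wq.
 */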
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;

        switch(chip->oldstate) {
        case FL_ERASING:
                map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr);
                chip->oldstate = FL_READY;
                chip->state = FL_ERASING;
                break;

        case FL_XIP_WHILE_ERASING:
                chip->state = chip->oldstate;
                chip->oldstate = FL_READY;
                break;

        case FL_READY:
        case FL_STATUS:
                break;
        default:
                printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
        }
        wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within an XIP-disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
                        unsigned long adr)
{
        /* TODO: chips with no XIP use should ignore and return */
        (void) map_read(map, adr); /* ensure mmu mapping is up to date */
        local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
                                unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;

        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xf0), adr);
                chip->state = FL_READY;
        }
        (void) map_read(map, adr);
        xip_iprefetch();
        local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function polls for both the given timeout and pending
 * (but still masked) hardware interrupts.  Whenever there is an interrupt
 * pending then the flash erase operation is suspended, array mode restored
 * and interrupts unmasked.  Task scheduling might also happen at that
 * point.  The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the
 * remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
                                unsigned long adr, int usec)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
        map_word status, OK = CMD(0x80);
        unsigned long suspended, start = xip_currtime();
        flstate_t oldstate;

        do {
                cpu_relax();
                if (xip_irqpending() && extp &&
                    ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
                    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
                        /*
                         * Let's suspend the erase operation when supported.
                         * Note that we currently don't try to suspend
                         * interleaved chips if there is already another
                         * operation suspended (imagine what happens
                         * when one chip was already done with the current
                         * operation while another chip suspended it, then
                         * we resume the whole thing at once).  Yes, it
                         * can happen!
                         */
                        map_write(map, CMD(0xb0), adr);
                        usec -= xip_elapsed_since(start);
                        suspended = xip_currtime();
                        do {
                                if (xip_elapsed_since(suspended) > 100000) {
                                        /*
                                         * The chip doesn't want to suspend
                                         * after waiting for 100 msecs.
                                         * This is a critical error but there
                                         * is not much we can do here.
                                         */
                                        return;
                                }
                                status = map_read(map, adr);
                        } while (!map_word_andequal(map, status, OK, OK));

                        /* Suspend succeeded */
                        oldstate = chip->state;
                        if (!map_word_bitsset(map, status, CMD(0x40)))
                                break;
                        chip->state = FL_XIP_WHILE_ERASING;
                        chip->erase_suspended = 1;
                        map_write(map, CMD(0xf0), adr);
                        (void) map_read(map, adr);
                        xip_iprefetch();
                        local_irq_enable();
                        mutex_unlock(&chip->mutex);
                        xip_iprefetch();
                        cond_resched();

                        /*
                         * We're back.  However someone else might have
                         * decided to go write to the chip if we are in
                         * a suspended erase state.  If so let's wait
                         * until it's done.
                         */
                        mutex_lock(&chip->mutex);
                        while (chip->state != FL_XIP_WHILE_ERASING) {
                                DECLARE_WAITQUEUE(wait, current);
                                set_current_state(TASK_UNINTERRUPTIBLE);
                                add_wait_queue(&chip->wq, &wait);
                                mutex_unlock(&chip->mutex);
                                schedule();
                                remove_wait_queue(&chip->wq, &wait);
                                mutex_lock(&chip->mutex);
                        }
                        /* Disallow XIP again */
                        local_irq_disable();

                        /* Resume the write or erase operation */
                        map_write(map, cfi->sector_erase_cmd, adr);
                        chip->state = oldstate;
                        start = xip_currtime();
                } else if (usec >= 1000000/HZ) {
                        /*
                         * Try to save on CPU power when waiting delay
                         * is at least a system timer tick period.
                         * No need to be extremely accurate here.
                         */
                        xip_cpu_idle();
                }
                status = map_read(map, adr);
        } while (!map_word_andequal(map, status, OK, OK)
                 && xip_elapsed_since(start) < usec);
}

#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVALIDATE_CACHE_UDELAY.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
        INVALIDATE_CACHED_RANGE(map, from, size)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
        UDELAY(map, chip, adr, usec)

/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit.  For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with the
 * same chip state as it was entered with.  This is why there is no need to
 * worry about the add_wait_queue() or schedule() calls from within a couple
 * of xip_disable()'d areas of code, like in do_erase_oneblock for example.
 * The queueing and scheduling are always happening within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*), i.e. while the flash
 * is in array mode, therefore never executing many cases therein and not
 * causing any problem with XIP.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)

#define UDELAY(map, chip, adr, usec)  \
do {  \
        mutex_unlock(&chip->mutex);  \
        cfi_udelay(usec);  \
        mutex_lock(&chip->mutex);  \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
do {  \
        mutex_unlock(&chip->mutex);  \
        INVALIDATE_CACHED_RANGE(map, adr, len);  \
        cfi_udelay(usec);  \
        mutex_lock(&chip->mutex);  \
} while (0)

#endif

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
        unsigned long cmd_addr;
        struct cfi_private *cfi = map->fldrv_priv;
        int ret;

        adr += chip->start;

        /* Ensure cmd read/writes are aligned. */
        cmd_addr = adr & ~(map_bankwidth(map)-1);

        mutex_lock(&chip->mutex);
        ret = get_chip(map, chip, cmd_addr, FL_READY);
        if (ret) {
                mutex_unlock(&chip->mutex);
                return ret;
        }

        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xf0), cmd_addr);
                chip->state = FL_READY;
        }

        map_copy_from(map, buf, adr, len);

        put_chip(map, chip, cmd_addr);

        mutex_unlock(&chip->mutex);
        return 0;
}

static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long ofs;
        int chipnum;
        int ret = 0;

        /* ofs: offset within the first chip that the first read should start */
        chipnum = (from >> cfi->chipshift);
        ofs = from - (chipnum << cfi->chipshift);

        while (len) {
                unsigned long thislen;

                if (chipnum >= cfi->numchips)
                        break;

                if ((len + ofs -1) >> cfi->chipshift)
                        thislen = (1<<cfi->chipshift) - ofs;
                else
                        thislen = len;

                ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
                if (ret)
                        break;

                *retlen += thislen;
                len -= thislen;
                buf += thislen;

                ofs = 0;
                chipnum++;
        }
        return ret;
}

static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
        DECLARE_WAITQUEUE(wait, current);
        unsigned long timeo = jiffies + HZ;
        struct cfi_private *cfi = map->fldrv_priv;

 retry:
        mutex_lock(&chip->mutex);

        if (chip->state != FL_READY){
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);

                mutex_unlock(&chip->mutex);

                schedule();
                remove_wait_queue(&chip->wq, &wait);
                timeo = jiffies + HZ;

                goto retry;
        }

        adr += chip->start;

        chip->state = FL_READY;

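        /* Enter the Secured Silicon (SecSi) region: two unlock cycles
         * followed by command 0x88.  The region stays mapped in until the
         * exit sequence (0x90, then 0x00) issued after the copy below. */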
        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

        map_copy_from(map, buf, adr, len);

        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

        wake_up(&chip->wq);
        mutex_unlock(&chip->mutex);

        return 0;
}

static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long ofs;
        int chipnum;
        int ret = 0;

        /* ofs: offset within the first chip that the first read should start */
        /* 8 secsi bytes per chip */
        chipnum = from >> 3;
        ofs = from & 7;

        while (len) {
                unsigned long thislen;

                if (chipnum >= cfi->numchips)
                        break;

                if ((len + ofs -1) >> 3)
                        thislen = (1<<3) - ofs;
                else
                        thislen = len;

                ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
                if (ret)
                        break;

                *retlen += thislen;
                len -= thislen;
                buf += thislen;

                ofs = 0;
                chipnum++;
        }
        return ret;
}

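/*
 * Program one bus-width word at @adr.  Completion is polled with
 * chip_ready()/chip_good(); on failure the chip is reset and the write
 * is retried up to MAX_WORD_RETRIES times before returning -EIO.
 */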
static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum)
{
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long timeo = jiffies + HZ;
        /*
         * We use a 1ms + 1 jiffies generic timeout for writes (most devices
         * have a max write time of a few hundred usecs). However, we should
         * use the maximum timeout value given by the chip at probe time
         * instead.  Unfortunately, struct flchip does not have a field for
         * maximum timeout, only for typical, which can be far too short
         * depending on the conditions.  The ' + 1' is to avoid having a
         * timeout of 0 jiffies if HZ is smaller than 1000.
         */
        unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
        int ret = 0;
        map_word oldd;
        int retry_cnt = 0;

        adr += chip->start;

        mutex_lock(&chip->mutex);
        ret = get_chip(map, chip, adr, FL_WRITING);
        if (ret) {
                mutex_unlock(&chip->mutex);
                return ret;
        }

        pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
               __func__, adr, datum.x[0]);

        /*
         * Check for a NOP for the case when the datum to write is already
         * present - it saves time and works around buggy chips that corrupt
         * data at other locations when 0xff is written to a location that
         * already contains 0xff.
         */
        oldd = map_read(map, adr);
        if (map_word_equal(map, oldd, datum)) {
                pr_debug("MTD %s(): NOP\n",
                       __func__);
                goto op_done;
        }

        XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
        ENABLE_VPP(map);
        xip_disable(map, chip, adr);
 retry:
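        /*
         * Standard AMD single-word program sequence: two unlock cycles
         * (0xAA to addr_unlock1, 0x55 to addr_unlock2) followed by the
         * Program command (0xA0) and the data write itself.
         */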
        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
        map_write(map, datum, adr);
        chip->state = FL_WRITING;

        INVALIDATE_CACHE_UDELAY(map, chip,
                                adr, map_bankwidth(map),
                                chip->word_write_time);

        /* See comment above for timeout value. */
        timeo = jiffies + uWriteTimeout;
        for (;;) {
                if (chip->state != FL_WRITING) {
                        /* Someone's suspended the write. Sleep */
                        DECLARE_WAITQUEUE(wait, current);

                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        mutex_unlock(&chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        timeo = jiffies + (HZ / 2); /* FIXME */
                        mutex_lock(&chip->mutex);
                        continue;
                }

                if (time_after(jiffies, timeo) && !chip_ready(map, adr)){
                        xip_enable(map, chip, adr);
                        printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
                        xip_disable(map, chip, adr);
                        break;
                }

                if (chip_ready(map, adr))
                        break;

                /* Latency issues. Drop the lock, wait a while and retry */
                UDELAY(map, chip, adr, 1);
        }
        /* Did we succeed? */
        if (!chip_good(map, adr, datum)) {
                /* reset on all failures. */
                map_write(map, CMD(0xF0), chip->start);
                /* FIXME - should have reset delay before continuing */

                if (++retry_cnt <= MAX_WORD_RETRIES)
                        goto retry;

                ret = -EIO;
        }
        xip_enable(map, chip, adr);
 op_done:
        chip->state = FL_READY;
        DISABLE_VPP(map);
        put_chip(map, chip, adr);
        mutex_unlock(&chip->mutex);

        return ret;
}

static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
                                  size_t *retlen, const u_char *buf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        int ret = 0;
        int chipnum;
        unsigned long ofs, chipstart;
        DECLARE_WAITQUEUE(wait, current);

        chipnum = to >> cfi->chipshift;
        ofs = to - (chipnum << cfi->chipshift);
        chipstart = cfi->chips[chipnum].start;

        /* If it's not bus-aligned, do the first byte write */
        if (ofs & (map_bankwidth(map)-1)) {
                unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
                int i = ofs - bus_ofs;
                int n = 0;
                map_word tmp_buf;

 retry:
                mutex_lock(&cfi->chips[chipnum].mutex);

                if (cfi->chips[chipnum].state != FL_READY) {
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&cfi->chips[chipnum].wq, &wait);

                        mutex_unlock(&cfi->chips[chipnum].mutex);

                        schedule();
                        remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
                        goto retry;
                }

                /* Load 'tmp_buf' with old contents of flash */
                tmp_buf = map_read(map, bus_ofs+chipstart);

                mutex_unlock(&cfi->chips[chipnum].mutex);

                /* Number of bytes to copy from buffer */
                n = min_t(int, len, map_bankwidth(map)-i);

                tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);

                ret = do_write_oneword(map, &cfi->chips[chipnum],
                                       bus_ofs, tmp_buf);
                if (ret)
                        return ret;

                ofs += n;
                buf += n;
                (*retlen) += n;
                len -= n;

                if (ofs >> cfi->chipshift) {
                        chipnum++;
                        ofs = 0;
                        if (chipnum == cfi->numchips)
                                return 0;
                }
        }

        /* We are now aligned, write as much as possible */
        while(len >= map_bankwidth(map)) {
                map_word datum;

                datum = map_word_load(map, buf);

                ret = do_write_oneword(map, &cfi->chips[chipnum],
                                       ofs, datum);
                if (ret)
                        return ret;

                ofs += map_bankwidth(map);
                buf += map_bankwidth(map);
                (*retlen) += map_bankwidth(map);
                len -= map_bankwidth(map);

                if (ofs >> cfi->chipshift) {
                        chipnum++;
                        ofs = 0;
                        if (chipnum == cfi->numchips)
                                return 0;
                        chipstart = cfi->chips[chipnum].start;
                }
        }

        /* Write the trailing bytes if any */
        if (len & (map_bankwidth(map)-1)) {
                map_word tmp_buf;

 retry1:
                mutex_lock(&cfi->chips[chipnum].mutex);

                if (cfi->chips[chipnum].state != FL_READY) {
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&cfi->chips[chipnum].wq, &wait);

                        mutex_unlock(&cfi->chips[chipnum].mutex);

                        schedule();
                        remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
                        goto retry1;
                }

                tmp_buf = map_read(map, ofs + chipstart);

                mutex_unlock(&cfi->chips[chipnum].mutex);

                tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);

                ret = do_write_oneword(map, &cfi->chips[chipnum],
                                ofs, tmp_buf);
                if (ret)
                        return ret;

                (*retlen) += len;
        }

        return 0;
}

1375
1376/*
1377 * FIXME: interleaved mode not tested, and probably not supported!
1378 */
1379static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1380                                    unsigned long adr, const u_char *buf,
1381                                    int len)
1382{
1383        struct cfi_private *cfi = map->fldrv_priv;
1384        unsigned long timeo = jiffies + HZ;
1385        /* see comments in do_write_oneword() regarding uWriteTimeout. */
1386        unsigned long uWriteTimeout = (HZ / 1000) + 1;
1387        int ret = -EIO;
1388        unsigned long cmd_adr;
1389        int z, words;
1390        map_word datum;
1391
1392        adr += chip->start;
1393        cmd_adr = adr;
1394
1395        mutex_lock(&chip->mutex);
1396        ret = get_chip(map, chip, adr, FL_WRITING);
1397        if (ret) {
1398                mutex_unlock(&chip->mutex);
1399                return ret;
1400        }
1401
1402        datum = map_word_load(map, buf);
1403
1404        pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
1405               __func__, adr, datum.x[0] );
1406
1407        XIP_INVAL_CACHED_RANGE(map, adr, len);
1408        ENABLE_VPP(map);
1409        xip_disable(map, chip, cmd_adr);
1410
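            /*
             * Standard cmdset-0002 write-to-buffer sequence: two unlock
             * cycles, the Write to Buffer command (0x25) at the target
             * sector, the word count minus one, the data words themselves,
             * and finally the Program Buffer to Flash confirm (0x29).
             */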
1411        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1412        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1413
1414        /* Write Buffer Load */
1415        map_write(map, CMD(0x25), cmd_adr);
1416
1417        chip->state = FL_WRITING_TO_BUFFER;
1418
1419        /* Write length of data to come */
1420        words = len / map_bankwidth(map);
1421        map_write(map, CMD(words - 1), cmd_adr);
1422        /* Write data */
1423        z = 0;
1424        while(z < words * map_bankwidth(map)) {
1425                datum = map_word_load(map, buf);
1426                map_write(map, datum, adr + z);
1427
1428                z += map_bankwidth(map);
1429                buf += map_bankwidth(map);
1430        }
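            /*
             * Leave 'adr' pointing at the last word loaded into the
             * buffer: DQ7/toggle status polling is only valid at an
             * address that is actually being programmed.
             */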
1431        z -= map_bankwidth(map);
1432
1433        adr += z;
1434
1435        /* Write Buffer Program Confirm: GO GO GO */
1436        map_write(map, CMD(0x29), cmd_adr);
1437        chip->state = FL_WRITING;
1438
1439        INVALIDATE_CACHE_UDELAY(map, chip,
1440                                adr, map_bankwidth(map),
1441                                chip->word_write_time);
1442
1443        timeo = jiffies + uWriteTimeout;
1444
1445        for (;;) {
1446                if (chip->state != FL_WRITING) {
1447                        /* Someone's suspended the write. Sleep */
1448                        DECLARE_WAITQUEUE(wait, current);
1449
1450                        set_current_state(TASK_UNINTERRUPTIBLE);
1451                        add_wait_queue(&chip->wq, &wait);
1452                        mutex_unlock(&chip->mutex);
1453                        schedule();
1454                        remove_wait_queue(&chip->wq, &wait);
1455                        timeo = jiffies + (HZ / 2); /* FIXME */
1456                        mutex_lock(&chip->mutex);
1457                        continue;
1458                }
1459
                    /*
                     * chip_ready() only tests that two successive reads match,
                     * which also holds once a failed write has aborted back to
                     * read mode; use chip_good() so the last word is verified
                     * against the datum that was actually written.
                     */
1460                if (time_after(jiffies, timeo) && !chip_good(map, adr, datum))
1461                        break;
1462
1463                if (chip_good(map, adr, datum)) {
1464                        xip_enable(map, chip, adr);
1465                        goto op_done;
1466                }
1467
1468                /* Latency issues. Drop the lock, wait a while and retry */
1469                UDELAY(map, chip, adr, 1);
1470        }
1471
1472        /* reset on all failures. */
1473        map_write(map, CMD(0xF0), chip->start);
1474        xip_enable(map, chip, adr);
1475        /* FIXME - should have reset delay before continuing */
1476
1477        printk(KERN_WARNING "MTD %s(): software timeout\n",
1478               __func__ );
1479
1480        ret = -EIO;
1481 op_done:
1482        chip->state = FL_READY;
1483        DISABLE_VPP(map);
1484        put_chip(map, chip, adr);
1485        mutex_unlock(&chip->mutex);
1486
1487        return ret;
1488}
1489
1490
1491static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
1492                                    size_t *retlen, const u_char *buf)
1493{
1494        struct map_info *map = mtd->priv;
1495        struct cfi_private *cfi = map->fldrv_priv;
1496        int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize; /* max bytes per buffer program */
1497        int ret = 0;
1498        int chipnum;
1499        unsigned long ofs;
1500
1501        chipnum = to >> cfi->chipshift;
1502        ofs = to  - (chipnum << cfi->chipshift);
1503
1504        /* If it's not bus-aligned, do the first word write */
1505        if (ofs & (map_bankwidth(map)-1)) {
1506                size_t local_len = (-ofs)&(map_bankwidth(map)-1);
1507                if (local_len > len)
1508                        local_len = len;
1509                ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
1510                                             local_len, retlen, buf);
1511                if (ret)
1512                        return ret;
1513                ofs += local_len;
1514                buf += local_len;
1515                len -= local_len;
1516
1517                if (ofs >> cfi->chipshift) {
1518                        chipnum ++;
1519                        ofs = 0;
1520                        if (chipnum == cfi->numchips)
1521                                return 0;
1522                }
1523        }
1524
1525        /* Write buffer is worth it only if more than one word to write... */
1526        while (len >= map_bankwidth(map) * 2) {
1527                /* We must not cross write block boundaries */
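                    /*
                     * Example: with a 32-byte write buffer, a write starting
                     * at ofs = 0x1c may cover at most 32 - 0x1c = 4 bytes in
                     * this pass; the following passes start buffer-aligned.
                     */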
1528                int size = wbufsize - (ofs & (wbufsize-1));
1529
1530                if (size > len)
1531                        size = len;
1532                if (size % map_bankwidth(map))
1533                        size -= size % map_bankwidth(map);
1534
1535                ret = do_write_buffer(map, &cfi->chips[chipnum],
1536                                      ofs, buf, size);
1537                if (ret)
1538                        return ret;
1539
1540                ofs += size;
1541                buf += size;
1542                (*retlen) += size;
1543                len -= size;
1544
1545                if (ofs >> cfi->chipshift) {
1546                        chipnum ++;
1547                        ofs = 0;
1548                        if (chipnum == cfi->numchips)
1549                                return 0;
1550                }
1551        }
1552
1553        if (len) {
1554                size_t retlen_dregs = 0;
1555
1556                ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
1557                                             len, &retlen_dregs, buf);
1558
1559                *retlen += retlen_dregs;
1560                return ret;
1561        }
1562
1563        return 0;
1564}
1565
1566/*
1567 * Wait for the flash chip to become ready to write data
1568 *
1569 * This is only called during the panic_write() path. When panic_write()
1570 * is called, the kernel is in the process of a panic, and will soon be
1571 * dead. Therefore we don't take any locks, and attempt to get access
1572 * to the chip as soon as possible.
1573 */
1574static int cfi_amdstd_panic_wait(struct map_info *map, struct flchip *chip,
1575                                 unsigned long adr)
1576{
1577        struct cfi_private *cfi = map->fldrv_priv;
1578        int retries = 10;
1579        int i;
1580
1581        /*
1582         * If the driver thinks the chip is idle, and no toggle bits
1583         * are changing, then the chip is actually idle for sure.
1584         */
1585        if (chip->state == FL_READY && chip_ready(map, adr))
1586                return 0;
1587
1588        /*
1589         * Try several times to reset the chip and then wait for it
1590         * to become idle. The upper limit of a few milliseconds of
1591         * delay isn't a big problem: the kernel is dying anyway. It
1592         * is more important to save the messages.
1593         */
1594        while (retries > 0) {
1595                const unsigned long timeo = (HZ / 1000) + 1;
1596
1597                /* send the reset command */
1598                map_write(map, CMD(0xF0), chip->start);
1599
1600                /* wait for the chip to become ready */
1601                for (i = 0; i < jiffies_to_usecs(timeo); i++) {
1602                        if (chip_ready(map, adr))
1603                                return 0;
1604
1605                        udelay(1);
1606                }

                    retries--;
1607        }
1608
1609        /* the chip never became ready */
1610        return -EBUSY;
1611}
1612
1613/*
1614 * Write out one word of data to a single flash chip during a kernel panic
1615 *
1616 * This is only called during the panic_write() path. When panic_write()
1617 * is called, the kernel is in the process of a panic, and will soon be
1618 * dead. Therefore we don't take any locks, and attempt to get access
1619 * to the chip as soon as possible.
1620 *
1621 * The implementation of this routine is intentionally similar to
1622 * do_write_oneword(), in order to ease code maintenance.
1623 */
1624static int do_panic_write_oneword(struct map_info *map, struct flchip *chip,
1625                                  unsigned long adr, map_word datum)
1626{
1627        const unsigned long uWriteTimeout = (HZ / 1000) + 1;
1628        struct cfi_private *cfi = map->fldrv_priv;
1629        int retry_cnt = 0;
1630        map_word oldd;
1631        int ret = 0;
1632        int i;
1633
1634        adr += chip->start;
1635
1636        ret = cfi_amdstd_panic_wait(map, chip, adr);
1637        if (ret)
1638                return ret;
1639
1640        pr_debug("MTD %s(): PANIC WRITE 0x%.8lx(0x%.8lx)\n",
1641                        __func__, adr, datum.x[0]);
1642
1643        /*
1644         * Check for a NOP for the case when the datum to write is already
1645         * present - it saves time and works around buggy chips that corrupt
1646         * data at other locations when 0xff is written to a location that
1647         * already contains 0xff.
1648         */
1649        oldd = map_read(map, adr);
1650        if (map_word_equal(map, oldd, datum)) {
1651                pr_debug("MTD %s(): NOP\n", __func__);
1652                goto op_done;
1653        }
1654
1655        ENABLE_VPP(map);
1656
1657retry:
1658        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1659        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1660        cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1661        map_write(map, datum, adr);
1662
1663        for (i = 0; i < jiffies_to_usecs(uWriteTimeout); i++) {
1664                if (chip_ready(map, adr))
1665                        break;
1666
1667                udelay(1);
1668        }
1669
1670        if (!chip_good(map, adr, datum)) {
1671                /* reset on all failures. */
1672                map_write(map, CMD(0xF0), chip->start);
1673                /* FIXME - should have reset delay before continuing */
1674
1675                if (++retry_cnt <= MAX_WORD_RETRIES)
1676                        goto retry;
1677
1678                ret = -EIO;
1679        }
1680
1681op_done:
1682        DISABLE_VPP(map);
1683        return ret;
1684}
1685
1686/*
1687 * Write out some data during a kernel panic
1688 *
1689 * This is used by the mtdoops driver to save the dying messages from a
1690 * kernel which has panic'd.
1691 *
1692 * This routine ignores all of the locking used throughout the rest of the
1693 * driver, in order to ensure that the data gets written out no matter what
1694 * state this driver (and the flash chip itself) was in when the kernel crashed.
1695 *
1696 * The implementation of this routine is intentionally similar to
1697 * cfi_amdstd_write_words(), in order to ease code maintenance.
1698 */
1699static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
1700                                  size_t *retlen, const u_char *buf)
1701{
1702        struct map_info *map = mtd->priv;
1703        struct cfi_private *cfi = map->fldrv_priv;
1704        unsigned long ofs, chipstart;
1705        int ret = 0;
1706        int chipnum;
1707
1708        chipnum = to >> cfi->chipshift;
1709        ofs = to - (chipnum << cfi->chipshift);
1710        chipstart = cfi->chips[chipnum].start;
1711
1712        /* If it's not bus-aligned, read-modify-write the first partial word */
1713        if (ofs & (map_bankwidth(map) - 1)) {
1714                unsigned long bus_ofs = ofs & ~(map_bankwidth(map) - 1);
1715                int i = ofs - bus_ofs;
1716                int n = 0;
1717                map_word tmp_buf;
1718
1719                ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], bus_ofs);
1720                if (ret)
1721                        return ret;
1722
1723                /* Load 'tmp_buf' with old contents of flash */
1724                tmp_buf = map_read(map, bus_ofs + chipstart);
1725
1726                /* Number of bytes to copy from buffer */
1727                n = min_t(int, len, map_bankwidth(map) - i);
1728
1729                tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
1730
1731                ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
1732                                             bus_ofs, tmp_buf);
1733                if (ret)
1734                        return ret;
1735
1736                ofs += n;
1737                buf += n;
1738                (*retlen) += n;
1739                len -= n;
1740
1741                if (ofs >> cfi->chipshift) {
1742                        chipnum++;
1743                        ofs = 0;
1744                        if (chipnum == cfi->numchips)
1745                                return 0;
1746                }
1747        }
1748
1749        /* We are now aligned, write as much as possible */
1750        while (len >= map_bankwidth(map)) {
1751                map_word datum;
1752
1753                datum = map_word_load(map, buf);
1754
1755                ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
1756                                             ofs, datum);
1757                if (ret)
1758                        return ret;
1759
1760                ofs += map_bankwidth(map);
1761                buf += map_bankwidth(map);
1762                (*retlen) += map_bankwidth(map);
1763                len -= map_bankwidth(map);
1764
1765                if (ofs >> cfi->chipshift) {
1766                        chipnum++;
1767                        ofs = 0;
1768                        if (chipnum == cfi->numchips)
1769                                return 0;
1770
1771                        chipstart = cfi->chips[chipnum].start;
1772                }
1773        }
1774
1775        /* Write the trailing bytes if any */
1776        if (len & (map_bankwidth(map) - 1)) {
1777                map_word tmp_buf;
1778
1779                ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], ofs);
1780                if (ret)
1781                        return ret;
1782
1783                tmp_buf = map_read(map, ofs + chipstart);
1784
1785                tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
1786
1787                ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
1788                                             ofs, tmp_buf);
1789                if (ret)
1790                        return ret;
1791
1792                (*retlen) += len;
1793        }
1794
1795        return 0;
1796}
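
    /*
     * Usage sketch (not part of this driver): mtdoops-style clients reach
     * the routine above through the MTD core, roughly:
     *
     *         size_t retlen;
     *         int err = mtd_panic_write(mtd, ofs, len, &retlen, buf);
     *         if (err || retlen != len)
     *                 pr_err("panic write failed\n");
     *
     * (On kernels without the mtd_*() wrappers this is a direct
     * mtd->panic_write() call.)  Nothing on that path may take locks
     * or sleep.
     */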
1797
1798
1799/*
1800 * Handle devices with one erase region that only implement
1801 * the chip erase command.
1802 */
1803static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
1804{
1805        struct cfi_private *cfi = map->fldrv_priv;
1806        unsigned long timeo = jiffies + HZ;
1807        unsigned long int adr;
1808        DECLARE_WAITQUEUE(wait, current);
1809        int ret = 0;
1810
1811        adr = cfi->addr_unlock1;
1812
1813        mutex_lock(&chip->mutex);
1814        ret = get_chip(map, chip, adr, FL_WRITING);
1815        if (ret) {
1816                mutex_unlock(&chip->mutex);
1817                return ret;
1818        }
1819
1820        pr_debug("MTD %s(): ERASE 0x%.8lx\n",
1821               __func__, chip->start );
1822
1823        XIP_INVAL_CACHED_RANGE(map, adr, map->size);
1824        ENABLE_VPP(map);
1825        xip_disable(map, chip, adr);
1826
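            /*
             * Full chip erase: two unlock cycles, erase setup (0x80), two
             * more unlock cycles, then the Chip Erase command (0x10):
             * the standard six-cycle cmdset-0002 sequence.
             */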
1827        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1828        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1829        cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1830        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1831        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1832        cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1833
1834        chip->state = FL_ERASING;
1835        chip->erase_suspended = 0;
1836        chip->in_progress_block_addr = adr;
1837
1838        INVALIDATE_CACHE_UDELAY(map, chip,
1839                                adr, map->size,
1840                                chip->erase_time*500);
1841
1842        timeo = jiffies + (HZ*20);
1843
1844        for (;;) {
1845                if (chip->state != FL_ERASING) {
1846                        /* Someone's suspended the erase. Sleep */
1847                        set_current_state(TASK_UNINTERRUPTIBLE);
1848                        add_wait_queue(&chip->wq, &wait);
1849                        mutex_unlock(&chip->mutex);
1850                        schedule();
1851                        remove_wait_queue(&chip->wq, &wait);
1852                        mutex_lock(&chip->mutex);
1853                        continue;
1854                }
1855                if (chip->erase_suspended) {
1856                        /* This erase was suspended and resumed.
1857                           Adjust the timeout */
1858                        timeo = jiffies + (HZ*20); /* FIXME */
1859                        chip->erase_suspended = 0;
1860                }
1861
1862                if (chip_ready(map, adr))
1863                        break;
1864
1865                if (time_after(jiffies, timeo)) {
1866                        printk(KERN_WARNING "MTD %s(): software timeout\n",
1867                                __func__ );
1868                        break;
1869                }
1870
1871                /* Latency issues. Drop the lock, wait a while and retry */
1872                UDELAY(map, chip, adr, 1000000/HZ);
1873        }
1874        /* Did we succeed? */
1875        if (!chip_good(map, adr, map_word_ff(map))) {
1876                /* reset on all failures. */
1877                map_write(map, CMD(0xF0), chip->start);
1878                /* FIXME - should have reset delay before continuing */
1879
1880                ret = -EIO;
1881        }
1882
1883        chip->state = FL_READY;
1884        xip_enable(map, chip, adr);
1885        DISABLE_VPP(map);
1886        put_chip(map, chip, adr);
1887        mutex_unlock(&chip->mutex);
1888
1889        return ret;
1890}
1891
1892
1893static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
1894{
1895        struct cfi_private *cfi = map->fldrv_priv;
1896        unsigned long timeo = jiffies + HZ;
1897        DECLARE_WAITQUEUE(wait, current);
1898        int ret = 0;
1899
1900        adr += chip->start;
1901
1902        mutex_lock(&chip->mutex);
1903        ret = get_chip(map, chip, adr, FL_ERASING);
1904        if (ret) {
1905                mutex_unlock(&chip->mutex);
1906                return ret;
1907        }
1908
1909        pr_debug("MTD %s(): ERASE 0x%.8lx\n",
1910               __func__, adr );
1911
1912        XIP_INVAL_CACHED_RANGE(map, adr, len);
1913        ENABLE_VPP(map);
1914        xip_disable(map, chip, adr);
1915
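            /*
             * Sector erase uses the same six-cycle pattern, except that
             * the final cycle writes the sector erase command
             * (cfi->sector_erase_cmd, typically 0x30) to an address
             * inside the sector itself.
             */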
1916        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1917        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1918        cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1919        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1920        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1921        map_write(map, cfi->sector_erase_cmd, adr);
1922
1923        chip->state = FL_ERASING;
1924        chip->erase_suspended = 0;
1925        chip->in_progress_block_addr = adr;
1926
1927        INVALIDATE_CACHE_UDELAY(map, chip,
1928                                adr, len,
1929                                chip->erase_time*500);
1930
1931        timeo = jiffies + (HZ*20);
1932
1933        for (;;) {
1934                if (chip->state != FL_ERASING) {
1935                        /* Someone's suspended the erase. Sleep */
1936                        set_current_state(TASK_UNINTERRUPTIBLE);
1937                        add_wait_queue(&chip->wq, &wait);
1938                        mutex_unlock(&chip->mutex);
1939                        schedule();
1940                        remove_wait_queue(&chip->wq, &wait);
1941                        mutex_lock(&chip->mutex);
1942                        continue;
1943                }
1944                if (chip->erase_suspended) {
1945                        /* This erase was suspended and resumed.
1946                           Adjust the timeout */
1947                        timeo = jiffies + (HZ*20); /* FIXME */
1948                        chip->erase_suspended = 0;
1949                }
1950
1951                if (chip_ready(map, adr)) {
1952                        xip_enable(map, chip, adr);
1953                        break;
1954                }
1955
1956                if (time_after(jiffies, timeo)) {
1957                        xip_enable(map, chip, adr);
1958                        printk(KERN_WARNING "MTD %s(): software timeout\n",
1959                                __func__ );
1960                        break;
1961                }
1962
1963                /* Latency issues. Drop the lock, wait a while and retry */
1964                UDELAY(map, chip, adr, 1000000/HZ);
1965        }
1966        /* Did we succeed? */
1967        if (!chip_good(map, adr, map_word_ff(map))) {
1968                /* reset on all failures. */
1969                map_write(map, CMD(0xF0), chip->start);
1970                /* FIXME - should have reset delay before continuing */
1971
1972                ret = -EIO;
1973        }
1974
1975        chip->state = FL_READY;
1976        DISABLE_VPP(map);
1977        put_chip(map, chip, adr);
1978        mutex_unlock(&chip->mutex);
1979        return ret;
1980}
1981
1982
1983static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1984{
1985        unsigned long ofs, len;
1986        int ret;
1987
1988        ofs = instr->addr;
1989        len = instr->len;
1990
1991        ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1992        if (ret)
1993                return ret;
1994
1995        instr->state = MTD_ERASE_DONE;
1996        mtd_erase_callback(instr);
1997
1998        return 0;
1999}
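
    /*
     * On success the MTD erase contract is completed by marking the
     * request MTD_ERASE_DONE and calling mtd_erase_callback(), which
     * runs the requester's completion callback, if one was supplied.
     */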
2000
2001
2002static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
2003{
2004        struct map_info *map = mtd->priv;
2005        struct cfi_private *cfi = map->fldrv_priv;
2006        int ret = 0;
2007
2008        if (instr->addr != 0)
2009                return -EINVAL;
2010
2011        if (instr->len != mtd->size)
2012                return -EINVAL;
2013
2014        ret = do_erase_chip(map, &cfi->chips[0]);
2015        if (ret)
2016                return ret;
2017
2018        instr->state = MTD_ERASE_DONE;
2019        mtd_erase_callback(instr);
2020
2021        return 0;
2022}
2023
2024static int do_atmel_lock(struct map_info *map, struct flchip *chip,
2025                         unsigned long adr, int len, void *thunk)
2026{
2027        struct cfi_private *cfi = map->fldrv_priv;
2028        int ret;
2029
2030        mutex_lock(&chip->mutex);
2031        ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
2032        if (ret)
2033                goto out_unlock;
2034        chip->state = FL_LOCKING;
2035
2036        pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len);
2037
2038        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2039                         cfi->device_type, NULL);
2040        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
2041                         cfi->device_type, NULL);
2042        cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi,
2043                         cfi->device_type, NULL);
2044        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2045                         cfi->device_type, NULL);
2046        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
2047                         cfi->device_type, NULL);
2048        map_write(map, CMD(0x40), chip->start + adr);
2049
2050        chip->state = FL_READY;
2051        put_chip(map, chip, adr + chip->start);
2052        ret = 0;
2053
2054out_unlock:
2055        mutex_unlock(&chip->mutex);
2056        return ret;
2057}
2058
2059static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
2060                           unsigned long adr, int len, void *thunk)
2061{
2062        struct cfi_private *cfi = map->fldrv_priv;
2063        int ret;
2064
2065        mutex_lock(&chip->mutex);
2066        ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
2067        if (ret)
2068                goto out_unlock;
2069        chip->state = FL_UNLOCKING;
2070
2071        pr_debug("MTD %s(): UNLOCK 0x%08lx len %d\n", __func__, adr, len);
2072
2073        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2074                         cfi->device_type, NULL);
2075        map_write(map, CMD(0x70), adr);
2076
2077        chip->state = FL_READY;
2078        put_chip(map, chip, adr + chip->start);
2079        ret = 0;
2080
2081out_unlock:
2082        mutex_unlock(&chip->mutex);
2083        return ret;
2084}
2085
2086static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2087{
2088        return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
2089}
2090
2091static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2092{
2093        return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
2094}
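
    /*
     * These two helpers are installed as the MTD lock/unlock methods by
     * the Atmel fixup earlier in this file, so user space reaches them
     * via the MEMLOCK/MEMUNLOCK ioctls.
     */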
2095
2096
2097static void cfi_amdstd_sync (struct mtd_info *mtd)
2098{
2099        struct map_info *map = mtd->priv;
2100        struct cfi_private *cfi = map->fldrv_priv;
2101        int i;
2102        struct flchip *chip;
2103        int ret = 0;
2104        DECLARE_WAITQUEUE(wait, current);
2105
2106        for (i=0; !ret && i<cfi->numchips; i++) {
2107                chip = &cfi->chips[i];
2108
2109        retry:
2110                mutex_lock(&chip->mutex);
2111
2112                switch(chip->state) {
2113                case FL_READY:
2114                case FL_STATUS:
2115                case FL_CFI_QUERY:
2116                case FL_JEDEC_QUERY:
2117                        chip->oldstate = chip->state;
2118                        chip->state = FL_SYNCING;
2119                        /* No need to wake_up() on this state change -
2120                         * as the whole point is that nobody can do anything
2121                         * with the chip now anyway.
2122                         * Deliberate fall through to FL_SYNCING. */
2123                case FL_SYNCING:
2124                        mutex_unlock(&chip->mutex);
2125                        break;
2126
2127                default:
2128                        /* Not an idle state */
2129                        set_current_state(TASK_UNINTERRUPTIBLE);
2130                        add_wait_queue(&chip->wq, &wait);
2131
2132                        mutex_unlock(&chip->mutex);
2133
2134                        schedule();
2135
2136                        remove_wait_queue(&chip->wq, &wait);
2137
2138                        goto retry;
2139                }
2140        }
2141
2142        /* Unlock the chips again */
2143
2144        for (i--; i >=0; i--) {
2145                chip = &cfi->chips[i];
2146
2147                mutex_lock(&chip->mutex);
2148
2149                if (chip->state == FL_SYNCING) {
2150                        chip->state = chip->oldstate;
2151                        wake_up(&chip->wq);
2152                }
2153                mutex_unlock(&chip->mutex);
2154        }
2155}
2156
2157
2158static int cfi_amdstd_suspend(struct mtd_info *mtd)
2159{
2160        struct map_info *map = mtd->priv;
2161        struct cfi_private *cfi = map->fldrv_priv;
2162        int i;
2163        struct flchip *chip;
2164        int ret = 0;
2165
2166        for (i=0; !ret && i<cfi->numchips; i++) {
2167                chip = &cfi->chips[i];
2168
2169                mutex_lock(&chip->mutex);
2170
2171                switch(chip->state) {
2172                case FL_READY:
2173                case FL_STATUS:
2174                case FL_CFI_QUERY:
2175                case FL_JEDEC_QUERY:
2176                        chip->oldstate = chip->state;
2177                        chip->state = FL_PM_SUSPENDED;
2178                        /* No need to wake_up() on this state change -
2179                         * as the whole point is that nobody can do anything
2180                         * with the chip now anyway.
2181                         * Deliberate fall through to FL_PM_SUSPENDED. */
2182                case FL_PM_SUSPENDED:
2183                        break;
2184
2185                default:
2186                        ret = -EAGAIN;
2187                        break;
2188                }
2189                mutex_unlock(&chip->mutex);
2190        }
2191
2192        /* Unlock the chips again */
2193
2194        if (ret) {
2195                for (i--; i >=0; i--) {
2196                        chip = &cfi->chips[i];
2197
2198                        mutex_lock(&chip->mutex);
2199
2200                        if (chip->state == FL_PM_SUSPENDED) {
2201                                chip->state = chip->oldstate;
2202                                wake_up(&chip->wq);
2203                        }
2204                        mutex_unlock(&chip->mutex);
2205                }
2206        }
2207
2208        return ret;
2209}
2210
2211
2212static void cfi_amdstd_resume(struct mtd_info *mtd)
2213{
2214        struct map_info *map = mtd->priv;
2215        struct cfi_private *cfi = map->fldrv_priv;
2216        int i;
2217        struct flchip *chip;
2218
2219        for (i=0; i<cfi->numchips; i++) {
2220
2221                chip = &cfi->chips[i];
2222
2223                mutex_lock(&chip->mutex);
2224
2225                if (chip->state == FL_PM_SUSPENDED) {
2226                        chip->state = FL_READY;
2227                        map_write(map, CMD(0xF0), chip->start); /* reset to read-array mode */
2228                        wake_up(&chip->wq);
2229                }
2230                else
2231                        printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");
2232
2233                mutex_unlock(&chip->mutex);
2234        }
2235}
2236
2237
2238/*
2239 * Ensure that the flash device is put back into read array mode before
2240 * unloading the driver or rebooting.  On some systems, rebooting while
2241 * the flash is in query/program/erase mode will prevent the CPU from
2242 * fetching the bootloader code, requiring a hard reset or power cycle.
2243 */
2244static int cfi_amdstd_reset(struct mtd_info *mtd)
2245{
2246        struct map_info *map = mtd->priv;
2247        struct cfi_private *cfi = map->fldrv_priv;
2248        int i, ret;
2249        struct flchip *chip;
2250
2251        for (i = 0; i < cfi->numchips; i++) {
2252
2253                chip = &cfi->chips[i];
2254
2255                mutex_lock(&chip->mutex);
2256
2257                ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
2258                if (!ret) {
2259                        map_write(map, CMD(0xF0), chip->start);
2260                        chip->state = FL_SHUTDOWN;
2261                        put_chip(map, chip, chip->start);
2262                }
2263
2264                mutex_unlock(&chip->mutex);
2265        }
2266
2267        return 0;
2268}
2269
2270
2271static int cfi_amdstd_reboot(struct notifier_block *nb, unsigned long val,
2272                               void *v)
2273{
2274        struct mtd_info *mtd;
2275
2276        mtd = container_of(nb, struct mtd_info, reboot_notifier);
2277        cfi_amdstd_reset(mtd);
2278        return NOTIFY_DONE;
2279}
2280
2281
2282static void cfi_amdstd_destroy(struct mtd_info *mtd)
2283{
2284        struct map_info *map = mtd->priv;
2285        struct cfi_private *cfi = map->fldrv_priv;
2286
2287        cfi_amdstd_reset(mtd);
2288        unregister_reboot_notifier(&mtd->reboot_notifier);
2289        kfree(cfi->cmdset_priv);
2290        kfree(cfi->cfiq);
2291        kfree(cfi);
2292        kfree(mtd->eraseregions);
2293}
2294
2295MODULE_LICENSE("GPL");
2296MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
2297MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");
2298MODULE_ALIAS("cfi_cmdset_0006");
2299MODULE_ALIAS("cfi_cmdset_0701");
2300