linux/drivers/mtd/chips/cfi_cmdset_0002.c
/*
 * Common Flash Interface support:
 *   AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
 *
 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
 * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com>
 *
 * 2_by_8 routines added by Simon Munton
 *
 * 4_by_16 work by Carolyn J. Smith
 *
 * XIP support hooks by Vitaly Wool (based on code for Intel flash
 * by Nicolas Pitre)
 *
 * 25/09/2008 Christopher Moore: TopBottom fixup for many Macronix with CFI V1.0
 *
 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
 *
 * This code is GPL
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/xip.h>

#define AMD_BOOTLOC_BUG
#define FORCE_WORD_WRITE 0

#define MAX_WORD_RETRIES 3
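
/*
 * A word programming operation that fails verification is reset with
 * the 0xF0 command and retried up to MAX_WORD_RETRIES times in
 * do_write_oneword() before the write fails with -EIO.
 */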

#define SST49LF004B             0x0060
#define SST49LF040B             0x0050
#define SST49LF008A             0x005a
#define AT49BV6416              0x00d6

static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_amdstd_sync (struct mtd_info *);
static int cfi_amdstd_suspend (struct mtd_info *);
static void cfi_amdstd_resume (struct mtd_info *);
static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *);
static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);

static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
                                  size_t *retlen, const u_char *buf);

static void cfi_amdstd_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"

static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);

static struct mtd_chip_driver cfi_amdstd_chipdrv = {
        .probe          = NULL, /* Not usable directly */
        .destroy        = cfi_amdstd_destroy,
        .name           = "cfi_cmdset_0002",
        .module         = THIS_MODULE
};


/* #define DEBUG_CFI_FEATURES */


#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_amdstd *extp)
{
        const char* erase_suspend[3] = {
                "Not supported", "Read only", "Read/write"
        };
        const char* top_bottom[6] = {
                "No WP", "8x8KiB sectors at top & bottom, no WP",
                "Bottom boot", "Top boot",
                "Uniform, Bottom WP", "Uniform, Top WP"
        };

        printk("  Silicon revision: %d\n", extp->SiliconRevision >> 1);
        printk("  Address sensitive unlock: %s\n",
               (extp->SiliconRevision & 1) ? "Not required" : "Required");

        if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
                printk("  Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
        else
                printk("  Erase Suspend: Unknown value %d\n", extp->EraseSuspend);

        if (extp->BlkProt == 0)
                printk("  Block protection: Not supported\n");
        else
                printk("  Block protection: %d sectors per group\n", extp->BlkProt);


        printk("  Temporary block unprotect: %s\n",
               extp->TmpBlkUnprotect ? "Supported" : "Not supported");
        printk("  Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
        printk("  Number of simultaneous operations: %d\n", extp->SimultaneousOps);
        printk("  Burst mode: %s\n",
               extp->BurstMode ? "Supported" : "Not supported");
        if (extp->PageMode == 0)
                printk("  Page mode: Not supported\n");
        else
                printk("  Page mode: %d word page\n", extp->PageMode << 2);

        printk("  Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
               extp->VppMin >> 4, extp->VppMin & 0xf);
        printk("  Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
               extp->VppMax >> 4, extp->VppMax & 0xf);

        if (extp->TopBottom < ARRAY_SIZE(top_bottom))
                printk("  Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
        else
                printk("  Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
}
#endif

#ifdef AMD_BOOTLOC_BUG
/* Wheee. Bring me the head of someone at AMD. */
static void fixup_amd_bootblock(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
        __u8 major = extp->MajorVersion;
        __u8 minor = extp->MinorVersion;

        if (((major << 8) | minor) < 0x3131) {
                /* CFI version 1.0 => don't trust bootloc */

                pr_debug("%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n",
                        map->name, cfi->mfr, cfi->id);

                /* AFAICS all 29LV400 with a bottom boot block have a device ID
                 * of 0x22BA in 16-bit mode and 0xBA in 8-bit mode.
                 * These were badly detected as they have the 0x80 bit set
                 * so treat them as a special case.
                 */
                if (((cfi->id == 0xBA) || (cfi->id == 0x22BA)) &&

                        /* Macronix added CFI to their 2nd generation
                         * MX29LV400C B/T but AFAICS no other 29LV400 (AMD,
                         * Fujitsu, Spansion, EON, ESI and older Macronix)
                         * has CFI.
                         *
                         * Therefore also check the manufacturer.
                         * This reduces the risk of false detection due to
                         * the 8-bit device ID.
                         */
                        (cfi->mfr == CFI_MFR_MACRONIX)) {
                        pr_debug("%s: Macronix MX29LV400C with bottom boot block"
                                " detected\n", map->name);
                        extp->TopBottom = 2;    /* bottom boot */
                } else
                if (cfi->id & 0x80) {
                        printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
                        extp->TopBottom = 3;    /* top boot */
                } else {
                        extp->TopBottom = 2;    /* bottom boot */
                }

                pr_debug("%s: AMD CFI PRI V%c.%c has no boot block field;"
                        " deduced %s from Device ID\n", map->name, major, minor,
                        extp->TopBottom == 2 ? "bottom" : "top");
        }
}
#endif

static void fixup_use_write_buffers(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        if (cfi->cfiq->BufWriteTimeoutTyp) {
                pr_debug("Using buffer write method\n");
                mtd->_write = cfi_amdstd_write_buffers;
        }
}

/* Atmel chips don't use the same PRI format as AMD chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
        struct cfi_pri_atmel atmel_pri;

        memcpy(&atmel_pri, extp, sizeof(atmel_pri));
        memset((char *)extp + 5, 0, sizeof(*extp) - 5);
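        /*
         * Only the 5-byte PRI header (the "PRI" signature plus the major
         * and minor version bytes) is preserved above; the remaining
         * fields are rebuilt below from the Atmel-format copy.
         */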

        if (atmel_pri.Features & 0x02)
                extp->EraseSuspend = 2;

        /* Some chips got it backwards... */
        if (cfi->id == AT49BV6416) {
                if (atmel_pri.BottomBoot)
                        extp->TopBottom = 3;
                else
                        extp->TopBottom = 2;
        } else {
                if (atmel_pri.BottomBoot)
                        extp->TopBottom = 2;
                else
                        extp->TopBottom = 3;
        }

        /* burst write mode not supported */
        cfi->cfiq->BufWriteTimeoutTyp = 0;
        cfi->cfiq->BufWriteTimeoutMax = 0;
}

static void fixup_use_secsi(struct mtd_info *mtd)
{
        /* Setup for chips with a secsi area */
        mtd->_read_user_prot_reg = cfi_amdstd_secsi_read;
        mtd->_read_fact_prot_reg = cfi_amdstd_secsi_read;
}

static void fixup_use_erase_chip(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        if ((cfi->cfiq->NumEraseRegions == 1) &&
                ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
                mtd->_erase = cfi_amdstd_erase_chip;
        }

}

/*
 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors
 * locked by default.
 */
static void fixup_use_atmel_lock(struct mtd_info *mtd)
{
        mtd->_lock = cfi_atmel_lock;
        mtd->_unlock = cfi_atmel_unlock;
        mtd->flags |= MTD_POWERUP_LOCK;
}

static void fixup_old_sst_eraseregion(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        /*
         * These flashes report two separate eraseblock regions based on the
         * sector_erase-size and block_erase-size, although they both operate on the
         * same memory. This is not allowed according to CFI, so we just pick the
         * sector_erase-size.
         */
        cfi->cfiq->NumEraseRegions = 1;
}

static void fixup_sst39vf(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        fixup_old_sst_eraseregion(mtd);

        cfi->addr_unlock1 = 0x5555;
        cfi->addr_unlock2 = 0x2AAA;
}

static void fixup_sst39vf_rev_b(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        fixup_old_sst_eraseregion(mtd);

        cfi->addr_unlock1 = 0x555;
        cfi->addr_unlock2 = 0x2AA;

        cfi->sector_erase_cmd = CMD(0x50);
}

static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        fixup_sst39vf_rev_b(mtd);

        /*
         * CFI reports 1024 sectors (0x03ff+1) of 64KBytes (0x0100*256) where
         * it should report a size of 8KBytes (0x0020*256).
         */
        cfi->cfiq->EraseRegionInfo[0] = 0x002003ff;
        pr_warning("%s: Bad 38VF640x CFI data; adjusting sector size from 64 to 8KiB\n", mtd->name);
}

static void fixup_s29gl064n_sectors(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
                cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
                pr_warning("%s: Bad S29GL064N CFI data; adjust from 64 to 128 sectors\n", mtd->name);
        }
}

static void fixup_s29gl032n_sectors(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
                cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
                pr_warning("%s: Bad S29GL032N CFI data; adjust from 127 to 63 sectors\n", mtd->name);
        }
}

static void fixup_s29ns512p_sectors(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        /*
         * S29NS512P flash uses more than 8 bits to report the number of
         * sectors, which is not permitted by CFI.
         */
        cfi->cfiq->EraseRegionInfo[0] = 0x020001ff;
        pr_warning("%s: Bad S29NS512P CFI data; adjust to 512 sectors\n", mtd->name);
}
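
/*
 * In the CFI erase region descriptors patched above, bits 0-15 hold
 * (number of blocks - 1) and bits 16-31 hold the block size in units
 * of 256 bytes: 0x002003ff describes 1024 blocks of 8KiB, and
 * 0x020001ff describes 512 blocks of 128KiB.
 */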

/* Used to fix CFI-Tables of chips without Extended Query Tables */
static struct cfi_fixup cfi_nopri_fixup_table[] = {
        { CFI_MFR_SST, 0x234a, fixup_sst39vf }, /* SST39VF1602 */
        { CFI_MFR_SST, 0x234b, fixup_sst39vf }, /* SST39VF1601 */
        { CFI_MFR_SST, 0x235a, fixup_sst39vf }, /* SST39VF3202 */
        { CFI_MFR_SST, 0x235b, fixup_sst39vf }, /* SST39VF3201 */
        { CFI_MFR_SST, 0x235c, fixup_sst39vf_rev_b }, /* SST39VF3202B */
        { CFI_MFR_SST, 0x235d, fixup_sst39vf_rev_b }, /* SST39VF3201B */
        { CFI_MFR_SST, 0x236c, fixup_sst39vf_rev_b }, /* SST39VF6402B */
        { CFI_MFR_SST, 0x236d, fixup_sst39vf_rev_b }, /* SST39VF6401B */
        { 0, 0, NULL }
};

static struct cfi_fixup cfi_fixup_table[] = {
        { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
#ifdef AMD_BOOTLOC_BUG
        { CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock },
        { CFI_MFR_AMIC, CFI_ID_ANY, fixup_amd_bootblock },
        { CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock },
#endif
        { CFI_MFR_AMD, 0x0050, fixup_use_secsi },
        { CFI_MFR_AMD, 0x0053, fixup_use_secsi },
        { CFI_MFR_AMD, 0x0055, fixup_use_secsi },
        { CFI_MFR_AMD, 0x0056, fixup_use_secsi },
        { CFI_MFR_AMD, 0x005C, fixup_use_secsi },
        { CFI_MFR_AMD, 0x005F, fixup_use_secsi },
        { CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors },
        { CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors },
        { CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors },
        { CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors },
        { CFI_MFR_AMD, 0x3f00, fixup_s29ns512p_sectors },
        { CFI_MFR_SST, 0x536a, fixup_sst38vf640x_sectorsize }, /* SST38VF6402 */
        { CFI_MFR_SST, 0x536b, fixup_sst38vf640x_sectorsize }, /* SST38VF6401 */
        { CFI_MFR_SST, 0x536c, fixup_sst38vf640x_sectorsize }, /* SST38VF6404 */
        { CFI_MFR_SST, 0x536d, fixup_sst38vf640x_sectorsize }, /* SST38VF6403 */
#if !FORCE_WORD_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
#endif
        { 0, 0, NULL }
};
static struct cfi_fixup jedec_fixup_table[] = {
        { CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock },
        { CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock },
        { CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock },
        { 0, 0, NULL }
};

static struct cfi_fixup fixup_table[] = {
        /* The CFI vendor IDs and the JEDEC vendor IDs appear to be
         * common.  It is likely that the device IDs are as well.  This
         * table picks up all the cases where we know that to be so.
         */
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip },
        { CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock },
        { 0, 0, NULL }
};


static void cfi_fixup_major_minor(struct cfi_private *cfi,
                                  struct cfi_pri_amdstd *extp)
{
        if (cfi->mfr == CFI_MFR_SAMSUNG) {
                if ((extp->MajorVersion == '0' && extp->MinorVersion == '0') ||
                    (extp->MajorVersion == '3' && extp->MinorVersion == '3')) {
                        /*
                         * Samsung K8P2815UQB and K8D6x16UxM chips
                         * report major=0 / minor=0.
                         * K8D3x16UxC chips report major=3 / minor=3.
                         */
                        printk(KERN_NOTICE "  Fixing Samsung's Amd/Fujitsu"
                               " Extended Query version to 1.%c\n",
                               extp->MinorVersion);
                        extp->MajorVersion = '1';
                }
        }

        /*
         * SST 38VF640x chips report major=0xFF / minor=0xFF.
         */
        if (cfi->mfr == CFI_MFR_SST && (cfi->id >> 4) == 0x0536) {
                extp->MajorVersion = '1';
                extp->MinorVersion = '0';
        }
}

static int is_m29ew(struct cfi_private *cfi)
{
        if (cfi->mfr == CFI_MFR_INTEL &&
            ((cfi->device_type == CFI_DEVICETYPE_X8 && (cfi->id & 0xff) == 0x7e) ||
             (cfi->device_type == CFI_DEVICETYPE_X16 && cfi->id == 0x227e)))
                return 1;
        return 0;
}

/*
 * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 20:
 * Some revisions of the M29EW suffer from erase suspend hang ups. In
 * particular, it can occur when the sequence
 * Erase Confirm -> Suspend -> Program -> Resume
 * causes a lockup due to internal timing issues. The consequence is that the
 * erase cannot be resumed without inserting a dummy command after programming
 * and prior to resuming. [...] The work-around is to issue a dummy write cycle
 * that writes an F0 command code before the RESUME command.
 */
static void cfi_fixup_m29ew_erase_suspend(struct map_info *map,
                                          unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        /* before resume, insert a dummy 0xF0 cycle for Micron M29EW devices */
        if (is_m29ew(cfi))
                map_write(map, CMD(0xF0), adr);
}

/*
 * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 22:
 *
 * Some revisions of the M29EW (for example, A1 and A2 step revisions)
 * are affected by a problem that could cause a hang up when an ERASE SUSPEND
 * command is issued after an ERASE RESUME operation without waiting for a
 * minimum delay.  The result is that once the ERASE seems to be completed
 * (no bits are toggling), the contents of the Flash memory block on which
 * the erase was ongoing could be inconsistent with the expected values
 * (typically, the array value is stuck to the 0xC0, 0xC4, 0x80, or 0x84
 * values), causing a consequent failure of the ERASE operation.
 * The occurrence of this issue could be high, especially when file system
 * operations on the Flash are intensive.  As a result, it is recommended
 * that a patch be applied.  Intensive file system operations can cause many
 * calls to the garbage routine to free Flash space (also by erasing physical
 * Flash blocks) and as a result, many consecutive SUSPEND and RESUME
 * commands can occur.  The problem disappears when a delay is inserted after
 * the RESUME command by using the udelay() function available in Linux.
 * The DELAY value must be tuned based on the customer's platform.
 * The maximum value that fixes the problem in all cases is 500us.
 * But, in our experience, a delay of 30 µs to 50 µs is sufficient
 * in most cases.
 * We have chosen 500µs because this latency is acceptable.
 */
static void cfi_fixup_m29ew_delay_after_resume(struct cfi_private *cfi)
{
        /*
         * Resolving the Delay After Resume Issue see Micron TN-13-07
         * Worst case delay must be 500µs but 30-50µs should be ok as well
         */
        if (is_m29ew(cfi))
                cfi_udelay(500);
}

struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct mtd_info *mtd;
        int i;

        mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
        if (!mtd) {
                printk(KERN_WARNING "Failed to allocate memory for MTD device\n");
                return NULL;
        }
        mtd->priv = map;
        mtd->type = MTD_NORFLASH;

        /* Fill in the default mtd operations */
        mtd->_erase   = cfi_amdstd_erase_varsize;
        mtd->_write   = cfi_amdstd_write_words;
        mtd->_read    = cfi_amdstd_read;
        mtd->_sync    = cfi_amdstd_sync;
        mtd->_suspend = cfi_amdstd_suspend;
        mtd->_resume  = cfi_amdstd_resume;
        mtd->flags   = MTD_CAP_NORFLASH;
        mtd->name    = map->name;
        mtd->writesize = 1;
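        /* CFI gives log2 of the per-chip write buffer size; scale it by
         * the interleave to get the buffer size for the whole map. */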
        mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;

        pr_debug("MTD %s(): write buffer size %d\n", __func__,
                        mtd->writebufsize);

        mtd->_panic_write = cfi_amdstd_panic_write;
        mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;

        if (cfi->cfi_mode == CFI_MODE_CFI) {
                unsigned char bootloc;
                __u16 adr = primary ? cfi->cfiq->P_ADR : cfi->cfiq->A_ADR;
                struct cfi_pri_amdstd *extp;

                extp = (struct cfi_pri_amdstd *)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
                if (extp) {
                        /*
                         * It's a real CFI chip, not one for which the probe
                         * routine faked a CFI structure.
                         */
                        cfi_fixup_major_minor(cfi, extp);

                        /*
                         * Valid primary extension versions are: 1.0, 1.1, 1.2, 1.3, 1.4, 1.5
                         * see: http://cs.ozerki.net/zap/pub/axim-x5/docs/cfi_r20.pdf, page 19
                         *      http://www.spansion.com/Support/AppNotes/cfi_100_20011201.pdf
                         *      http://www.spansion.com/Support/Datasheets/s29ws-p_00_a12_e.pdf
                         *      http://www.spansion.com/Support/Datasheets/S29GL_128S_01GS_00_02_e.pdf
                         */
                        if (extp->MajorVersion != '1' ||
                            (extp->MajorVersion == '1' && (extp->MinorVersion < '0' || extp->MinorVersion > '5'))) {
                                printk(KERN_ERR "  Unknown Amd/Fujitsu Extended Query "
                                       "version %c.%c (%#02x/%#02x).\n",
                                       extp->MajorVersion, extp->MinorVersion,
                                       extp->MajorVersion, extp->MinorVersion);
                                kfree(extp);
                                kfree(mtd);
                                return NULL;
                        }

                        printk(KERN_INFO "  Amd/Fujitsu Extended Query version %c.%c.\n",
                               extp->MajorVersion, extp->MinorVersion);

                        /* Install our own private info structure */
                        cfi->cmdset_priv = extp;

                        /* Apply cfi device specific fixups */
                        cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
                        /* Tell the user about it in lots of lovely detail */
                        cfi_tell_features(extp);
#endif

                        bootloc = extp->TopBottom;
                        if ((bootloc < 2) || (bootloc > 5)) {
                                printk(KERN_WARNING "%s: CFI contains unrecognised boot "
                                       "bank location (%d). Assuming bottom.\n",
                                       map->name, bootloc);
                                bootloc = 2;
                        }

                        if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
                                printk(KERN_WARNING "%s: Swapping erase regions for top-boot CFI table.\n", map->name);

                                for (i = 0; i < cfi->cfiq->NumEraseRegions / 2; i++) {
                                        int j = (cfi->cfiq->NumEraseRegions - 1) - i;
                                        __u32 swap;

                                        swap = cfi->cfiq->EraseRegionInfo[i];
                                        cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
                                        cfi->cfiq->EraseRegionInfo[j] = swap;
                                }
                        }
                        /* Set the default CFI lock/unlock addresses */
                        cfi->addr_unlock1 = 0x555;
                        cfi->addr_unlock2 = 0x2aa;
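                        /*
                         * These are the classic AMD unlock cycle addresses;
                         * cfi_send_gen_cmd() scales them for the device type
                         * and interleave when commands are issued.
                         */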
                }
                cfi_fixup(mtd, cfi_nopri_fixup_table);

                if (!cfi->addr_unlock1 || !cfi->addr_unlock2) {
                        kfree(mtd);
                        return NULL;
                }

        } /* CFI mode */
        else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
                /* Apply jedec specific fixups */
                cfi_fixup(mtd, jedec_fixup_table);
        }
        /* Apply generic fixups */
        cfi_fixup(mtd, fixup_table);

        for (i = 0; i < cfi->numchips; i++) {
                cfi->chips[i].word_write_time = 1 << cfi->cfiq->WordWriteTimeoutTyp;
                cfi->chips[i].buffer_write_time = 1 << cfi->cfiq->BufWriteTimeoutTyp;
                cfi->chips[i].erase_time = 1 << cfi->cfiq->BlockEraseTimeoutTyp;
                cfi->chips[i].ref_point_counter = 0;
                init_waitqueue_head(&(cfi->chips[i].wq));
        }

        map->fldrv = &cfi_amdstd_chipdrv;

        return cfi_amdstd_setup(mtd);
}
struct mtd_info *cfi_cmdset_0006(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
struct mtd_info *cfi_cmdset_0701(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0002);
EXPORT_SYMBOL_GPL(cfi_cmdset_0006);
EXPORT_SYMBOL_GPL(cfi_cmdset_0701);

static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
        unsigned long offset = 0;
        int i, j;

        printk(KERN_NOTICE "number of %s chips: %d\n",
               (cfi->cfi_mode == CFI_MODE_CFI) ? "CFI" : "JEDEC", cfi->numchips);
        /* Select the correct geometry setup */
        mtd->size = devsize * cfi->numchips;

        mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
        mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
                                    * mtd->numeraseregions, GFP_KERNEL);
        if (!mtd->eraseregions) {
                printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n");
                goto setup_err;
        }

        for (i = 0; i < cfi->cfiq->NumEraseRegions; i++) {
                unsigned long ernum, ersize;
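                /* Decode the CFI descriptor: bits 16-31 give the block size
                 * in 256-byte units, bits 0-15 give the block count - 1. */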
                ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
                ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

                if (mtd->erasesize < ersize) {
                        mtd->erasesize = ersize;
                }
                for (j = 0; j < cfi->numchips; j++) {
                        mtd->eraseregions[(j * cfi->cfiq->NumEraseRegions) + i].offset = (j * devsize) + offset;
                        mtd->eraseregions[(j * cfi->cfiq->NumEraseRegions) + i].erasesize = ersize;
                        mtd->eraseregions[(j * cfi->cfiq->NumEraseRegions) + i].numblocks = ernum;
                }
                offset += (ersize * ernum);
        }
        if (offset != devsize) {
                /* Argh */
                printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
                goto setup_err;
        }

        __module_get(THIS_MODULE);
        register_reboot_notifier(&mtd->reboot_notifier);
        return mtd;

 setup_err:
        kfree(mtd->eraseregions);
        kfree(mtd);
        kfree(cfi->cmdset_priv);
        kfree(cfi->cfiq);
        return NULL;
}

/*
 * Return true if the chip is ready.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and is indicated by no toggle bits toggling.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_ready(struct map_info *map, unsigned long addr)
{
        map_word d, t;

        d = map_read(map, addr);
        t = map_read(map, addr);

        return map_word_equal(map, d, t);
}
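
/*
 * While an embedded program or erase algorithm is running, AMD-style
 * chips toggle DQ6 (and, during erase, DQ2) on every read, so two
 * successive reads returning the same value indicate that the
 * algorithm has completed.
 */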

/*
 * Return true if the chip is ready and has the correct value.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and it is indicated by no bits toggling.
 *
 * Errors are indicated by toggling bits, or by bits held at the wrong
 * value.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 *
 */
static int __xipram chip_good(struct map_info *map, unsigned long addr, map_word expected)
{
        map_word oldd, curd;

        oldd = map_read(map, addr);
        curd = map_read(map, addr);

        return map_word_equal(map, oldd, curd) &&
                map_word_equal(map, curd, expected);
}

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        DECLARE_WAITQUEUE(wait, current);
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long timeo;
        struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;

 resettime:
        timeo = jiffies + HZ;
 retry:
        switch (chip->state) {

        case FL_STATUS:
                for (;;) {
                        if (chip_ready(map, adr))
                                break;

                        if (time_after(jiffies, timeo)) {
                                printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
                                return -EIO;
                        }
                        mutex_unlock(&chip->mutex);
                        cfi_udelay(1);
                        mutex_lock(&chip->mutex);
                        /* Someone else might have been playing with it. */
                        goto retry;
                }
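                /* Fall through: the chip is now ready. */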

        case FL_READY:
        case FL_CFI_QUERY:
        case FL_JEDEC_QUERY:
                return 0;

        case FL_ERASING:
                if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
                    !(mode == FL_READY || mode == FL_POINT ||
                    (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
                        goto sleep;

                /* We could check to see if we're trying to access the sector
                 * that is currently being erased. However, no user will try
                 * anything like that so we just wait for the timeout. */

                /* Erase suspend */
                /* It's harmless to issue the Erase-Suspend and Erase-Resume
                 * commands when the erase algorithm isn't in progress. */
                map_write(map, CMD(0xB0), chip->in_progress_block_addr);
                chip->oldstate = FL_ERASING;
                chip->state = FL_ERASE_SUSPENDING;
                chip->erase_suspended = 1;
                for (;;) {
                        if (chip_ready(map, adr))
                                break;

                        if (time_after(jiffies, timeo)) {
                                /* Should have suspended the erase by now.
                                 * Send an Erase-Resume command as either
                                 * there was an error (so leave the erase
                                 * routine to recover from it) or we are
                                 * trying to use the erase-in-progress
                                 * sector. */
                                put_chip(map, chip, adr);
                                printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
                                return -EIO;
                        }

                        mutex_unlock(&chip->mutex);
                        cfi_udelay(1);
                        mutex_lock(&chip->mutex);
                        /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
                           So we can just loop here. */
                }
                chip->state = FL_READY;
                return 0;

        case FL_XIP_WHILE_ERASING:
                if (mode != FL_READY && mode != FL_POINT &&
                    (!cfip || !(cfip->EraseSuspend & 2)))
                        goto sleep;
                chip->oldstate = chip->state;
                chip->state = FL_READY;
                return 0;

        case FL_SHUTDOWN:
                /* The machine is rebooting */
                return -EIO;

        case FL_POINT:
                /* Only if there's no operation suspended... */
                if (mode == FL_READY && chip->oldstate == FL_READY)
                        return 0;
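                /* Otherwise fall through and sleep. */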

        default:
        sleep:
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);
                mutex_unlock(&chip->mutex);
                schedule();
                remove_wait_queue(&chip->wq, &wait);
                mutex_lock(&chip->mutex);
                goto resettime;
        }
}


static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;

        switch (chip->oldstate) {
        case FL_ERASING:
                cfi_fixup_m29ew_erase_suspend(map,
                        chip->in_progress_block_addr);
                map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr);
                cfi_fixup_m29ew_delay_after_resume(cfi);
                chip->oldstate = FL_READY;
                chip->state = FL_ERASING;
                break;

        case FL_XIP_WHILE_ERASING:
                chip->state = chip->oldstate;
                chip->oldstate = FL_READY;
                break;

        case FL_READY:
        case FL_STATUS:
                break;
        default:
                printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
        }
        wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within an XIP-disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
                        unsigned long adr)
{
        /* TODO: chips with no XIP use should ignore and return */
        (void) map_read(map, adr); /* ensure mmu mapping is up to date */
        local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
                                unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;

        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xf0), adr);
                chip->state = FL_READY;
        }
        (void) map_read(map, adr);
        xip_iprefetch();
        local_irq_enable();
}
/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function polls for both the given timeout and pending
 * (but still masked) hardware interrupts.  Whenever there is an interrupt
 * pending then the flash erase operation is suspended, array mode restored
 * and interrupts unmasked.  Task scheduling might also happen at that
 * point.  The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the
 * remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
                                unsigned long adr, int usec)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
        map_word status, OK = CMD(0x80);
        unsigned long suspended, start = xip_currtime();
        flstate_t oldstate;

        do {
                cpu_relax();
                if (xip_irqpending() && extp &&
                    ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
                    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
                        /*
                         * Let's suspend the erase operation when supported.
                         * Note that we currently don't try to suspend
                         * interleaved chips if there is already another
                         * operation suspended (imagine what happens
                         * when one chip was already done with the current
                         * operation while another chip suspended it, then
                         * we resume the whole thing at once).  Yes, it
                         * can happen!
                         */
                        map_write(map, CMD(0xb0), adr);
                        usec -= xip_elapsed_since(start);
                        suspended = xip_currtime();
                        do {
                                if (xip_elapsed_since(suspended) > 100000) {
                                        /*
                                         * The chip doesn't want to suspend
                                         * after waiting for 100 msecs.
                                         * This is a critical error but there
                                         * is not much we can do here.
                                         */
                                        return;
                                }
                                status = map_read(map, adr);
                        } while (!map_word_andequal(map, status, OK, OK));

                        /* Suspend succeeded */
                        oldstate = chip->state;
                        if (!map_word_bitsset(map, status, CMD(0x40)))
                                break;
                        chip->state = FL_XIP_WHILE_ERASING;
                        chip->erase_suspended = 1;
                        map_write(map, CMD(0xf0), adr);
                        (void) map_read(map, adr);
                        xip_iprefetch();
                        local_irq_enable();
                        mutex_unlock(&chip->mutex);
                        xip_iprefetch();
                        cond_resched();

                        /*
                         * We're back.  However someone else might have
                         * decided to go write to the chip if we are in
                         * a suspended erase state.  If so let's wait
                         * until it's done.
                         */
                        mutex_lock(&chip->mutex);
                        while (chip->state != FL_XIP_WHILE_ERASING) {
                                DECLARE_WAITQUEUE(wait, current);
                                set_current_state(TASK_UNINTERRUPTIBLE);
                                add_wait_queue(&chip->wq, &wait);
                                mutex_unlock(&chip->mutex);
                                schedule();
                                remove_wait_queue(&chip->wq, &wait);
                                mutex_lock(&chip->mutex);
                        }
                        /* Disallow XIP again */
                        local_irq_disable();

                        /* Correct Erase Suspend Hangups for M29EW */
                        cfi_fixup_m29ew_erase_suspend(map, adr);
                        /* Resume the write or erase operation */
                        map_write(map, cfi->sector_erase_cmd, adr);
                        chip->state = oldstate;
                        start = xip_currtime();
                } else if (usec >= 1000000/HZ) {
                        /*
                         * Try to save on CPU power when waiting delay
                         * is at least a system timer tick period.
                         * No need to be extremely accurate here.
                         */
                        xip_cpu_idle();
                }
                status = map_read(map, adr);
        } while (!map_word_andequal(map, status, OK, OK)
                 && xip_elapsed_since(start) < usec);
}

#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVALIDATE_CACHE_UDELAY.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
        INVALIDATE_CACHED_RANGE(map, from, size)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
        UDELAY(map, chip, adr, usec)

/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit.  For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with the
 * same chip state as it was entered with.  This is why there is no care for
 * the presence of add_wait_queue() or schedule() calls from within a couple
 * of xip_disable()'d areas of code, like in do_erase_oneblock for example.
 * The queueing and scheduling are always happening within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) while the flash is
 * in array mode, therefore never executing many cases therein and not
 * causing any problem with XIP.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)

#define UDELAY(map, chip, adr, usec)  \
do {  \
        mutex_unlock(&chip->mutex);  \
        cfi_udelay(usec);  \
        mutex_lock(&chip->mutex);  \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
do {  \
        mutex_unlock(&chip->mutex);  \
        INVALIDATE_CACHED_RANGE(map, adr, len);  \
        cfi_udelay(usec);  \
        mutex_lock(&chip->mutex);  \
} while (0)

#endif

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
        unsigned long cmd_addr;
        struct cfi_private *cfi = map->fldrv_priv;
        int ret;

        adr += chip->start;

        /* Ensure cmd read/writes are aligned. */
        cmd_addr = adr & ~(map_bankwidth(map) - 1);

        mutex_lock(&chip->mutex);
        ret = get_chip(map, chip, cmd_addr, FL_READY);
        if (ret) {
                mutex_unlock(&chip->mutex);
                return ret;
        }

        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xf0), cmd_addr);
                chip->state = FL_READY;
        }

        map_copy_from(map, buf, adr, len);

        put_chip(map, chip, cmd_addr);

        mutex_unlock(&chip->mutex);
        return 0;
}


static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long ofs;
        int chipnum;
        int ret = 0;

        /* ofs: offset within the first chip at which the first read should start */
        chipnum = (from >> cfi->chipshift);
        ofs = from - (chipnum << cfi->chipshift);

        while (len) {
                unsigned long thislen;

                if (chipnum >= cfi->numchips)
                        break;

                if ((len + ofs - 1) >> cfi->chipshift)
                        thislen = (1 << cfi->chipshift) - ofs;
                else
                        thislen = len;

                ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
                if (ret)
                        break;

                *retlen += thislen;
                len -= thislen;
                buf += thislen;

                ofs = 0;
                chipnum++;
        }
        return ret;
}


static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
        DECLARE_WAITQUEUE(wait, current);
        unsigned long timeo = jiffies + HZ;
        struct cfi_private *cfi = map->fldrv_priv;

 retry:
        mutex_lock(&chip->mutex);

        if (chip->state != FL_READY) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);

                mutex_unlock(&chip->mutex);

                schedule();
                remove_wait_queue(&chip->wq, &wait);
                timeo = jiffies + HZ;

                goto retry;
        }

        adr += chip->start;

        chip->state = FL_READY;

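        /* Enter the SecSi sector overlay: the two unlock cycles followed
         * by the 0x88 command map the security region into the array. */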
        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

        map_copy_from(map, buf, adr, len);

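        /* Exit the SecSi overlay: unlock cycles plus the 0x90/0x00
         * sequence return the chip to the normal array. */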
        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

        wake_up(&chip->wq);
        mutex_unlock(&chip->mutex);

        return 0;
}

static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long ofs;
        int chipnum;
        int ret = 0;

        /* ofs: offset within the first chip at which the first read should start */
        /* 8 SecSi bytes per chip */
        chipnum = from >> 3;
        ofs = from & 7;

        while (len) {
                unsigned long thislen;

                if (chipnum >= cfi->numchips)
                        break;

                if ((len + ofs - 1) >> 3)
                        thislen = (1 << 3) - ofs;
                else
                        thislen = len;

                ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
                if (ret)
                        break;

                *retlen += thislen;
                len -= thislen;
                buf += thislen;

                ofs = 0;
                chipnum++;
        }
        return ret;
}


static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum)
{
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long timeo = jiffies + HZ;
        /*
         * We use a 1ms + 1 jiffies generic timeout for writes (most devices
         * have a max write time of a few hundreds usec). However, we should
         * use the maximum timeout value given by the chip at probe time
         * instead.  Unfortunately, struct flchip doesn't have a field for
         * the maximum timeout, only for the typical one, which can be far
         * too short depending on the conditions.  The ' + 1' is to avoid
         * having a timeout of 0 jiffies if HZ is smaller than 1000.
         */
        unsigned long uWriteTimeout = (HZ / 1000) + 1;
        int ret = 0;
        map_word oldd;
        int retry_cnt = 0;

        adr += chip->start;

        mutex_lock(&chip->mutex);
        ret = get_chip(map, chip, adr, FL_WRITING);
        if (ret) {
                mutex_unlock(&chip->mutex);
                return ret;
        }

        pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
               __func__, adr, datum.x[0]);

        /*
         * Check for a NOP for the case when the datum to write is already
         * present - it saves time and works around buggy chips that corrupt
         * data at other locations when 0xff is written to a location that
         * already contains 0xff.
         */
        oldd = map_read(map, adr);
        if (map_word_equal(map, oldd, datum)) {
                pr_debug("MTD %s(): NOP\n",
                       __func__);
                goto op_done;
        }

        XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
        ENABLE_VPP(map);
        xip_disable(map, chip, adr);
 retry:
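        /* The standard AMD word-program sequence: two unlock cycles, the
         * 0xA0 program command, then the datum written to the target
         * address itself. */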
1257        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1258        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1259        cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1260        map_write(map, datum, adr);
1261        chip->state = FL_WRITING;
1262
1263        INVALIDATE_CACHE_UDELAY(map, chip,
1264                                adr, map_bankwidth(map),
1265                                chip->word_write_time);
1266
1267        /* See comment above for timeout value. */
1268        timeo = jiffies + uWriteTimeout;
1269        for (;;) {
1270                if (chip->state != FL_WRITING) {
1271                        /* Someone's suspended the write. Sleep */
1272                        DECLARE_WAITQUEUE(wait, current);
1273
1274                        set_current_state(TASK_UNINTERRUPTIBLE);
1275                        add_wait_queue(&chip->wq, &wait);
1276                        mutex_unlock(&chip->mutex);
1277                        schedule();
1278                        remove_wait_queue(&chip->wq, &wait);
1279                        timeo = jiffies + (HZ / 2); /* FIXME */
1280                        mutex_lock(&chip->mutex);
1281                        continue;
1282                }
1283
1284                if (time_after(jiffies, timeo) && !chip_ready(map, adr)){
1285                        xip_enable(map, chip, adr);
1286                        printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
1287                        xip_disable(map, chip, adr);
1288                        break;
1289                }
1290
1291                if (chip_ready(map, adr))
1292                        break;
1293
1294                /* Latency issues. Drop the lock, wait a while and retry */
1295                UDELAY(map, chip, adr, 1);
1296        }
1297        /* Did we succeed? */
1298        if (!chip_good(map, adr, datum)) {
1299                /* reset on all failures. */
1300                map_write( map, CMD(0xF0), chip->start );
1301                /* FIXME - should have reset delay before continuing */
1302
1303                if (++retry_cnt <= MAX_WORD_RETRIES)
1304                        goto retry;
1305
1306                ret = -EIO;
1307        }
1308        xip_enable(map, chip, adr);
1309 op_done:
1310        chip->state = FL_READY;
1311        DISABLE_VPP(map);
1312        put_chip(map, chip, adr);
1313        mutex_unlock(&chip->mutex);
1314
1315        return ret;
1316}
1317
1318
1319static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
1320                                  size_t *retlen, const u_char *buf)
1321{
1322        struct map_info *map = mtd->priv;
1323        struct cfi_private *cfi = map->fldrv_priv;
1324        int ret = 0;
1325        int chipnum;
1326        unsigned long ofs, chipstart;
1327        DECLARE_WAITQUEUE(wait, current);
1328
1329        chipnum = to >> cfi->chipshift;
1330        ofs = to - (chipnum << cfi->chipshift);
1331        chipstart = cfi->chips[chipnum].start;
1332
1333        /* If it's not bus-aligned, do the first byte write */
1334        if (ofs & (map_bankwidth(map)-1)) {
1335                unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1336                int i = ofs - bus_ofs;
1337                int n = 0;
1338                map_word tmp_buf;
1339
1340 retry:
1341                mutex_lock(&cfi->chips[chipnum].mutex);
1342
1343                if (cfi->chips[chipnum].state != FL_READY) {
1344                        set_current_state(TASK_UNINTERRUPTIBLE);
1345                        add_wait_queue(&cfi->chips[chipnum].wq, &wait);
1346
1347                        mutex_unlock(&cfi->chips[chipnum].mutex);
1348
1349                        schedule();
1350                        remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
1351                        goto retry;
1352                }
1353
1354                /* Load 'tmp_buf' with old contents of flash */
1355                tmp_buf = map_read(map, bus_ofs+chipstart);
1356
1357                mutex_unlock(&cfi->chips[chipnum].mutex);
1358
1359                /* Number of bytes to copy from buffer */
1360                n = min_t(int, len, map_bankwidth(map)-i);
1361
1362                tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
1363
1364                ret = do_write_oneword(map, &cfi->chips[chipnum],
1365                                       bus_ofs, tmp_buf);
1366                if (ret)
1367                        return ret;
1368
1369                ofs += n;
1370                buf += n;
1371                (*retlen) += n;
1372                len -= n;
1373
1374                if (ofs >> cfi->chipshift) {
1375                        chipnum++;
1376                        ofs = 0;
1377                        if (chipnum == cfi->numchips)
1378                                return 0;
1379                }
1380        }
1381
1382        /* We are now aligned, write as much as possible */
1383        while (len >= map_bankwidth(map)) {
1384                map_word datum;
1385
1386                datum = map_word_load(map, buf);
1387
1388                ret = do_write_oneword(map, &cfi->chips[chipnum],
1389                                       ofs, datum);
1390                if (ret)
1391                        return ret;
1392
1393                ofs += map_bankwidth(map);
1394                buf += map_bankwidth(map);
1395                (*retlen) += map_bankwidth(map);
1396                len -= map_bankwidth(map);
1397
1398                if (ofs >> cfi->chipshift) {
1399                        chipnum++;
1400                        ofs = 0;
1401                        if (chipnum == cfi->numchips)
1402                                return 0;
1403                        chipstart = cfi->chips[chipnum].start;
1404                }
1405        }
1406
1407        /* Write the trailing bytes if any */
1408        if (len & (map_bankwidth(map)-1)) {
1409                map_word tmp_buf;
1410
1411 retry1:
1412                mutex_lock(&cfi->chips[chipnum].mutex);
1413
1414                if (cfi->chips[chipnum].state != FL_READY) {
1415                        set_current_state(TASK_UNINTERRUPTIBLE);
1416                        add_wait_queue(&cfi->chips[chipnum].wq, &wait);
1417
1418                        mutex_unlock(&cfi->chips[chipnum].mutex);
1419
1420                        schedule();
1421                        remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
1422                        goto retry1;
1423                }
1424
1425                tmp_buf = map_read(map, ofs + chipstart);
1426
1427                mutex_unlock(&cfi->chips[chipnum].mutex);
1428
1429                tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
1430
1431                ret = do_write_oneword(map, &cfi->chips[chipnum],
1432                                ofs, tmp_buf);
1433                if (ret)
1434                        return ret;
1435
1436                (*retlen) += len;
1437        }
1438
1439        return 0;
1440}
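
/*
 * A minimal sketch (not part of the driver) of the head-alignment
 * arithmetic used in cfi_amdstd_write_words() above, assuming a
 * hypothetical 4-byte bus width; all names and numbers here are
 * illustrative only.
 */
#if 0
static void alignment_example(void)
{
        unsigned long ofs = 0x1002;     /* unaligned start offset */
        size_t len = 7;                 /* bytes to write */
        int bankwidth = 4;              /* stand-in for map_bankwidth(map) */

        unsigned long bus_ofs = ofs & ~(bankwidth - 1); /* 0x1000 */
        int i = ofs - bus_ofs;                          /* 2 bytes into the word */
        int n = min_t(int, len, bankwidth - i);         /* 2 head bytes */

        /*
         * The head is merged into the old word at bus_ofs and written
         * with do_write_oneword(); one aligned word follows at 0x1004,
         * and the remaining 1-byte tail is again read-modify-written.
         */
}
#endif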
1441
1442
1443/*
1444 * FIXME: interleaved mode not tested, and probably not supported!
1445 */
1446static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1447                                    unsigned long adr, const u_char *buf,
1448                                    int len)
1449{
1450        struct cfi_private *cfi = map->fldrv_priv;
1451        unsigned long timeo = jiffies + HZ;
1452        /* see comments in do_write_oneword() regarding uWriteTimeout. */
1453        unsigned long uWriteTimeout = (HZ / 1000) + 1;
1454        int ret = -EIO;
1455        unsigned long cmd_adr;
1456        int z, words;
1457        map_word datum;
1458
1459        adr += chip->start;
1460        cmd_adr = adr;
1461
1462        mutex_lock(&chip->mutex);
1463        ret = get_chip(map, chip, adr, FL_WRITING);
1464        if (ret) {
1465                mutex_unlock(&chip->mutex);
1466                return ret;
1467        }
1468
1469        datum = map_word_load(map, buf);
1470
1471        pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
1472               __func__, adr, datum.x[0] );
1473
1474        XIP_INVAL_CACHED_RANGE(map, adr, len);
1475        ENABLE_VPP(map);
1476        xip_disable(map, chip, cmd_adr);
1477
1478        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1479        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1480
1481        /* Write Buffer Load */
1482        map_write(map, CMD(0x25), cmd_adr);
1483
1484        chip->state = FL_WRITING_TO_BUFFER;
1485
1486        /* Write length of data to come */
1487        words = len / map_bankwidth(map);
1488        map_write(map, CMD(words - 1), cmd_adr);
1489        /* Write data */
1490        z = 0;
1491        while (z < words * map_bankwidth(map)) {
1492                datum = map_word_load(map, buf);
1493                map_write(map, datum, adr + z);
1494
1495                z += map_bankwidth(map);
1496                buf += map_bankwidth(map);
1497        }
1498        z -= map_bankwidth(map);
1499
1500        adr += z;
1501
1502        /* Write Buffer Program Confirm: GO GO GO */
1503        map_write(map, CMD(0x29), cmd_adr);
1504        chip->state = FL_WRITING;
1505
1506        INVALIDATE_CACHE_UDELAY(map, chip,
1507                                adr, map_bankwidth(map),
1508                                chip->word_write_time);
1509
1510        timeo = jiffies + uWriteTimeout;
1511
1512        for (;;) {
1513                if (chip->state != FL_WRITING) {
1514                        /* Someone's suspended the write. Sleep */
1515                        DECLARE_WAITQUEUE(wait, current);
1516
1517                        set_current_state(TASK_UNINTERRUPTIBLE);
1518                        add_wait_queue(&chip->wq, &wait);
1519                        mutex_unlock(&chip->mutex);
1520                        schedule();
1521                        remove_wait_queue(&chip->wq, &wait);
1522                        timeo = jiffies + (HZ / 2); /* FIXME */
1523                        mutex_lock(&chip->mutex);
1524                        continue;
1525                }
1526
1527                if (time_after(jiffies, timeo) && !chip_ready(map, adr))
1528                        break;
1529
1530                if (chip_ready(map, adr)) {
1531                        xip_enable(map, chip, adr);
1532                        goto op_done;
1533                }
1534
1535                /* Latency issues. Drop the lock, wait a while and retry */
1536                UDELAY(map, chip, adr, 1);
1537        }
1538
1539        /* reset on all failures. */
1540        map_write(map, CMD(0xF0), chip->start);
1541        xip_enable(map, chip, adr);
1542        /* FIXME - should have reset delay before continuing */
1543
1544        printk(KERN_WARNING "MTD %s(): software timeout\n",
1545               __func__);
1546
1547        ret = -EIO;
1548 op_done:
1549        chip->state = FL_READY;
1550        DISABLE_VPP(map);
1551        put_chip(map, chip, adr);
1552        mutex_unlock(&chip->mutex);
1553
1554        return ret;
1555}
1556
1557
1558static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
1559                                    size_t *retlen, const u_char *buf)
1560{
1561        struct map_info *map = mtd->priv;
1562        struct cfi_private *cfi = map->fldrv_priv;
1563        int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1564        int ret = 0;
1565        int chipnum;
1566        unsigned long ofs;
1567
1568        chipnum = to >> cfi->chipshift;
1569        ofs = to - (chipnum << cfi->chipshift);
1570
1571        /* If it's not bus-aligned, do the first word write */
1572        if (ofs & (map_bankwidth(map)-1)) {
1573                size_t local_len = (-ofs) & (map_bankwidth(map)-1);
1574                if (local_len > len)
1575                        local_len = len;
1576                ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
1577                                             local_len, retlen, buf);
1578                if (ret)
1579                        return ret;
1580                ofs += local_len;
1581                buf += local_len;
1582                len -= local_len;
1583
1584                if (ofs >> cfi->chipshift) {
1585                        chipnum++;
1586                        ofs = 0;
1587                        if (chipnum == cfi->numchips)
1588                                return 0;
1589                }
1590        }
1591
1592        /* The write buffer is only worth it if there is more than one word to write... */
1593        while (len >= map_bankwidth(map) * 2) {
1594                /* We must not cross write block boundaries */
1595                int size = wbufsize - (ofs & (wbufsize-1));
1596
1597                if (size > len)
1598                        size = len;
1599                if (size % map_bankwidth(map))
1600                        size -= size % map_bankwidth(map);
1601
1602                ret = do_write_buffer(map, &cfi->chips[chipnum],
1603                                      ofs, buf, size);
1604                if (ret)
1605                        return ret;
1606
1607                ofs += size;
1608                buf += size;
1609                (*retlen) += size;
1610                len -= size;
1611
1612                if (ofs >> cfi->chipshift) {
1613                        chipnum++;
1614                        ofs = 0;
1615                        if (chipnum == cfi->numchips)
1616                                return 0;
1617                }
1618        }
1619
1620        if (len) {
1621                size_t retlen_dregs = 0;
1622
1623                ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
1624                                             len, &retlen_dregs, buf);
1625
1626                *retlen += retlen_dregs;
1627                return ret;
1628        }
1629
1630        return 0;
1631}
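
/*
 * A minimal sketch (not part of the driver) of the chunking done in
 * cfi_amdstd_write_buffers() above, assuming interleave 1 and
 * MaxBufWriteSize 5, i.e. a 32-byte write buffer; the values are
 * illustrative only.
 */
#if 0
static void chunking_example(void)
{
        int wbufsize = 1 << 5;          /* cfi_interleave(cfi) << MaxBufWriteSize */
        unsigned long ofs = 0x1014;     /* 20 bytes into a write-buffer block */
        size_t len = 100;

        /*
         * The first chunk must stop at the write-buffer boundary:
         * 32 - (0x1014 & 31) = 12 bytes. After that, full 32-byte
         * buffers are programmed until fewer than two words remain,
         * and the leftover falls back to cfi_amdstd_write_words().
         */
        int size = wbufsize - (ofs & (wbufsize - 1));   /* 12 */
}
#endif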
1632
1633/*
1634 * Wait for the flash chip to become ready to write data
1635 *
1636 * This is only called during the panic_write() path. When panic_write()
1637 * is called, the kernel is in the process of a panic, and will soon be
1638 * dead. Therefore we don't take any locks, and attempt to get access
1639 * to the chip as soon as possible.
1640 */
1641static int cfi_amdstd_panic_wait(struct map_info *map, struct flchip *chip,
1642                                 unsigned long adr)
1643{
1644        struct cfi_private *cfi = map->fldrv_priv;
1645        int retries = 10;
1646        int i;
1647
1648        /*
1649         * If the driver thinks the chip is idle, and no toggle bits
1650         * are changing, then the chip is actually idle for sure.
1651         */
1652        if (chip->state == FL_READY && chip_ready(map, adr))
1653                return 0;
1654
1655        /*
1656         * Try several times to reset the chip and then wait for it
1657         * to become idle. The upper limit of a few milliseconds of
1658         * delay isn't a big problem: the kernel is dying anyway. It
1659         * is more important to save the messages.
1660         */
1661        while (retries > 0) {
1662                const unsigned long timeo = (HZ / 1000) + 1;
1663
1664                /* send the reset command */
1665                map_write(map, CMD(0xF0), chip->start);
1666
1667                /* wait for the chip to become ready */
1668                for (i = 0; i < jiffies_to_usecs(timeo); i++) {
1669                        if (chip_ready(map, adr))
1670                                return 0;
1671
1672                        udelay(1);
1673                }
1674        }
1675
1676        /* the chip never became ready */
1677        return -EBUSY;
1678}
1679
1680/*
1681 * Write out one word of data to a single flash chip during a kernel panic
1682 *
1683 * This is only called during the panic_write() path. When panic_write()
1684 * is called, the kernel is in the process of a panic, and will soon be
1685 * dead. Therefore we don't take any locks, and attempt to get access
1686 * to the chip as soon as possible.
1687 *
1688 * The implementation of this routine is intentionally similar to
1689 * do_write_oneword(), in order to ease code maintenance.
1690 */
1691static int do_panic_write_oneword(struct map_info *map, struct flchip *chip,
1692                                  unsigned long adr, map_word datum)
1693{
1694        const unsigned long uWriteTimeout = (HZ / 1000) + 1;
1695        struct cfi_private *cfi = map->fldrv_priv;
1696        int retry_cnt = 0;
1697        map_word oldd;
1698        int ret = 0;
1699        int i;
1700
1701        adr += chip->start;
1702
1703        ret = cfi_amdstd_panic_wait(map, chip, adr);
1704        if (ret)
1705                return ret;
1706
1707        pr_debug("MTD %s(): PANIC WRITE 0x%.8lx(0x%.8lx)\n",
1708                        __func__, adr, datum.x[0]);
1709
1710        /*
1711         * Check for a NOP for the case when the datum to write is already
1712         * present - it saves time and works around buggy chips that corrupt
1713         * data at other locations when 0xff is written to a location that
1714         * already contains 0xff.
1715         */
1716        oldd = map_read(map, adr);
1717        if (map_word_equal(map, oldd, datum)) {
1718                pr_debug("MTD %s(): NOP\n", __func__);
1719                goto op_done;
1720        }
1721
1722        ENABLE_VPP(map);
1723
1724retry:
1725        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1726        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1727        cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1728        map_write(map, datum, adr);
1729
1730        for (i = 0; i < jiffies_to_usecs(uWriteTimeout); i++) {
1731                if (chip_ready(map, adr))
1732                        break;
1733
1734                udelay(1);
1735        }
1736
1737        if (!chip_good(map, adr, datum)) {
1738                /* reset on all failures. */
1739                map_write(map, CMD(0xF0), chip->start);
1740                /* FIXME - should have reset delay before continuing */
1741
1742                if (++retry_cnt <= MAX_WORD_RETRIES)
1743                        goto retry;
1744
1745                ret = -EIO;
1746        }
1747
1748op_done:
1749        DISABLE_VPP(map);
1750        return ret;
1751}
1752
1753/*
1754 * Write out some data during a kernel panic
1755 *
1756 * This is used by the mtdoops driver to save the dying messages from a
1757 * kernel which has panic'd.
1758 *
1759 * This routine ignores all of the locking used throughout the rest of the
1760 * driver, in order to ensure that the data gets written out no matter what
1761 * state this driver (and the flash chip itself) was in when the kernel crashed.
1762 *
1763 * The implementation of this routine is intentionally similar to
1764 * cfi_amdstd_write_words(), in order to ease code maintenance.
1765 */
1766static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
1767                                  size_t *retlen, const u_char *buf)
1768{
1769        struct map_info *map = mtd->priv;
1770        struct cfi_private *cfi = map->fldrv_priv;
1771        unsigned long ofs, chipstart;
1772        int ret = 0;
1773        int chipnum;
1774
1775        chipnum = to >> cfi->chipshift;
1776        ofs = to - (chipnum << cfi->chipshift);
1777        chipstart = cfi->chips[chipnum].start;
1778
1779        /* If it's not bus aligned, do the first byte write */
1780        if (ofs & (map_bankwidth(map) - 1)) {
1781                unsigned long bus_ofs = ofs & ~(map_bankwidth(map) - 1);
1782                int i = ofs - bus_ofs;
1783                int n = 0;
1784                map_word tmp_buf;
1785
1786                ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], bus_ofs);
1787                if (ret)
1788                        return ret;
1789
1790                /* Load 'tmp_buf' with old contents of flash */
1791                tmp_buf = map_read(map, bus_ofs + chipstart);
1792
1793                /* Number of bytes to copy from buffer */
1794                n = min_t(int, len, map_bankwidth(map) - i);
1795
1796                tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
1797
1798                ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
1799                                             bus_ofs, tmp_buf);
1800                if (ret)
1801                        return ret;
1802
1803                ofs += n;
1804                buf += n;
1805                (*retlen) += n;
1806                len -= n;
1807
1808                if (ofs >> cfi->chipshift) {
1809                        chipnum++;
1810                        ofs = 0;
1811                        if (chipnum == cfi->numchips)
1812                                return 0;
1813                }
1814        }
1815
1816        /* We are now aligned, write as much as possible */
1817        while (len >= map_bankwidth(map)) {
1818                map_word datum;
1819
1820                datum = map_word_load(map, buf);
1821
1822                ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
1823                                             ofs, datum);
1824                if (ret)
1825                        return ret;
1826
1827                ofs += map_bankwidth(map);
1828                buf += map_bankwidth(map);
1829                (*retlen) += map_bankwidth(map);
1830                len -= map_bankwidth(map);
1831
1832                if (ofs >> cfi->chipshift) {
1833                        chipnum++;
1834                        ofs = 0;
1835                        if (chipnum == cfi->numchips)
1836                                return 0;
1837
1838                        chipstart = cfi->chips[chipnum].start;
1839                }
1840        }
1841
1842        /* Write the trailing bytes if any */
1843        if (len & (map_bankwidth(map) - 1)) {
1844                map_word tmp_buf;
1845
1846                ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], ofs);
1847                if (ret)
1848                        return ret;
1849
1850                tmp_buf = map_read(map, ofs + chipstart);
1851
1852                tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
1853
1854                ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
1855                                             ofs, tmp_buf);
1856                if (ret)
1857                        return ret;
1858
1859                (*retlen) += len;
1860        }
1861
1862        return 0;
1863}
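
/*
 * A minimal sketch (not part of the driver) of a panic-path caller in
 * the style of mtdoops, assuming the mtd_panic_write() wrapper is
 * available; the function and record layout are illustrative only.
 */
#if 0
static void oops_record_example(struct mtd_info *mtd, loff_t record_ofs,
                                const u_char *log, size_t log_len)
{
        size_t retlen = 0;

        /* No locks, no sleeping: the kernel is already dying. */
        if (mtd_panic_write(mtd, record_ofs, log_len, &retlen, log) ||
            retlen != log_len)
                pr_err("oops record was not fully written\n");
}
#endif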
1864
1865
1866/*
1867 * Handle devices with one erase region, that only implement
1868 * the chip erase command.
1869 */
1870static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
1871{
1872        struct cfi_private *cfi = map->fldrv_priv;
1873        unsigned long timeo = jiffies + HZ;
1874        unsigned long adr;
1875        DECLARE_WAITQUEUE(wait, current);
1876        int ret = 0;
1877
1878        adr = cfi->addr_unlock1;
1879
1880        mutex_lock(&chip->mutex);
1881        ret = get_chip(map, chip, adr, FL_WRITING);
1882        if (ret) {
1883                mutex_unlock(&chip->mutex);
1884                return ret;
1885        }
1886
1887        pr_debug("MTD %s(): ERASE 0x%.8lx\n",
1888               __func__, chip->start );
1889
1890        XIP_INVAL_CACHED_RANGE(map, adr, map->size);
1891        ENABLE_VPP(map);
1892        xip_disable(map, chip, adr);
1893
1894        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1895        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1896        cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1897        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1898        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1899        cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1900
1901        chip->state = FL_ERASING;
1902        chip->erase_suspended = 0;
1903        chip->in_progress_block_addr = adr;
1904
1905        INVALIDATE_CACHE_UDELAY(map, chip,
1906                                adr, map->size,
1907                                chip->erase_time*500);
1908
1909        timeo = jiffies + (HZ*20);
1910
1911        for (;;) {
1912                if (chip->state != FL_ERASING) {
1913                        /* Someone's suspended the erase. Sleep */
1914                        set_current_state(TASK_UNINTERRUPTIBLE);
1915                        add_wait_queue(&chip->wq, &wait);
1916                        mutex_unlock(&chip->mutex);
1917                        schedule();
1918                        remove_wait_queue(&chip->wq, &wait);
1919                        mutex_lock(&chip->mutex);
1920                        continue;
1921                }
1922                if (chip->erase_suspended) {
1923                        /* This erase was suspended and resumed.
1924                           Adjust the timeout */
1925                        timeo = jiffies + (HZ*20); /* FIXME */
1926                        chip->erase_suspended = 0;
1927                }
1928
1929                if (chip_ready(map, adr))
1930                        break;
1931
1932                if (time_after(jiffies, timeo)) {
1933                        printk(KERN_WARNING "MTD %s(): software timeout\n",
1934                               __func__);
1935                        break;
1936                }
1937
1938                /* Latency issues. Drop the lock, wait a while and retry */
1939                UDELAY(map, chip, adr, 1000000/HZ);
1940        }
1941        /* Did we succeed? */
1942        if (!chip_good(map, adr, map_word_ff(map))) {
1943                /* reset on all failures. */
1944                map_write(map, CMD(0xF0), chip->start);
1945                /* FIXME - should have reset delay before continuing */
1946
1947                ret = -EIO;
1948        }
1949
1950        chip->state = FL_READY;
1951        xip_enable(map, chip, adr);
1952        DISABLE_VPP(map);
1953        put_chip(map, chip, adr);
1954        mutex_unlock(&chip->mutex);
1955
1956        return ret;
1957}
1958
1959
1960static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
1961{
1962        struct cfi_private *cfi = map->fldrv_priv;
1963        unsigned long timeo = jiffies + HZ;
1964        DECLARE_WAITQUEUE(wait, current);
1965        int ret = 0;
1966
1967        adr += chip->start;
1968
1969        mutex_lock(&chip->mutex);
1970        ret = get_chip(map, chip, adr, FL_ERASING);
1971        if (ret) {
1972                mutex_unlock(&chip->mutex);
1973                return ret;
1974        }
1975
1976        pr_debug("MTD %s(): ERASE 0x%.8lx\n",
1977               __func__, adr );
1978
1979        XIP_INVAL_CACHED_RANGE(map, adr, len);
1980        ENABLE_VPP(map);
1981        xip_disable(map, chip, adr);
1982
1983        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1984        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1985        cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1986        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1987        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1988        map_write(map, cfi->sector_erase_cmd, adr);
1989
1990        chip->state = FL_ERASING;
1991        chip->erase_suspended = 0;
1992        chip->in_progress_block_addr = adr;
1993
1994        INVALIDATE_CACHE_UDELAY(map, chip,
1995                                adr, len,
1996                                chip->erase_time*500);
1997
1998        timeo = jiffies + (HZ*20);
1999
2000        for (;;) {
2001                if (chip->state != FL_ERASING) {
2002                        /* Someone's suspended the erase. Sleep */
2003                        set_current_state(TASK_UNINTERRUPTIBLE);
2004                        add_wait_queue(&chip->wq, &wait);
2005                        mutex_unlock(&chip->mutex);
2006                        schedule();
2007                        remove_wait_queue(&chip->wq, &wait);
2008                        mutex_lock(&chip->mutex);
2009                        continue;
2010                }
2011                if (chip->erase_suspended) {
2012                        /* This erase was suspended and resumed.
2013                           Adjust the timeout */
2014                        timeo = jiffies + (HZ*20); /* FIXME */
2015                        chip->erase_suspended = 0;
2016                }
2017
2018                if (chip_ready(map, adr)) {
2019                        xip_enable(map, chip, adr);
2020                        break;
2021                }
2022
2023                if (time_after(jiffies, timeo)) {
2024                        xip_enable(map, chip, adr);
2025                        printk(KERN_WARNING "MTD %s(): software timeout\n",
2026                               __func__);
2027                        break;
2028                }
2029
2030                /* Latency issues. Drop the lock, wait a while and retry */
2031                UDELAY(map, chip, adr, 1000000/HZ);
2032        }
2033        /* Did we succeed? */
2034        if (!chip_good(map, adr, map_word_ff(map))) {
2035                /* reset on all failures. */
2036                map_write(map, CMD(0xF0), chip->start);
2037                /* FIXME - should have reset delay before continuing */
2038
2039                ret = -EIO;
2040        }
2041
2042        chip->state = FL_READY;
2043        DISABLE_VPP(map);
2044        put_chip(map, chip, adr);
2045        mutex_unlock(&chip->mutex);
2046        return ret;
2047}
2048
2049
2050static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
2051{
2052        unsigned long ofs, len;
2053        int ret;
2054
2055        ofs = instr->addr;
2056        len = instr->len;
2057
2058        ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
2059        if (ret)
2060                return ret;
2061
2062        instr->state = MTD_ERASE_DONE;
2063        mtd_erase_callback(instr);
2064
2065        return 0;
2066}
2067
2068
2069static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
2070{
2071        struct map_info *map = mtd->priv;
2072        struct cfi_private *cfi = map->fldrv_priv;
2073        int ret = 0;
2074
2075        if (instr->addr != 0)
2076                return -EINVAL;
2077
2078        if (instr->len != mtd->size)
2079                return -EINVAL;
2080
2081        ret = do_erase_chip(map, &cfi->chips[0]);
2082        if (ret)
2083                return ret;
2084
2085        instr->state = MTD_ERASE_DONE;
2086        mtd_erase_callback(instr);
2087
2088        return 0;
2089}
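
/*
 * A minimal sketch (not part of the driver) of a block erase from an
 * MTD user, assuming the mtd_erase() wrapper and a synchronous driver
 * such as this one; names are illustrative only.
 */
#if 0
static int erase_block_example(struct mtd_info *mtd, loff_t ofs)
{
        struct erase_info ei = {
                .mtd  = mtd,
                .addr = ofs,
                .len  = mtd->erasesize,
        };

        /*
         * Dispatches to cfi_amdstd_erase_varsize() above, or to
         * cfi_amdstd_erase_chip() on parts that only implement the
         * full-chip erase command.
         */
        return mtd_erase(mtd, &ei);
}
#endif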
2090
2091static int do_atmel_lock(struct map_info *map, struct flchip *chip,
2092                         unsigned long adr, int len, void *thunk)
2093{
2094        struct cfi_private *cfi = map->fldrv_priv;
2095        int ret;
2096
2097        mutex_lock(&chip->mutex);
2098        ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
2099        if (ret)
2100                goto out_unlock;
2101        chip->state = FL_LOCKING;
2102
2103        pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len);
2104
2105        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2106                         cfi->device_type, NULL);
2107        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
2108                         cfi->device_type, NULL);
2109        cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi,
2110                         cfi->device_type, NULL);
2111        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2112                         cfi->device_type, NULL);
2113        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
2114                         cfi->device_type, NULL);
2115        map_write(map, CMD(0x40), chip->start + adr);
2116
2117        chip->state = FL_READY;
2118        put_chip(map, chip, adr + chip->start);
2119        ret = 0;
2120
2121out_unlock:
2122        mutex_unlock(&chip->mutex);
2123        return ret;
2124}
2125
2126static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
2127                           unsigned long adr, int len, void *thunk)
2128{
2129        struct cfi_private *cfi = map->fldrv_priv;
2130        int ret;
2131
2132        mutex_lock(&chip->mutex);
2133        ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
2134        if (ret)
2135                goto out_unlock;
2136        chip->state = FL_UNLOCKING;
2137
2138        pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len);
2139
2140        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2141                         cfi->device_type, NULL);
2142        map_write(map, CMD(0x70), adr);
2143
2144        chip->state = FL_READY;
2145        put_chip(map, chip, adr + chip->start);
2146        ret = 0;
2147
2148out_unlock:
2149        mutex_unlock(&chip->mutex);
2150        return ret;
2151}
2152
2153static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2154{
2155        return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
2156}
2157
2158static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2159{
2160        return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
2161}
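
/*
 * A minimal sketch (not part of the driver) of sector protection from
 * an MTD user, assuming the mtd_lock()/mtd_unlock() wrappers; on
 * Atmel AT49BV6416 parts these end up in do_atmel_lock() and
 * do_atmel_unlock() above. Illustrative only.
 */
#if 0
static int rewrite_protected_sector_example(struct mtd_info *mtd, loff_t ofs)
{
        int ret;

        ret = mtd_unlock(mtd, ofs, mtd->erasesize);     /* drop protection */
        if (ret)
                return ret;

        /* ... erase and reprogram the sector here ... */

        return mtd_lock(mtd, ofs, mtd->erasesize);      /* re-protect */
}
#endif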
2162
2163
2164static void cfi_amdstd_sync (struct mtd_info *mtd)
2165{
2166        struct map_info *map = mtd->priv;
2167        struct cfi_private *cfi = map->fldrv_priv;
2168        int i;
2169        struct flchip *chip;
2170        int ret = 0;
2171        DECLARE_WAITQUEUE(wait, current);
2172
2173        for (i = 0; !ret && i < cfi->numchips; i++) {
2174                chip = &cfi->chips[i];
2175
2176        retry:
2177                mutex_lock(&chip->mutex);
2178
2179                switch (chip->state) {
2180                case FL_READY:
2181                case FL_STATUS:
2182                case FL_CFI_QUERY:
2183                case FL_JEDEC_QUERY:
2184                        chip->oldstate = chip->state;
2185                        chip->state = FL_SYNCING;
2186                        /* No need to wake_up() on this state change -
2187                         * as the whole point is that nobody can do anything
2188                         * with the chip now anyway. Fall through.
2189                         */
2190                case FL_SYNCING:
2191                        mutex_unlock(&chip->mutex);
2192                        break;
2193
2194                default:
2195                        /* Not an idle state */
2196                        set_current_state(TASK_UNINTERRUPTIBLE);
2197                        add_wait_queue(&chip->wq, &wait);
2198
2199                        mutex_unlock(&chip->mutex);
2200
2201                        schedule();
2202
2203                        remove_wait_queue(&chip->wq, &wait);
2204
2205                        goto retry;
2206                }
2207        }
2208
2209        /* Unlock the chips again */
2210
2211        for (i--; i >= 0; i--) {
2212                chip = &cfi->chips[i];
2213
2214                mutex_lock(&chip->mutex);
2215
2216                if (chip->state == FL_SYNCING) {
2217                        chip->state = chip->oldstate;
2218                        wake_up(&chip->wq);
2219                }
2220                mutex_unlock(&chip->mutex);
2221        }
2222}
2223
2224
2225static int cfi_amdstd_suspend(struct mtd_info *mtd)
2226{
2227        struct map_info *map = mtd->priv;
2228        struct cfi_private *cfi = map->fldrv_priv;
2229        int i;
2230        struct flchip *chip;
2231        int ret = 0;
2232
2233        for (i = 0; !ret && i < cfi->numchips; i++) {
2234                chip = &cfi->chips[i];
2235
2236                mutex_lock(&chip->mutex);
2237
2238                switch (chip->state) {
2239                case FL_READY:
2240                case FL_STATUS:
2241                case FL_CFI_QUERY:
2242                case FL_JEDEC_QUERY:
2243                        chip->oldstate = chip->state;
2244                        chip->state = FL_PM_SUSPENDED;
2245                        /* No need to wake_up() on this state change -
2246                         * as the whole point is that nobody can do anything
2247                         * with the chip now anyway. Fall through.
2248                         */
2249                case FL_PM_SUSPENDED:
2250                        break;
2251
2252                default:
2253                        ret = -EAGAIN;
2254                        break;
2255                }
2256                mutex_unlock(&chip->mutex);
2257        }
2258
2259        /* Unlock the chips again */
2260
2261        if (ret) {
2262                for (i--; i >= 0; i--) {
2263                        chip = &cfi->chips[i];
2264
2265                        mutex_lock(&chip->mutex);
2266
2267                        if (chip->state == FL_PM_SUSPENDED) {
2268                                chip->state = chip->oldstate;
2269                                wake_up(&chip->wq);
2270                        }
2271                        mutex_unlock(&chip->mutex);
2272                }
2273        }
2274
2275        return ret;
2276}
2277
2278
2279static void cfi_amdstd_resume(struct mtd_info *mtd)
2280{
2281        struct map_info *map = mtd->priv;
2282        struct cfi_private *cfi = map->fldrv_priv;
2283        int i;
2284        struct flchip *chip;
2285
2286        for (i = 0; i < cfi->numchips; i++) {
2287
2288                chip = &cfi->chips[i];
2289
2290                mutex_lock(&chip->mutex);
2291
2292                if (chip->state == FL_PM_SUSPENDED) {
2293                        chip->state = FL_READY;
2294                        map_write(map, CMD(0xF0), chip->start);
2295                        wake_up(&chip->wq);
2296                } else
2297                        printk(KERN_ERR
2298                               "Argh. Chip not in PM_SUSPENDED state upon resume()\n");
2299
2300                mutex_unlock(&chip->mutex);
2301        }
2302}
2303
2304
2305/*
2306 * Ensure that the flash device is put back into read array mode before
2307 * unloading the driver or rebooting.  On some systems, rebooting while
2308 * the flash is in query/program/erase mode will prevent the CPU from
2309 * fetching the bootloader code, requiring a hard reset or power cycle.
2310 */
2311static int cfi_amdstd_reset(struct mtd_info *mtd)
2312{
2313        struct map_info *map = mtd->priv;
2314        struct cfi_private *cfi = map->fldrv_priv;
2315        int i, ret;
2316        struct flchip *chip;
2317
2318        for (i = 0; i < cfi->numchips; i++) {
2319
2320                chip = &cfi->chips[i];
2321
2322                mutex_lock(&chip->mutex);
2323
2324                ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
2325                if (!ret) {
2326                        map_write(map, CMD(0xF0), chip->start);
2327                        chip->state = FL_SHUTDOWN;
2328                        put_chip(map, chip, chip->start);
2329                }
2330
2331                mutex_unlock(&chip->mutex);
2332        }
2333
2334        return 0;
2335}
2336
2337
2338static int cfi_amdstd_reboot(struct notifier_block *nb, unsigned long val,
2339                               void *v)
2340{
2341        struct mtd_info *mtd;
2342
2343        mtd = container_of(nb, struct mtd_info, reboot_notifier);
2344        cfi_amdstd_reset(mtd);
2345        return NOTIFY_DONE;
2346}
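
/*
 * A minimal sketch (not part of this excerpt) of how the notifier
 * above is wired up; in this driver the registration is done during
 * setup, roughly along these lines.
 */
#if 0
        mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;
        register_reboot_notifier(&mtd->reboot_notifier);
#endif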
2347
2348
2349static void cfi_amdstd_destroy(struct mtd_info *mtd)
2350{
2351        struct map_info *map = mtd->priv;
2352        struct cfi_private *cfi = map->fldrv_priv;
2353
2354        cfi_amdstd_reset(mtd);
2355        unregister_reboot_notifier(&mtd->reboot_notifier);
2356        kfree(cfi->cmdset_priv);
2357        kfree(cfi->cfiq);
2358        kfree(cfi);
2359        kfree(mtd->eraseregions);
2360}
2361
2362MODULE_LICENSE("GPL");
2363MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
2364MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");
2365MODULE_ALIAS("cfi_cmdset_0006");
2366MODULE_ALIAS("cfi_cmdset_0701");
2367