linux/drivers/mtd/chips/cfi_cmdset_0001.c
/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 *
 * 10/10/2000   Nicolas Pitre <nico@fluxnic.net>
 *      - completely revamped method functions so they are aware and
 *        independent of the flash geometry (buswidth, interleave, etc.)
 *      - scalability vs code size is completely set at compile-time
 *        (see include/linux/mtd/cfi.h for selection)
 *      - optimized write buffer method
 * 02/05/2002   Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 *      - reworked lock/unlock/erase support for var size flash
 * 21/03/2007   Rodolfo Giometti <giometti@linux.it>
 *      - auto unlock sectors on resume for auto locking flash on power up
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/bitmap.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

/* debugging, turns off buffer write mode if set to 1 */
#define FORCE_WORD_WRITE 0

#define MANUFACTURER_INTEL      0x0089
#define I82802AB        0x00ad
#define I82802AC        0x00ac
#define PF38F4476       0x881c
#define MANUFACTURER_ST         0x0020
#define M50LPW080       0x002F
#define M50FLW080A      0x0080
#define M50FLW080B      0x0081
#define AT49BV640D      0x02de

static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
                                            struct otp_info *, size_t);
static int cfi_intelext_get_user_prot_info (struct mtd_info *,
                                            struct otp_info *, size_t);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
                     size_t *retlen, void **virt, resource_size_t *phys);
static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len);

static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"



/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
        .probe          = NULL, /* Not usable directly */
        .destroy        = cfi_intelext_destroy,
        .name           = "cfi_cmdset_0001",
        .module         = THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
        int i;
        printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
        printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
        printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
        printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
        printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
        printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
        printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
        printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
        printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
        printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
        printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
        printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
        printk("     - Extended Flash Array:    %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
        for (i=11; i<32; i++) {
                if (extp->FeatureSupport & (1<<i))
                        printk("     - Unknown Bit %X:      supported\n", i);
        }

        printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
        printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
        for (i=1; i<8; i++) {
                if (extp->SuspendCmdSupport & (1<<i))
                        printk("     - Unknown Bit %X:               supported\n", i);
        }

        printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
        printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
        printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
        for (i=2; i<3; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }
        printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
        printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
        for (i=6; i<16; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }

        printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
               extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
        if (extp->VppOptimal)
                printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
                       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

/* Atmel chips don't use the same PRI format as Intel chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;
        struct cfi_pri_atmel atmel_pri;
        uint32_t features = 0;

        /* Reverse byteswapping */
        extp->FeatureSupport = cpu_to_le32(extp->FeatureSupport);
        extp->BlkStatusRegMask = cpu_to_le16(extp->BlkStatusRegMask);
        extp->ProtRegAddr = cpu_to_le16(extp->ProtRegAddr);

        memcpy(&atmel_pri, extp, sizeof(atmel_pri));
        memset((char *)extp + 5, 0, sizeof(*extp) - 5);

        printk(KERN_ERR "atmel Features: %02x\n", atmel_pri.Features);

        if (atmel_pri.Features & 0x01) /* chip erase supported */
                features |= (1<<0);
        if (atmel_pri.Features & 0x02) /* erase suspend supported */
                features |= (1<<1);
        if (atmel_pri.Features & 0x04) /* program suspend supported */
                features |= (1<<2);
        if (atmel_pri.Features & 0x08) /* simultaneous operations supported */
                features |= (1<<9);
        if (atmel_pri.Features & 0x20) /* page mode read supported */
                features |= (1<<7);
        if (atmel_pri.Features & 0x40) /* queued erase supported */
                features |= (1<<4);
        if (atmel_pri.Features & 0x80) /* Protection bits supported */
                features |= (1<<6);

        extp->FeatureSupport = features;

        /* burst write mode not supported */
        cfi->cfiq->BufWriteTimeoutTyp = 0;
        cfi->cfiq->BufWriteTimeoutMax = 0;
}

#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel StrataFlash chips prior to FPO revision C have bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
                            "erase on write disabled.\n");
        extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        if (cfip && (cfip->FeatureSupport&4)) {
                cfip->FeatureSupport &= ~4;
                printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
        }
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        cfi->cfiq->BufWriteTimeoutTyp = 0;      /* Not supported */
        cfi->cfiq->BufWriteTimeoutMax = 0;      /* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        /* Note this is done after the region info is endian swapped */
        cfi->cfiq->EraseRegionInfo[1] =
                (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}

static void fixup_use_point(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        if (!mtd->point && map_is_linear(map)) {
                mtd->point   = cfi_intelext_point;
                mtd->unpoint = cfi_intelext_unpoint;
        }
}

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        if (cfi->cfiq->BufWriteTimeoutTyp) {
                printk(KERN_INFO "Using buffer write method\n" );
                mtd->write = cfi_intelext_write_buffers;
                mtd->writev = cfi_intelext_writev;
        }
}

/*
 * Some chips power-up with all sectors locked by default.
 */
static void fixup_unlock_powerup_lock(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        if (cfip->FeatureSupport&32) {
                printk(KERN_INFO "Using auto-unlock on power-up/resume\n" );
                mtd->flags |= MTD_POWERUP_LOCK;
        }
}

static struct cfi_fixup cfi_fixup_table[] = {
        { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
#endif
#if !FORCE_WORD_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
#endif
        { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
        { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
        { MANUFACTURER_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock, NULL, },
        { 0, 0, NULL, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
        { MANUFACTURER_INTEL, I82802AB,   fixup_use_fwh_lock, NULL, },
        { MANUFACTURER_INTEL, I82802AC,   fixup_use_fwh_lock, NULL, },
        { MANUFACTURER_ST,    M50LPW080,  fixup_use_fwh_lock, NULL, },
        { MANUFACTURER_ST,    M50FLW080A, fixup_use_fwh_lock, NULL, },
        { MANUFACTURER_ST,    M50FLW080B, fixup_use_fwh_lock, NULL, },
        { 0, 0, NULL, NULL }
};
static struct cfi_fixup fixup_table[] = {
        /* The CFI vendor IDs and the JEDEC vendor IDs appear
         * to be common.  It seems likely that the device IDs
         * are as well.  This table is to pick up all cases
         * where we know that is the case.
         */
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
        { 0, 0, NULL, NULL }
};
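/*
 * The Intel PF38F4476 reports extended query version 1.3 but apparently
 * does not carry the version 1.3 table layout; demote it to 1.1 so the
 * parser below skips the fields the chip does not provide.
 */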
static void cfi_fixup_major_minor(struct cfi_private *cfi,
                                                struct cfi_pri_intelext *extp)
{
        if (cfi->mfr == MANUFACTURER_INTEL &&
                        cfi->id == PF38F4476 && extp->MinorVersion == '3')
                extp->MinorVersion = '1';
}

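/*
 * Read and parse the Intel/Sharp extended query table.  Its real size
 * depends on version-specific trailing fields (OTP info, burst read info,
 * partition regions), so we first fetch the fixed-size header, then keep
 * growing extp_size and re-reading ("goto again") until the whole table
 * fits, giving up beyond a 4KiB sanity limit.
 */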
static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp;
        unsigned int extra_size = 0;
        unsigned int extp_size = sizeof(*extp);

 again:
        extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
        if (!extp)
                return NULL;

        cfi_fixup_major_minor(cfi, extp);

        if (extp->MajorVersion != '1' ||
            (extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
                printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
                       "version %c.%c.\n",  extp->MajorVersion,
                       extp->MinorVersion);
                kfree(extp);
                return NULL;
        }

        /* Do some byteswapping if necessary */
        extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
        extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
        extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

        if (extp->MinorVersion >= '0') {
                extra_size = 0;

                /* Protection Register info */
                extra_size += (extp->NumProtectionFields - 1) *
                              sizeof(struct cfi_intelext_otpinfo);
        }

        if (extp->MinorVersion >= '1') {
                /* Burst Read info */
                extra_size += 2;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                extra_size += extp->extra[extra_size - 1];
        }

        if (extp->MinorVersion >= '3') {
                int nb_parts, i;

                /* Number of hardware-partitions */
                extra_size += 1;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                nb_parts = extp->extra[extra_size - 1];

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        extra_size += 2;

                for (i = 0; i < nb_parts; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
                        extra_size += sizeof(*rinfo);
                        if (extp_size < sizeof(*extp) + extra_size)
                                goto need_more;
                        rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
                        extra_size += (rinfo->NumBlockTypes - 1)
                                      * sizeof(struct cfi_intelext_blockinfo);
                }

                if (extp->MinorVersion >= '4')
                        extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

                if (extp_size < sizeof(*extp) + extra_size) {
                        need_more:
                        extp_size = sizeof(*extp) + extra_size;
                        kfree(extp);
                        if (extp_size > 4096) {
                                printk(KERN_ERR
                                        "%s: cfi_pri_intelext is too fat\n",
                                        __func__);
                                return NULL;
                        }
                        goto again;
                }
        }

        return extp;
}

struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct mtd_info *mtd;
        int i;

        mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
        if (!mtd) {
                printk(KERN_ERR "Failed to allocate memory for MTD device\n");
                return NULL;
        }
        mtd->priv = map;
        mtd->type = MTD_NORFLASH;

        /* Fill in the default mtd operations */
        mtd->erase   = cfi_intelext_erase_varsize;
        mtd->read    = cfi_intelext_read;
        mtd->write   = cfi_intelext_write_words;
        mtd->sync    = cfi_intelext_sync;
        mtd->lock    = cfi_intelext_lock;
        mtd->unlock  = cfi_intelext_unlock;
        mtd->suspend = cfi_intelext_suspend;
        mtd->resume  = cfi_intelext_resume;
        mtd->flags   = MTD_CAP_NORFLASH;
        mtd->name    = map->name;
        mtd->writesize = 1;

        mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

        if (cfi->cfi_mode == CFI_MODE_CFI) {
                /*
                 * It's a real CFI chip, not one for which the probe
                 * routine faked a CFI structure. So we read the feature
                 * table from it.
                 */
                __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
                struct cfi_pri_intelext *extp;

                extp = read_pri_intelext(map, adr);
                if (!extp) {
                        kfree(mtd);
                        return NULL;
                }

                /* Install our own private info structure */
                cfi->cmdset_priv = extp;

                cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
                /* Tell the user about it in lots of lovely detail */
                cfi_tell_features(extp);
#endif

                if(extp->SuspendCmdSupport & 1) {
                        printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
                }
        }
        else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
                /* Apply jedec specific fixups */
                cfi_fixup(mtd, jedec_fixup_table);
        }
        /* Apply generic fixups */
        cfi_fixup(mtd, fixup_table);

        for (i=0; i< cfi->numchips; i++) {
                if (cfi->cfiq->WordWriteTimeoutTyp)
                        cfi->chips[i].word_write_time =
                                1<<cfi->cfiq->WordWriteTimeoutTyp;
                else
                        cfi->chips[i].word_write_time = 50000;

                if (cfi->cfiq->BufWriteTimeoutTyp)
                        cfi->chips[i].buffer_write_time =
                                1<<cfi->cfiq->BufWriteTimeoutTyp;
                /* No default; if it isn't specified, we won't use it */

                if (cfi->cfiq->BlockEraseTimeoutTyp)
                        cfi->chips[i].erase_time =
                                1000<<cfi->cfiq->BlockEraseTimeoutTyp;
                else
                        cfi->chips[i].erase_time = 2000000;

                if (cfi->cfiq->WordWriteTimeoutTyp &&
                    cfi->cfiq->WordWriteTimeoutMax)
                        cfi->chips[i].word_write_time_max =
                                1<<(cfi->cfiq->WordWriteTimeoutTyp +
                                    cfi->cfiq->WordWriteTimeoutMax);
                else
                        cfi->chips[i].word_write_time_max = 50000 * 8;

                if (cfi->cfiq->BufWriteTimeoutTyp &&
                    cfi->cfiq->BufWriteTimeoutMax)
                        cfi->chips[i].buffer_write_time_max =
                                1<<(cfi->cfiq->BufWriteTimeoutTyp +
                                    cfi->cfiq->BufWriteTimeoutMax);

                if (cfi->cfiq->BlockEraseTimeoutTyp &&
                    cfi->cfiq->BlockEraseTimeoutMax)
                        cfi->chips[i].erase_time_max =
                                1000<<(cfi->cfiq->BlockEraseTimeoutTyp +
                                       cfi->cfiq->BlockEraseTimeoutMax);
                else
                        cfi->chips[i].erase_time_max = 2000000 * 8;

                cfi->chips[i].ref_point_counter = 0;
                init_waitqueue_head(&(cfi->chips[i].wq));
        }

        map->fldrv = &cfi_intelext_chipdrv;

        return cfi_intelext_setup(mtd);
}
struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
EXPORT_SYMBOL_GPL(cfi_cmdset_0200);
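/*
 * Second-stage setup: size the MTD device from the CFI geometry, build
 * the per-chip erase region map (with a lock bitmap per region), hook up
 * the OTP operations when configured, and finally run the partition
 * fixup, which may rearrange the flchip structures behind our back.
 */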
static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long offset = 0;
        int i,j;
        unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

        //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

        mtd->size = devsize * cfi->numchips;

        mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
        mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
                        * mtd->numeraseregions, GFP_KERNEL);
        if (!mtd->eraseregions) {
                printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
                goto setup_err;
        }

        for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
                unsigned long ernum, ersize;
                ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
                ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

                if (mtd->erasesize < ersize) {
                        mtd->erasesize = ersize;
                }
                for (j=0; j<cfi->numchips; j++) {
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
                        if (!mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap)
                                goto setup_err;
                }
                offset += (ersize * ernum);
        }

        if (offset != devsize) {
                /* Argh */
                printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
                goto setup_err;
        }

        for (i=0; i<mtd->numeraseregions;i++){
                printk(KERN_DEBUG "erase region %d: offset=0x%llx,size=0x%x,blocks=%d\n",
                       i,(unsigned long long)mtd->eraseregions[i].offset,
                       mtd->eraseregions[i].erasesize,
                       mtd->eraseregions[i].numblocks);
        }

#ifdef CONFIG_MTD_OTP
        mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
        mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
        mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
        mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
        mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
        mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

        /* This function has the potential to distort the reality
           a bit and therefore should be called last. */
        if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
                goto setup_err;

        __module_get(THIS_MODULE);
        register_reboot_notifier(&mtd->reboot_notifier);
        return mtd;

 setup_err:
        if(mtd) {
                kfree(mtd->eraseregions);
                kfree(mtd);
        }
        kfree(cfi->cmdset_priv);
        return NULL;
}

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
                                        struct cfi_private **pcfi)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = *pcfi;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        /*
         * Probing of multi-partition flash chips.
         *
         * To support multiple partitions when available, we simply arrange
         * for each of them to have their own flchip structure even if they
         * are on the same physical chip.  This means completely recreating
         * a new cfi_private structure right here which is a blatant code
         * layering violation, but this is still the least intrusive
         * arrangement at this point. This can be rearranged in the future
         * if someone feels motivated enough.  --nico
         */
        if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
            && extp->FeatureSupport & (1 << 9)) {
                struct cfi_private *newcfi;
                struct flchip *chip;
                struct flchip_shared *shared;
                int offs, numregions, numparts, partshift, numvirtchips, i, j;

                /* Protection Register info */
                offs = (extp->NumProtectionFields - 1) *
                       sizeof(struct cfi_intelext_otpinfo);

                /* Burst Read info */
                offs += extp->extra[offs+1]+2;

                /* Number of partition regions */
                numregions = extp->extra[offs];
                offs += 1;

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        offs += 2;

                /* Number of hardware partitions */
                numparts = 0;
                for (i = 0; i < numregions; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
                        numparts += rinfo->NumIdentPartitions;
                        offs += sizeof(*rinfo)
                                + (rinfo->NumBlockTypes - 1) *
                                  sizeof(struct cfi_intelext_blockinfo);
                }

                if (!numparts)
                        numparts = 1;

                /* Programming Region info */
                if (extp->MinorVersion >= '4') {
                        struct cfi_intelext_programming_regioninfo *prinfo;
                        prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
                        mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
                        mtd->flags &= ~MTD_BIT_WRITEABLE;
                        printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
                               map->name, mtd->writesize,
                               cfi->interleave * prinfo->ControlValid,
                               cfi->interleave * prinfo->ControlInvalid);
                }

                /*
                 * All functions below currently rely on all chips having
                 * the same geometry so we'll just assume that all hardware
                 * partitions are of the same size too.
                 */
                partshift = cfi->chipshift - __ffs(numparts);

                if ((1 << partshift) < mtd->erasesize) {
                        printk( KERN_ERR
                                "%s: bad number of hw partitions (%d)\n",
                                __func__, numparts);
                        return -EINVAL;
                }

                numvirtchips = cfi->numchips * numparts;
                newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
                if (!newcfi)
                        return -ENOMEM;
                shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
                if (!shared) {
                        kfree(newcfi);
                        return -ENOMEM;
                }
                memcpy(newcfi, cfi, sizeof(struct cfi_private));
                newcfi->numchips = numvirtchips;
                newcfi->chipshift = partshift;

                chip = &newcfi->chips[0];
                for (i = 0; i < cfi->numchips; i++) {
                        shared[i].writing = shared[i].erasing = NULL;
                        spin_lock_init(&shared[i].lock);
                        for (j = 0; j < numparts; j++) {
                                *chip = cfi->chips[i];
                                chip->start += j << partshift;
                                chip->priv = &shared[i];
                                /* these must be reinitialized too since
                                   they contain memory references */
                                init_waitqueue_head(&chip->wq);
                                spin_lock_init(&chip->_spinlock);
                                chip->mutex = &chip->_spinlock;
                                chip++;
                        }
                }

                printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
                                  "--> %d partitions of %d KiB\n",
                                  map->name, cfi->numchips, cfi->interleave,
                                  newcfi->numchips, 1<<(newcfi->chipshift-10));

                map->fldrv_priv = newcfi;
                *pcfi = newcfi;
                kfree(cfi);
        }

        return 0;
}

/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */
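/*
 * Bring the chip into a state where the operation given by 'mode'
 * (FL_READY, FL_POINT, FL_WRITING, ...) may proceed: poll the status
 * register, suspend an in-progress erase when the chip supports it, or
 * put the caller to sleep on the chip's wait queue.  Called with the
 * chip mutex held; returns 0 when clear to go, -EAGAIN when the caller
 * must re-evaluate the chip state and retry (the mutex was dropped in
 * between), or a hard error such as -EIO.
 */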
static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        DECLARE_WAITQUEUE(wait, current);
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
        unsigned long timeo = jiffies + HZ;

        /* Prevent setting state FL_SYNCING for chip in suspended state. */
        if (mode == FL_SYNCING && chip->oldstate != FL_READY)
                goto sleep;

        switch (chip->state) {

        case FL_STATUS:
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        /* At this point we're fine with write operations
                           in other partitions as they don't conflict. */
                        if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
                                break;

                        spin_unlock(chip->mutex);
                        cfi_udelay(1);
                        spin_lock(chip->mutex);
                        /* Someone else might have been playing with it. */
                        return -EAGAIN;
                }
                /* Fall through */
        case FL_READY:
        case FL_CFI_QUERY:
        case FL_JEDEC_QUERY:
                return 0;

        case FL_ERASING:
                if (!cfip ||
                    !(cfip->FeatureSupport & 2) ||
                    !(mode == FL_READY || mode == FL_POINT ||
                     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
                        goto sleep;


                /* Erase suspend */
                map_write(map, CMD(0xB0), adr);

                /* If the flash has finished erasing, then 'erase suspend'
                 * appears to make some (28F320) flash devices switch to
                 * 'read' mode.  Make sure that we switch to 'read status'
                 * mode so we get the right data. --rmk
                 */
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_ERASING;
                chip->state = FL_ERASE_SUSPENDING;
                chip->erase_suspended = 1;
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        if (time_after(jiffies, timeo)) {
                                /* Urgh. Resume and pretend we weren't here.  */
                                map_write(map, CMD(0xd0), adr);
                                /* Make sure we're in 'read status' mode if it had finished */
                                map_write(map, CMD(0x70), adr);
                                chip->state = FL_ERASING;
                                chip->oldstate = FL_READY;
                                printk(KERN_ERR "%s: Chip not ready after erase "
                                       "suspended: status = 0x%lx\n", map->name, status.x[0]);
                                return -EIO;
                        }

                        spin_unlock(chip->mutex);
                        cfi_udelay(1);
                        spin_lock(chip->mutex);
                        /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
                           So we can just loop here. */
                }
                chip->state = FL_STATUS;
                return 0;

        case FL_XIP_WHILE_ERASING:
                if (mode != FL_READY && mode != FL_POINT &&
                    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
                        goto sleep;
                chip->oldstate = chip->state;
                chip->state = FL_READY;
                return 0;

        case FL_SHUTDOWN:
                /* The machine is rebooting now, so no one can get the chip anymore */
                return -EIO;
        case FL_POINT:
                /* Only if there's no operation suspended... */
                if (mode == FL_READY && chip->oldstate == FL_READY)
                        return 0;
                /* Fall through */
        default:
        sleep:
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);
                spin_unlock(chip->mutex);
                schedule();
                remove_wait_queue(&chip->wq, &wait);
                spin_lock(chip->mutex);
                return -EAGAIN;
        }
}

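/*
 * Like chip_ready(), but also arbitrates between hardware partitions
 * sharing one physical chip: write/erase ownership is recorded in the
 * flchip_shared structure and contention is fought out in the context
 * of the current owner.  Retries internally on -EAGAIN.
 */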
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        int ret;
        DECLARE_WAITQUEUE(wait, current);

 retry:
        if (chip->priv &&
            (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE
            || mode == FL_SHUTDOWN) && chip->state != FL_SYNCING) {
                /*
                 * OK. We have the possibility of contention on the
                 * write/erase operations which are global to the real
                 * chip and not per partition.  So let's fight it out in
                 * the partition which currently has authority over the
                 * operation.
                 *
                 * The rules are as follows:
                 *
                 * - any write operation must own shared->writing.
                 *
                 * - any erase operation must own _both_ shared->writing and
                 *   shared->erasing.
                 *
                 * - contention arbitration is handled in the owner's context.
                 *
                 * The 'shared' struct can be read and/or written only when
                 * its lock is taken.
                 */
                struct flchip_shared *shared = chip->priv;
                struct flchip *contender;
                spin_lock(&shared->lock);
                contender = shared->writing;
                if (contender && contender != chip) {
                        /*
                         * The engine to perform the desired operation on this
                         * partition is already in use by someone else.
                         * Let's fight over it in the context of the chip
                         * currently using it.  If it is possible to suspend,
                         * that other partition will do just that, otherwise
                         * it'll happily send us to sleep.  In any case, when
                         * get_chip returns success we're clear to go ahead.
                         */
                        ret = spin_trylock(contender->mutex);
                        spin_unlock(&shared->lock);
                        if (!ret)
                                goto retry;
                        spin_unlock(chip->mutex);
                        ret = chip_ready(map, contender, contender->start, mode);
                        spin_lock(chip->mutex);

                        if (ret == -EAGAIN) {
                                spin_unlock(contender->mutex);
                                goto retry;
                        }
                        if (ret) {
                                spin_unlock(contender->mutex);
                                return ret;
                        }
                        spin_lock(&shared->lock);

                        /* We should not own the chip if it is already in
                         * FL_SYNCING state. Put the contender and retry. */
                        if (chip->state == FL_SYNCING) {
                                put_chip(map, contender, contender->start);
                                spin_unlock(contender->mutex);
                                goto retry;
                        }
                        spin_unlock(contender->mutex);
                }

                /* If there is already a suspended erase on this chip,
                 * sleep. */
                if (mode == FL_ERASING && shared->erasing
                    && shared->erasing->oldstate == FL_ERASING) {
                        spin_unlock(&shared->lock);
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        spin_unlock(chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        spin_lock(chip->mutex);
                        goto retry;
                }

                /* We now own it */
                shared->writing = chip;
                if (mode == FL_ERASING)
                        shared->erasing = chip;
                spin_unlock(&shared->lock);
        }
        ret = chip_ready(map, chip, adr, mode);
        if (ret == -EAGAIN)
                goto retry;

        return ret;
}

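/*
 * Drop the ownership taken by get_chip(): hand write/erase authority
 * back to the partition it was borrowed from, resume a suspended erase
 * where appropriate, and wake up any sleepers on the chip's wait queue.
 */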
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;

        if (chip->priv) {
                struct flchip_shared *shared = chip->priv;
                spin_lock(&shared->lock);
                if (shared->writing == chip && chip->oldstate == FL_READY) {
                        /* We own the ability to write, but we're done */
                        shared->writing = shared->erasing;
                        if (shared->writing && shared->writing != chip) {
                                /* give back ownership to the chip we borrowed it from */
                                struct flchip *loaner = shared->writing;
                                spin_lock(loaner->mutex);
                                spin_unlock(&shared->lock);
                                spin_unlock(chip->mutex);
                                put_chip(map, loaner, loaner->start);
                                spin_lock(chip->mutex);
                                spin_unlock(loaner->mutex);
                                wake_up(&chip->wq);
                                return;
                        }
                        shared->erasing = NULL;
                        shared->writing = NULL;
                } else if (shared->erasing == chip && shared->writing != chip) {
                        /*
                         * We own the ability to erase without the ability
                         * to write, which means the erase was suspended
                         * and some other partition is currently writing.
                         * Don't let the switch below mess things up since
                         * we don't have ownership to resume anything.
                         */
                        spin_unlock(&shared->lock);
                        wake_up(&chip->wq);
                        return;
                }
                spin_unlock(&shared->lock);
        }

        switch(chip->oldstate) {
        case FL_ERASING:
                chip->state = chip->oldstate;
                /* What if one interleaved chip has finished and the
                   other hasn't? The old code would leave the finished
                   one in READY mode. That's bad, and caused -EROFS
                   errors to be returned from do_erase_oneblock because
                   that's the only bit it checked for at the time.
                   As the state machine appears to explicitly allow
                   sending the 0x70 (Read Status) command to an erasing
                   chip and expecting it to be ignored, that's what we
                   do. */
                map_write(map, CMD(0xd0), adr);
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_READY;
                chip->state = FL_ERASING;
                break;

        case FL_XIP_WHILE_ERASING:
                chip->state = chip->oldstate;
                chip->oldstate = FL_READY;
                break;

        case FL_READY:
        case FL_STATUS:
        case FL_JEDEC_QUERY:
                /* We should really make set_vpp() count, rather than doing this */
                DISABLE_VPP(map);
                break;
        default:
                printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
        }
        wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within an XIP-disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good idea to inspect the generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
                        unsigned long adr)
{
        /* TODO: chips with no XIP use should ignore and return */
        (void) map_read(map, adr); /* ensure mmu mapping is up to date */
        local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
                                unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xff), adr);
                chip->state = FL_READY;
        }
        (void) map_read(map, adr);
        xip_iprefetch();
        local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_wait_for_operation() function polls for both the given timeout
 * and pending (but still masked) hardware interrupts.  Whenever there is an
 * interrupt pending then the flash erase or write operation is suspended,
 * array mode restored and interrupts unmasked.  Task scheduling might also
 * happen at that point.  The CPU eventually returns from the interrupt or
 * the call to schedule() and the suspended flash operation is resumed for
 * the remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static int __xipram xip_wait_for_operation(
                struct map_info *map, struct flchip *chip,
                unsigned long adr, unsigned int chip_op_time_max)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
        map_word status, OK = CMD(0x80);
        unsigned long usec, suspended, start, done;
        flstate_t oldstate, newstate;

        start = xip_currtime();
        usec = chip_op_time_max;
        if (usec == 0)
                usec = 500000;
        done = 0;

        do {
                cpu_relax();
                if (xip_irqpending() && cfip &&
                    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
                     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
                    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
                        /*
                         * Let's suspend the erase or write operation when
                         * supported.  Note that we currently don't try to
                         * suspend interleaved chips if there is already
                         * another operation suspended (imagine what happens
                         * when one chip was already done with the current
                         * operation while another chip suspended it, then
                         * we resume the whole thing at once).  Yes, it
                         * can happen!
                         */
                        usec -= done;
                        map_write(map, CMD(0xb0), adr);
                        map_write(map, CMD(0x70), adr);
                        suspended = xip_currtime();
                        do {
                                if (xip_elapsed_since(suspended) > 100000) {
                                        /*
                                         * The chip doesn't want to suspend
                                         * after waiting for 100 msecs.
                                         * This is a critical error but there
                                         * is not much we can do here.
                                         */
                                        return -EIO;
                                }
                                status = map_read(map, adr);
                        } while (!map_word_andequal(map, status, OK, OK));

                        /* Suspend succeeded */
                        oldstate = chip->state;
                        if (oldstate == FL_ERASING) {
                                if (!map_word_bitsset(map, status, CMD(0x40)))
                                        break;
                                newstate = FL_XIP_WHILE_ERASING;
                                chip->erase_suspended = 1;
                        } else {
                                if (!map_word_bitsset(map, status, CMD(0x04)))
                                        break;
                                newstate = FL_XIP_WHILE_WRITING;
                                chip->write_suspended = 1;
                        }
                        chip->state = newstate;
                        map_write(map, CMD(0xff), adr);
                        (void) map_read(map, adr);
                        xip_iprefetch();
                        local_irq_enable();
                        spin_unlock(chip->mutex);
                        xip_iprefetch();
                        cond_resched();

                        /*
                         * We're back.  However someone else might have
                         * decided to go write to the chip if we are in
                         * a suspended erase state.  If so let's wait
                         * until it's done.
                         */
                        spin_lock(chip->mutex);
                        while (chip->state != newstate) {
                                DECLARE_WAITQUEUE(wait, current);
                                set_current_state(TASK_UNINTERRUPTIBLE);
                                add_wait_queue(&chip->wq, &wait);
                                spin_unlock(chip->mutex);
                                schedule();
                                remove_wait_queue(&chip->wq, &wait);
                                spin_lock(chip->mutex);
                        }
                        /* Disallow XIP again */
                        local_irq_disable();

                        /* Resume the write or erase operation */
                        map_write(map, CMD(0xd0), adr);
                        map_write(map, CMD(0x70), adr);
                        chip->state = oldstate;
                        start = xip_currtime();
                } else if (usec >= 1000000/HZ) {
                        /*
                         * Try to save on CPU power when waiting delay
                         * is at least a system timer tick period.
                         * No need to be extremely accurate here.
                         */
                        xip_cpu_idle();
                }
                status = map_read(map, adr);
                done = xip_elapsed_since(start);
        } while (!map_word_andequal(map, status, OK, OK)
                 && done < usec);

        return (done >= usec) ? -ETIME : 0;
}

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVAL_CACHE_AND_WAIT.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
        INVALIDATE_CACHED_RANGE(map, from, size)

#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec, usec_max) \
        xip_wait_for_operation(map, chip, cmd_adr, usec_max)

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)
#define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation

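/*
 * Non-XIP wait path: invalidate any cached mapping of the range being
 * modified, then poll the status register until the WSM signals ready
 * (0x80).  The first sleep covers half the typical operation time; after
 * that we wait in timer-tick sized sleeps (or udelay(1) for very short
 * operations), restarting the timeout whenever the operation was
 * suspended and resumed underneath us.
 */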
static int inval_cache_and_wait_for_operation(
                struct map_info *map, struct flchip *chip,
                unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
                unsigned int chip_op_time, unsigned int chip_op_time_max)
{
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK = CMD(0x80);
        int chip_state = chip->state;
        unsigned int timeo, sleep_time, reset_timeo;

        spin_unlock(chip->mutex);
        if (inval_len)
                INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
        spin_lock(chip->mutex);

        timeo = chip_op_time_max;
        if (!timeo)
                timeo = 500000;
        reset_timeo = timeo;
        sleep_time = chip_op_time / 2;

        for (;;) {
                status = map_read(map, cmd_adr);
                if (map_word_andequal(map, status, status_OK, status_OK))
                        break;

                if (!timeo) {
                        map_write(map, CMD(0x70), cmd_adr);
                        chip->state = FL_STATUS;
                        return -ETIME;
                }

1229                        /* OK, still waiting. Drop the lock, wait a while and retry. */
1230                spin_unlock(chip->mutex);
1231                if (sleep_time >= 1000000/HZ) {
1232                        /*
1233                         * Half of the normal delay still remaining
1234                         * can be performed with a sleeping delay instead
1235                         * of busy waiting.
1236                         */
1237                        msleep(sleep_time/1000);
1238                        timeo -= sleep_time;
1239                        sleep_time = 1000000/HZ;
1240                } else {
1241                        udelay(1);
1242                        cond_resched();
1243                        timeo--;
1244                }
1245                spin_lock(chip->mutex);
1246
1247                while (chip->state != chip_state) {
1248                        /* Someone's suspended the operation: sleep */
1249                        DECLARE_WAITQUEUE(wait, current);
1250                        set_current_state(TASK_UNINTERRUPTIBLE);
1251                        add_wait_queue(&chip->wq, &wait);
1252                        spin_unlock(chip->mutex);
1253                        schedule();
1254                        remove_wait_queue(&chip->wq, &wait);
1255                        spin_lock(chip->mutex);
1256                }
1257                if (chip->erase_suspended && chip_state == FL_ERASING)  {
1258                        /* Erase suspend occurred while we slept: reset timeout */
1259                        timeo = reset_timeo;
1260                        chip->erase_suspended = 0;
1261                }
1262                if (chip->write_suspended && chip_state == FL_WRITING)  {
1263                        /* Write suspend occurred while we slept: reset timeout */
1264                        timeo = reset_timeo;
1265                        chip->write_suspended = 0;
1266                }
1267        }
1268
1269        /* Done and happy. */
1270        chip->state = FL_STATUS;
1271        return 0;
1272}
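
/*
 * A worked example of the wait strategy above (illustrative figures,
 * assuming HZ=100, i.e. a 10000us jiffy): a typical single-word
 * program of ~256us gives sleep_time = 128us, below one jiffy, so the
 * loop busy-waits with udelay(1), decrementing the timeout one
 * microsecond per pass.  A block erase of ~1s gives sleep_time =
 * 500000us, so the first pass calls msleep(500) and later passes
 * sleep one jiffy at a time until SR.7 (0x80) reports ready or
 * chip_op_time_max expires.
 */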
1273
1274#endif
1275
1276#define WAIT_TIMEOUT(map, chip, adr, udelay, udelay_max) \
1277        INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay, udelay_max)
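
/*
 * WAIT_TIMEOUT is the same status poll with no cache range to
 * invalidate; it is used where nothing in the memory map changes while
 * we wait, e.g. waiting for buffer-write readiness (XSR.7) or for a
 * block (un)lock operation to finish.
 */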
1278
1279
1280static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
1281{
1282        unsigned long cmd_addr;
1283        struct cfi_private *cfi = map->fldrv_priv;
1284        int ret = 0;
1285
1286        adr += chip->start;
1287
1288        /* Ensure cmd read/writes are aligned. */
1289        cmd_addr = adr & ~(map_bankwidth(map)-1);
1290
1291        spin_lock(chip->mutex);
1292
1293        ret = get_chip(map, chip, cmd_addr, FL_POINT);
1294
1295        if (!ret) {
1296                if (chip->state != FL_POINT && chip->state != FL_READY)
1297                        map_write(map, CMD(0xff), cmd_addr);
1298
1299                chip->state = FL_POINT;
1300                chip->ref_point_counter++;
1301        }
1302        spin_unlock(chip->mutex);
1303
1304        return ret;
1305}
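
/*
 * ref_point_counter keeps the chip pinned in FL_POINT across nested
 * point() calls: each do_point_onechip() increments it, and the chip
 * only drops back to FL_READY once cfi_intelext_unpoint() has brought
 * the count back down to zero.
 */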
1306
1307static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
1308                size_t *retlen, void **virt, resource_size_t *phys)
1309{
1310        struct map_info *map = mtd->priv;
1311        struct cfi_private *cfi = map->fldrv_priv;
1312        unsigned long ofs, last_end = 0;
1313        int chipnum;
1314        int ret = 0;
1315
1316        if (!map->virt || (from + len > mtd->size))
1317                return -EINVAL;
1318
1319        /* Now lock the chip(s) to POINT state */
1320
1321        /* ofs: offset within the first chip that the first read should start */
1322        chipnum = (from >> cfi->chipshift);
1323        ofs = from - (chipnum << cfi->chipshift);
1324
1325        *virt = map->virt + cfi->chips[chipnum].start + ofs;
1326        *retlen = 0;
1327        if (phys)
1328                *phys = map->phys + cfi->chips[chipnum].start + ofs;
1329
1330        while (len) {
1331                unsigned long thislen;
1332
1333                if (chipnum >= cfi->numchips)
1334                        break;
1335
1336                /* We cannot point across chips that are virtually disjoint */
1337                if (!last_end)
1338                        last_end = cfi->chips[chipnum].start;
1339                else if (cfi->chips[chipnum].start != last_end)
1340                        break;
1341
1342                if ((len + ofs - 1) >> cfi->chipshift)
1343                        thislen = (1<<cfi->chipshift) - ofs;
1344                else
1345                        thislen = len;
1346
1347                ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
1348                if (ret)
1349                        break;
1350
1351                *retlen += thislen;
1352                len -= thislen;
1353
1354                ofs = 0;
1355                last_end += 1 << cfi->chipshift;
1356                chipnum++;
1357        }
1358        return 0;
1359}
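
/*
 * Illustrative (hypothetical) caller of the point interface; the
 * names dest/ofs/len are made up for the example:
 *
 *	void *virt;
 *	size_t retlen;
 *
 *	if (!mtd->point(mtd, ofs, len, &retlen, &virt, NULL)) {
 *		memcpy(dest, virt, retlen);
 *		mtd->unpoint(mtd, ofs, retlen);
 *	}
 *
 * The memcpy stands in for any direct (zero-copy) access.  retlen may
 * fall short of len when the range crosses virtually disjoint chips,
 * so callers must check it.
 */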
1360
1361static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
1362{
1363        struct map_info *map = mtd->priv;
1364        struct cfi_private *cfi = map->fldrv_priv;
1365        unsigned long ofs;
1366        int chipnum;
1367
1368        /* Now unlock the chip(s) POINT state */
1369
1370        /* ofs: offset within the first chip that the first read should start */
1371        chipnum = (from >> cfi->chipshift);
1372        ofs = from - (chipnum << cfi->chipshift);
1373
1374        while (len) {
1375                unsigned long thislen;
1376                struct flchip *chip;
1377
1378                if (chipnum >= cfi->numchips)
1379                        break;
1380                chip = &cfi->chips[chipnum];
1381
1382                if ((len + ofs - 1) >> cfi->chipshift)
1383                        thislen = (1<<cfi->chipshift) - ofs;
1384                else
1385                        thislen = len;
1386
1387                spin_lock(chip->mutex);
1388                if (chip->state == FL_POINT) {
1389                        chip->ref_point_counter--;
1390                        if (chip->ref_point_counter == 0)
1391                                chip->state = FL_READY;
1392                } else
1393                        printk(KERN_WARNING "%s: unpoint called on non-pointed region\n", map->name); /* Should this give an error? */
1394
1395                put_chip(map, chip, chip->start);
1396                spin_unlock(chip->mutex);
1397
1398                len -= thislen;
1399                ofs = 0;
1400                chipnum++;
1401        }
1402}
1403
1404static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
1405{
1406        unsigned long cmd_addr;
1407        struct cfi_private *cfi = map->fldrv_priv;
1408        int ret;
1409
1410        adr += chip->start;
1411
1412        /* Ensure cmd read/writes are aligned. */
1413        cmd_addr = adr & ~(map_bankwidth(map)-1);
1414
1415        spin_lock(chip->mutex);
1416        ret = get_chip(map, chip, cmd_addr, FL_READY);
1417        if (ret) {
1418                spin_unlock(chip->mutex);
1419                return ret;
1420        }
1421
1422        if (chip->state != FL_POINT && chip->state != FL_READY) {
1423                map_write(map, CMD(0xff), cmd_addr);
1424
1425                chip->state = FL_READY;
1426        }
1427
1428        map_copy_from(map, buf, adr, len);
1429
1430        put_chip(map, chip, cmd_addr);
1431
1432        spin_unlock(chip->mutex);
1433        return 0;
1434}
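
/*
 * The single 0xff write above is the Intel "Read Array" command: it
 * returns the chip from status/query mode so that map_copy_from()
 * sees real flash contents rather than status register echoes.
 */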
1435
1436static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1437{
1438        struct map_info *map = mtd->priv;
1439        struct cfi_private *cfi = map->fldrv_priv;
1440        unsigned long ofs;
1441        int chipnum;
1442        int ret = 0;
1443
1444        /* ofs: offset within the first chip that the first read should start */
1445        chipnum = (from >> cfi->chipshift);
1446        ofs = from - (chipnum << cfi->chipshift);
1447
1448        *retlen = 0;
1449
1450        while (len) {
1451                unsigned long thislen;
1452
1453                if (chipnum >= cfi->numchips)
1454                        break;
1455
1456                if ((len + ofs - 1) >> cfi->chipshift)
1457                        thislen = (1<<cfi->chipshift) - ofs;
1458                else
1459                        thislen = len;
1460
1461                ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1462                if (ret)
1463                        break;
1464
1465                *retlen += thislen;
1466                len -= thislen;
1467                buf += thislen;
1468
1469                ofs = 0;
1470                chipnum++;
1471        }
1472        return ret;
1473}
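
/*
 * Address decomposition used throughout these chip loops (illustrative
 * numbers): with cfi->chipshift == 24 (16MiB per chip), from =
 * 0x1800000 maps to chipnum = 1, ofs = 0x800000; the
 * "(len + ofs - 1) >> chipshift" test then detects accesses spilling
 * into the next chip and clamps thislen to the end of the current one.
 */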
1474
1475static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1476                                     unsigned long adr, map_word datum, int mode)
1477{
1478        struct cfi_private *cfi = map->fldrv_priv;
1479        map_word status, write_cmd;
1480        int ret=0;
1481
1482        adr += chip->start;
1483
1484        switch (mode) {
1485        case FL_WRITING:
1486                write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0x40) : CMD(0x41);
1487                break;
1488        case FL_OTP_WRITE:
1489                write_cmd = CMD(0xc0);
1490                break;
1491        default:
1492                return -EINVAL;
1493        }
1494
1495        spin_lock(chip->mutex);
1496        ret = get_chip(map, chip, adr, mode);
1497        if (ret) {
1498                spin_unlock(chip->mutex);
1499                return ret;
1500        }
1501
1502        XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1503        ENABLE_VPP(map);
1504        xip_disable(map, chip, adr);
1505        map_write(map, write_cmd, adr);
1506        map_write(map, datum, adr);
1507        chip->state = mode;
1508
1509        ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1510                                   adr, map_bankwidth(map),
1511                                   chip->word_write_time,
1512                                   chip->word_write_time_max);
1513        if (ret) {
1514                xip_enable(map, chip, adr);
1515                printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
1516                goto out;
1517        }
1518
1519        /* check for errors */
1520        status = map_read(map, adr);
1521        if (map_word_bitsset(map, status, CMD(0x1a))) {
1522                unsigned long chipstatus = MERGESTATUS(status);
1523
1524                /* reset status */
1525                map_write(map, CMD(0x50), adr);
1526                map_write(map, CMD(0x70), adr);
1527                xip_enable(map, chip, adr);
1528
1529                if (chipstatus & 0x02) {
1530                        ret = -EROFS;
1531                } else if (chipstatus & 0x08) {
1532                        printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
1533                        ret = -EIO;
1534                } else {
1535                        printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
1536                        ret = -EINVAL;
1537                }
1538
1539                goto out;
1540        }
1541
1542        xip_enable(map, chip, adr);
1543 out:   put_chip(map, chip, adr);
1544        spin_unlock(chip->mutex);
1545        return ret;
1546}
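
/*
 * The 0x1a error mask above decodes as follows on Intel-style status
 * registers: SR.4 (0x10) program error, SR.3 (0x08) VPP low, SR.1
 * (0x02) block locked.  A locked block maps to -EROFS, a VPP fault to
 * -EIO, and anything else (typically SR.4 alone) is logged with the
 * raw merged status and returned as -EINVAL.
 */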
1547
1548
1549static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
1550{
1551        struct map_info *map = mtd->priv;
1552        struct cfi_private *cfi = map->fldrv_priv;
1553        int ret = 0;
1554        int chipnum;
1555        unsigned long ofs;
1556
1557        *retlen = 0;
1558        if (!len)
1559                return 0;
1560
1561        chipnum = to >> cfi->chipshift;
1562        ofs = to - (chipnum << cfi->chipshift);
1563
1564        /* If it's not bus-aligned, do the first byte write */
1565        if (ofs & (map_bankwidth(map)-1)) {
1566                unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1567                int gap = ofs - bus_ofs;
1568                int n;
1569                map_word datum;
1570
1571                n = min_t(int, len, map_bankwidth(map)-gap);
1572                datum = map_word_ff(map);
1573                datum = map_word_load_partial(map, datum, buf, gap, n);
1574
1575                ret = do_write_oneword(map, &cfi->chips[chipnum],
1576                                               bus_ofs, datum, FL_WRITING);
1577                if (ret)
1578                        return ret;
1579
1580                len -= n;
1581                ofs += n;
1582                buf += n;
1583                (*retlen) += n;
1584
1585                if (ofs >> cfi->chipshift) {
1586                        chipnum++;
1587                        ofs = 0;
1588                        if (chipnum == cfi->numchips)
1589                                return 0;
1590                }
1591        }
1592
1593        while(len >= map_bankwidth(map)) {
1594                map_word datum = map_word_load(map, buf);
1595
1596                ret = do_write_oneword(map, &cfi->chips[chipnum],
1597                                       ofs, datum, FL_WRITING);
1598                if (ret)
1599                        return ret;
1600
1601                ofs += map_bankwidth(map);
1602                buf += map_bankwidth(map);
1603                (*retlen) += map_bankwidth(map);
1604                len -= map_bankwidth(map);
1605
1606                if (ofs >> cfi->chipshift) {
1607                        chipnum++;
1608                        ofs = 0;
1609                        if (chipnum == cfi->numchips)
1610                                return 0;
1611                }
1612        }
1613
1614        if (len & (map_bankwidth(map)-1)) {
1615                map_word datum;
1616
1617                datum = map_word_ff(map);
1618                datum = map_word_load_partial(map, datum, buf, 0, len);
1619
1620                ret = do_write_oneword(map, &cfi->chips[chipnum],
1621                                       ofs, datum, FL_WRITING);
1622                if (ret)
1623                        return ret;
1624
1625                (*retlen) += len;
1626        }
1627
1628        return 0;
1629}
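
/*
 * Worked example of the split above (illustrative numbers): with a bus
 * width of 4 bytes, a 10-byte write to offset 0x1002 becomes a 2-byte
 * head merged into the word at 0x1000 (gap = 2, padded with 0xff so
 * the untouched bytes stay unchanged, since programming can only clear
 * bits), then two full words at 0x1004 and 0x1008.  An 11-byte write
 * would additionally program a 1-byte tail, again padded with 0xff.
 */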
1630
1631
1632static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1633                                    unsigned long adr, const struct kvec **pvec,
1634                                    unsigned long *pvec_seek, int len)
1635{
1636        struct cfi_private *cfi = map->fldrv_priv;
1637        map_word status, write_cmd, datum;
1638        unsigned long cmd_adr;
1639        int ret, wbufsize, word_gap, words;
1640        const struct kvec *vec;
1641        unsigned long vec_seek;
1642        unsigned long initial_adr;
1643        int initial_len = len;
1644
1645        wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1646        adr += chip->start;
1647        initial_adr = adr;
1648        cmd_adr = adr & ~(wbufsize-1);
1649
1650        /* Let's determine this according to the interleave only once */
1651        write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);
1652
1653        spin_lock(chip->mutex);
1654        ret = get_chip(map, chip, cmd_adr, FL_WRITING);
1655        if (ret) {
1656                spin_unlock(chip->mutex);
1657                return ret;
1658        }
1659
1660        XIP_INVAL_CACHED_RANGE(map, initial_adr, initial_len);
1661        ENABLE_VPP(map);
1662        xip_disable(map, chip, cmd_adr);
1663
1664        /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1665           [...], the device will not accept any more Write to Buffer commands".
1666           So we must check here and reset those bits if they're set. Otherwise
1667           we're just pissing in the wind */
1668        if (chip->state != FL_STATUS) {
1669                map_write(map, CMD(0x70), cmd_adr);
1670                chip->state = FL_STATUS;
1671        }
1672        status = map_read(map, cmd_adr);
1673        if (map_word_bitsset(map, status, CMD(0x30))) {
1674                xip_enable(map, chip, cmd_adr);
1675                printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
1676                xip_disable(map, chip, cmd_adr);
1677                map_write(map, CMD(0x50), cmd_adr);
1678                map_write(map, CMD(0x70), cmd_adr);
1679        }
1680
1681        chip->state = FL_WRITING_TO_BUFFER;
1682        map_write(map, write_cmd, cmd_adr);
1683        ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0, 0);
1684        if (ret) {
1685                /* Argh. Not ready for write to buffer */
1686                map_word Xstatus = map_read(map, cmd_adr);
1687                map_write(map, CMD(0x70), cmd_adr);
1688                chip->state = FL_STATUS;
1689                status = map_read(map, cmd_adr);
1690                map_write(map, CMD(0x50), cmd_adr);
1691                map_write(map, CMD(0x70), cmd_adr);
1692                xip_enable(map, chip, cmd_adr);
1693                printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
1694                                map->name, Xstatus.x[0], status.x[0]);
1695                goto out;
1696        }
1697
1698        /* Figure out the number of words to write */
1699        word_gap = (-adr & (map_bankwidth(map)-1));
1700        words = DIV_ROUND_UP(len - word_gap, map_bankwidth(map));
1701        if (!word_gap) {
1702                words--;
1703        } else {
1704                word_gap = map_bankwidth(map) - word_gap;
1705                adr -= word_gap;
1706                datum = map_word_ff(map);
1707        }
1708
1709        /* Write length of data to come */
1710        map_write(map, CMD(words), cmd_adr);
1711
1712        /* Write data */
1713        vec = *pvec;
1714        vec_seek = *pvec_seek;
1715        do {
1716                int n = map_bankwidth(map) - word_gap;
1717                if (n > vec->iov_len - vec_seek)
1718                        n = vec->iov_len - vec_seek;
1719                if (n > len)
1720                        n = len;
1721
1722                if (!word_gap && len < map_bankwidth(map))
1723                        datum = map_word_ff(map);
1724
1725                datum = map_word_load_partial(map, datum,
1726                                              vec->iov_base + vec_seek,
1727                                              word_gap, n);
1728
1729                len -= n;
1730                word_gap += n;
1731                if (!len || word_gap == map_bankwidth(map)) {
1732                        map_write(map, datum, adr);
1733                        adr += map_bankwidth(map);
1734                        word_gap = 0;
1735                }
1736
1737                vec_seek += n;
1738                if (vec_seek == vec->iov_len) {
1739                        vec++;
1740                        vec_seek = 0;
1741                }
1742        } while (len);
1743        *pvec = vec;
1744        *pvec_seek = vec_seek;
1745
1746        /* GO GO GO */
1747        map_write(map, CMD(0xd0), cmd_adr);
1748        chip->state = FL_WRITING;
1749
1750        ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
1751                                   initial_adr, initial_len,
1752                                   chip->buffer_write_time,
1753                                   chip->buffer_write_time_max);
1754        if (ret) {
1755                map_write(map, CMD(0x70), cmd_adr);
1756                chip->state = FL_STATUS;
1757                xip_enable(map, chip, cmd_adr);
1758                printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
1759                goto out;
1760        }
1761
1762        /* check for errors */
1763        status = map_read(map, cmd_adr);
1764        if (map_word_bitsset(map, status, CMD(0x1a))) {
1765                unsigned long chipstatus = MERGESTATUS(status);
1766
1767                /* reset status */
1768                map_write(map, CMD(0x50), cmd_adr);
1769                map_write(map, CMD(0x70), cmd_adr);
1770                xip_enable(map, chip, cmd_adr);
1771
1772                if (chipstatus & 0x02) {
1773                        ret = -EROFS;
1774                } else if (chipstatus & 0x08) {
1775                        printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
1776                        ret = -EIO;
1777                } else {
1778                        printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
1779                        ret = -EINVAL;
1780                }
1781
1782                goto out;
1783        }
1784
1785        xip_enable(map, chip, cmd_adr);
1786 out:   put_chip(map, chip, cmd_adr);
1787        spin_unlock(chip->mutex);
1788        return ret;
1789}
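
/*
 * The word_gap arithmetic above, by example (illustrative numbers, bus
 * width 4): for adr = 0x1003, (-adr & 3) == 1, i.e. one byte short of
 * the next word boundary, and len = 5 gives words =
 * DIV_ROUND_UP(5 - 1, 4) = 1.  word_gap is then rewritten to
 * 4 - 1 = 3, the byte offset of the data within the first bus word,
 * and adr is aligned down to 0x1000.  Two bus words are actually
 * transferred, and the value written with CMD(words) is the chip's
 * expected "word count minus one"; the aligned case reaches the same
 * encoding via the explicit words-- instead.
 */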
1790
1791static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
1792                                unsigned long count, loff_t to, size_t *retlen)
1793{
1794        struct map_info *map = mtd->priv;
1795        struct cfi_private *cfi = map->fldrv_priv;
1796        int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1797        int ret = 0;
1798        int chipnum;
1799        unsigned long ofs, vec_seek, i;
1800        size_t len = 0;
1801
1802        for (i = 0; i < count; i++)
1803                len += vecs[i].iov_len;
1804
1805        *retlen = 0;
1806        if (!len)
1807                return 0;
1808
1809        chipnum = to >> cfi->chipshift;
1810        ofs = to - (chipnum << cfi->chipshift);
1811        vec_seek = 0;
1812
1813        do {
1814                /* We must not cross write block boundaries */
1815                int size = wbufsize - (ofs & (wbufsize-1));
1816
1817                if (size > len)
1818                        size = len;
1819                ret = do_write_buffer(map, &cfi->chips[chipnum],
1820                                      ofs, &vecs, &vec_seek, size);
1821                if (ret)
1822                        return ret;
1823
1824                ofs += size;
1825                (*retlen) += size;
1826                len -= size;
1827
1828                if (ofs >> cfi->chipshift) {
1829                        chipnum++;
1830                        ofs = 0;
1831                        if (chipnum == cfi->numchips)
1832                                return 0;
1833                }
1834
1835                /* Be nice and reschedule with the chip in a usable state for other
1836                   processes. */
1837                cond_resched();
1838
1839        } while (len);
1840
1841        return 0;
1842}
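
/*
 * Chunking example (illustrative numbers): with two interleaved chips
 * and MaxBufWriteSize == 5, wbufsize = 2 << 5 = 64 bytes.  A 100-byte
 * write starting at offset 0x30 is issued as 16 bytes (up to the 0x40
 * boundary), then 64 bytes, then the remaining 20, so no single
 * buffer program ever crosses a write-block boundary.
 */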
1843
1844static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1845                                       size_t len, size_t *retlen, const u_char *buf)
1846{
1847        struct kvec vec;
1848
1849        vec.iov_base = (void *) buf;
1850        vec.iov_len = len;
1851
1852        return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
1853}
1854
1855static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1856                                      unsigned long adr, int len, void *thunk)
1857{
1858        struct cfi_private *cfi = map->fldrv_priv;
1859        map_word status;
1860        int retries = 3;
1861        int ret;
1862
1863        adr += chip->start;
1864
1865 retry:
1866        spin_lock(chip->mutex);
1867        ret = get_chip(map, chip, adr, FL_ERASING);
1868        if (ret) {
1869                spin_unlock(chip->mutex);
1870                return ret;
1871        }
1872
1873        XIP_INVAL_CACHED_RANGE(map, adr, len);
1874        ENABLE_VPP(map);
1875        xip_disable(map, chip, adr);
1876
1877        /* Clear the status register first */
1878        map_write(map, CMD(0x50), adr);
1879
1880        /* Now erase */
1881        map_write(map, CMD(0x20), adr);
1882        map_write(map, CMD(0xD0), adr);
1883        chip->state = FL_ERASING;
1884        chip->erase_suspended = 0;
1885
1886        ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1887                                   adr, len,
1888                                   chip->erase_time,
1889                                   chip->erase_time_max);
1890        if (ret) {
1891                map_write(map, CMD(0x70), adr);
1892                chip->state = FL_STATUS;
1893                xip_enable(map, chip, adr);
1894                printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
1895                goto out;
1896        }
1897
1898        /* We've broken this before. It doesn't hurt to be safe */
1899        map_write(map, CMD(0x70), adr);
1900        chip->state = FL_STATUS;
1901        status = map_read(map, adr);
1902
1903        /* check for errors */
1904        if (map_word_bitsset(map, status, CMD(0x3a))) {
1905                unsigned long chipstatus = MERGESTATUS(status);
1906
1907                /* Reset the error bits */
1908                map_write(map, CMD(0x50), adr);
1909                map_write(map, CMD(0x70), adr);
1910                xip_enable(map, chip, adr);
1911
1912                if ((chipstatus & 0x30) == 0x30) {
1913                        printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
1914                        ret = -EINVAL;
1915                } else if (chipstatus & 0x02) {
1916                        /* Protection bit set */
1917                        ret = -EROFS;
1918                } else if (chipstatus & 0x8) {
1919                        /* Voltage */
1920                        printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
1921                        ret = -EIO;
1922                } else if (chipstatus & 0x20 && retries--) {
1923                        printk(KERN_DEBUG "%s: block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", map->name, adr, chipstatus);
1924                        put_chip(map, chip, adr);
1925                        spin_unlock(chip->mutex);
1926                        goto retry;
1927                } else {
1928                        printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
1929                        ret = -EIO;
1930                }
1931
1932                goto out;
1933        }
1934
1935        xip_enable(map, chip, adr);
1936 out:   put_chip(map, chip, adr);
1937        spin_unlock(chip->mutex);
1938        return ret;
1939}
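
/*
 * Erase status handling above, summarised: the 0x3a mask covers SR.5
 * (0x20) erase error, SR.4 (0x10) program error, SR.3 (0x08) VPP low
 * and SR.1 (0x02) block locked.  SR.4 and SR.5 set together means the
 * chip rejected the command sequence itself, while SR.5 alone is a
 * genuine erase failure, which is retried up to three times before
 * giving up with -EIO.
 */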
1940
1941static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1942{
1943        unsigned long ofs, len;
1944        int ret;
1945
1946        ofs = instr->addr;
1947        len = instr->len;
1948
1949        ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1950        if (ret)
1951                return ret;
1952
1953        instr->state = MTD_ERASE_DONE;
1954        mtd_erase_callback(instr);
1955
1956        return 0;
1957}
1958
1959static void cfi_intelext_sync (struct mtd_info *mtd)
1960{
1961        struct map_info *map = mtd->priv;
1962        struct cfi_private *cfi = map->fldrv_priv;
1963        int i;
1964        struct flchip *chip;
1965        int ret = 0;
1966
1967        for (i=0; !ret && i<cfi->numchips; i++) {
1968                chip = &cfi->chips[i];
1969
1970                spin_lock(chip->mutex);
1971                ret = get_chip(map, chip, chip->start, FL_SYNCING);
1972
1973                if (!ret) {
1974                        chip->oldstate = chip->state;
1975                        chip->state = FL_SYNCING;
1976                        /* No need to wake_up() on this state change -
1977                         * as the whole point is that nobody can do anything
1978                         * with the chip now anyway.
1979                         */
1980                }
1981                spin_unlock(chip->mutex);
1982        }
1983
1984        /* Unlock the chips again */
1985
1986        for (i--; i >= 0; i--) {
1987                chip = &cfi->chips[i];
1988
1989                spin_lock(chip->mutex);
1990
1991                if (chip->state == FL_SYNCING) {
1992                        chip->state = chip->oldstate;
1993                        chip->oldstate = FL_READY;
1994                        wake_up(&chip->wq);
1995                }
1996                spin_unlock(chip->mutex);
1997        }
1998}
1999
2000static int __xipram do_getlockstatus_oneblock(struct map_info *map,
2001                                                struct flchip *chip,
2002                                                unsigned long adr,
2003                                                int len, void *thunk)
2004{
2005        struct cfi_private *cfi = map->fldrv_priv;
2006        int status, ofs_factor = cfi->interleave * cfi->device_type;
2007
2008        adr += chip->start;
2009        xip_disable(map, chip, adr+(2*ofs_factor));
2010        map_write(map, CMD(0x90), adr+(2*ofs_factor));
2011        chip->state = FL_JEDEC_QUERY;
2012        status = cfi_read_query(map, adr+(2*ofs_factor));
2013        xip_enable(map, chip, 0);
2014        return status;
2015}
2016
2017#ifdef DEBUG_LOCK_BITS
2018static int __xipram do_printlockstatus_oneblock(struct map_info *map,
2019                                                struct flchip *chip,
2020                                                unsigned long adr,
2021                                                int len, void *thunk)
2022{
2023        printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
2024               adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
2025        return 0;
2026}
2027#endif
2028
2029#define DO_XXLOCK_ONEBLOCK_LOCK         ((void *) 1)
2030#define DO_XXLOCK_ONEBLOCK_UNLOCK       ((void *) 2)
2031
2032static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
2033                                       unsigned long adr, int len, void *thunk)
2034{
2035        struct cfi_private *cfi = map->fldrv_priv;
2036        struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2037        int udelay;
2038        int ret;
2039
2040        adr += chip->start;
2041
2042        spin_lock(chip->mutex);
2043        ret = get_chip(map, chip, adr, FL_LOCKING);
2044        if (ret) {
2045                spin_unlock(chip->mutex);
2046                return ret;
2047        }
2048
2049        ENABLE_VPP(map);
2050        xip_disable(map, chip, adr);
2051
2052        map_write(map, CMD(0x60), adr);
2053        if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
2054                map_write(map, CMD(0x01), adr);
2055                chip->state = FL_LOCKING;
2056        } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
2057                map_write(map, CMD(0xD0), adr);
2058                chip->state = FL_UNLOCKING;
2059        } else
2060                BUG();
2061
2062        /*
2063         * If Instant Individual Block Locking supported then no need
2064         * to delay.
2065         */
2066        udelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1000000/HZ : 0;
2067
2068        ret = WAIT_TIMEOUT(map, chip, adr, udelay, udelay * 100);
2069        if (ret) {
2070                map_write(map, CMD(0x70), adr);
2071                chip->state = FL_STATUS;
2072                xip_enable(map, chip, adr);
2073                printk(KERN_ERR "%s: block unlock error: (status timeout)\n", map->name);
2074                goto out;
2075        }
2076
2077        xip_enable(map, chip, adr);
2078out:    put_chip(map, chip, adr);
2079        spin_unlock(chip->mutex);
2080        return ret;
2081}
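
/*
 * Lock/unlock command summary for the code above: 0x60 is the block
 * lock setup command, after which 0x01 sets the block's lock bit and
 * 0xd0 clears it.  Chips advertising instantaneous individual block
 * locking (FeatureSupport bit 5) complete this at once, hence the zero
 * delay in that case; otherwise we allow one jiffy, with a 100x margin
 * on the overall timeout.
 */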
2082
2083static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2084{
2085        int ret;
2086
2087#ifdef DEBUG_LOCK_BITS
2088        printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08llx\n",
2089               __func__, (unsigned long long)ofs, (unsigned long long)len);
2090        cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2091                ofs, len, NULL);
2092#endif
2093
2094        ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2095                ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
2096
2097#ifdef DEBUG_LOCK_BITS
2098        printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2099               __func__, ret);
2100        cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2101                ofs, len, NULL);
2102#endif
2103
2104        return ret;
2105}
2106
2107static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2108{
2109        int ret;
2110
2111#ifdef DEBUG_LOCK_BITS
2112        printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08llx\n",
2113               __func__, (unsigned long long)ofs, (unsigned long long)len);
2114        cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2115                ofs, len, NULL);
2116#endif
2117
2118        ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2119                                        ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
2120
2121#ifdef DEBUG_LOCK_BITS
2122        printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2123               __func__, ret);
2124        cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2125                ofs, len, NULL);
2126#endif
2127
2128        return ret;
2129}
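
/*
 * Illustrative (hypothetical) use of the two entry points above, e.g.
 * unlocking a region before an erase and re-locking it afterwards:
 *
 *	mtd->unlock(mtd, instr->addr, instr->len);
 *	mtd->erase(mtd, instr);
 *	mtd->lock(mtd, instr->addr, instr->len);
 *
 * addr/len must line up with erase-block boundaries, since
 * cfi_varsize_frob() walks whole blocks.
 */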
2130
2131#ifdef CONFIG_MTD_OTP
2132
2133typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
2134                        u_long data_offset, u_char *buf, u_int size,
2135                        u_long prot_offset, u_int groupno, u_int groupsize);
2136
2137static int __xipram
2138do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
2139            u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2140{
2141        struct cfi_private *cfi = map->fldrv_priv;
2142        int ret;
2143
2144        spin_lock(chip->mutex);
2145        ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
2146        if (ret) {
2147                spin_unlock(chip->mutex);
2148                return ret;
2149        }
2150
2151        /* let's ensure we're not reading back cached data from array mode */
2152        INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2153
2154        xip_disable(map, chip, chip->start);
2155        if (chip->state != FL_JEDEC_QUERY) {
2156                map_write(map, CMD(0x90), chip->start);
2157                chip->state = FL_JEDEC_QUERY;
2158        }
2159        map_copy_from(map, buf, chip->start + offset, size);
2160        xip_enable(map, chip, chip->start);
2161
2162        /* then ensure we don't keep OTP data in the cache */
2163        INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2164
2165        put_chip(map, chip, chip->start);
2166        spin_unlock(chip->mutex);
2167        return 0;
2168}
2169
2170static int
2171do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
2172             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2173{
2174        int ret;
2175
2176        while (size) {
2177                unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
2178                int gap = offset - bus_ofs;
2179                int n = min_t(int, size, map_bankwidth(map)-gap);
2180                map_word datum = map_word_ff(map);
2181
2182                datum = map_word_load_partial(map, datum, buf, gap, n);
2183                ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
2184                if (ret)
2185                        return ret;
2186
2187                offset += n;
2188                buf += n;
2189                size -= n;
2190        }
2191
2192        return 0;
2193}
2194
2195static int
2196do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
2197            u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2198{
2199        struct cfi_private *cfi = map->fldrv_priv;
2200        map_word datum;
2201
2202        /* make sure area matches group boundaries */
2203        if (size != grpsz)
2204                return -EXDEV;
2205
2206        datum = map_word_ff(map);
2207        datum = map_word_clr(map, datum, CMD(1 << grpno));
2208        return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
2209}
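
/*
 * OTP group locking works by programming a lock word: starting from
 * all-ones, the bit for this group (1 << grpno) is cleared and the
 * result is written to the protection-lock location through the
 * regular one-word program path.  Flash programming can only clear
 * bits, so locks accumulate and can never be undone.
 */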
2210
2211static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2212                                 size_t *retlen, u_char *buf,
2213                                 otp_op_t action, int user_regs)
2214{
2215        struct map_info *map = mtd->priv;
2216        struct cfi_private *cfi = map->fldrv_priv;
2217        struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2218        struct flchip *chip;
2219        struct cfi_intelext_otpinfo *otp;
2220        u_long devsize, reg_prot_offset, data_offset;
2221        u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
2222        u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
2223        int ret;
2224
2225        *retlen = 0;
2226
2227        /* Check that we actually have some OTP registers */
2228        if (!extp || !(extp->FeatureSupport & (1 << 6)) || !extp->NumProtectionFields)
2229                return -ENODATA;
2230
2231        /* we need real chips here not virtual ones */
2232        devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
2233        chip_step = devsize >> cfi->chipshift;
2234        chip_num = 0;
2235
2236        /* Some chips have OTP located in the _top_ partition only.
2237           For example: Intel 28F256L18T (T means top-parameter device) */
2238        if (cfi->mfr == MANUFACTURER_INTEL) {
2239                switch (cfi->id) {
2240                case 0x880b:
2241                case 0x880c:
2242                case 0x880d:
2243                        chip_num = chip_step - 1;
2244                }
2245        }
2246
2247        for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
2248                chip = &cfi->chips[chip_num];
2249                otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
2250
2251                /* first OTP region */
2252                field = 0;
2253                reg_prot_offset = extp->ProtRegAddr;
2254                reg_fact_groups = 1;
2255                reg_fact_size = 1 << extp->FactProtRegSize;
2256                reg_user_groups = 1;
2257                reg_user_size = 1 << extp->UserProtRegSize;
2258
2259                while (len > 0) {
2260                        /* flash geometry fixup */
2261                        data_offset = reg_prot_offset + 1;
2262                        data_offset *= cfi->interleave * cfi->device_type;
2263                        reg_prot_offset *= cfi->interleave * cfi->device_type;
2264                        reg_fact_size *= cfi->interleave;
2265                        reg_user_size *= cfi->interleave;
2266
2267                        if (user_regs) {
2268                                groups = reg_user_groups;
2269                                groupsize = reg_user_size;
2270                                /* skip over factory reg area */
2271                                groupno = reg_fact_groups;
2272                                data_offset += reg_fact_groups * reg_fact_size;
2273                        } else {
2274                                groups = reg_fact_groups;
2275                                groupsize = reg_fact_size;
2276                                groupno = 0;
2277                        }
2278
2279                        while (len > 0 && groups > 0) {
2280                                if (!action) {
2281                                        /*
2282                                         * Special case: if action is NULL
2283                                         * we fill buf with otp_info records.
2284                                         */
2285                                        struct otp_info *otpinfo;
2286                                        map_word lockword;
2287                                        if (len < sizeof(struct otp_info))
2288                                                return -ENOSPC;
2289                                        len -= sizeof(struct otp_info);
2290                                        ret = do_otp_read(map, chip,
2291                                                          reg_prot_offset,
2292                                                          (u_char *)&lockword,
2293                                                          map_bankwidth(map),
2294                                                          0, 0,  0);
2295                                        if (ret)
2296                                                return ret;
2297                                        otpinfo = (struct otp_info *)buf;
2298                                        otpinfo->start = from;
2299                                        otpinfo->length = groupsize;
2300                                        otpinfo->locked =
2301                                           !map_word_bitsset(map, lockword,
2302                                                             CMD(1 << groupno));
2303                                        from += groupsize;
2304                                        buf += sizeof(*otpinfo);
2305                                        *retlen += sizeof(*otpinfo);
2306                                } else if (from >= groupsize) {
2307                                        from -= groupsize;
2308                                        data_offset += groupsize;
2309                                } else {
2310                                        int size = groupsize;
2311                                        data_offset += from;
2312                                        size -= from;
2313                                        from = 0;
2314                                        if (size > len)
2315                                                size = len;
2316                                        ret = action(map, chip, data_offset,
2317                                                     buf, size, reg_prot_offset,
2318                                                     groupno, groupsize);
2319                                        if (ret < 0)
2320                                                return ret;
2321                                        buf += size;
2322                                        len -= size;
2323                                        *retlen += size;
2324                                        data_offset += size;
2325                                }
2326                                groupno++;
2327                                groups--;
2328                        }
2329
2330                        /* next OTP region */
2331                        if (++field == extp->NumProtectionFields)
2332                                break;
2333                        reg_prot_offset = otp->ProtRegAddr;
2334                        reg_fact_groups = otp->FactGroups;
2335                        reg_fact_size = 1 << otp->FactProtRegSize;
2336                        reg_user_groups = otp->UserGroups;
2337                        reg_user_size = 1 << otp->UserProtRegSize;
2338                        otp++;
2339                }
2340        }
2341
2342        return 0;
2343}
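
/*
 * Geometry fixup example for the walk above (illustrative, and
 * assuming the usual CFI convention that protection-register
 * addresses are device-word addresses while sizes are bytes per
 * device): for a single x16 chip (device_type == 2, interleave == 1),
 * a ProtRegAddr of 0x80 lands at map byte offset 0x100, while the
 * 2^n-byte register sizes are scaled by the interleave alone.  The
 * data itself starts one device word past the lock word, which is
 * what reg_prot_offset + 1 accounts for.
 */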
2344
2345static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
2346                                           size_t len, size_t *retlen,
2347                                            u_char *buf)
2348{
2349        return cfi_intelext_otp_walk(mtd, from, len, retlen,
2350                                     buf, do_otp_read, 0);
2351}
2352
2353static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
2354                                           size_t len, size_t *retlen,
2355                                            u_char *buf)
2356{
2357        return cfi_intelext_otp_walk(mtd, from, len, retlen,
2358                                     buf, do_otp_read, 1);
2359}
2360
2361static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
2362                                            size_t len, size_t *retlen,
2363                                             u_char *buf)
2364{
2365        return cfi_intelext_otp_walk(mtd, from, len, retlen,
2366                                     buf, do_otp_write, 1);
2367}
2368
2369static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
2370                                           loff_t from, size_t len)
2371{
2372        size_t retlen;
2373        return cfi_intelext_otp_walk(mtd, from, len, &retlen,
2374                                     NULL, do_otp_lock, 1);
2375}
2376
2377static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
2378                                           struct otp_info *buf, size_t len)
2379{
2380        size_t retlen;
2381        int ret;
2382
2383        ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
2384        return ret ? : retlen;
2385}
2386
2387static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
2388                                           struct otp_info *buf, size_t len)
2389{
2390        size_t retlen;
2391        int ret;
2392
2393        ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
2394        return ret ? : retlen;
2395}
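
/*
 * The "ret ? : retlen" form above is the GCC conditional with an
 * omitted middle operand: it returns the (negative) error code when
 * the walk failed, and otherwise the number of bytes of otp_info
 * records placed in the buffer.
 */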
2396
2397#endif
2398
2399static void cfi_intelext_save_locks(struct mtd_info *mtd)
2400{
2401        struct mtd_erase_region_info *region;
2402        int block, status, i;
2403        unsigned long adr;
2404        size_t len;
2405
2406        for (i = 0; i < mtd->numeraseregions; i++) {
2407                region = &mtd->eraseregions[i];
2408                if (!region->lockmap)
2409                        continue;
2410
2411                for (block = 0; block < region->numblocks; block++){
2412                        len = region->erasesize;
2413                        adr = region->offset + block * len;
2414
2415                        status = cfi_varsize_frob(mtd,
2416                                        do_getlockstatus_oneblock, adr, len, NULL);
2417                        if (status)
2418                                set_bit(block, region->lockmap);
2419                        else
2420                                clear_bit(block, region->lockmap);
2421                }
2422        }
2423}
2424
2425static int cfi_intelext_suspend(struct mtd_info *mtd)
2426{
2427        struct map_info *map = mtd->priv;
2428        struct cfi_private *cfi = map->fldrv_priv;
2429        struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2430        int i;
2431        struct flchip *chip;
2432        int ret = 0;
2433
2434        if ((mtd->flags & MTD_POWERUP_LOCK)
2435            && extp && (extp->FeatureSupport & (1 << 5)))
2436                cfi_intelext_save_locks(mtd);
2437
2438        for (i=0; !ret && i<cfi->numchips; i++) {
2439                chip = &cfi->chips[i];
2440
2441                spin_lock(chip->mutex);
2442
2443                switch (chip->state) {
2444                case FL_READY:
2445                case FL_STATUS:
2446                case FL_CFI_QUERY:
2447                case FL_JEDEC_QUERY:
2448                        if (chip->oldstate == FL_READY) {
2449                                /* place the chip in a known state before suspend */
2450                                map_write(map, CMD(0xFF), cfi->chips[i].start);
2451                                chip->oldstate = chip->state;
2452                                chip->state = FL_PM_SUSPENDED;
2453                                /* No need to wake_up() on this state change -
2454                                 * as the whole point is that nobody can do anything
2455                                 * with the chip now anyway.
2456                                 */
2457                        } else {
2458                                /* There seems to be an operation pending. We must wait for it. */
2459                                printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
2460                                ret = -EAGAIN;
2461                        }
2462                        break;
2463                default:
2464                        /* Should we actually wait? Once upon a time these routines weren't
2465                           allowed to. Or should we return -EAGAIN, because the upper layers
2466                           ought to have already shut down anything which was using the device
2467                           anyway? The latter for now. */
2468                        printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
2469                        ret = -EAGAIN; /* fall through */
2470                case FL_PM_SUSPENDED:
2471                        break;
2472                }
2473                spin_unlock(chip->mutex);
2474        }
2475
2476        /* Unlock the chips again */
2477
2478        if (ret) {
2479                for (i--; i >= 0; i--) {
2480                        chip = &cfi->chips[i];
2481
2482                        spin_lock(chip->mutex);
2483
2484                        if (chip->state == FL_PM_SUSPENDED) {
2485                                /* No need to force it into a known state here,
2486                                   because we're returning failure, and it didn't
2487                                   get power cycled */
2488                                chip->state = chip->oldstate;
2489                                chip->oldstate = FL_READY;
2490                                wake_up(&chip->wq);
2491                        }
2492                        spin_unlock(chip->mutex);
2493                }
2494        }
2495
2496        return ret;
2497}
2498
2499static void cfi_intelext_restore_locks(struct mtd_info *mtd)
2500{
2501        struct mtd_erase_region_info *region;
2502        int block, i;
2503        unsigned long adr;
2504        size_t len;
2505
2506        for (i = 0; i < mtd->numeraseregions; i++) {
2507                region = &mtd->eraseregions[i];
2508                if (!region->lockmap)
2509                        continue;
2510
2511                for (block = 0; block < region->numblocks; block++) {
2512                        len = region->erasesize;
2513                        adr = region->offset + block * len;
2514
2515                        if (!test_bit(block, region->lockmap))
2516                                cfi_intelext_unlock(mtd, adr, len);
2517                }
2518        }
2519}
2520
2521static void cfi_intelext_resume(struct mtd_info *mtd)
2522{
2523        struct map_info *map = mtd->priv;
2524        struct cfi_private *cfi = map->fldrv_priv;
2525        struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2526        int i;
2527        struct flchip *chip;
2528
2529        for (i=0; i<cfi->numchips; i++) {
2530
2531                chip = &cfi->chips[i];
2532
2533                spin_lock(chip->mutex);
2534
2535                /* Go to known state. Chip may have been power cycled */
2536                if (chip->state == FL_PM_SUSPENDED) {
2537                        map_write(map, CMD(0xFF), cfi->chips[i].start);
2538                        chip->oldstate = chip->state = FL_READY;
2539                        wake_up(&chip->wq);
2540                }
2541
2542                spin_unlock(chip->mutex);
2543        }
2544
2545        if ((mtd->flags & MTD_POWERUP_LOCK)
2546            && extp && (extp->FeatureSupport & (1 << 5)))
2547                cfi_intelext_restore_locks(mtd);
2548}
2549
2550static int cfi_intelext_reset(struct mtd_info *mtd)
2551{
2552        struct map_info *map = mtd->priv;
2553        struct cfi_private *cfi = map->fldrv_priv;
2554        int i, ret;
2555
2556        for (i=0; i < cfi->numchips; i++) {
2557                struct flchip *chip = &cfi->chips[i];
2558
2559                /* force the completion of any ongoing operation
2560                   and switch to array mode so any bootloader in
2561                   flash is accessible for soft reboot. */
2562                spin_lock(chip->mutex);
2563                ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
2564                if (!ret) {
2565                        map_write(map, CMD(0xff), chip->start);
2566                        chip->state = FL_SHUTDOWN;
2567                }
2568                spin_unlock(chip->mutex);
2569        }
2570
2571        return 0;
2572}
2573
2574static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
2575                               void *v)
2576{
2577        struct mtd_info *mtd;
2578
2579        mtd = container_of(nb, struct mtd_info, reboot_notifier);
2580        cfi_intelext_reset(mtd);
2581        return NOTIFY_DONE;
2582}
2583
2584static void cfi_intelext_destroy(struct mtd_info *mtd)
2585{
2586        struct map_info *map = mtd->priv;
2587        struct cfi_private *cfi = map->fldrv_priv;
2588        struct mtd_erase_region_info *region;
2589        int i;
2590        cfi_intelext_reset(mtd);
2591        unregister_reboot_notifier(&mtd->reboot_notifier);
2592        kfree(cfi->cmdset_priv);
2593        kfree(cfi->cfiq);
2594        kfree(cfi->chips[0].priv);
2595        kfree(cfi);
2596        for (i = 0; i < mtd->numeraseregions; i++) {
2597                region = &mtd->eraseregions[i];
2598                kfree(region->lockmap); /* kfree(NULL) is a no-op */
2600        }
2601        kfree(mtd->eraseregions);
2602}
2603
2604MODULE_LICENSE("GPL");
2605MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
2606MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
2607MODULE_ALIAS("cfi_cmdset_0003");
2608MODULE_ALIAS("cfi_cmdset_0200");
2609