linux/drivers/mtd/chips/cfi_cmdset_0001.c
/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 *
 * 10/10/2000   Nicolas Pitre <nico@fluxnic.net>
 *      - completely revamped method functions so they are aware and
 *        independent of the flash geometry (buswidth, interleave, etc.)
 *      - scalability vs code size is completely set at compile-time
 *        (see include/linux/mtd/cfi.h for selection)
 *      - optimized write buffer method
 * 02/05/2002   Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 *      - reworked lock/unlock/erase support for var size flash
 * 21/03/2007   Rodolfo Giometti <giometti@linux.it>
 *      - auto unlock sectors on resume for auto locking flash on power up
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/bitmap.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

/* debugging, turns off buffer write mode if set to 1 */
#define FORCE_WORD_WRITE 0

/* Intel chips */
#define I82802AB        0x00ad
#define I82802AC        0x00ac
#define PF38F4476       0x881c
/* STMicroelectronics chips */
#define M50LPW080       0x002F
#define M50FLW080A      0x0080
#define M50FLW080B      0x0081
/* Atmel chips */
#define AT49BV640D      0x02de
#define AT49BV640DT     0x02db

static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
                                  uint64_t len);
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
                                            struct otp_info *, size_t);
static int cfi_intelext_get_user_prot_info (struct mtd_info *,
                                            struct otp_info *, size_t);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
                     size_t *retlen, void **virt, resource_size_t *phys);
static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len);

static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
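/*
 * fwh_lock.h provides the (un)lock fixups for FWH-attached parts; it
 * uses the get_chip()/put_chip() helpers, hence the forward
 * declarations above.
 */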
#include "fwh_lock.h"



/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
        .probe          = NULL, /* Not usable directly */
        .destroy        = cfi_intelext_destroy,
        .name           = "cfi_cmdset_0001",
        .module         = THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
        int i;
        printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
        printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
        printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
        printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
        printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
        printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
        printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
        printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
        printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
        printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
        printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
        printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
        printk("     - Extended Flash Array:    %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
        for (i=11; i<32; i++) {
                if (extp->FeatureSupport & (1<<i))
                        printk("     - Unknown Bit %X:      supported\n", i);
        }

        printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
        printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
        for (i=1; i<8; i++) {
                if (extp->SuspendCmdSupport & (1<<i))
                        printk("     - Unknown Bit %X:               supported\n", i);
        }

        printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
        printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
        printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
        for (i=2; i<3; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }
        printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
        printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
        for (i=6; i<16; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }

        printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
               extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
        if (extp->VppOptimal)
                printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
                       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

/* Atmel chips don't use the same PRI format as Intel chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;
        struct cfi_pri_atmel atmel_pri;
        uint32_t features = 0;

        /* Reverse byteswapping */
        extp->FeatureSupport = cpu_to_le32(extp->FeatureSupport);
        extp->BlkStatusRegMask = cpu_to_le16(extp->BlkStatusRegMask);
        extp->ProtRegAddr = cpu_to_le16(extp->ProtRegAddr);

        memcpy(&atmel_pri, extp, sizeof(atmel_pri));
        memset((char *)extp + 5, 0, sizeof(*extp) - 5);

        printk(KERN_ERR "atmel Features: %02x\n", atmel_pri.Features);

        if (atmel_pri.Features & 0x01) /* chip erase supported */
                features |= (1<<0);
        if (atmel_pri.Features & 0x02) /* erase suspend supported */
                features |= (1<<1);
        if (atmel_pri.Features & 0x04) /* program suspend supported */
                features |= (1<<2);
        if (atmel_pri.Features & 0x08) /* simultaneous operations supported */
                features |= (1<<9);
        if (atmel_pri.Features & 0x20) /* page mode read supported */
                features |= (1<<7);
        if (atmel_pri.Features & 0x40) /* queued erase supported */
                features |= (1<<4);
        if (atmel_pri.Features & 0x80) /* Protection bits supported */
                features |= (1<<6);

        extp->FeatureSupport = features;

        /* burst write mode not supported */
        cfi->cfiq->BufWriteTimeoutTyp = 0;
        cfi->cfiq->BufWriteTimeoutMax = 0;
}

static void fixup_at49bv640dx_lock(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        cfip->FeatureSupport |= (1 << 5);
        mtd->flags |= MTD_POWERUP_LOCK;
}

#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
                            "erase on write disabled.\n");
        extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        if (cfip && (cfip->FeatureSupport&4)) {
                cfip->FeatureSupport &= ~4;
                printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
        }
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        cfi->cfiq->BufWriteTimeoutTyp = 0;      /* Not supported */
        cfi->cfiq->BufWriteTimeoutMax = 0;      /* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        /* Note this is done after the region info is endian swapped */
        cfi->cfiq->EraseRegionInfo[1] =
                (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}

static void fixup_use_point(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        if (!mtd->_point && map_is_linear(map)) {
                mtd->_point   = cfi_intelext_point;
                mtd->_unpoint = cfi_intelext_unpoint;
        }
}

static void fixup_use_write_buffers(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        if (cfi->cfiq->BufWriteTimeoutTyp) {
                printk(KERN_INFO "Using buffer write method\n");
                mtd->_write = cfi_intelext_write_buffers;
                mtd->_writev = cfi_intelext_writev;
        }
}

/*
 * Some chips power-up with all sectors locked by default.
 */
static void fixup_unlock_powerup_lock(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        if (cfip->FeatureSupport&32) {
                printk(KERN_INFO "Using auto-unlock on power-up/resume\n");
                mtd->flags |= MTD_POWERUP_LOCK;
        }
}

static struct cfi_fixup cfi_fixup_table[] = {
        { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
        { CFI_MFR_ATMEL, AT49BV640D, fixup_at49bv640dx_lock },
        { CFI_MFR_ATMEL, AT49BV640DT, fixup_at49bv640dx_lock },
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend },
#endif
#if !FORCE_WORD_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
#endif
        { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct },
        { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb },
        { CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock },
        { 0, 0, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
        { CFI_MFR_INTEL, I82802AB,   fixup_use_fwh_lock },
        { CFI_MFR_INTEL, I82802AC,   fixup_use_fwh_lock },
        { CFI_MFR_ST,    M50LPW080,  fixup_use_fwh_lock },
        { CFI_MFR_ST,    M50FLW080A, fixup_use_fwh_lock },
        { CFI_MFR_ST,    M50FLW080B, fixup_use_fwh_lock },
        { 0, 0, NULL }
};

static struct cfi_fixup fixup_table[] = {
        /* The CFI vendor IDs and the JEDEC vendor IDs appear
         * to be common.  It is likely the device IDs are as
         * well.  This table is to pick up all cases where
         * we know that is the case.
         */
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point },
        { 0, 0, NULL }
};

static void cfi_fixup_major_minor(struct cfi_private *cfi,
                                                struct cfi_pri_intelext *extp)
{
        if (cfi->mfr == CFI_MFR_INTEL &&
                        cfi->id == PF38F4476 && extp->MinorVersion == '3')
                extp->MinorVersion = '1';
}

static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp;
        unsigned int extra_size = 0;
        unsigned int extp_size = sizeof(*extp);

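        /*
         * The extended query table ends in a variable-size 'extra' blob
         * whose layout depends on the minor version.  Read a minimal
         * table first, then walk the version-dependent fields below;
         * whenever the buffer turns out to be too small, free it and
         * re-read the whole thing at the larger size.
         */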
 again:
        extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
        if (!extp)
                return NULL;

        cfi_fixup_major_minor(cfi, extp);

        if (extp->MajorVersion != '1' ||
            (extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
                printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
                       "version %c.%c.\n",  extp->MajorVersion,
                       extp->MinorVersion);
                kfree(extp);
                return NULL;
        }

        /* Do some byteswapping if necessary */
        extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
        extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
        extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

        if (extp->MinorVersion >= '0') {
                extra_size = 0;

                /* Protection Register info */
                extra_size += (extp->NumProtectionFields - 1) *
                              sizeof(struct cfi_intelext_otpinfo);
        }

        if (extp->MinorVersion >= '1') {
                /* Burst Read info */
                extra_size += 2;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                extra_size += extp->extra[extra_size - 1];
        }

        if (extp->MinorVersion >= '3') {
                int nb_parts, i;

                /* Number of hardware-partitions */
                extra_size += 1;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                nb_parts = extp->extra[extra_size - 1];

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        extra_size += 2;

                for (i = 0; i < nb_parts; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
                        extra_size += sizeof(*rinfo);
                        if (extp_size < sizeof(*extp) + extra_size)
                                goto need_more;
                        rinfo->NumIdentPartitions = le16_to_cpu(rinfo->NumIdentPartitions);
                        extra_size += (rinfo->NumBlockTypes - 1)
                                      * sizeof(struct cfi_intelext_blockinfo);
                }

                if (extp->MinorVersion >= '4')
                        extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

                if (extp_size < sizeof(*extp) + extra_size) {
                        need_more:
                        extp_size = sizeof(*extp) + extra_size;
                        kfree(extp);
                        if (extp_size > 4096) {
                                printk(KERN_ERR
                                        "%s: cfi_pri_intelext is too fat\n",
                                        __func__);
                                return NULL;
                        }
                        goto again;
                }
        }

        return extp;
}

struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct mtd_info *mtd;
        int i;

        mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
        if (!mtd) {
                printk(KERN_ERR "Failed to allocate memory for MTD device\n");
                return NULL;
        }
        mtd->priv = map;
        mtd->type = MTD_NORFLASH;

        /* Fill in the default mtd operations */
        mtd->_erase   = cfi_intelext_erase_varsize;
        mtd->_read    = cfi_intelext_read;
        mtd->_write   = cfi_intelext_write_words;
        mtd->_sync    = cfi_intelext_sync;
        mtd->_lock    = cfi_intelext_lock;
        mtd->_unlock  = cfi_intelext_unlock;
        mtd->_is_locked = cfi_intelext_is_locked;
        mtd->_suspend = cfi_intelext_suspend;
        mtd->_resume  = cfi_intelext_resume;
        mtd->flags   = MTD_CAP_NORFLASH;
        mtd->name    = map->name;
        mtd->writesize = 1;
        mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;

        mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

        if (cfi->cfi_mode == CFI_MODE_CFI) {
                /*
                 * It's a real CFI chip, not one for which the probe
                 * routine faked a CFI structure. So we read the feature
                 * table from it.
                 */
                __u16 adr = primary ? cfi->cfiq->P_ADR : cfi->cfiq->A_ADR;
                struct cfi_pri_intelext *extp;

                extp = read_pri_intelext(map, adr);
                if (!extp) {
                        kfree(mtd);
                        return NULL;
                }

                /* Install our own private info structure */
                cfi->cmdset_priv = extp;

                cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
                /* Tell the user about it in lots of lovely detail */
                cfi_tell_features(extp);
#endif

                if (extp->SuspendCmdSupport & 1) {
                        printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
                }
        }
        else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
                /* Apply jedec specific fixups */
                cfi_fixup(mtd, jedec_fixup_table);
        }
        /* Apply generic fixups */
        cfi_fixup(mtd, fixup_table);

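        /*
         * CFI encodes timings as powers of two: typical word/buffer
         * write times are 2^Typ microseconds, typical block erase time
         * is 2^Typ milliseconds, and each maximum is the typical value
         * shifted left by the corresponding Max field.  Hence the
         * shifts below; the constants are conservative fallbacks for
         * chips that report zero.
         */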
        for (i = 0; i < cfi->numchips; i++) {
                if (cfi->cfiq->WordWriteTimeoutTyp)
                        cfi->chips[i].word_write_time =
                                1<<cfi->cfiq->WordWriteTimeoutTyp;
                else
                        cfi->chips[i].word_write_time = 50000;

                if (cfi->cfiq->BufWriteTimeoutTyp)
                        cfi->chips[i].buffer_write_time =
                                1<<cfi->cfiq->BufWriteTimeoutTyp;
                /* No default; if it isn't specified, we won't use it */

                if (cfi->cfiq->BlockEraseTimeoutTyp)
                        cfi->chips[i].erase_time =
                                1000<<cfi->cfiq->BlockEraseTimeoutTyp;
                else
                        cfi->chips[i].erase_time = 2000000;

                if (cfi->cfiq->WordWriteTimeoutTyp &&
                    cfi->cfiq->WordWriteTimeoutMax)
                        cfi->chips[i].word_write_time_max =
                                1<<(cfi->cfiq->WordWriteTimeoutTyp +
                                    cfi->cfiq->WordWriteTimeoutMax);
                else
                        cfi->chips[i].word_write_time_max = 50000 * 8;

                if (cfi->cfiq->BufWriteTimeoutTyp &&
                    cfi->cfiq->BufWriteTimeoutMax)
                        cfi->chips[i].buffer_write_time_max =
                                1<<(cfi->cfiq->BufWriteTimeoutTyp +
                                    cfi->cfiq->BufWriteTimeoutMax);

                if (cfi->cfiq->BlockEraseTimeoutTyp &&
                    cfi->cfiq->BlockEraseTimeoutMax)
                        cfi->chips[i].erase_time_max =
                                1000<<(cfi->cfiq->BlockEraseTimeoutTyp +
                                       cfi->cfiq->BlockEraseTimeoutMax);
                else
                        cfi->chips[i].erase_time_max = 2000000 * 8;

                cfi->chips[i].ref_point_counter = 0;
                init_waitqueue_head(&(cfi->chips[i].wq));
        }

        map->fldrv = &cfi_intelext_chipdrv;

        return cfi_intelext_setup(mtd);
}

struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
EXPORT_SYMBOL_GPL(cfi_cmdset_0200);
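
/*
 * A map driver does not call these entry points directly: it runs the
 * generic CFI probe, which dispatches on the primary vendor command set
 * ID and lands here for 0x0001/0x0003/0x0200.  A minimal sketch (the
 * map_info instance is the driver's own):
 *
 *      struct mtd_info *mtd = do_map_probe("cfi_probe", &my_map);
 *      if (mtd)
 *              mtd_device_register(mtd, NULL, 0);
 */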

static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long offset = 0;
        int i, j;
        unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

        //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

        mtd->size = devsize * cfi->numchips;

        mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
        mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
                        * mtd->numeraseregions, GFP_KERNEL);
        if (!mtd->eraseregions) {
                printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
                goto setup_err;
        }

        for (i = 0; i < cfi->cfiq->NumEraseRegions; i++) {
                unsigned long ernum, ersize;
                ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
                ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

                if (mtd->erasesize < ersize) {
                        mtd->erasesize = ersize;
                }
                for (j = 0; j < cfi->numchips; j++) {
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
                }
                offset += (ersize * ernum);
        }

        if (offset != devsize) {
                /* Argh */
                printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
                goto setup_err;
        }

        for (i = 0; i < mtd->numeraseregions; i++) {
                printk(KERN_DEBUG "erase region %d: offset=0x%llx,size=0x%x,blocks=%d\n",
                       i, (unsigned long long)mtd->eraseregions[i].offset,
                       mtd->eraseregions[i].erasesize,
                       mtd->eraseregions[i].numblocks);
        }

#ifdef CONFIG_MTD_OTP
        mtd->_read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
        mtd->_read_user_prot_reg = cfi_intelext_read_user_prot_reg;
        mtd->_write_user_prot_reg = cfi_intelext_write_user_prot_reg;
        mtd->_lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
        mtd->_get_fact_prot_info = cfi_intelext_get_fact_prot_info;
        mtd->_get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

        /* This function has the potential to distort the reality
           a bit and therefore should be called last. */
        if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
                goto setup_err;

        __module_get(THIS_MODULE);
        register_reboot_notifier(&mtd->reboot_notifier);
        return mtd;

 setup_err:
        kfree(mtd->eraseregions);
        kfree(mtd);
        kfree(cfi->cmdset_priv);
        return NULL;
}

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
                                        struct cfi_private **pcfi)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = *pcfi;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        /*
         * Probing of multi-partition flash chips.
         *
         * To support multiple partitions when available, we simply arrange
         * for each of them to have their own flchip structure even if they
         * are on the same physical chip.  This means completely recreating
         * a new cfi_private structure right here which is a blatant code
         * layering violation, but this is still the least intrusive
         * arrangement at this point. This can be rearranged in the future
         * if someone feels motivated enough.  --nico
         */
        if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
            && extp->FeatureSupport & (1 << 9)) {
                struct cfi_private *newcfi;
                struct flchip *chip;
                struct flchip_shared *shared;
                int offs, numregions, numparts, partshift, numvirtchips, i, j;

                /* Protection Register info */
                offs = (extp->NumProtectionFields - 1) *
                       sizeof(struct cfi_intelext_otpinfo);

                /* Burst Read info */
                offs += extp->extra[offs+1]+2;

                /* Number of partition regions */
                numregions = extp->extra[offs];
                offs += 1;

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        offs += 2;

                /* Number of hardware partitions */
                numparts = 0;
                for (i = 0; i < numregions; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
                        numparts += rinfo->NumIdentPartitions;
                        offs += sizeof(*rinfo)
                                + (rinfo->NumBlockTypes - 1) *
                                  sizeof(struct cfi_intelext_blockinfo);
                }

                if (!numparts)
                        numparts = 1;

                /* Programming Region info */
                if (extp->MinorVersion >= '4') {
                        struct cfi_intelext_programming_regioninfo *prinfo;
                        prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
                        mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
                        mtd->flags &= ~MTD_BIT_WRITEABLE;
                        printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
                               map->name, mtd->writesize,
                               cfi->interleave * prinfo->ControlValid,
                               cfi->interleave * prinfo->ControlInvalid);
                }

                /*
                 * All functions below currently rely on all chips having
                 * the same geometry so we'll just assume that all hardware
                 * partitions are of the same size too.
                 */
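                /*
                 * __ffs(numparts) is log2(numparts) for a power-of-two
                 * partition count (the expected case); a partshift too
                 * small for the erase size is rejected just below.
                 */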
                partshift = cfi->chipshift - __ffs(numparts);

                if ((1 << partshift) < mtd->erasesize) {
                        printk(KERN_ERR
                                "%s: bad number of hw partitions (%d)\n",
                                __func__, numparts);
                        return -EINVAL;
                }

                numvirtchips = cfi->numchips * numparts;
                newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
                if (!newcfi)
                        return -ENOMEM;
                shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
                if (!shared) {
                        kfree(newcfi);
                        return -ENOMEM;
                }
                memcpy(newcfi, cfi, sizeof(struct cfi_private));
                newcfi->numchips = numvirtchips;
                newcfi->chipshift = partshift;

                chip = &newcfi->chips[0];
                for (i = 0; i < cfi->numchips; i++) {
                        shared[i].writing = shared[i].erasing = NULL;
                        mutex_init(&shared[i].lock);
                        for (j = 0; j < numparts; j++) {
                                *chip = cfi->chips[i];
                                chip->start += j << partshift;
                                chip->priv = &shared[i];
                                /* those should be reset too since
                                   they create memory references. */
                                init_waitqueue_head(&chip->wq);
                                mutex_init(&chip->mutex);
                                chip++;
                        }
                }

                printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
                                  "--> %d partitions of %d KiB\n",
                                  map->name, cfi->numchips, cfi->interleave,
                                  newcfi->numchips, 1<<(newcfi->chipshift-10));

                map->fldrv_priv = newcfi;
                *pcfi = newcfi;
                kfree(cfi);
        }

        return 0;
}

/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */
static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        DECLARE_WAITQUEUE(wait, current);
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
        unsigned long timeo = jiffies + HZ;

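        /* SR.7 (0x80) is the WSM ready bit; SR.0 (0x01) is used here as
           the partition write status on multi-partition parts. */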
        /* Prevent setting state FL_SYNCING for chip in suspended state. */
        if (mode == FL_SYNCING && chip->oldstate != FL_READY)
                goto sleep;

        switch (chip->state) {

        case FL_STATUS:
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        /* At this point we're fine with write operations
                           in other partitions as they don't conflict. */
                        if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
                                break;

                        mutex_unlock(&chip->mutex);
                        cfi_udelay(1);
                        mutex_lock(&chip->mutex);
                        /* Someone else might have been playing with it. */
                        return -EAGAIN;
                }
                /* Fall through */
        case FL_READY:
        case FL_CFI_QUERY:
        case FL_JEDEC_QUERY:
                return 0;

        case FL_ERASING:
                if (!cfip ||
                    !(cfip->FeatureSupport & 2) ||
                    !(mode == FL_READY || mode == FL_POINT ||
                     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
                        goto sleep;

                /* Erase suspend */
                map_write(map, CMD(0xB0), adr);

                /* If the flash has finished erasing, then 'erase suspend'
                 * appears to make some (28F320) flash devices switch to
                 * 'read' mode.  Make sure that we switch to 'read status'
                 * mode so we get the right data. --rmk
                 */
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_ERASING;
                chip->state = FL_ERASE_SUSPENDING;
                chip->erase_suspended = 1;
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        if (time_after(jiffies, timeo)) {
                                /* Urgh. Resume and pretend we weren't here.
                                 * Make sure we're in 'read status' mode if it had finished */
                                put_chip(map, chip, adr);
                                printk(KERN_ERR "%s: Chip not ready after erase "
                                       "suspended: status = 0x%lx\n", map->name, status.x[0]);
                                return -EIO;
                        }

                        mutex_unlock(&chip->mutex);
                        cfi_udelay(1);
                        mutex_lock(&chip->mutex);
                        /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
                           So we can just loop here. */
                }
                chip->state = FL_STATUS;
                return 0;

        case FL_XIP_WHILE_ERASING:
                if (mode != FL_READY && mode != FL_POINT &&
                    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
                        goto sleep;
                chip->oldstate = chip->state;
                chip->state = FL_READY;
                return 0;

        case FL_SHUTDOWN:
                /* The machine is rebooting now, so nobody can access the chip anymore */
                return -EIO;
        case FL_POINT:
                /* Only if there's no operation suspended... */
                if (mode == FL_READY && chip->oldstate == FL_READY)
                        return 0;
                /* Fall through */
        default:
        sleep:
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);
                mutex_unlock(&chip->mutex);
                schedule();
                remove_wait_queue(&chip->wq, &wait);
                mutex_lock(&chip->mutex);
                return -EAGAIN;
        }
}

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        int ret;
        DECLARE_WAITQUEUE(wait, current);

 retry:
        if (chip->priv &&
            (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE
            || mode == FL_SHUTDOWN) && chip->state != FL_SYNCING) {
                /*
                 * OK. We have possibility for contention on the write/erase
                 * operations which are global to the real chip and not per
                 * partition.  So let's fight it over in the partition which
                 * currently has authority on the operation.
                 *
                 * The rules are as follows:
                 *
                 * - any write operation must own shared->writing.
                 *
                 * - any erase operation must own _both_ shared->writing and
                 *   shared->erasing.
                 *
                 * - contention arbitration is handled in the owner's context.
                 *
                 * The 'shared' struct can be read and/or written only when
                 * its lock is taken.
                 */
                struct flchip_shared *shared = chip->priv;
                struct flchip *contender;
                mutex_lock(&shared->lock);
                contender = shared->writing;
                if (contender && contender != chip) {
                        /*
                         * The engine to perform the desired operation on this
                         * partition is already in use by someone else.
                         * Let's fight over it in the context of the chip
                         * currently using it.  If it is possible to suspend,
                         * that other partition will do just that, otherwise
                         * it'll happily send us to sleep.  In any case, when
                         * get_chip returns success we're clear to go ahead.
                         */
                        ret = mutex_trylock(&contender->mutex);
                        mutex_unlock(&shared->lock);
                        if (!ret)
                                goto retry;
                        mutex_unlock(&chip->mutex);
                        ret = chip_ready(map, contender, contender->start, mode);
                        mutex_lock(&chip->mutex);

                        if (ret == -EAGAIN) {
                                mutex_unlock(&contender->mutex);
                                goto retry;
                        }
                        if (ret) {
                                mutex_unlock(&contender->mutex);
                                return ret;
                        }
                        mutex_lock(&shared->lock);

                        /* We should not own chip if it is already
                         * in FL_SYNCING state. Put contender and retry. */
                        if (chip->state == FL_SYNCING) {
                                put_chip(map, contender, contender->start);
                                mutex_unlock(&contender->mutex);
                                goto retry;
                        }
                        mutex_unlock(&contender->mutex);
                }

                /* Check if we already have suspended erase
                 * on this chip. Sleep. */
                if (mode == FL_ERASING && shared->erasing
                    && shared->erasing->oldstate == FL_ERASING) {
                        mutex_unlock(&shared->lock);
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        mutex_unlock(&chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        mutex_lock(&chip->mutex);
                        goto retry;
                }

                /* We now own it */
                shared->writing = chip;
                if (mode == FL_ERASING)
                        shared->erasing = chip;
                mutex_unlock(&shared->lock);
        }
        ret = chip_ready(map, chip, adr, mode);
        if (ret == -EAGAIN)
                goto retry;

        return ret;
}

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;

        if (chip->priv) {
                struct flchip_shared *shared = chip->priv;
                mutex_lock(&shared->lock);
                if (shared->writing == chip && chip->oldstate == FL_READY) {
                        /* We own the ability to write, but we're done */
                        shared->writing = shared->erasing;
                        if (shared->writing && shared->writing != chip) {
                                /* give back ownership to who we loaned it from */
                                struct flchip *loaner = shared->writing;
                                mutex_lock(&loaner->mutex);
                                mutex_unlock(&shared->lock);
                                mutex_unlock(&chip->mutex);
                                put_chip(map, loaner, loaner->start);
                                mutex_lock(&chip->mutex);
                                mutex_unlock(&loaner->mutex);
                                wake_up(&chip->wq);
                                return;
                        }
                        shared->erasing = NULL;
                        shared->writing = NULL;
                } else if (shared->erasing == chip && shared->writing != chip) {
                        /*
                         * We own the ability to erase without the ability
                         * to write, which means the erase was suspended
                         * and some other partition is currently writing.
                         * Don't let the switch below mess things up since
                         * we don't have ownership to resume anything.
                         */
                        mutex_unlock(&shared->lock);
                        wake_up(&chip->wq);
                        return;
                }
                mutex_unlock(&shared->lock);
        }

        switch (chip->oldstate) {
        case FL_ERASING:
                /* What if one interleaved chip has finished and the
                   other hasn't? The old code would leave the finished
                   one in READY mode. That's bad, and caused -EROFS
                   errors to be returned from do_erase_oneblock because
                   that's the only bit it checked for at the time.
                   As the state machine appears to explicitly allow
                   sending the 0x70 (Read Status) command to an erasing
                   chip and expecting it to be ignored, that's what we
                   do. */
                map_write(map, CMD(0xd0), adr);
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_READY;
                chip->state = FL_ERASING;
                break;

        case FL_XIP_WHILE_ERASING:
                chip->state = chip->oldstate;
                chip->oldstate = FL_READY;
                break;

        case FL_READY:
        case FL_STATUS:
        case FL_JEDEC_QUERY:
                break;
        default:
                printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
        }
        wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Also, configuring MTD
 * CFI support for a single buswidth and a single interleave is recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
                        unsigned long adr)
{
        /* TODO: chips with no XIP use should ignore and return */
        (void) map_read(map, adr); /* ensure mmu mapping is up to date */
        local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
                                unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xff), adr);
                chip->state = FL_READY;
        }
        (void) map_read(map, adr);
        xip_iprefetch();
        local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_wait_for_operation() function polls for both the given timeout
 * and pending (but still masked) hardware interrupts.  Whenever there is an
 * interrupt pending then the flash erase or write operation is suspended,
 * array mode restored and interrupts unmasked.  Task scheduling might also
 * happen at that point.  The CPU eventually returns from the interrupt or
 * the call to schedule() and the suspended flash operation is resumed for
 * the remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static int __xipram xip_wait_for_operation(
                struct map_info *map, struct flchip *chip,
                unsigned long adr, unsigned int chip_op_time_max)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
        map_word status, OK = CMD(0x80);
        unsigned long usec, suspended, start, done;
        flstate_t oldstate, newstate;

        start = xip_currtime();
        usec = chip_op_time_max;
        if (usec == 0)
                usec = 500000;
        done = 0;

        do {
                cpu_relax();
                if (xip_irqpending() && cfip &&
                    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
                     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
                    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
                        /*
                         * Let's suspend the erase or write operation when
                         * supported.  Note that we currently don't try to
                         * suspend interleaved chips if there is already
                         * another operation suspended (imagine what happens
                         * when one chip was already done with the current
                         * operation while another chip suspended it, then
                         * we resume the whole thing at once).  Yes, it
                         * can happen!
                         */
                        usec -= done;
                        map_write(map, CMD(0xb0), adr);
                        map_write(map, CMD(0x70), adr);
                        suspended = xip_currtime();
                        do {
                                if (xip_elapsed_since(suspended) > 100000) {
                                        /*
                                         * The chip doesn't want to suspend
                                         * after waiting for 100 msecs.
                                         * This is a critical error but there
                                         * is not much we can do here.
                                         */
                                        return -EIO;
                                }
                                status = map_read(map, adr);
                        } while (!map_word_andequal(map, status, OK, OK));

                        /* Suspend succeeded */
                        oldstate = chip->state;
                        if (oldstate == FL_ERASING) {
                                if (!map_word_bitsset(map, status, CMD(0x40)))
                                        break;
                                newstate = FL_XIP_WHILE_ERASING;
                                chip->erase_suspended = 1;
                        } else {
                                if (!map_word_bitsset(map, status, CMD(0x04)))
                                        break;
                                newstate = FL_XIP_WHILE_WRITING;
                                chip->write_suspended = 1;
                        }
                        chip->state = newstate;
                        map_write(map, CMD(0xff), adr);
                        (void) map_read(map, adr);
                        xip_iprefetch();
                        local_irq_enable();
                        mutex_unlock(&chip->mutex);
                        xip_iprefetch();
                        cond_resched();

                        /*
                         * We're back.  However someone else might have
                         * decided to go write to the chip if we are in
                         * a suspended erase state.  If so let's wait
                         * until it's done.
                         */
                        mutex_lock(&chip->mutex);
                        while (chip->state != newstate) {
                                DECLARE_WAITQUEUE(wait, current);
                                set_current_state(TASK_UNINTERRUPTIBLE);
                                add_wait_queue(&chip->wq, &wait);
                                mutex_unlock(&chip->mutex);
                                schedule();
                                remove_wait_queue(&chip->wq, &wait);
                                mutex_lock(&chip->mutex);
                        }
                        /* Disallow XIP again */
                        local_irq_disable();

                        /* Resume the write or erase operation */
                        map_write(map, CMD(0xd0), adr);
                        map_write(map, CMD(0x70), adr);
                        chip->state = oldstate;
                        start = xip_currtime();
                } else if (usec >= 1000000/HZ) {
                        /*
                         * Try to save on CPU power when waiting delay
                         * is at least a system timer tick period.
                         * No need to be extremely accurate here.
                         */
                        xip_cpu_idle();
                }
                status = map_read(map, adr);
                done = xip_elapsed_since(start);
        } while (!map_word_andequal(map, status, OK, OK)
                 && done < usec);

        return (done >= usec) ? -ETIME : 0;
}

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVAL_CACHE_AND_WAIT.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
        INVALIDATE_CACHED_RANGE(map, from, size)

#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec, usec_max) \
        xip_wait_for_operation(map, chip, cmd_adr, usec_max)

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)
#define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation

static int inval_cache_and_wait_for_operation(
                struct map_info *map, struct flchip *chip,
                unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
                unsigned int chip_op_time, unsigned int chip_op_time_max)
{
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK = CMD(0x80);
        int chip_state = chip->state;
        unsigned int timeo, sleep_time, reset_timeo;

        mutex_unlock(&chip->mutex);
        if (inval_len)
                INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
        mutex_lock(&chip->mutex);

        timeo = chip_op_time_max;
        if (!timeo)
                timeo = 500000;
        reset_timeo = timeo;
1224        sleep_time = chip_op_time / 2;
1225
1226        for (;;) {
1227                if (chip->state != chip_state) {
1228                        /* Someone's suspended the operation: sleep */
1229                        DECLARE_WAITQUEUE(wait, current);
1230                        set_current_state(TASK_UNINTERRUPTIBLE);
1231                        add_wait_queue(&chip->wq, &wait);
1232                        mutex_unlock(&chip->mutex);
1233                        schedule();
1234                        remove_wait_queue(&chip->wq, &wait);
1235                        mutex_lock(&chip->mutex);
1236                        continue;
1237                }
1238
1239                status = map_read(map, cmd_adr);
1240                if (map_word_andequal(map, status, status_OK, status_OK))
1241                        break;
1242
1243                if (chip->erase_suspended && chip_state == FL_ERASING)  {
1244                        /* Erase suspend occurred while we slept: reset the timeout */
1245                        timeo = reset_timeo;
1246                        chip->erase_suspended = 0;
1247                }
1248                if (chip->write_suspended && chip_state == FL_WRITING)  {
1249                        /* Write suspend occurred while we slept: reset the timeout */
1250                        timeo = reset_timeo;
1251                        chip->write_suspended = 0;
1252                }
1253                if (!timeo) {
1254                        map_write(map, CMD(0x70), cmd_adr);
1255                        chip->state = FL_STATUS;
1256                        return -ETIME;
1257                }
1258
1259                /* OK, still waiting. Drop the lock, wait a while and retry. */
1260                mutex_unlock(&chip->mutex);
1261                if (sleep_time >= 1000000/HZ) {
1262                        /*
1263                         * Half of the normal delay still remaining
1264                         * can be performed with a sleeping delay instead
1265                         * of busy waiting.
1266                         */
1267                        msleep(sleep_time/1000);
1268                        timeo -= sleep_time;
1269                        sleep_time = 1000000/HZ;
1270                } else {
1271                        udelay(1);
1272                        cond_resched();
1273                        timeo--;
1274                }
1275                mutex_lock(&chip->mutex);
1276        }
1277
1278        /* Done and happy. */
1279        chip->state = FL_STATUS;
1280        return 0;
1281}
1282
1283#endif
1284
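/*
 * Status-register poll with no cache invalidation: the inval_adr and
 * inval_len arguments of INVAL_CACHE_AND_WAIT are forced to zero.
 * 'udelay' is the typical operation time in microseconds, 'udelay_max'
 * the hard timeout.
 */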
1285#define WAIT_TIMEOUT(map, chip, adr, udelay, udelay_max) \
1286        INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay, udelay_max)
1287
1288
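/*
 * Switch one chip into array (read) mode so its window can be accessed
 * directly.  A reference count permits nested point/unpoint calls: the
 * chip stays in FL_POINT until the last reference is dropped.
 */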
1289static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
1290{
1291        unsigned long cmd_addr;
1292        struct cfi_private *cfi = map->fldrv_priv;
1293        int ret = 0;
1294
1295        adr += chip->start;
1296
1297        /* Ensure cmd read/writes are aligned. */
1298        cmd_addr = adr & ~(map_bankwidth(map)-1);
1299
1300        mutex_lock(&chip->mutex);
1301
1302        ret = get_chip(map, chip, cmd_addr, FL_POINT);
1303
1304        if (!ret) {
1305                if (chip->state != FL_POINT && chip->state != FL_READY)
1306                        map_write(map, CMD(0xff), cmd_addr);
1307
1308                chip->state = FL_POINT;
1309                chip->ref_point_counter++;
1310        }
1311        mutex_unlock(&chip->mutex);
1312
1313        return ret;
1314}
1315
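/*
 * MTD point() method: return a direct pointer (and, if requested, the
 * physical address) into the memory-mapped flash.  The mapping may span
 * several chips, but only while they are virtually contiguous.
 */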
1316static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
1317                size_t *retlen, void **virt, resource_size_t *phys)
1318{
1319        struct map_info *map = mtd->priv;
1320        struct cfi_private *cfi = map->fldrv_priv;
1321        unsigned long ofs, last_end = 0;
1322        int chipnum;
1323        int ret = 0;
1324
1325        if (!map->virt)
1326                return -EINVAL;
1327
1328        /* Now lock the chip(s) to POINT state */
1329
1330        /* ofs: offset within the first chip that the first read should start */
1331        chipnum = (from >> cfi->chipshift);
1332        ofs = from - (chipnum << cfi->chipshift);
1333
1334        *virt = map->virt + cfi->chips[chipnum].start + ofs;
1335        if (phys)
1336                *phys = map->phys + cfi->chips[chipnum].start + ofs;
1337
1338        while (len) {
1339                unsigned long thislen;
1340
1341                if (chipnum >= cfi->numchips)
1342                        break;
1343
1344                /* We cannot point across chips that are virtually disjoint */
1345                if (!last_end)
1346                        last_end = cfi->chips[chipnum].start;
1347                else if (cfi->chips[chipnum].start != last_end)
1348                        break;
1349
1350                if ((len + ofs - 1) >> cfi->chipshift)
1351                        thislen = (1<<cfi->chipshift) - ofs;
1352                else
1353                        thislen = len;
1354
1355                ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
1356                if (ret)
1357                        break;
1358
1359                *retlen += thislen;
1360                len -= thislen;
1361
1362                ofs = 0;
1363                last_end += 1 << cfi->chipshift;
1364                chipnum++;
1365        }
1366        return 0;
1367}
1368
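/*
 * MTD unpoint() method: drop the references taken by cfi_intelext_point()
 * and let each chip leave FL_POINT once its reference count reaches zero.
 */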
1369static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
1370{
1371        struct map_info *map = mtd->priv;
1372        struct cfi_private *cfi = map->fldrv_priv;
1373        unsigned long ofs;
1374        int chipnum, err = 0;
1375
1376        /* Now unlock the chip(s) POINT state */
1377
1378        /* ofs: offset within the first chip that the first read should start */
1379        chipnum = (from >> cfi->chipshift);
1380        ofs = from - (chipnum <<  cfi->chipshift);
1381
1382        while (len && !err) {
1383                unsigned long thislen;
1384                struct flchip *chip;
1385
1386                if (chipnum >= cfi->numchips)
1387                        break;
1388                chip = &cfi->chips[chipnum];
1389
1390                if ((len + ofs - 1) >> cfi->chipshift)
1391                        thislen = (1<<cfi->chipshift) - ofs;
1392                else
1393                        thislen = len;
1394
1395                mutex_lock(&chip->mutex);
1396                if (chip->state == FL_POINT) {
1397                        chip->ref_point_counter--;
1398                        if (chip->ref_point_counter == 0)
1399                                chip->state = FL_READY;
1400                } else {
1401                        printk(KERN_ERR "%s: Error: unpoint called on non-pointed region\n", map->name);
1402                        err = -EINVAL;
1403                }
1404
1405                put_chip(map, chip, chip->start);
1406                mutex_unlock(&chip->mutex);
1407
1408                len -= thislen;
1409                ofs = 0;
1410                chipnum++;
1411        }
1412
1413        return err;
1414}
1415
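/*
 * Read from one chip: issue Read Array (0xff) if the chip isn't already
 * in array mode, then copy straight out of the mapping.
 */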
1416static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
1417{
1418        unsigned long cmd_addr;
1419        struct cfi_private *cfi = map->fldrv_priv;
1420        int ret;
1421
1422        adr += chip->start;
1423
1424        /* Ensure cmd read/writes are aligned. */
1425        cmd_addr = adr & ~(map_bankwidth(map)-1);
1426
1427        mutex_lock(&chip->mutex);
1428        ret = get_chip(map, chip, cmd_addr, FL_READY);
1429        if (ret) {
1430                mutex_unlock(&chip->mutex);
1431                return ret;
1432        }
1433
1434        if (chip->state != FL_POINT && chip->state != FL_READY) {
1435                map_write(map, CMD(0xff), cmd_addr);
1436
1437                chip->state = FL_READY;
1438        }
1439
1440        map_copy_from(map, buf, adr, len);
1441
1442        put_chip(map, chip, cmd_addr);
1443
1444        mutex_unlock(&chip->mutex);
1445        return 0;
1446}
1447
1448static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1449{
1450        struct map_info *map = mtd->priv;
1451        struct cfi_private *cfi = map->fldrv_priv;
1452        unsigned long ofs;
1453        int chipnum;
1454        int ret = 0;
1455
1456        /* ofs: offset within the first chip that the first read should start */
1457        chipnum = (from >> cfi->chipshift);
1458        ofs = from - (chipnum <<  cfi->chipshift);
1459
1460        while (len) {
1461                unsigned long thislen;
1462
1463                if (chipnum >= cfi->numchips)
1464                        break;
1465
1466                if ((len + ofs - 1) >> cfi->chipshift)
1467                        thislen = (1<<cfi->chipshift) - ofs;
1468                else
1469                        thislen = len;
1470
1471                ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1472                if (ret)
1473                        break;
1474
1475                *retlen += thislen;
1476                len -= thislen;
1477                buf += thislen;
1478
1479                ofs = 0;
1480                chipnum++;
1481        }
1482        return ret;
1483}
1484
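/*
 * Program a single bus-width word: Word Program (0x40, or 0x41 on
 * Performance-family parts; 0xc0 for OTP protection registers) followed
 * by the datum.  Afterwards the status register is checked: SR.1 (0x02)
 * means the block is locked, SR.3 (0x08) a VPP error, SR.4 a program
 * failure.
 */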
1485static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1486                                     unsigned long adr, map_word datum, int mode)
1487{
1488        struct cfi_private *cfi = map->fldrv_priv;
1489        map_word status, write_cmd;
1490        int ret=0;
1491
1492        adr += chip->start;
1493
1494        switch (mode) {
1495        case FL_WRITING:
1496                write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0x40) : CMD(0x41);
1497                break;
1498        case FL_OTP_WRITE:
1499                write_cmd = CMD(0xc0);
1500                break;
1501        default:
1502                return -EINVAL;
1503        }
1504
1505        mutex_lock(&chip->mutex);
1506        ret = get_chip(map, chip, adr, mode);
1507        if (ret) {
1508                mutex_unlock(&chip->mutex);
1509                return ret;
1510        }
1511
1512        XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1513        ENABLE_VPP(map);
1514        xip_disable(map, chip, adr);
1515        map_write(map, write_cmd, adr);
1516        map_write(map, datum, adr);
1517        chip->state = mode;
1518
1519        ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1520                                   adr, map_bankwidth(map),
1521                                   chip->word_write_time,
1522                                   chip->word_write_time_max);
1523        if (ret) {
1524                xip_enable(map, chip, adr);
1525                printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
1526                goto out;
1527        }
1528
1529        /* check for errors */
1530        status = map_read(map, adr);
1531        if (map_word_bitsset(map, status, CMD(0x1a))) {
1532                unsigned long chipstatus = MERGESTATUS(status);
1533
1534                /* reset status */
1535                map_write(map, CMD(0x50), adr);
1536                map_write(map, CMD(0x70), adr);
1537                xip_enable(map, chip, adr);
1538
1539                if (chipstatus & 0x02) {
1540                        ret = -EROFS;
1541                } else if (chipstatus & 0x08) {
1542                        printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
1543                        ret = -EIO;
1544                } else {
1545                        printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
1546                        ret = -EINVAL;
1547                }
1548
1549                goto out;
1550        }
1551
1552        xip_enable(map, chip, adr);
1553 out:   DISABLE_VPP(map);
1554        put_chip(map, chip, adr);
1555        mutex_unlock(&chip->mutex);
1556        return ret;
1557}
1558
1559
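/*
 * MTD write() method using single-word programming.  Misaligned head and
 * tail bytes are merged into full bus words padded with 0xff (programming
 * a 1 bit leaves the flash cell unchanged), so only whole, aligned words
 * are ever sent to the chip.
 */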
1560static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
1561{
1562        struct map_info *map = mtd->priv;
1563        struct cfi_private *cfi = map->fldrv_priv;
1564        int ret = 0;
1565        int chipnum;
1566        unsigned long ofs;
1567
1568        chipnum = to >> cfi->chipshift;
1569        ofs = to  - (chipnum << cfi->chipshift);
1570
1571        /* If it's not bus-aligned, do the leading partial-word write */
1572        if (ofs & (map_bankwidth(map)-1)) {
1573                unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1574                int gap = ofs - bus_ofs;
1575                int n;
1576                map_word datum;
1577
1578                n = min_t(int, len, map_bankwidth(map)-gap);
1579                datum = map_word_ff(map);
1580                datum = map_word_load_partial(map, datum, buf, gap, n);
1581
1582                ret = do_write_oneword(map, &cfi->chips[chipnum],
1583                                               bus_ofs, datum, FL_WRITING);
1584                if (ret)
1585                        return ret;
1586
1587                len -= n;
1588                ofs += n;
1589                buf += n;
1590                (*retlen) += n;
1591
1592                if (ofs >> cfi->chipshift) {
1593                        chipnum++;
1594                        ofs = 0;
1595                        if (chipnum == cfi->numchips)
1596                                return 0;
1597                }
1598        }
1599
1600        while(len >= map_bankwidth(map)) {
1601                map_word datum = map_word_load(map, buf);
1602
1603                ret = do_write_oneword(map, &cfi->chips[chipnum],
1604                                       ofs, datum, FL_WRITING);
1605                if (ret)
1606                        return ret;
1607
1608                ofs += map_bankwidth(map);
1609                buf += map_bankwidth(map);
1610                (*retlen) += map_bankwidth(map);
1611                len -= map_bankwidth(map);
1612
1613                if (ofs >> cfi->chipshift) {
1614                        chipnum++;
1615                        ofs = 0;
1616                        if (chipnum == cfi->numchips)
1617                                return 0;
1618                }
1619        }
1620
1621        if (len & (map_bankwidth(map)-1)) {
1622                map_word datum;
1623
1624                datum = map_word_ff(map);
1625                datum = map_word_load_partial(map, datum, buf, 0, len);
1626
1627                ret = do_write_oneword(map, &cfi->chips[chipnum],
1628                                       ofs, datum, FL_WRITING);
1629                if (ret)
1630                        return ret;
1631
1632                (*retlen) += len;
1633        }
1634
1635        return 0;
1636}
1637
1638
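/*
 * Buffered write of up to one write buffer, never crossing a
 * buffer-aligned boundary: Write to Buffer (0xe8, or 0xe9 on
 * Performance-family parts), wait for buffer availability, word count,
 * data, then the 0xd0 confirm.  The data is gathered from the caller's
 * kvec array, with *pvec/*pvec_seek carrying the position across calls.
 */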
1639static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1640                                    unsigned long adr, const struct kvec **pvec,
1641                                    unsigned long *pvec_seek, int len)
1642{
1643        struct cfi_private *cfi = map->fldrv_priv;
1644        map_word status, write_cmd, datum;
1645        unsigned long cmd_adr;
1646        int ret, wbufsize, word_gap, words;
1647        const struct kvec *vec;
1648        unsigned long vec_seek;
1649        unsigned long initial_adr;
1650        int initial_len = len;
1651
1652        wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1653        adr += chip->start;
1654        initial_adr = adr;
1655        cmd_adr = adr & ~(wbufsize-1);
1656
1657        /* Let's determine this according to the interleave only once */
1658        write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0xe8) : CMD(0xe9);
1659
1660        mutex_lock(&chip->mutex);
1661        ret = get_chip(map, chip, cmd_adr, FL_WRITING);
1662        if (ret) {
1663                mutex_unlock(&chip->mutex);
1664                return ret;
1665        }
1666
1667        XIP_INVAL_CACHED_RANGE(map, initial_adr, initial_len);
1668        ENABLE_VPP(map);
1669        xip_disable(map, chip, cmd_adr);
1670
1671        /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1672           [...], the device will not accept any more Write to Buffer commands".
1673           So we must check here and reset those bits if they're set. Otherwise
1674           we're just pissing in the wind */
1675        if (chip->state != FL_STATUS) {
1676                map_write(map, CMD(0x70), cmd_adr);
1677                chip->state = FL_STATUS;
1678        }
1679        status = map_read(map, cmd_adr);
1680        if (map_word_bitsset(map, status, CMD(0x30))) {
1681                xip_enable(map, chip, cmd_adr);
1682                printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
1683                xip_disable(map, chip, cmd_adr);
1684                map_write(map, CMD(0x50), cmd_adr);
1685                map_write(map, CMD(0x70), cmd_adr);
1686        }
1687
1688        chip->state = FL_WRITING_TO_BUFFER;
1689        map_write(map, write_cmd, cmd_adr);
1690        ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0, 0);
1691        if (ret) {
1692                /* Argh. Not ready for write to buffer */
1693                map_word Xstatus = map_read(map, cmd_adr);
1694                map_write(map, CMD(0x70), cmd_adr);
1695                chip->state = FL_STATUS;
1696                status = map_read(map, cmd_adr);
1697                map_write(map, CMD(0x50), cmd_adr);
1698                map_write(map, CMD(0x70), cmd_adr);
1699                xip_enable(map, chip, cmd_adr);
1700                printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
1701                                map->name, Xstatus.x[0], status.x[0]);
1702                goto out;
1703        }
1704
1705        /* Figure out the number of words to write */
1706        word_gap = (-adr & (map_bankwidth(map)-1));
1707        words = DIV_ROUND_UP(len - word_gap, map_bankwidth(map));
1708        if (!word_gap) {
1709                words--;
1710        } else {
1711                word_gap = map_bankwidth(map) - word_gap;
1712                adr -= word_gap;
1713                datum = map_word_ff(map);
1714        }
1715
1716        /* Write length of data to come */
1717        map_write(map, CMD(words), cmd_adr);
1718
1719        /* Write data */
1720        vec = *pvec;
1721        vec_seek = *pvec_seek;
1722        do {
1723                int n = map_bankwidth(map) - word_gap;
1724                if (n > vec->iov_len - vec_seek)
1725                        n = vec->iov_len - vec_seek;
1726                if (n > len)
1727                        n = len;
1728
1729                if (!word_gap && len < map_bankwidth(map))
1730                        datum = map_word_ff(map);
1731
1732                datum = map_word_load_partial(map, datum,
1733                                              vec->iov_base + vec_seek,
1734                                              word_gap, n);
1735
1736                len -= n;
1737                word_gap += n;
1738                if (!len || word_gap == map_bankwidth(map)) {
1739                        map_write(map, datum, adr);
1740                        adr += map_bankwidth(map);
1741                        word_gap = 0;
1742                }
1743
1744                vec_seek += n;
1745                if (vec_seek == vec->iov_len) {
1746                        vec++;
1747                        vec_seek = 0;
1748                }
1749        } while (len);
1750        *pvec = vec;
1751        *pvec_seek = vec_seek;
1752
1753        /* GO GO GO */
1754        map_write(map, CMD(0xd0), cmd_adr);
1755        chip->state = FL_WRITING;
1756
1757        ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
1758                                   initial_adr, initial_len,
1759                                   chip->buffer_write_time,
1760                                   chip->buffer_write_time_max);
1761        if (ret) {
1762                map_write(map, CMD(0x70), cmd_adr);
1763                chip->state = FL_STATUS;
1764                xip_enable(map, chip, cmd_adr);
1765                printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
1766                goto out;
1767        }
1768
1769        /* check for errors */
1770        status = map_read(map, cmd_adr);
1771        if (map_word_bitsset(map, status, CMD(0x1a))) {
1772                unsigned long chipstatus = MERGESTATUS(status);
1773
1774                /* reset status */
1775                map_write(map, CMD(0x50), cmd_adr);
1776                map_write(map, CMD(0x70), cmd_adr);
1777                xip_enable(map, chip, cmd_adr);
1778
1779                if (chipstatus & 0x02) {
1780                        ret = -EROFS;
1781                } else if (chipstatus & 0x08) {
1782                        printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
1783                        ret = -EIO;
1784                } else {
1785                        printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
1786                        ret = -EINVAL;
1787                }
1788
1789                goto out;
1790        }
1791
1792        xip_enable(map, chip, cmd_adr);
1793 out:   DISABLE_VPP(map);
1794        put_chip(map, chip, cmd_adr);
1795        mutex_unlock(&chip->mutex);
1796        return ret;
1797}
1798
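/*
 * MTD writev() method: split the iovec stream into chunks that never
 * cross a write-buffer boundary and feed each one to do_write_buffer().
 */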
1799static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
1800                                unsigned long count, loff_t to, size_t *retlen)
1801{
1802        struct map_info *map = mtd->priv;
1803        struct cfi_private *cfi = map->fldrv_priv;
1804        int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1805        int ret = 0;
1806        int chipnum;
1807        unsigned long ofs, vec_seek, i;
1808        size_t len = 0;
1809
1810        for (i = 0; i < count; i++)
1811                len += vecs[i].iov_len;
1812
1813        if (!len)
1814                return 0;
1815
1816        chipnum = to >> cfi->chipshift;
1817        ofs = to - (chipnum << cfi->chipshift);
1818        vec_seek = 0;
1819
1820        do {
1821                /* We must not cross write block boundaries */
1822                int size = wbufsize - (ofs & (wbufsize-1));
1823
1824                if (size > len)
1825                        size = len;
1826                ret = do_write_buffer(map, &cfi->chips[chipnum],
1827                                      ofs, &vecs, &vec_seek, size);
1828                if (ret)
1829                        return ret;
1830
1831                ofs += size;
1832                (*retlen) += size;
1833                len -= size;
1834
1835                if (ofs >> cfi->chipshift) {
1836                        chipnum++;
1837                        ofs = 0;
1838                        if (chipnum == cfi->numchips)
1839                                return 0;
1840                }
1841
1842                /* Be nice and reschedule with the chip in a usable state for other
1843                   processes. */
1844                cond_resched();
1845
1846        } while (len);
1847
1848        return 0;
1849}
1850
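/* MTD write() method for buffered writes: wrap buf in a one-entry kvec. */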
1851static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1852                                       size_t len, size_t *retlen, const u_char *buf)
1853{
1854        struct kvec vec;
1855
1856        vec.iov_base = (void *) buf;
1857        vec.iov_len = len;
1858
1859        return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
1860}
1861
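/*
 * Erase one block: Clear Status (0x50), then Block Erase (0x20) plus
 * confirm (0xd0), then poll.  An erase failure flagged by SR.5 (0x20)
 * is retried up to three times before giving up.
 */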
1862static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1863                                      unsigned long adr, int len, void *thunk)
1864{
1865        struct cfi_private *cfi = map->fldrv_priv;
1866        map_word status;
1867        int retries = 3;
1868        int ret;
1869
1870        adr += chip->start;
1871
1872 retry:
1873        mutex_lock(&chip->mutex);
1874        ret = get_chip(map, chip, adr, FL_ERASING);
1875        if (ret) {
1876                mutex_unlock(&chip->mutex);
1877                return ret;
1878        }
1879
1880        XIP_INVAL_CACHED_RANGE(map, adr, len);
1881        ENABLE_VPP(map);
1882        xip_disable(map, chip, adr);
1883
1884        /* Clear the status register first */
1885        map_write(map, CMD(0x50), adr);
1886
1887        /* Now erase */
1888        map_write(map, CMD(0x20), adr);
1889        map_write(map, CMD(0xD0), adr);
1890        chip->state = FL_ERASING;
1891        chip->erase_suspended = 0;
1892
1893        ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1894                                   adr, len,
1895                                   chip->erase_time,
1896                                   chip->erase_time_max);
1897        if (ret) {
1898                map_write(map, CMD(0x70), adr);
1899                chip->state = FL_STATUS;
1900                xip_enable(map, chip, adr);
1901                printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
1902                goto out;
1903        }
1904
1905        /* We've broken this before. It doesn't hurt to be safe */
1906        map_write(map, CMD(0x70), adr);
1907        chip->state = FL_STATUS;
1908        status = map_read(map, adr);
1909
1910        /* check for errors */
1911        if (map_word_bitsset(map, status, CMD(0x3a))) {
1912                unsigned long chipstatus = MERGESTATUS(status);
1913
1914                /* Reset the error bits */
1915                map_write(map, CMD(0x50), adr);
1916                map_write(map, CMD(0x70), adr);
1917                xip_enable(map, chip, adr);
1918
1919                if ((chipstatus & 0x30) == 0x30) {
1920                        printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
1921                        ret = -EINVAL;
1922                } else if (chipstatus & 0x02) {
1923                        /* Protection bit set */
1924                        ret = -EROFS;
1925                } else if (chipstatus & 0x08) {
1926                        /* Voltage */
1927                        printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
1928                        ret = -EIO;
1929                } else if ((chipstatus & 0x20) && retries--) {
1930                        printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
1931                        DISABLE_VPP(map);
1932                        put_chip(map, chip, adr);
1933                        mutex_unlock(&chip->mutex);
1934                        goto retry;
1935                } else {
1936                        printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
1937                        ret = -EIO;
1938                }
1939
1940                goto out;
1941        }
1942
1943        xip_enable(map, chip, adr);
1944 out:   DISABLE_VPP(map);
1945        put_chip(map, chip, adr);
1946        mutex_unlock(&chip->mutex);
1947        return ret;
1948}
1949
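/* MTD erase() method: erase every block in the range, then signal completion. */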
1950static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1951{
1952        unsigned long ofs, len;
1953        int ret;
1954
1955        ofs = instr->addr;
1956        len = instr->len;
1957
1958        ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1959        if (ret)
1960                return ret;
1961
1962        instr->state = MTD_ERASE_DONE;
1963        mtd_erase_callback(instr);
1964
1965        return 0;
1966}
1967
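/*
 * MTD sync() method: wait for each chip to become idle and park it in
 * FL_SYNCING, then wake all waiters and release the chips again.
 */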
1968static void cfi_intelext_sync (struct mtd_info *mtd)
1969{
1970        struct map_info *map = mtd->priv;
1971        struct cfi_private *cfi = map->fldrv_priv;
1972        int i;
1973        struct flchip *chip;
1974        int ret = 0;
1975
1976        for (i=0; !ret && i<cfi->numchips; i++) {
1977                chip = &cfi->chips[i];
1978
1979                mutex_lock(&chip->mutex);
1980                ret = get_chip(map, chip, chip->start, FL_SYNCING);
1981
1982                if (!ret) {
1983                        chip->oldstate = chip->state;
1984                        chip->state = FL_SYNCING;
1985                        /* No need to wake_up() on this state change -
1986                         * as the whole point is that nobody can do anything
1987                         * with the chip now anyway.
1988                         */
1989                }
1990                mutex_unlock(&chip->mutex);
1991        }
1992
1993        /* Unlock the chips again */
1994
1995        for (i--; i >=0; i--) {
1996                chip = &cfi->chips[i];
1997
1998                mutex_lock(&chip->mutex);
1999
2000                if (chip->state == FL_SYNCING) {
2001                        chip->state = chip->oldstate;
2002                        chip->oldstate = FL_READY;
2003                        wake_up(&chip->wq);
2004                }
2005                mutex_unlock(&chip->mutex);
2006        }
2007}
2008
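/*
 * Fetch the block lock status: enter the identifier/query mode (0x90)
 * and read the lock configuration word at offset 2 of the block.
 */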
2009static int __xipram do_getlockstatus_oneblock(struct map_info *map,
2010                                                struct flchip *chip,
2011                                                unsigned long adr,
2012                                                int len, void *thunk)
2013{
2014        struct cfi_private *cfi = map->fldrv_priv;
2015        int status, ofs_factor = cfi->interleave * cfi->device_type;
2016
2017        adr += chip->start;
2018        xip_disable(map, chip, adr+(2*ofs_factor));
2019        map_write(map, CMD(0x90), adr+(2*ofs_factor));
2020        chip->state = FL_JEDEC_QUERY;
2021        status = cfi_read_query(map, adr+(2*ofs_factor));
2022        xip_enable(map, chip, 0);
2023        return status;
2024}
2025
2026#ifdef DEBUG_LOCK_BITS
2027static int __xipram do_printlockstatus_oneblock(struct map_info *map,
2028                                                struct flchip *chip,
2029                                                unsigned long adr,
2030                                                int len, void *thunk)
2031{
2032        printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
2033               adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
2034        return 0;
2035}
2036#endif
2037
2038#define DO_XXLOCK_ONEBLOCK_LOCK         ((void *) 1)
2039#define DO_XXLOCK_ONEBLOCK_UNLOCK       ((void *) 2)
2040
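/*
 * Lock or unlock a single block: Lock Setup (0x60) followed by Set Lock
 * Bit (0x01) or Clear Lock Bits (0xd0), then wait for completion.
 */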
2041static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
2042                                       unsigned long adr, int len, void *thunk)
2043{
2044        struct cfi_private *cfi = map->fldrv_priv;
2045        struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2046        int mdelay;
2047        int ret;
2048
2049        adr += chip->start;
2050
2051        mutex_lock(&chip->mutex);
2052        ret = get_chip(map, chip, adr, FL_LOCKING);
2053        if (ret) {
2054                mutex_unlock(&chip->mutex);
2055                return ret;
2056        }
2057
2058        ENABLE_VPP(map);
2059        xip_disable(map, chip, adr);
2060
2061        map_write(map, CMD(0x60), adr);
2062        if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
2063                map_write(map, CMD(0x01), adr);
2064                chip->state = FL_LOCKING;
2065        } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
2066                map_write(map, CMD(0xD0), adr);
2067                chip->state = FL_UNLOCKING;
2068        } else
2069                BUG();
2070
2071        /*
2072         * If Instant Individual Block Locking supported then no need
2073         * to delay.
2074         */
2075        /*
2076         * Unlocking may take up to 1.4 seconds on some Intel flashes. So
2077         * lets use a max of 1.5 seconds (1500ms) as timeout.
2078         *
2079         * See "Clear Block Lock-Bits Time" on page 40 in
2080         * "3 Volt Intel StrataFlash Memory" 28F128J3,28F640J3,28F320J3 manual
2081         * from February 2003
2082         */
2083        mdelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1500 : 0;
2084
2085        ret = WAIT_TIMEOUT(map, chip, adr, mdelay, mdelay * 1000);
2086        if (ret) {
2087                map_write(map, CMD(0x70), adr);
2088                chip->state = FL_STATUS;
2089                xip_enable(map, chip, adr);
2090                printk(KERN_ERR "%s: block lock/unlock error: (status timeout)\n", map->name);
2091                goto out;
2092        }
2093
2094        xip_enable(map, chip, adr);
2095 out:   DISABLE_VPP(map);
2096        put_chip(map, chip, adr);
2097        mutex_unlock(&chip->mutex);
2098        return ret;
2099}
2100
2101static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2102{
2103        int ret;
2104
2105#ifdef DEBUG_LOCK_BITS
2106        printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08llx\n",
2107               __func__, ofs, (unsigned long long)len);
2108        cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2109                ofs, len, NULL);
2110#endif
2111
2112        ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2113                ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
2114
2115#ifdef DEBUG_LOCK_BITS
2116        printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2117               __func__, ret);
2118        cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2119                ofs, len, NULL);
2120#endif
2121
2122        return ret;
2123}
2124
2125static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2126{
2127        int ret;
2128
2129#ifdef DEBUG_LOCK_BITS
2130        printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08llx\n",
2131               __func__, ofs, (unsigned long long)len);
2132        cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2133                ofs, len, NULL);
2134#endif
2135
2136        ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2137                                        ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
2138
2139#ifdef DEBUG_LOCK_BITS
2140        printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2141               __func__, ret);
2142        cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2143                ofs, len, NULL);
2144#endif
2145
2146        return ret;
2147}
2148
2149static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
2150                                  uint64_t len)
2151{
2152        return cfi_varsize_frob(mtd, do_getlockstatus_oneblock,
2153                                ofs, len, NULL) ? 1 : 0;
2154}
2155
2156#ifdef CONFIG_MTD_OTP
2157
2158typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
2159                        u_long data_offset, u_char *buf, u_int size,
2160                        u_long prot_offset, u_int groupno, u_int groupsize);
2161
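/*
 * Read the protection (OTP) registers: they are only visible in
 * identifier/query mode (0x90), so cached data covering the window is
 * invalidated both before and after the copy.
 */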
2162static int __xipram
2163do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
2164            u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2165{
2166        struct cfi_private *cfi = map->fldrv_priv;
2167        int ret;
2168
2169        mutex_lock(&chip->mutex);
2170        ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
2171        if (ret) {
2172                mutex_unlock(&chip->mutex);
2173                return ret;
2174        }
2175
2176        /* let's ensure we're not reading back cached data from array mode */
2177        INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2178
2179        xip_disable(map, chip, chip->start);
2180        if (chip->state != FL_JEDEC_QUERY) {
2181                map_write(map, CMD(0x90), chip->start);
2182                chip->state = FL_JEDEC_QUERY;
2183        }
2184        map_copy_from(map, buf, chip->start + offset, size);
2185        xip_enable(map, chip, chip->start);
2186
2187        /* then ensure we don't keep OTP data in the cache */
2188        INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2189
2190        put_chip(map, chip, chip->start);
2191        mutex_unlock(&chip->mutex);
2192        return 0;
2193}
2194
2195static int
2196do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
2197             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2198{
2199        int ret;
2200
2201        while (size) {
2202                unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
2203                int gap = offset - bus_ofs;
2204                int n = min_t(int, size, map_bankwidth(map)-gap);
2205                map_word datum = map_word_ff(map);
2206
2207                datum = map_word_load_partial(map, datum, buf, gap, n);
2208                ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
2209                if (ret)
2210                        return ret;
2211
2212                offset += n;
2213                buf += n;
2214                size -= n;
2215        }
2216
2217        return 0;
2218}
2219
2220static int
2221do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
2222            u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2223{
2224        struct cfi_private *cfi = map->fldrv_priv;
2225        map_word datum;
2226
2227        /* make sure area matches group boundaries */
2228        if (size != grpsz)
2229                return -EXDEV;
2230
2231        datum = map_word_ff(map);
2232        datum = map_word_clr(map, datum, CMD(1 << grpno));
2233        return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
2234}
2235
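/*
 * Walk the factory or user OTP register groups described by the Intel
 * extended query table, applying 'action' to the requested range.  With
 * a NULL action, 'buf' is instead filled with one struct otp_info record
 * per protection group.
 */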
2236static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2237                                 size_t *retlen, u_char *buf,
2238                                 otp_op_t action, int user_regs)
2239{
2240        struct map_info *map = mtd->priv;
2241        struct cfi_private *cfi = map->fldrv_priv;
2242        struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2243        struct flchip *chip;
2244        struct cfi_intelext_otpinfo *otp;
2245        u_long devsize, reg_prot_offset, data_offset;
2246        u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
2247        u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
2248        int ret;
2249
2250        *retlen = 0;
2251
2252        /* Check that we actually have some OTP registers */
2253        if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
2254                return -ENODATA;
2255
2256        /* we need real chips here, not virtual ones */
2257        devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
2258        chip_step = devsize >> cfi->chipshift;
2259        chip_num = 0;
2260
2261        /* Some chips have OTP located in the _top_ partition only.
2262           For example: Intel 28F256L18T (T means top-parameter device) */
2263        if (cfi->mfr == CFI_MFR_INTEL) {
2264                switch (cfi->id) {
2265                case 0x880b:
2266                case 0x880c:
2267                case 0x880d:
2268                        chip_num = chip_step - 1;
2269                }
2270        }
2271
2272        for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
2273                chip = &cfi->chips[chip_num];
2274                otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
2275
2276                /* first OTP region */
2277                field = 0;
2278                reg_prot_offset = extp->ProtRegAddr;
2279                reg_fact_groups = 1;
2280                reg_fact_size = 1 << extp->FactProtRegSize;
2281                reg_user_groups = 1;
2282                reg_user_size = 1 << extp->UserProtRegSize;
2283
2284                while (len > 0) {
2285                        /* flash geometry fixup */
2286                        data_offset = reg_prot_offset + 1;
2287                        data_offset *= cfi->interleave * cfi->device_type;
2288                        reg_prot_offset *= cfi->interleave * cfi->device_type;
2289                        reg_fact_size *= cfi->interleave;
2290                        reg_user_size *= cfi->interleave;
2291
2292                        if (user_regs) {
2293                                groups = reg_user_groups;
2294                                groupsize = reg_user_size;
2295                                /* skip over factory reg area */
2296                                groupno = reg_fact_groups;
2297                                data_offset += reg_fact_groups * reg_fact_size;
2298                        } else {
2299                                groups = reg_fact_groups;
2300                                groupsize = reg_fact_size;
2301                                groupno = 0;
2302                        }
2303
2304                        while (len > 0 && groups > 0) {
2305                                if (!action) {
2306                                        /*
2307                                         * Special case: if action is NULL
2308                                         * we fill buf with otp_info records.
2309                                         */
2310                                        struct otp_info *otpinfo;
2311                                        map_word lockword;
2312                                        /* len is unsigned: test before subtracting to avoid wrap */
2313                                        if (len <= sizeof(struct otp_info))
2314                                                return -ENOSPC;
2315                                        len -= sizeof(struct otp_info);
2315                                        ret = do_otp_read(map, chip,
2316                                                          reg_prot_offset,
2317                                                          (u_char *)&lockword,
2318                                                          map_bankwidth(map),
2319                                                          0, 0,  0);
2320                                        if (ret)
2321                                                return ret;
2322                                        otpinfo = (struct otp_info *)buf;
2323                                        otpinfo->start = from;
2324                                        otpinfo->length = groupsize;
2325                                        otpinfo->locked =
2326                                           !map_word_bitsset(map, lockword,
2327                                                             CMD(1 << groupno));
2328                                        from += groupsize;
2329                                        buf += sizeof(*otpinfo);
2330                                        *retlen += sizeof(*otpinfo);
2331                                } else if (from >= groupsize) {
2332                                        from -= groupsize;
2333                                        data_offset += groupsize;
2334                                } else {
2335                                        int size = groupsize;
2336                                        data_offset += from;
2337                                        size -= from;
2338                                        from = 0;
2339                                        if (size > len)
2340                                                size = len;
2341                                        ret = action(map, chip, data_offset,
2342                                                     buf, size, reg_prot_offset,
2343                                                     groupno, groupsize);
2344                                        if (ret < 0)
2345                                                return ret;
2346                                        buf += size;
2347                                        len -= size;
2348                                        *retlen += size;
2349                                        data_offset += size;
2350                                }
2351                                groupno++;
2352                                groups--;
2353                        }
2354
2355                        /* next OTP region */
2356                        if (++field == extp->NumProtectionFields)
2357                                break;
2358                        reg_prot_offset = otp->ProtRegAddr;
2359                        reg_fact_groups = otp->FactGroups;
2360                        reg_fact_size = 1 << otp->FactProtRegSize;
2361                        reg_user_groups = otp->UserGroups;
2362                        reg_user_size = 1 << otp->UserProtRegSize;
2363                        otp++;
2364                }
2365        }
2366
2367        return 0;
2368}
2369
2370static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
2371                                           size_t len, size_t *retlen,
2372                                            u_char *buf)
2373{
2374        return cfi_intelext_otp_walk(mtd, from, len, retlen,
2375                                     buf, do_otp_read, 0);
2376}
2377
2378static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
2379                                           size_t len, size_t *retlen,
2380                                            u_char *buf)
2381{
2382        return cfi_intelext_otp_walk(mtd, from, len, retlen,
2383                                     buf, do_otp_read, 1);
2384}
2385
2386static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
2387                                            size_t len, size_t *retlen,
2388                                             u_char *buf)
2389{
2390        return cfi_intelext_otp_walk(mtd, from, len, retlen,
2391                                     buf, do_otp_write, 1);
2392}
2393
2394static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
2395                                           loff_t from, size_t len)
2396{
2397        size_t retlen;
2398        return cfi_intelext_otp_walk(mtd, from, len, &retlen,
2399                                     NULL, do_otp_lock, 1);
2400}
2401
2402static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
2403                                           struct otp_info *buf, size_t len)
2404{
2405        size_t retlen;
2406        int ret;
2407
2408        ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
2409        return ret ? : retlen;
2410}
2411
2412static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
2413                                           struct otp_info *buf, size_t len)
2414{
2415        size_t retlen;
2416        int ret;
2417
2418        ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
2419        return ret ? : retlen;
2420}
2421
2422#endif
2423
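/*
 * Record each block's lock bit in the per-region lockmaps so the state
 * can be re-applied after a suspend/resume cycle.
 */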
2424static void cfi_intelext_save_locks(struct mtd_info *mtd)
2425{
2426        struct mtd_erase_region_info *region;
2427        int block, status, i;
2428        unsigned long adr;
2429        size_t len;
2430
2431        for (i = 0; i < mtd->numeraseregions; i++) {
2432                region = &mtd->eraseregions[i];
2433                if (!region->lockmap)
2434                        continue;
2435
2436                for (block = 0; block < region->numblocks; block++){
2437                        len = region->erasesize;
2438                        adr = region->offset + block * len;
2439
2440                        status = cfi_varsize_frob(mtd,
2441                                        do_getlockstatus_oneblock, adr, len, NULL);
2442                        if (status)
2443                                set_bit(block, region->lockmap);
2444                        else
2445                                clear_bit(block, region->lockmap);
2446                }
2447        }
2448}
2449
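/*
 * Suspend: fail with -EAGAIN while any chip still has an operation
 * pending; otherwise put every chip into array mode and mark it
 * FL_PM_SUSPENDED.  On parts that lock their blocks at power-up the
 * current lock state is saved first (see cfi_intelext_save_locks()).
 */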
2450static int cfi_intelext_suspend(struct mtd_info *mtd)
2451{
2452        struct map_info *map = mtd->priv;
2453        struct cfi_private *cfi = map->fldrv_priv;
2454        struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2455        int i;
2456        struct flchip *chip;
2457        int ret = 0;
2458
2459        if ((mtd->flags & MTD_POWERUP_LOCK)
2460            && extp && (extp->FeatureSupport & (1 << 5)))
2461                cfi_intelext_save_locks(mtd);
2462
2463        for (i=0; !ret && i<cfi->numchips; i++) {
2464                chip = &cfi->chips[i];
2465
2466                mutex_lock(&chip->mutex);
2467
2468                switch (chip->state) {
2469                case FL_READY:
2470                case FL_STATUS:
2471                case FL_CFI_QUERY:
2472                case FL_JEDEC_QUERY:
2473                        if (chip->oldstate == FL_READY) {
2474                                /* place the chip in a known state before suspend */
2475                                map_write(map, CMD(0xFF), cfi->chips[i].start);
2476                                chip->oldstate = chip->state;
2477                                chip->state = FL_PM_SUSPENDED;
2478                                /* No need to wake_up() on this state change -
2479                                 * as the whole point is that nobody can do anything
2480                                 * with the chip now anyway.
2481                                 */
2482                        } else {
2483                                /* There seems to be an operation pending. We must wait for it. */
2484                                printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
2485                                ret = -EAGAIN;
2486                        }
2487                        break;
2488                default:
2489                        /* Should we actually wait? Once upon a time these routines weren't
2490                           allowed to. Or should we return -EAGAIN, because the upper layers
2491                           ought to have already shut down anything which was using the device
2492                           anyway? The latter for now. */
2493                        printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
2494                        ret = -EAGAIN;  /* fall through */
2495                case FL_PM_SUSPENDED:
2496                        break;
2497                }
2498                mutex_unlock(&chip->mutex);
2499        }
2500
2501        /* Unlock the chips again */
2502
2503        if (ret) {
2504                for (i--; i >=0; i--) {
2505                        chip = &cfi->chips[i];
2506
2507                        mutex_lock(&chip->mutex);
2508
2509                        if (chip->state == FL_PM_SUSPENDED) {
2510                                /* No need to force it into a known state here,
2511                                   because we're returning failure, and it didn't
2512                                   get power cycled */
2513                                chip->state = chip->oldstate;
2514                                chip->oldstate = FL_READY;
2515                                wake_up(&chip->wq);
2516                        }
2517                        mutex_unlock(&chip->mutex);
2518                }
2519        }
2520
2521        return ret;
2522}
2523
2524static void cfi_intelext_restore_locks(struct mtd_info *mtd)
2525{
2526        struct mtd_erase_region_info *region;
2527        int block, i;
2528        unsigned long adr;
2529        size_t len;
2530
2531        for (i = 0; i < mtd->numeraseregions; i++) {
2532                region = &mtd->eraseregions[i];
2533                if (!region->lockmap)
2534                        continue;
2535
2536                for_each_clear_bit(block, region->lockmap, region->numblocks) {
2537                        len = region->erasesize;
2538                        adr = region->offset + block * len;
2539                        cfi_intelext_unlock(mtd, adr, len);
2540                }
2541        }
2542}
2543
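/*
 * Resume: the chips may have been power cycled, so force each suspended
 * chip back into array mode, then re-apply the lock state saved at
 * suspend time on parts that lock their blocks at power-up.
 */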
2544static void cfi_intelext_resume(struct mtd_info *mtd)
2545{
2546        struct map_info *map = mtd->priv;
2547        struct cfi_private *cfi = map->fldrv_priv;
2548        struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2549        int i;
2550        struct flchip *chip;
2551
2552        for (i=0; i<cfi->numchips; i++) {
2553
2554                chip = &cfi->chips[i];
2555
2556                mutex_lock(&chip->mutex);
2557
2558                /* Go to known state. Chip may have been power cycled */
2559                if (chip->state == FL_PM_SUSPENDED) {
2560                        map_write(map, CMD(0xFF), cfi->chips[i].start);
2561                        chip->oldstate = chip->state = FL_READY;
2562                        wake_up(&chip->wq);
2563                }
2564
2565                mutex_unlock(&chip->mutex);
2566        }
2567
2568        if ((mtd->flags & MTD_POWERUP_LOCK)
2569            && extp && (extp->FeatureSupport & (1 << 5)))
2570                cfi_intelext_restore_locks(mtd);
2571}
2572
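/*
 * Put every chip back into array mode so a bootloader stored in flash
 * stays readable across a soft reboot; called from the reboot notifier
 * and from cfi_intelext_destroy().
 */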
2573static int cfi_intelext_reset(struct mtd_info *mtd)
2574{
2575        struct map_info *map = mtd->priv;
2576        struct cfi_private *cfi = map->fldrv_priv;
2577        int i, ret;
2578
2579        for (i=0; i < cfi->numchips; i++) {
2580                struct flchip *chip = &cfi->chips[i];
2581
2582                /* force the completion of any ongoing operation
2583                   and switch to array mode so any bootloader in
2584                   flash is accessible for soft reboot. */
2585                mutex_lock(&chip->mutex);
2586                ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
2587                if (!ret) {
2588                        map_write(map, CMD(0xff), chip->start);
2589                        chip->state = FL_SHUTDOWN;
2590                        put_chip(map, chip, chip->start);
2591                }
2592                mutex_unlock(&chip->mutex);
2593        }
2594
2595        return 0;
2596}
2597
2598static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
2599                               void *v)
2600{
2601        struct mtd_info *mtd;
2602
2603        mtd = container_of(nb, struct mtd_info, reboot_notifier);
2604        cfi_intelext_reset(mtd);
2605        return NOTIFY_DONE;
2606}
2607
2608static void cfi_intelext_destroy(struct mtd_info *mtd)
2609{
2610        struct map_info *map = mtd->priv;
2611        struct cfi_private *cfi = map->fldrv_priv;
2612        struct mtd_erase_region_info *region;
2613        int i;
2614        cfi_intelext_reset(mtd);
2615        unregister_reboot_notifier(&mtd->reboot_notifier);
2616        kfree(cfi->cmdset_priv);
2617        kfree(cfi->cfiq);
2618        kfree(cfi->chips[0].priv);
2619        kfree(cfi);
2620        for (i = 0; i < mtd->numeraseregions; i++) {
2621                region = &mtd->eraseregions[i];
2622                kfree(region->lockmap);
2624        }
2625        kfree(mtd->eraseregions);
2626}
2627
2628MODULE_LICENSE("GPL");
2629MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
2630MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
2631MODULE_ALIAS("cfi_cmdset_0003");
2632MODULE_ALIAS("cfi_cmdset_0200");
2633