linux/drivers/mtd/chips/cfi_cmdset_0001.c
/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 *
 * 10/10/2000   Nicolas Pitre <nico@fluxnic.net>
 *      - completely revamped method functions so they are aware and
 *        independent of the flash geometry (buswidth, interleave, etc.)
 *      - scalability vs code size is completely set at compile-time
 *        (see include/linux/mtd/cfi.h for selection)
 *      - optimized write buffer method
 * 02/05/2002   Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 *      - reworked lock/unlock/erase support for var size flash
 * 21/03/2007   Rodolfo Giometti <giometti@linux.it>
 *      - auto unlock sectors on resume for auto locking flash on power up
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/bitmap.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

/* Debugging: set to 1 to disable the buffer write method and force word writes */
#define FORCE_WORD_WRITE 0

/* Intel chips */
#define I82802AB        0x00ad
#define I82802AC        0x00ac
#define PF38F4476       0x881c
/* STMicroelectronics chips */
#define M50LPW080       0x002F
#define M50FLW080A      0x0080
#define M50FLW080B      0x0081
/* Atmel chips */
#define AT49BV640D      0x02de
#define AT49BV640DT     0x02db
/* Sharp chips */
#define LH28F640BFHE_PTTL90     0x00b0
#define LH28F640BFHE_PBTL90     0x00b1
#define LH28F640BFHE_PTTL70A    0x00b2
#define LH28F640BFHE_PBTL70A    0x00b3

static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
                                  uint64_t len);
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info(struct mtd_info *, size_t,
                                           size_t *, struct otp_info *);
static int cfi_intelext_get_user_prot_info(struct mtd_info *, size_t,
                                           size_t *, struct otp_info *);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
                     size_t *retlen, void **virt, resource_size_t *phys);
static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len);

static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"



/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
        .probe          = NULL, /* Not usable directly */
        .destroy        = cfi_intelext_destroy,
        .name           = "cfi_cmdset_0001",
        .module         = THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
        int i;
        printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
        printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
        printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
        printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
        printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
        printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
        printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
        printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
        printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
        printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
        printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
        printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
        printk("     - Extended Flash Array:    %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
        for (i=11; i<32; i++) {
                if (extp->FeatureSupport & (1<<i))
                        printk("     - Unknown Bit %X:      supported\n", i);
        }

        printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
        printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
        for (i=1; i<8; i++) {
                if (extp->SuspendCmdSupport & (1<<i))
                        printk("     - Unknown Bit %X:               supported\n", i);
        }

        printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
        printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
        printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
        for (i=2; i<4; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }
        printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
        printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
        for (i=6; i<16; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }

        printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
               extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
        if (extp->VppOptimal)
                printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
                       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

/* Atmel chips don't use the same PRI format as Intel chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;
        struct cfi_pri_atmel atmel_pri;
        uint32_t features = 0;

        /* Reverse byteswapping */
        extp->FeatureSupport = cpu_to_le32(extp->FeatureSupport);
        extp->BlkStatusRegMask = cpu_to_le16(extp->BlkStatusRegMask);
        extp->ProtRegAddr = cpu_to_le16(extp->ProtRegAddr);

        memcpy(&atmel_pri, extp, sizeof(atmel_pri));
        memset((char *)extp + 5, 0, sizeof(*extp) - 5);

        printk(KERN_ERR "atmel Features: %02x\n", atmel_pri.Features);

        if (atmel_pri.Features & 0x01) /* chip erase supported */
                features |= (1<<0);
        if (atmel_pri.Features & 0x02) /* erase suspend supported */
                features |= (1<<1);
        if (atmel_pri.Features & 0x04) /* program suspend supported */
                features |= (1<<2);
        if (atmel_pri.Features & 0x08) /* simultaneous operations supported */
                features |= (1<<9);
        if (atmel_pri.Features & 0x20) /* page mode read supported */
                features |= (1<<7);
        if (atmel_pri.Features & 0x40) /* queued erase supported */
                features |= (1<<4);
        if (atmel_pri.Features & 0x80) /* Protection bits supported */
                features |= (1<<6);

        extp->FeatureSupport = features;

        /* burst write mode not supported */
        cfi->cfiq->BufWriteTimeoutTyp = 0;
        cfi->cfiq->BufWriteTimeoutMax = 0;
}

static void fixup_at49bv640dx_lock(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        cfip->FeatureSupport |= (1 << 5);
        mtd->flags |= MTD_POWERUP_LOCK;
}

#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
                            "erase on write disabled.\n");
        extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        if (cfip && (cfip->FeatureSupport&4)) {
                cfip->FeatureSupport &= ~4;
                printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
        }
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        cfi->cfiq->BufWriteTimeoutTyp = 0;      /* Not supported */
        cfi->cfiq->BufWriteTimeoutMax = 0;      /* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        /* Note this is done after the region info is endian swapped */
        cfi->cfiq->EraseRegionInfo[1] =
                (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}

static int is_LH28F640BF(struct cfi_private *cfi)
{
        /* Sharp LH28F640BF Family */
        if (cfi->mfr == CFI_MFR_SHARP && (
            cfi->id == LH28F640BFHE_PTTL90 || cfi->id == LH28F640BFHE_PBTL90 ||
            cfi->id == LH28F640BFHE_PTTL70A || cfi->id == LH28F640BFHE_PBTL70A))
                return 1;
        return 0;
}

static void fixup_LH28F640BF(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        /* Reset the Partition Configuration Register on LH28F640BF
         * to a single partition (PCR = 0x000): PCR is embedded into A0-A15. */
        if (is_LH28F640BF(cfi)) {
                printk(KERN_INFO "Reset Partition Config. Register: 1 Partition of 4 planes\n");
                map_write(map, CMD(0x60), 0);
                map_write(map, CMD(0x04), 0);

                /* We have set a single partition, thus
                 * Simultaneous Operations are not allowed */
                printk(KERN_INFO "cfi_cmdset_0001: Simultaneous Operations disabled\n");
                extp->FeatureSupport &= ~512;
        }
}

static void fixup_use_point(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        if (!mtd->_point && map_is_linear(map)) {
                mtd->_point   = cfi_intelext_point;
                mtd->_unpoint = cfi_intelext_unpoint;
        }
}

static void fixup_use_write_buffers(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        if (cfi->cfiq->BufWriteTimeoutTyp) {
                printk(KERN_INFO "Using buffer write method\n" );
                mtd->_write = cfi_intelext_write_buffers;
                mtd->_writev = cfi_intelext_writev;
        }
}

/*
 * Some chips power-up with all sectors locked by default.
 */
static void fixup_unlock_powerup_lock(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        if (cfip->FeatureSupport&32) {
                printk(KERN_INFO "Using auto-unlock on power-up/resume\n" );
                mtd->flags |= MTD_POWERUP_LOCK;
        }
}

static struct cfi_fixup cfi_fixup_table[] = {
        { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
        { CFI_MFR_ATMEL, AT49BV640D, fixup_at49bv640dx_lock },
        { CFI_MFR_ATMEL, AT49BV640DT, fixup_at49bv640dx_lock },
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend },
#endif
#if !FORCE_WORD_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
#endif
        { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct },
        { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb },
        { CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock },
        { CFI_MFR_SHARP, CFI_ID_ANY, fixup_unlock_powerup_lock },
        { CFI_MFR_SHARP, CFI_ID_ANY, fixup_LH28F640BF },
        { 0, 0, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
        { CFI_MFR_INTEL, I82802AB,   fixup_use_fwh_lock },
        { CFI_MFR_INTEL, I82802AC,   fixup_use_fwh_lock },
        { CFI_MFR_ST,    M50LPW080,  fixup_use_fwh_lock },
        { CFI_MFR_ST,    M50FLW080A, fixup_use_fwh_lock },
        { CFI_MFR_ST,    M50FLW080B, fixup_use_fwh_lock },
        { 0, 0, NULL }
};
static struct cfi_fixup fixup_table[] = {
        /* The CFI vendor IDs and the JEDEC vendor IDs appear
         * to be common.  It is likely the device IDs are as
         * well.  This table picks all cases where we know
         * that is the case.
         */
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point },
        { 0, 0, NULL }
};
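
/*
 * For reference: cfi_fixup() (implemented in cfi_util.c) applies the
 * tables above by walking the entries and calling each hook whose
 * manufacturer and device IDs match the probed chip, with
 * CFI_MFR_ANY/CFI_ID_ANY acting as wildcards.  A minimal sketch of that
 * matching loop, paraphrased rather than quoted from cfi_util.c:
 *
 *      void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup *fixups)
 *      {
 *              struct map_info *map = mtd->priv;
 *              struct cfi_private *cfi = map->fldrv_priv;
 *              struct cfi_fixup *f;
 *
 *              for (f = fixups; f->fixup; f++) {
 *                      if ((f->mfr == CFI_MFR_ANY || f->mfr == cfi->mfr) &&
 *                          (f->id == CFI_ID_ANY || f->id == cfi->id))
 *                              f->fixup(mtd);
 *              }
 *      }
 */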

static void cfi_fixup_major_minor(struct cfi_private *cfi,
                                                struct cfi_pri_intelext *extp)
{
        if (cfi->mfr == CFI_MFR_INTEL &&
                        cfi->id == PF38F4476 && extp->MinorVersion == '3')
                extp->MinorVersion = '1';
}

static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp;
        unsigned int extra_size = 0;
        unsigned int extp_size = sizeof(*extp);

 again:
        extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
        if (!extp)
                return NULL;

        cfi_fixup_major_minor(cfi, extp);

        if (extp->MajorVersion != '1' ||
            (extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
                printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
                       "version %c.%c.\n",  extp->MajorVersion,
                       extp->MinorVersion);
                kfree(extp);
                return NULL;
        }

        /* Do some byteswapping if necessary */
        extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
        extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
        extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

        if (extp->MinorVersion >= '0') {
                extra_size = 0;

                /* Protection Register info */
                extra_size += (extp->NumProtectionFields - 1) *
                              sizeof(struct cfi_intelext_otpinfo);
        }

        if (extp->MinorVersion >= '1') {
                /* Burst Read info */
                extra_size += 2;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                extra_size += extp->extra[extra_size - 1];
        }

        if (extp->MinorVersion >= '3') {
                int nb_parts, i;

                /* Number of hardware-partitions */
                extra_size += 1;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                nb_parts = extp->extra[extra_size - 1];

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        extra_size += 2;

                for (i = 0; i < nb_parts; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
                        extra_size += sizeof(*rinfo);
                        if (extp_size < sizeof(*extp) + extra_size)
                                goto need_more;
                        rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
                        extra_size += (rinfo->NumBlockTypes - 1)
                                      * sizeof(struct cfi_intelext_blockinfo);
                }

                if (extp->MinorVersion >= '4')
                        extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

                if (extp_size < sizeof(*extp) + extra_size) {
                        need_more:
                        extp_size = sizeof(*extp) + extra_size;
                        kfree(extp);
                        if (extp_size > 4096) {
                                printk(KERN_ERR
                                        "%s: cfi_pri_intelext is too fat\n",
                                        __func__);
                                return NULL;
                        }
                        goto again;
                }
        }

        return extp;
}
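
/*
 * Note on the function above: the extended query table has a
 * variable-length tail, so the first cfi_read_pri() fetches only
 * sizeof(*extp) bytes.  As the version-dependent fields are parsed,
 * extra_size accumulates the additional bytes required; whenever the
 * buffer turns out to be too small we jump to need_more, grow extp_size
 * and re-read the whole structure from the chip, giving up beyond
 * 4096 bytes.
 */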

struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct mtd_info *mtd;
        int i;

        mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
        if (!mtd)
                return NULL;
        mtd->priv = map;
        mtd->type = MTD_NORFLASH;

        /* Fill in the default mtd operations */
        mtd->_erase   = cfi_intelext_erase_varsize;
        mtd->_read    = cfi_intelext_read;
        mtd->_write   = cfi_intelext_write_words;
        mtd->_sync    = cfi_intelext_sync;
        mtd->_lock    = cfi_intelext_lock;
        mtd->_unlock  = cfi_intelext_unlock;
        mtd->_is_locked = cfi_intelext_is_locked;
        mtd->_suspend = cfi_intelext_suspend;
        mtd->_resume  = cfi_intelext_resume;
        mtd->flags   = MTD_CAP_NORFLASH;
        mtd->name    = map->name;
        mtd->writesize = 1;
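        /*
         * Note: writebufsize is in bytes.  MaxBufWriteSize is the log2 of
         * the per-chip write buffer size, scaled up by the interleave;
         * e.g. (values illustrative) two interleaved chips with
         * MaxBufWriteSize = 5 give 2 << 5 = 64 bytes.
         */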
        mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;

        mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

        if (cfi->cfi_mode == CFI_MODE_CFI) {
                /*
                 * It's a real CFI chip, not one for which the probe
                 * routine faked a CFI structure. So we read the feature
                 * table from it.
                 */
                __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
                struct cfi_pri_intelext *extp;

                extp = read_pri_intelext(map, adr);
                if (!extp) {
                        kfree(mtd);
                        return NULL;
                }

                /* Install our own private info structure */
                cfi->cmdset_priv = extp;

                cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
                /* Tell the user about it in lots of lovely detail */
                cfi_tell_features(extp);
#endif

                if(extp->SuspendCmdSupport & 1) {
                        printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
                }
        }
        else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
                /* Apply jedec specific fixups */
                cfi_fixup(mtd, jedec_fixup_table);
        }
        /* Apply generic fixups */
        cfi_fixup(mtd, fixup_table);

        for (i=0; i< cfi->numchips; i++) {
                if (cfi->cfiq->WordWriteTimeoutTyp)
                        cfi->chips[i].word_write_time =
                                1<<cfi->cfiq->WordWriteTimeoutTyp;
                else
                        cfi->chips[i].word_write_time = 50000;

                if (cfi->cfiq->BufWriteTimeoutTyp)
                        cfi->chips[i].buffer_write_time =
                                1<<cfi->cfiq->BufWriteTimeoutTyp;
                /* No default; if it isn't specified, we won't use it */

                if (cfi->cfiq->BlockEraseTimeoutTyp)
                        cfi->chips[i].erase_time =
                                1000<<cfi->cfiq->BlockEraseTimeoutTyp;
                else
                        cfi->chips[i].erase_time = 2000000;

                if (cfi->cfiq->WordWriteTimeoutTyp &&
                    cfi->cfiq->WordWriteTimeoutMax)
                        cfi->chips[i].word_write_time_max =
                                1<<(cfi->cfiq->WordWriteTimeoutTyp +
                                    cfi->cfiq->WordWriteTimeoutMax);
                else
                        cfi->chips[i].word_write_time_max = 50000 * 8;

                if (cfi->cfiq->BufWriteTimeoutTyp &&
                    cfi->cfiq->BufWriteTimeoutMax)
                        cfi->chips[i].buffer_write_time_max =
                                1<<(cfi->cfiq->BufWriteTimeoutTyp +
                                    cfi->cfiq->BufWriteTimeoutMax);

                if (cfi->cfiq->BlockEraseTimeoutTyp &&
                    cfi->cfiq->BlockEraseTimeoutMax)
                        cfi->chips[i].erase_time_max =
                                1000<<(cfi->cfiq->BlockEraseTimeoutTyp +
                                       cfi->cfiq->BlockEraseTimeoutMax);
                else
                        cfi->chips[i].erase_time_max = 2000000 * 8;

                cfi->chips[i].ref_point_counter = 0;
                init_waitqueue_head(&(cfi->chips[i].wq));
        }

        map->fldrv = &cfi_intelext_chipdrv;

        return cfi_intelext_setup(mtd);
}
struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
EXPORT_SYMBOL_GPL(cfi_cmdset_0200);
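
/*
 * For orientation: map drivers do not call cfi_cmdset_0001() directly;
 * they run the CFI probe, which dispatches here when the chip reports
 * primary vendor command set 0x0001 (or 0x0003/0x0200 via the aliases
 * above).  A minimal sketch of such a caller -- the map field values
 * below are hypothetical, not taken from any real board:
 *
 *      static struct map_info my_map = {
 *              .name      = "my-nor",
 *              .phys      = 0x10000000,
 *              .size      = 0x800000,
 *              .bankwidth = 2,
 *      };
 *
 *      static int __init my_map_init(void)
 *      {
 *              struct mtd_info *mtd;
 *
 *              my_map.virt = ioremap(my_map.phys, my_map.size);
 *              if (!my_map.virt)
 *                      return -EIO;
 *              simple_map_init(&my_map);
 *              mtd = do_map_probe("cfi_probe", &my_map);
 *              if (!mtd)
 *                      return -ENXIO;
 *              return mtd_device_register(mtd, NULL, 0);
 *      }
 */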

static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long offset = 0;
        int i,j;
        unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

        //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

        mtd->size = devsize * cfi->numchips;

        mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
        mtd->eraseregions = kzalloc(sizeof(struct mtd_erase_region_info)
                        * mtd->numeraseregions, GFP_KERNEL);
        if (!mtd->eraseregions)
                goto setup_err;

        for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
                unsigned long ernum, ersize;
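                /*
                 * CFI encodes each erase region as a 32-bit word: the low
                 * 16 bits hold (number of blocks - 1), the high 16 bits
                 * hold (block size / 256).  The shift-and-mask below is
                 * thus equivalent to (info >> 16) * 256; e.g. a value of
                 * 0x0200003f (illustrative) describes 64 blocks of 128 KiB.
                 */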
                ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
                ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

                if (mtd->erasesize < ersize) {
                        mtd->erasesize = ersize;
                }
                for (j=0; j<cfi->numchips; j++) {
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
                        if (!mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap)
                                goto setup_err;
                }
                offset += (ersize * ernum);
        }

        if (offset != devsize) {
                /* Argh */
                printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
                goto setup_err;
        }

        for (i=0; i<mtd->numeraseregions;i++){
                printk(KERN_DEBUG "erase region %d: offset=0x%llx,size=0x%x,blocks=%d\n",
                       i,(unsigned long long)mtd->eraseregions[i].offset,
                       mtd->eraseregions[i].erasesize,
                       mtd->eraseregions[i].numblocks);
        }

#ifdef CONFIG_MTD_OTP
        mtd->_read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
        mtd->_read_user_prot_reg = cfi_intelext_read_user_prot_reg;
        mtd->_write_user_prot_reg = cfi_intelext_write_user_prot_reg;
        mtd->_lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
        mtd->_get_fact_prot_info = cfi_intelext_get_fact_prot_info;
        mtd->_get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

        /* This function has the potential to distort the reality
           a bit and therefore should be called last. */
        if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
                goto setup_err;

        __module_get(THIS_MODULE);
        register_reboot_notifier(&mtd->reboot_notifier);
        return mtd;

 setup_err:
        if (mtd->eraseregions)
                for (i=0; i<cfi->cfiq->NumEraseRegions; i++)
                        for (j=0; j<cfi->numchips; j++)
                                kfree(mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap);
        kfree(mtd->eraseregions);
        kfree(mtd);
        kfree(cfi->cmdset_priv);
        return NULL;
}

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
                                        struct cfi_private **pcfi)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = *pcfi;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        /*
         * Probing of multi-partition flash chips.
         *
         * To support multiple partitions when available, we simply arrange
         * for each of them to have their own flchip structure even if they
         * are on the same physical chip.  This means completely recreating
         * a new cfi_private structure right here, which is a blatant code
         * layering violation, but this is still the least intrusive
         * arrangement at this point. This can be rearranged in the future
         * if someone feels motivated enough.  --nico
         */
        if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
            && extp->FeatureSupport & (1 << 9)) {
                struct cfi_private *newcfi;
                struct flchip *chip;
                struct flchip_shared *shared;
                int offs, numregions, numparts, partshift, numvirtchips, i, j;

                /* Protection Register info */
                offs = (extp->NumProtectionFields - 1) *
                       sizeof(struct cfi_intelext_otpinfo);

                /* Burst Read info */
                offs += extp->extra[offs+1]+2;

                /* Number of partition regions */
                numregions = extp->extra[offs];
                offs += 1;

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        offs += 2;

                /* Number of hardware partitions */
                numparts = 0;
                for (i = 0; i < numregions; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
                        numparts += rinfo->NumIdentPartitions;
                        offs += sizeof(*rinfo)
                                + (rinfo->NumBlockTypes - 1) *
                                  sizeof(struct cfi_intelext_blockinfo);
                }

                if (!numparts)
                        numparts = 1;

                /* Programming Region info */
                if (extp->MinorVersion >= '4') {
                        struct cfi_intelext_programming_regioninfo *prinfo;
                        prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
                        mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
                        mtd->flags &= ~MTD_BIT_WRITEABLE;
                        printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
                               map->name, mtd->writesize,
                               cfi->interleave * prinfo->ControlValid,
                               cfi->interleave * prinfo->ControlInvalid);
                }

                /*
                 * All functions below currently rely on all chips having
                 * the same geometry so we'll just assume that all hardware
                 * partitions are of the same size too.
                 */
                partshift = cfi->chipshift - __ffs(numparts);
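                /*
                 * Worked example (numbers illustrative): a 64 Mbit part has
                 * chipshift 23; with numparts = 4, __ffs(4) = 2, so
                 * partshift = 21 and each virtual chip spans 2 MiB.
                 */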

                if ((1 << partshift) < mtd->erasesize) {
                        printk( KERN_ERR
                                "%s: bad number of hw partitions (%d)\n",
                                __func__, numparts);
                        return -EINVAL;
                }

                numvirtchips = cfi->numchips * numparts;
                newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
                if (!newcfi)
                        return -ENOMEM;
                shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
                if (!shared) {
                        kfree(newcfi);
                        return -ENOMEM;
                }
                memcpy(newcfi, cfi, sizeof(struct cfi_private));
                newcfi->numchips = numvirtchips;
                newcfi->chipshift = partshift;

                chip = &newcfi->chips[0];
                for (i = 0; i < cfi->numchips; i++) {
                        shared[i].writing = shared[i].erasing = NULL;
                        mutex_init(&shared[i].lock);
                        for (j = 0; j < numparts; j++) {
                                *chip = cfi->chips[i];
                                chip->start += j << partshift;
                                chip->priv = &shared[i];
                                /* those should be reset too since
                                   they create memory references. */
                                init_waitqueue_head(&chip->wq);
                                mutex_init(&chip->mutex);
                                chip++;
                        }
                }

                printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
                                  "--> %d partitions of %d KiB\n",
                                  map->name, cfi->numchips, cfi->interleave,
                                  newcfi->numchips, 1<<(newcfi->chipshift-10));

                map->fldrv_priv = newcfi;
                *pcfi = newcfi;
                kfree(cfi);
        }

        return 0;
}

/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */
static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        DECLARE_WAITQUEUE(wait, current);
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
        unsigned long timeo = jiffies + HZ;

        /* Prevent setting state FL_SYNCING for chip in suspended state. */
        if (mode == FL_SYNCING && chip->oldstate != FL_READY)
                goto sleep;

        switch (chip->state) {

        case FL_STATUS:
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        /* At this point we're fine with write operations
                           in other partitions as they don't conflict. */
                        if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
                                break;

                        mutex_unlock(&chip->mutex);
                        cfi_udelay(1);
                        mutex_lock(&chip->mutex);
                        /* Someone else might have been playing with it. */
                        return -EAGAIN;
                }
                /* Fall through */
        case FL_READY:
        case FL_CFI_QUERY:
        case FL_JEDEC_QUERY:
                return 0;

        case FL_ERASING:
                if (!cfip ||
                    !(cfip->FeatureSupport & 2) ||
                    !(mode == FL_READY || mode == FL_POINT ||
                     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
                        goto sleep;


                /* Erase suspend */
                map_write(map, CMD(0xB0), adr);

                /* If the flash has finished erasing, then 'erase suspend'
                 * appears to make some (28F320) flash devices switch to
                 * 'read' mode.  Make sure that we switch to 'read status'
                 * mode so we get the right data. --rmk
                 */
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_ERASING;
                chip->state = FL_ERASE_SUSPENDING;
                chip->erase_suspended = 1;
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        if (time_after(jiffies, timeo)) {
                                /* Urgh. Resume and pretend we weren't here.
                                 * Make sure we're in 'read status' mode if it had finished */
                                put_chip(map, chip, adr);
                                printk(KERN_ERR "%s: Chip not ready after erase "
                                       "suspended: status = 0x%lx\n", map->name, status.x[0]);
                                return -EIO;
                        }

                        mutex_unlock(&chip->mutex);
                        cfi_udelay(1);
                        mutex_lock(&chip->mutex);
                        /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
                           So we can just loop here. */
                }
                chip->state = FL_STATUS;
                return 0;

        case FL_XIP_WHILE_ERASING:
                if (mode != FL_READY && mode != FL_POINT &&
                    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
                        goto sleep;
                chip->oldstate = chip->state;
                chip->state = FL_READY;
                return 0;

        case FL_SHUTDOWN:
                /* The machine is rebooting now, so no one can get the chip anymore */
                return -EIO;
        case FL_POINT:
                /* Only if there's no operation suspended... */
                if (mode == FL_READY && chip->oldstate == FL_READY)
                        return 0;
                /* Fall through */
        default:
        sleep:
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);
                mutex_unlock(&chip->mutex);
                schedule();
                remove_wait_queue(&chip->wq, &wait);
                mutex_lock(&chip->mutex);
                return -EAGAIN;
        }
}

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        int ret;
        DECLARE_WAITQUEUE(wait, current);

 retry:
        if (chip->priv &&
            (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE
            || mode == FL_SHUTDOWN) && chip->state != FL_SYNCING) {
                /*
                 * OK. We have possibility for contention on the write/erase
                 * operations which are global to the real chip and not per
                 * partition.  So let's fight it over in the partition which
                 * currently has authority on the operation.
                 *
                 * The rules are as follows:
                 *
                 * - any write operation must own shared->writing.
                 *
                 * - any erase operation must own _both_ shared->writing and
                 *   shared->erasing.
                 *
                 * - contention arbitration is handled in the owner's context.
                 *
                 * The 'shared' struct can be read and/or written only when
                 * its lock is taken.
                 */
                struct flchip_shared *shared = chip->priv;
                struct flchip *contender;
                mutex_lock(&shared->lock);
                contender = shared->writing;
                if (contender && contender != chip) {
                        /*
                         * The engine to perform desired operation on this
                         * partition is already in use by someone else.
                         * Let's fight over it in the context of the chip
                         * currently using it.  If it is possible to suspend,
                         * that other partition will do just that, otherwise
                         * it'll happily send us to sleep.  In any case, when
                         * get_chip returns success we're clear to go ahead.
                         */
                        ret = mutex_trylock(&contender->mutex);
                        mutex_unlock(&shared->lock);
                        if (!ret)
                                goto retry;
                        mutex_unlock(&chip->mutex);
                        ret = chip_ready(map, contender, contender->start, mode);
                        mutex_lock(&chip->mutex);

                        if (ret == -EAGAIN) {
                                mutex_unlock(&contender->mutex);
                                goto retry;
                        }
                        if (ret) {
                                mutex_unlock(&contender->mutex);
                                return ret;
                        }
                        mutex_lock(&shared->lock);

                        /* We should not own chip if it is already
                         * in FL_SYNCING state. Put contender and retry. */
                        if (chip->state == FL_SYNCING) {
                                put_chip(map, contender, contender->start);
                                mutex_unlock(&contender->mutex);
                                goto retry;
                        }
                        mutex_unlock(&contender->mutex);
                }

                /* Check if we already have suspended erase
                 * on this chip. Sleep. */
                if (mode == FL_ERASING && shared->erasing
                    && shared->erasing->oldstate == FL_ERASING) {
                        mutex_unlock(&shared->lock);
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        mutex_unlock(&chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        mutex_lock(&chip->mutex);
                        goto retry;
                }

                /* We now own it */
                shared->writing = chip;
                if (mode == FL_ERASING)
                        shared->erasing = chip;
                mutex_unlock(&shared->lock);
        }
        ret = chip_ready(map, chip, adr, mode);
        if (ret == -EAGAIN)
                goto retry;

        return ret;
}
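
/*
 * The calling convention used by the operation methods in this file,
 * shown schematically (a sketch, not a quote of any one call site):
 *
 *      mutex_lock(&chip->mutex);
 *      ret = get_chip(map, chip, adr, FL_WRITING);
 *      if (ret) {
 *              mutex_unlock(&chip->mutex);
 *              return ret;
 *      }
 *      ... issue commands, wait for completion ...
 *      put_chip(map, chip, adr);
 *      mutex_unlock(&chip->mutex);
 */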

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;

        if (chip->priv) {
                struct flchip_shared *shared = chip->priv;
                mutex_lock(&shared->lock);
                if (shared->writing == chip && chip->oldstate == FL_READY) {
                        /* We own the ability to write, but we're done */
                        shared->writing = shared->erasing;
                        if (shared->writing && shared->writing != chip) {
                                /* give ownership back to the chip we borrowed it from */
                                struct flchip *loaner = shared->writing;
                                mutex_lock(&loaner->mutex);
                                mutex_unlock(&shared->lock);
                                mutex_unlock(&chip->mutex);
                                put_chip(map, loaner, loaner->start);
                                mutex_lock(&chip->mutex);
                                mutex_unlock(&loaner->mutex);
                                wake_up(&chip->wq);
                                return;
                        }
                        shared->erasing = NULL;
                        shared->writing = NULL;
                } else if (shared->erasing == chip && shared->writing != chip) {
                        /*
                         * We own the ability to erase without the ability
                         * to write, which means the erase was suspended
                         * and some other partition is currently writing.
                         * Don't let the switch below mess things up since
                         * we don't have ownership to resume anything.
                         */
                        mutex_unlock(&shared->lock);
                        wake_up(&chip->wq);
                        return;
                }
                mutex_unlock(&shared->lock);
        }

        switch(chip->oldstate) {
        case FL_ERASING:
                /* What if one interleaved chip has finished and the
                   other hasn't? The old code would leave the finished
                   one in READY mode. That's bad, and caused -EROFS
                   errors to be returned from do_erase_oneblock because
                   that's the only bit it checked for at the time.
                   As the state machine appears to explicitly allow
                   sending the 0x70 (Read Status) command to an erasing
                   chip and expecting it to be ignored, that's what we
                   do. */
                map_write(map, CMD(0xd0), adr);
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_READY;
                chip->state = FL_ERASING;
                break;

        case FL_XIP_WHILE_ERASING:
                chip->state = chip->oldstate;
                chip->oldstate = FL_READY;
                break;

        case FL_READY:
        case FL_STATUS:
        case FL_JEDEC_QUERY:
                break;
        default:
                printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
        }
        wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions). Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
                        unsigned long adr)
{
        /* TODO: chips with no XIP use should ignore and return */
        (void) map_read(map, adr); /* ensure mmu mapping is up to date */
        local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
                                unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xff), adr);
                chip->state = FL_READY;
        }
        (void) map_read(map, adr);
        xip_iprefetch();
        local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete,
 * xip_wait_for_operation() polls for both the given timeout and pending
 * (but still masked) hardware interrupts.  Whenever an interrupt is
 * pending, the flash erase or write operation is suspended, array mode
 * is restored and interrupts are unmasked.  Task scheduling might also
 * happen at that point.  The CPU eventually returns from the interrupt
 * or the call to schedule(), and the suspended flash operation is
 * resumed for the remainder of the delay period.
1108 *
1109 * Warning: this function _will_ fool interrupt latency tracing tools.
1110 */
1111
1112static int __xipram xip_wait_for_operation(
1113                struct map_info *map, struct flchip *chip,
1114                unsigned long adr, unsigned int chip_op_time_max)
1115{
1116        struct cfi_private *cfi = map->fldrv_priv;
1117        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
1118        map_word status, OK = CMD(0x80);
1119        unsigned long usec, suspended, start, done;
1120        flstate_t oldstate, newstate;
1121
1122        start = xip_currtime();
1123        usec = chip_op_time_max;
1124        if (usec == 0)
1125                usec = 500000;
1126        done = 0;
1127
1128        do {
1129                cpu_relax();
1130                if (xip_irqpending() && cfip &&
1131                    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
1132                     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
1133                    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
1134                        /*
1135                         * Let's suspend the erase or write operation when
1136                         * supported.  Note that we currently don't try to
1137                         * suspend interleaved chips if there is already
1138                         * another operation suspended (imagine what happens
1139                         * when one chip was already done with the current
1140                         * operation while another chip suspended it, then
1141                         * we resume the whole thing at once).  Yes, it
1142                         * can happen!
1143                         */
1144                        usec -= done;
1145                        map_write(map, CMD(0xb0), adr);
1146                        map_write(map, CMD(0x70), adr);
1147                        suspended = xip_currtime();
1148                        do {
1149                                if (xip_elapsed_since(suspended) > 100000) {
1150                                        /*
1151                                         * The chip doesn't want to suspend
1152                                         * after waiting for 100 msecs.
1153                                         * This is a critical error but there
1154                                         * is not much we can do here.
1155                                         */
1156                                        return -EIO;
1157                                }
1158                                status = map_read(map, adr);
1159                        } while (!map_word_andequal(map, status, OK, OK));
1160
1161                        /* Suspend succeeded */
1162                        oldstate = chip->state;
1163                        if (oldstate == FL_ERASING) {
1164                                if (!map_word_bitsset(map, status, CMD(0x40)))
1165                                        break;
1166                                newstate = FL_XIP_WHILE_ERASING;
1167                                chip->erase_suspended = 1;
1168                        } else {
1169                                if (!map_word_bitsset(map, status, CMD(0x04)))
1170                                        break;
1171                                newstate = FL_XIP_WHILE_WRITING;
1172                                chip->write_suspended = 1;
1173                        }
1174                        chip->state = newstate;
1175                        map_write(map, CMD(0xff), adr);
1176                        (void) map_read(map, adr);
1177                        xip_iprefetch();
1178                        local_irq_enable();
1179                        mutex_unlock(&chip->mutex);
1180                        xip_iprefetch();
1181                        cond_resched();
1182
1183                        /*
1184                         * We're back.  However someone else might have
1185                         * decided to go write to the chip if we are in
1186                         * a suspended erase state.  If so let's wait
1187                         * until it's done.
1188                         */
1189                        mutex_lock(&chip->mutex);
1190                        while (chip->state != newstate) {
1191                                DECLARE_WAITQUEUE(wait, current);
1192                                set_current_state(TASK_UNINTERRUPTIBLE);
1193                                add_wait_queue(&chip->wq, &wait);
1194                                mutex_unlock(&chip->mutex);
1195                                schedule();
1196                                remove_wait_queue(&chip->wq, &wait);
1197                                mutex_lock(&chip->mutex);
1198                        }
1199                        /* Disallow XIP again */
1200                        local_irq_disable();
1201
1202                        /* Resume the write or erase operation */
1203                        map_write(map, CMD(0xd0), adr);
1204                        map_write(map, CMD(0x70), adr);
1205                        chip->state = oldstate;
1206                        start = xip_currtime();
1207                } else if (usec >= 1000000/HZ) {
1208                        /*
1209                         * Try to save on CPU power when waiting delay
1210                         * is at least a system timer tick period.
1211                         * No need to be extremely accurate here.
1212                         */
1213                        xip_cpu_idle();
1214                }
1215                status = map_read(map, adr);
1216                done = xip_elapsed_since(start);
1217        } while (!map_word_andequal(map, status, OK, OK)
1218                 && done < usec);
1219
1220        return (done >= usec) ? -ETIME : 0;
1221}
1222
1223/*
1224 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
1225 * the flash is actively programming or erasing since we have to poll for
1226 * the operation to complete anyway.  We can't do that in a generic way with
1227 * an XIP setup, so do it before the actual flash operation in this case
1228 * and stub it out from INVAL_CACHE_AND_WAIT.
1229 */
1230#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
1231        INVALIDATE_CACHED_RANGE(map, from, size)
1232
1233#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec, usec_max) \
1234        xip_wait_for_operation(map, chip, cmd_adr, usec_max)
1235
1236#else
1237
1238#define xip_disable(map, chip, adr)
1239#define xip_enable(map, chip, adr)
1240#define XIP_INVAL_CACHED_RANGE(x...)
1241#define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation
1242
1243static int inval_cache_and_wait_for_operation(
1244                struct map_info *map, struct flchip *chip,
1245                unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
1246                unsigned int chip_op_time, unsigned int chip_op_time_max)
1247{
1248        struct cfi_private *cfi = map->fldrv_priv;
1249        map_word status, status_OK = CMD(0x80);
1250        int chip_state = chip->state;
1251        unsigned int timeo, sleep_time, reset_timeo;
1252
1253        mutex_unlock(&chip->mutex);
1254        if (inval_len)
1255                INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
1256        mutex_lock(&chip->mutex);
1257
1258        timeo = chip_op_time_max;
1259        if (!timeo)
1260                timeo = 500000;
1261        reset_timeo = timeo;
1262        sleep_time = chip_op_time / 2;
1263
1264        for (;;) {
1265                if (chip->state != chip_state) {
1266                        /* Someone's suspended the operation: sleep */
1267                        DECLARE_WAITQUEUE(wait, current);
1268                        set_current_state(TASK_UNINTERRUPTIBLE);
1269                        add_wait_queue(&chip->wq, &wait);
1270                        mutex_unlock(&chip->mutex);
1271                        schedule();
1272                        remove_wait_queue(&chip->wq, &wait);
1273                        mutex_lock(&chip->mutex);
1274                        continue;
1275                }
1276
1277                status = map_read(map, cmd_adr);
1278                if (map_word_andequal(map, status, status_OK, status_OK))
1279                        break;
1280
1281                if (chip->erase_suspended && chip_state == FL_ERASING)  {
1282                        /* Erase suspend occurred while we slept: reset the timeout */
1283                        timeo = reset_timeo;
1284                        chip->erase_suspended = 0;
1285                }
1286                if (chip->write_suspended && chip_state == FL_WRITING)  {
1287                        /* Write suspend occurred while we slept: reset the timeout */
1288                        timeo = reset_timeo;
1289                        chip->write_suspended = 0;
1290                }
1291                if (!timeo) {
1292                        map_write(map, CMD(0x70), cmd_adr);
1293                        chip->state = FL_STATUS;
1294                        return -ETIME;
1295                }
1296
1297                /* OK, still waiting. Drop the lock, wait a while and retry. */
1298                mutex_unlock(&chip->mutex);
1299                if (sleep_time >= 1000000/HZ) {
1300                        /*
1301                         * At least one timer tick of delay remains,
1302                         * so it can be spent in a sleeping delay
1303                         * instead of busy waiting.
1304                         */
1305                        msleep(sleep_time/1000);
1306                        timeo -= sleep_time;
1307                        sleep_time = 1000000/HZ;
1308                } else {
1309                        udelay(1);
1310                        cond_resched();
1311                        timeo--;
1312                }
1313                mutex_lock(&chip->mutex);
1314        }
1315
1316        /* Done and happy. */
1317        chip->state = FL_STATUS;
1318        return 0;
1319}
1320
1321#endif
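
/*
 * The wait loops above share one idiom: sleep when the remaining delay
 * is at least a timer tick, busy-wait otherwise.  A minimal sketch of
 * that idiom in isolation (hypothetical poll_done() predicate, delays
 * in microseconds; illustrative only, not part of the driver):
 */
#if 0
static int wait_poll_sketch(bool (*poll_done)(void), unsigned int timeout_us)
{
        while (!poll_done()) {
                if (!timeout_us)
                        return -ETIME;
                if (timeout_us >= 1000000/HZ) {
                        /* long enough to sleep for roughly one tick */
                        msleep(1000/HZ);
                        timeout_us -= 1000000/HZ;
                } else {
                        /* too short for the scheduler: spin in 1us steps */
                        udelay(1);
                        timeout_us--;
                }
        }
        return 0;
}
#endif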
1322
1323#define WAIT_TIMEOUT(map, chip, adr, udelay, udelay_max) \
1324        INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay, udelay_max)
1325
1326
1327static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
1328{
1329        unsigned long cmd_addr;
1330        struct cfi_private *cfi = map->fldrv_priv;
1331        int ret = 0;
1332
1333        adr += chip->start;
1334
1335        /* Ensure cmd read/writes are aligned. */
1336        cmd_addr = adr & ~(map_bankwidth(map)-1);
1337
1338        mutex_lock(&chip->mutex);
1339
1340        ret = get_chip(map, chip, cmd_addr, FL_POINT);
1341
1342        if (!ret) {
1343                if (chip->state != FL_POINT && chip->state != FL_READY)
1344                        map_write(map, CMD(0xff), cmd_addr);
1345
1346                chip->state = FL_POINT;
1347                chip->ref_point_counter++;
1348        }
1349        mutex_unlock(&chip->mutex);
1350
1351        return ret;
1352}
1353
1354static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
1355                size_t *retlen, void **virt, resource_size_t *phys)
1356{
1357        struct map_info *map = mtd->priv;
1358        struct cfi_private *cfi = map->fldrv_priv;
1359        unsigned long ofs, last_end = 0;
1360        int chipnum;
1361        int ret = 0;
1362
1363        if (!map->virt)
1364                return -EINVAL;
1365
1366        /* Now lock the chip(s) to POINT state */
1367
1368        /* ofs: offset within the first chip at which the first read should start */
1369        chipnum = (from >> cfi->chipshift);
1370        ofs = from - (chipnum << cfi->chipshift);
1371
1372        *virt = map->virt + cfi->chips[chipnum].start + ofs;
1373        if (phys)
1374                *phys = map->phys + cfi->chips[chipnum].start + ofs;
1375
1376        while (len) {
1377                unsigned long thislen;
1378
1379                if (chipnum >= cfi->numchips)
1380                        break;
1381
1382                /* We cannot point across chips that are virtually disjoint */
1383                if (!last_end)
1384                        last_end = cfi->chips[chipnum].start;
1385                else if (cfi->chips[chipnum].start != last_end)
1386                        break;
1387
1388                if ((len + ofs - 1) >> cfi->chipshift)
1389                        thislen = (1<<cfi->chipshift) - ofs;
1390                else
1391                        thislen = len;
1392
1393                ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
1394                if (ret)
1395                        break;
1396
1397                *retlen += thislen;
1398                len -= thislen;
1399
1400                ofs = 0;
1401                last_end += 1 << cfi->chipshift;
1402                chipnum++;
1403        }
1404        return 0;
1405}
1406
1407static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
1408{
1409        struct map_info *map = mtd->priv;
1410        struct cfi_private *cfi = map->fldrv_priv;
1411        unsigned long ofs;
1412        int chipnum, err = 0;
1413
1414        /* Now release the chip(s) from POINT state */
1415
1416        /* ofs: offset within the first chip at which the first read should start */
1417        chipnum = (from >> cfi->chipshift);
1418        ofs = from - (chipnum << cfi->chipshift);
1419
1420        while (len && !err) {
1421                unsigned long thislen;
1422                struct flchip *chip;
1423
1424                if (chipnum >= cfi->numchips)
1425                        break;
1426                chip = &cfi->chips[chipnum];
1427
1428                if ((len + ofs - 1) >> cfi->chipshift)
1429                        thislen = (1<<cfi->chipshift) - ofs;
1430                else
1431                        thislen = len;
1432
1433                mutex_lock(&chip->mutex);
1434                if (chip->state == FL_POINT) {
1435                        chip->ref_point_counter--;
1436                        if (chip->ref_point_counter == 0)
1437                                chip->state = FL_READY;
1438                } else {
1439                        printk(KERN_ERR "%s: Error: unpoint called on non-pointed region\n", map->name);
1440                        err = -EINVAL;
1441                }
1442
1443                put_chip(map, chip, chip->start);
1444                mutex_unlock(&chip->mutex);
1445
1446                len -= thislen;
1447                ofs = 0;
1448                chipnum++;
1449        }
1450
1451        return err;
1452}
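
/*
 * A hedged usage sketch for the point/unpoint pair above: an MTD client
 * maps a region for direct access, reads through the returned pointer,
 * then drops the FL_POINT reference.  Assumes a registered mtd_info;
 * illustrative only, not part of the driver.
 */
#if 0
static int point_read_sketch(struct mtd_info *mtd, loff_t ofs, size_t len)
{
        size_t retlen = 0;
        void *virt;
        int ret;

        ret = mtd_point(mtd, ofs, len, &retlen, &virt, NULL);
        if (ret)
                return ret;

        /* ... access the flash directly through 'virt' here ... */

        /* Only unpoint what was actually pointed (retlen may be short) */
        return mtd_unpoint(mtd, ofs, retlen);
}
#endif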
1453
1454static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
1455{
1456        unsigned long cmd_addr;
1457        struct cfi_private *cfi = map->fldrv_priv;
1458        int ret;
1459
1460        adr += chip->start;
1461
1462        /* Ensure cmd read/writes are aligned. */
1463        cmd_addr = adr & ~(map_bankwidth(map)-1);
1464
1465        mutex_lock(&chip->mutex);
1466        ret = get_chip(map, chip, cmd_addr, FL_READY);
1467        if (ret) {
1468                mutex_unlock(&chip->mutex);
1469                return ret;
1470        }
1471
1472        if (chip->state != FL_POINT && chip->state != FL_READY) {
1473                map_write(map, CMD(0xff), cmd_addr);
1474
1475                chip->state = FL_READY;
1476        }
1477
1478        map_copy_from(map, buf, adr, len);
1479
1480        put_chip(map, chip, cmd_addr);
1481
1482        mutex_unlock(&chip->mutex);
1483        return 0;
1484}
1485
1486static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1487{
1488        struct map_info *map = mtd->priv;
1489        struct cfi_private *cfi = map->fldrv_priv;
1490        unsigned long ofs;
1491        int chipnum;
1492        int ret = 0;
1493
1494        /* ofs: offset within the first chip at which the first read should start */
1495        chipnum = (from >> cfi->chipshift);
1496        ofs = from - (chipnum << cfi->chipshift);
1497
1498        while (len) {
1499                unsigned long thislen;
1500
1501                if (chipnum >= cfi->numchips)
1502                        break;
1503
1504                if ((len + ofs - 1) >> cfi->chipshift)
1505                        thislen = (1<<cfi->chipshift) - ofs;
1506                else
1507                        thislen = len;
1508
1509                ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1510                if (ret)
1511                        break;
1512
1513                *retlen += thislen;
1514                len -= thislen;
1515                buf += thislen;
1516
1517                ofs = 0;
1518                chipnum++;
1519        }
1520        return ret;
1521}
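
/*
 * The reader above splits requests at chip boundaries and accumulates
 * *retlen.  A minimal caller sketch (assuming a registered mtd_info;
 * illustrative only):
 */
#if 0
static int read_check_sketch(struct mtd_info *mtd, loff_t ofs,
                             u_char *buf, size_t len)
{
        size_t retlen = 0;
        int ret = mtd_read(mtd, ofs, len, &retlen, buf);

        /* A short read (retlen < len) can happen past the last chip */
        return (ret || retlen == len) ? ret : -EIO;
}
#endif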
1522
1523static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1524                                     unsigned long adr, map_word datum, int mode)
1525{
1526        struct cfi_private *cfi = map->fldrv_priv;
1527        map_word status, write_cmd;
1528        int ret = 0;
1529
1530        adr += chip->start;
1531
1532        switch (mode) {
1533        case FL_WRITING:
1534                write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0x40) : CMD(0x41);
1535                break;
1536        case FL_OTP_WRITE:
1537                write_cmd = CMD(0xc0);
1538                break;
1539        default:
1540                return -EINVAL;
1541        }
1542
1543        mutex_lock(&chip->mutex);
1544        ret = get_chip(map, chip, adr, mode);
1545        if (ret) {
1546                mutex_unlock(&chip->mutex);
1547                return ret;
1548        }
1549
1550        XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1551        ENABLE_VPP(map);
1552        xip_disable(map, chip, adr);
1553        map_write(map, write_cmd, adr);
1554        map_write(map, datum, adr);
1555        chip->state = mode;
1556
1557        ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1558                                   adr, map_bankwidth(map),
1559                                   chip->word_write_time,
1560                                   chip->word_write_time_max);
1561        if (ret) {
1562                xip_enable(map, chip, adr);
1563                printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
1564                goto out;
1565        }
1566
1567        /* check for errors */
1568        status = map_read(map, adr);
1569        if (map_word_bitsset(map, status, CMD(0x1a))) {
1570                unsigned long chipstatus = MERGESTATUS(status);
1571
1572                /* reset status */
1573                map_write(map, CMD(0x50), adr);
1574                map_write(map, CMD(0x70), adr);
1575                xip_enable(map, chip, adr);
1576
1577                if (chipstatus & 0x02) {
1578                        ret = -EROFS;
1579                } else if (chipstatus & 0x08) {
1580                        printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
1581                        ret = -EIO;
1582                } else {
1583                        printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
1584                        ret = -EINVAL;
1585                }
1586
1587                goto out;
1588        }
1589
1590        xip_enable(map, chip, adr);
1591 out:   DISABLE_VPP(map);
1592        put_chip(map, chip, adr);
1593        mutex_unlock(&chip->mutex);
1594        return ret;
1595}
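
/*
 * The error decoding above follows the Intel status register layout:
 * SR.1 = block locked, SR.3 = VPP low, SR.4 = program error, SR.5 =
 * erase error; CMD(0x1a) masks SR.1/SR.3/SR.4.  A standalone sketch of
 * the same mapping on a merged status value (illustrative only):
 */
#if 0
static int sr_to_errno_sketch(unsigned long chipstatus)
{
        if (chipstatus & 0x02)  /* SR.1: attempt to modify a locked block */
                return -EROFS;
        if (chipstatus & 0x08)  /* SR.3: VPP out of range */
                return -EIO;
        if (chipstatus & 0x30)  /* SR.4/SR.5: program or erase failed */
                return -EINVAL;
        return 0;
}
#endif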
1596
1597
1598static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
1599{
1600        struct map_info *map = mtd->priv;
1601        struct cfi_private *cfi = map->fldrv_priv;
1602        int ret = 0;
1603        int chipnum;
1604        unsigned long ofs;
1605
1606        chipnum = to >> cfi->chipshift;
1607        ofs = to - (chipnum << cfi->chipshift);
1608
1609        /* If it's not bus-aligned, do a leading partial-word write first */
1610        if (ofs & (map_bankwidth(map)-1)) {
1611                unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1612                int gap = ofs - bus_ofs;
1613                int n;
1614                map_word datum;
1615
1616                n = min_t(int, len, map_bankwidth(map)-gap);
1617                datum = map_word_ff(map);
1618                datum = map_word_load_partial(map, datum, buf, gap, n);
1619
1620                ret = do_write_oneword(map, &cfi->chips[chipnum],
1621                                               bus_ofs, datum, FL_WRITING);
1622                if (ret)
1623                        return ret;
1624
1625                len -= n;
1626                ofs += n;
1627                buf += n;
1628                (*retlen) += n;
1629
1630                if (ofs >> cfi->chipshift) {
1631                        chipnum++;
1632                        ofs = 0;
1633                        if (chipnum == cfi->numchips)
1634                                return 0;
1635                }
1636        }
1637
1638        while (len >= map_bankwidth(map)) {
1639                map_word datum = map_word_load(map, buf);
1640
1641                ret = do_write_oneword(map, &cfi->chips[chipnum],
1642                                       ofs, datum, FL_WRITING);
1643                if (ret)
1644                        return ret;
1645
1646                ofs += map_bankwidth(map);
1647                buf += map_bankwidth(map);
1648                (*retlen) += map_bankwidth(map);
1649                len -= map_bankwidth(map);
1650
1651                if (ofs >> cfi->chipshift) {
1652                        chipnum++;
1653                        ofs = 0;
1654                        if (chipnum == cfi->numchips)
1655                                return 0;
1656                }
1657        }
1658
1659        if (len & (map_bankwidth(map)-1)) {
1660                map_word datum;
1661
1662                datum = map_word_ff(map);
1663                datum = map_word_load_partial(map, datum, buf, 0, len);
1664
1665                ret = do_write_oneword(map, &cfi->chips[chipnum],
1666                                       ofs, datum, FL_WRITING);
1667                if (ret)
1668                        return ret;
1669
1670                (*retlen) += len;
1671        }
1672
1673        return 0;
1674}
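
/*
 * Worked example of the unaligned-head math above, assuming a 4-byte
 * bankwidth and a write starting at flash offset 6 (illustrative only):
 *
 *      bus_ofs = 6 & ~3     = 4    aligned word containing offset 6
 *      gap     = 6 - 4      = 2    bytes to skip within that word
 *      n       = min(len, 4 - 2)   at most 2 bytes from the caller
 *
 * The remaining bytes of 'datum' stay 0xff: programming can only clear
 * bits, so the 0xff filler leaves those flash bytes untouched.
 */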
1675
1676
1677static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1678                                    unsigned long adr, const struct kvec **pvec,
1679                                    unsigned long *pvec_seek, int len)
1680{
1681        struct cfi_private *cfi = map->fldrv_priv;
1682        map_word status, write_cmd, datum;
1683        unsigned long cmd_adr;
1684        int ret, wbufsize, word_gap, words;
1685        const struct kvec *vec;
1686        unsigned long vec_seek;
1687        unsigned long initial_adr;
1688        int initial_len = len;
1689
1690        wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1691        adr += chip->start;
1692        initial_adr = adr;
1693        cmd_adr = adr & ~(wbufsize-1);
1694
1695        /* Sharp LH28F640BF chips need the first address for the
1696         * Page Buffer Program command. See Table 5 of
1697         * LH28F320BF, LH28F640BF, LH28F128BF Series (Appendix FUM00701) */
1698        if (is_LH28F640BF(cfi))
1699                cmd_adr = adr;
1700
1701        /* Determine the buffer-write command once, according to the interleave */
1702        write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0xe8) : CMD(0xe9);
1703
1704        mutex_lock(&chip->mutex);
1705        ret = get_chip(map, chip, cmd_adr, FL_WRITING);
1706        if (ret) {
1707                mutex_unlock(&chip->mutex);
1708                return ret;
1709        }
1710
1711        XIP_INVAL_CACHED_RANGE(map, initial_adr, initial_len);
1712        ENABLE_VPP(map);
1713        xip_disable(map, chip, cmd_adr);
1714
1715        /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1716           [...], the device will not accept any more Write to Buffer commands".
1717           So we must check here and reset those bits if they're set. Otherwise
1718           we're just pissing in the wind */
1719        if (chip->state != FL_STATUS) {
1720                map_write(map, CMD(0x70), cmd_adr);
1721                chip->state = FL_STATUS;
1722        }
1723        status = map_read(map, cmd_adr);
1724        if (map_word_bitsset(map, status, CMD(0x30))) {
1725                xip_enable(map, chip, cmd_adr);
1726                printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
1727                xip_disable(map, chip, cmd_adr);
1728                map_write(map, CMD(0x50), cmd_adr);
1729                map_write(map, CMD(0x70), cmd_adr);
1730        }
1731
1732        chip->state = FL_WRITING_TO_BUFFER;
1733        map_write(map, write_cmd, cmd_adr);
1734        ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0, 0);
1735        if (ret) {
1736                /* Argh. Not ready for write to buffer */
1737                map_word Xstatus = map_read(map, cmd_adr);
1738                map_write(map, CMD(0x70), cmd_adr);
1739                chip->state = FL_STATUS;
1740                status = map_read(map, cmd_adr);
1741                map_write(map, CMD(0x50), cmd_adr);
1742                map_write(map, CMD(0x70), cmd_adr);
1743                xip_enable(map, chip, cmd_adr);
1744                printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
1745                                map->name, Xstatus.x[0], status.x[0]);
1746                goto out;
1747        }
1748
1749        /* Figure out the word count to program; the chip takes the count minus one */
1750        word_gap = (-adr & (map_bankwidth(map)-1));
1751        words = DIV_ROUND_UP(len - word_gap, map_bankwidth(map));
1752        if (!word_gap) {
1753                words--;
1754        } else {
1755                word_gap = map_bankwidth(map) - word_gap;
1756                adr -= word_gap;
1757                datum = map_word_ff(map);
1758        }
1759
1760        /* Write length of data to come */
1761        map_write(map, CMD(words), cmd_adr);
1762
1763        /* Write data */
1764        vec = *pvec;
1765        vec_seek = *pvec_seek;
1766        do {
1767                int n = map_bankwidth(map) - word_gap;
1768                if (n > vec->iov_len - vec_seek)
1769                        n = vec->iov_len - vec_seek;
1770                if (n > len)
1771                        n = len;
1772
1773                if (!word_gap && len < map_bankwidth(map))
1774                        datum = map_word_ff(map);
1775
1776                datum = map_word_load_partial(map, datum,
1777                                              vec->iov_base + vec_seek,
1778                                              word_gap, n);
1779
1780                len -= n;
1781                word_gap += n;
1782                if (!len || word_gap == map_bankwidth(map)) {
1783                        map_write(map, datum, adr);
1784                        adr += map_bankwidth(map);
1785                        word_gap = 0;
1786                }
1787
1788                vec_seek += n;
1789                if (vec_seek == vec->iov_len) {
1790                        vec++;
1791                        vec_seek = 0;
1792                }
1793        } while (len);
1794        *pvec = vec;
1795        *pvec_seek = vec_seek;
1796
1797        /* GO GO GO */
1798        map_write(map, CMD(0xd0), cmd_adr);
1799        chip->state = FL_WRITING;
1800
1801        ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
1802                                   initial_adr, initial_len,
1803                                   chip->buffer_write_time,
1804                                   chip->buffer_write_time_max);
1805        if (ret) {
1806                map_write(map, CMD(0x70), cmd_adr);
1807                chip->state = FL_STATUS;
1808                xip_enable(map, chip, cmd_adr);
1809                printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
1810                goto out;
1811        }
1812
1813        /* check for errors */
1814        status = map_read(map, cmd_adr);
1815        if (map_word_bitsset(map, status, CMD(0x1a))) {
1816                unsigned long chipstatus = MERGESTATUS(status);
1817
1818                /* reset status */
1819                map_write(map, CMD(0x50), cmd_adr);
1820                map_write(map, CMD(0x70), cmd_adr);
1821                xip_enable(map, chip, cmd_adr);
1822
1823                if (chipstatus & 0x02) {
1824                        ret = -EROFS;
1825                } else if (chipstatus & 0x08) {
1826                        printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
1827                        ret = -EIO;
1828                } else {
1829                        printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
1830                        ret = -EINVAL;
1831                }
1832
1833                goto out;
1834        }
1835
1836        xip_enable(map, chip, cmd_adr);
1837 out:   DISABLE_VPP(map);
1838        put_chip(map, chip, cmd_adr);
1839        mutex_unlock(&chip->mutex);
1840        return ret;
1841}
1842
1843static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
1844                                unsigned long count, loff_t to, size_t *retlen)
1845{
1846        struct map_info *map = mtd->priv;
1847        struct cfi_private *cfi = map->fldrv_priv;
1848        int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1849        int ret = 0;
1850        int chipnum;
1851        unsigned long ofs, vec_seek, i;
1852        size_t len = 0;
1853
1854        for (i = 0; i < count; i++)
1855                len += vecs[i].iov_len;
1856
1857        if (!len)
1858                return 0;
1859
1860        chipnum = to >> cfi->chipshift;
1861        ofs = to - (chipnum << cfi->chipshift);
1862        vec_seek = 0;
1863
1864        do {
1865                /* We must not cross write block boundaries */
1866                int size = wbufsize - (ofs & (wbufsize-1));
1867
1868                if (size > len)
1869                        size = len;
1870                ret = do_write_buffer(map, &cfi->chips[chipnum],
1871                                      ofs, &vecs, &vec_seek, size);
1872                if (ret)
1873                        return ret;
1874
1875                ofs += size;
1876                (*retlen) += size;
1877                len -= size;
1878
1879                if (ofs >> cfi->chipshift) {
1880                        chipnum++;
1881                        ofs = 0;
1882                        if (chipnum == cfi->numchips)
1883                                return 0;
1884                }
1885
1886                /* Be nice and reschedule with the chip in a usable state for other
1887                   processes. */
1888                cond_resched();
1889
1890        } while (len);
1891
1892        return 0;
1893}
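
/*
 * A hedged caller sketch for the writev path above: gather two buffers
 * into one flash write; do_write_buffer() consumes the kvec array
 * without crossing write-buffer boundaries.  Assumes a registered
 * mtd_info; illustrative only.
 */
#if 0
static int gather_write_sketch(struct mtd_info *mtd, loff_t to,
                               const void *hdr, size_t hdrlen,
                               const void *data, size_t datalen)
{
        struct kvec vecs[2] = {
                { .iov_base = (void *)hdr,  .iov_len = hdrlen  },
                { .iov_base = (void *)data, .iov_len = datalen },
        };
        size_t retlen = 0;
        int ret = mtd_writev(mtd, vecs, 2, to, &retlen);

        return (ret || retlen == hdrlen + datalen) ? ret : -EIO;
}
#endif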
1894
1895static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1896                                       size_t len, size_t *retlen, const u_char *buf)
1897{
1898        struct kvec vec;
1899
1900        vec.iov_base = (void *) buf;
1901        vec.iov_len = len;
1902
1903        return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
1904}
1905
1906static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1907                                      unsigned long adr, int len, void *thunk)
1908{
1909        struct cfi_private *cfi = map->fldrv_priv;
1910        map_word status;
1911        int retries = 3;
1912        int ret;
1913
1914        adr += chip->start;
1915
1916 retry:
1917        mutex_lock(&chip->mutex);
1918        ret = get_chip(map, chip, adr, FL_ERASING);
1919        if (ret) {
1920                mutex_unlock(&chip->mutex);
1921                return ret;
1922        }
1923
1924        XIP_INVAL_CACHED_RANGE(map, adr, len);
1925        ENABLE_VPP(map);
1926        xip_disable(map, chip, adr);
1927
1928        /* Clear the status register first */
1929        map_write(map, CMD(0x50), adr);
1930
1931        /* Now erase */
1932        map_write(map, CMD(0x20), adr);
1933        map_write(map, CMD(0xD0), adr);
1934        chip->state = FL_ERASING;
1935        chip->erase_suspended = 0;
1936
1937        ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1938                                   adr, len,
1939                                   chip->erase_time,
1940                                   chip->erase_time_max);
1941        if (ret) {
1942                map_write(map, CMD(0x70), adr);
1943                chip->state = FL_STATUS;
1944                xip_enable(map, chip, adr);
1945                printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
1946                goto out;
1947        }
1948
1949        /* We've broken this before. It doesn't hurt to be safe */
1950        map_write(map, CMD(0x70), adr);
1951        chip->state = FL_STATUS;
1952        status = map_read(map, adr);
1953
1954        /* check for errors */
1955        if (map_word_bitsset(map, status, CMD(0x3a))) {
1956                unsigned long chipstatus = MERGESTATUS(status);
1957
1958                /* Reset the error bits */
1959                map_write(map, CMD(0x50), adr);
1960                map_write(map, CMD(0x70), adr);
1961                xip_enable(map, chip, adr);
1962
1963                if ((chipstatus & 0x30) == 0x30) {
1964                        printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
1965                        ret = -EINVAL;
1966                } else if (chipstatus & 0x02) {
1967                        /* Protection bit set */
1968                        ret = -EROFS;
1969                } else if (chipstatus & 0x08) {
1970                        /* Voltage */
1971                        printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
1972                        ret = -EIO;
1973                } else if (chipstatus & 0x20 && retries--) {
1974                        printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
1975                        DISABLE_VPP(map);
1976                        put_chip(map, chip, adr);
1977                        mutex_unlock(&chip->mutex);
1978                        goto retry;
1979                } else {
1980                        printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
1981                        ret = -EIO;
1982                }
1983
1984                goto out;
1985        }
1986
1987        xip_enable(map, chip, adr);
1988 out:   DISABLE_VPP(map);
1989        put_chip(map, chip, adr);
1990        mutex_unlock(&chip->mutex);
1991        return ret;
1992}
1993
1994static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1995{
1996        unsigned long ofs, len;
1997        int ret;
1998
1999        ofs = instr->addr;
2000        len = instr->len;
2001
2002        ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
2003        if (ret)
2004                return ret;
2005
2006        instr->state = MTD_ERASE_DONE;
2007        mtd_erase_callback(instr);
2008
2009        return 0;
2010}
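
/*
 * In this driver mtd_erase() completes synchronously: the state is set
 * to MTD_ERASE_DONE and any callback runs before the call returns.  A
 * minimal caller sketch (assuming a registered mtd_info and an
 * eraseblock-aligned range; illustrative only):
 */
#if 0
static int erase_sketch(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
        struct erase_info ei = {
                .mtd  = mtd,
                .addr = ofs,
                .len  = len,
        };
        int ret = mtd_erase(mtd, &ei);

        return ret ? ret : (ei.state == MTD_ERASE_DONE ? 0 : -EIO);
}
#endif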
2011
2012static void cfi_intelext_sync (struct mtd_info *mtd)
2013{
2014        struct map_info *map = mtd->priv;
2015        struct cfi_private *cfi = map->fldrv_priv;
2016        int i;
2017        struct flchip *chip;
2018        int ret = 0;
2019
2020        for (i = 0; !ret && i < cfi->numchips; i++) {
2021                chip = &cfi->chips[i];
2022
2023                mutex_lock(&chip->mutex);
2024                ret = get_chip(map, chip, chip->start, FL_SYNCING);
2025
2026                if (!ret) {
2027                        chip->oldstate = chip->state;
2028                        chip->state = FL_SYNCING;
2029                        /* No need to wake_up() on this state change -
2030                         * as the whole point is that nobody can do anything
2031                         * with the chip now anyway.
2032                         */
2033                }
2034                mutex_unlock(&chip->mutex);
2035        }
2036
2037        /* Unlock the chips again */
2038
2039        for (i--; i >= 0; i--) {
2040                chip = &cfi->chips[i];
2041
2042                mutex_lock(&chip->mutex);
2043
2044                if (chip->state == FL_SYNCING) {
2045                        chip->state = chip->oldstate;
2046                        chip->oldstate = FL_READY;
2047                        wake_up(&chip->wq);
2048                }
2049                mutex_unlock(&chip->mutex);
2050        }
2051}
2052
2053static int __xipram do_getlockstatus_oneblock(struct map_info *map,
2054                                                struct flchip *chip,
2055                                                unsigned long adr,
2056                                                int len, void *thunk)
2057{
2058        struct cfi_private *cfi = map->fldrv_priv;
2059        int status, ofs_factor = cfi->interleave * cfi->device_type;
2060
2061        adr += chip->start;
2062        xip_disable(map, chip, adr+(2*ofs_factor));
2063        map_write(map, CMD(0x90), adr+(2*ofs_factor));
2064        chip->state = FL_JEDEC_QUERY;
2065        status = cfi_read_query(map, adr+(2*ofs_factor));
2066        xip_enable(map, chip, 0);
2067        return status;
2068}
2069
2070#ifdef DEBUG_LOCK_BITS
2071static int __xipram do_printlockstatus_oneblock(struct map_info *map,
2072                                                struct flchip *chip,
2073                                                unsigned long adr,
2074                                                int len, void *thunk)
2075{
2076        printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
2077               adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
2078        return 0;
2079}
2080#endif
2081
2082#define DO_XXLOCK_ONEBLOCK_LOCK         ((void *) 1)
2083#define DO_XXLOCK_ONEBLOCK_UNLOCK       ((void *) 2)
2084
2085static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
2086                                       unsigned long adr, int len, void *thunk)
2087{
2088        struct cfi_private *cfi = map->fldrv_priv;
2089        struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2090        int mdelay;
2091        int ret;
2092
2093        adr += chip->start;
2094
2095        mutex_lock(&chip->mutex);
2096        ret = get_chip(map, chip, adr, FL_LOCKING);
2097        if (ret) {
2098                mutex_unlock(&chip->mutex);
2099                return ret;
2100        }
2101
2102        ENABLE_VPP(map);
2103        xip_disable(map, chip, adr);
2104
2105        map_write(map, CMD(0x60), adr);
2106        if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
2107                map_write(map, CMD(0x01), adr);
2108                chip->state = FL_LOCKING;
2109        } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
2110                map_write(map, CMD(0xD0), adr);
2111                chip->state = FL_UNLOCKING;
2112        } else
2113                BUG();
2114
2115        /*
2116         * If Instant Individual Block Locking is supported there is no
2117         * need to delay.
2118         */
2119        /*
2120         * Unlocking may take up to 1.4 seconds on some Intel flashes, so
2121         * let's use a maximum of 1.5 seconds (1500 ms) as the timeout.
2122         *
2123         * See "Clear Block Lock-Bits Time" on page 40 of the
2124         * "3 Volt Intel StrataFlash Memory" 28F128J3, 28F640J3, 28F320J3
2125         * manual from February 2003.
2126         */
2127        mdelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1500 : 0;
2128
2129        ret = WAIT_TIMEOUT(map, chip, adr, mdelay, mdelay * 1000);
2130        if (ret) {
2131                map_write(map, CMD(0x70), adr);
2132                chip->state = FL_STATUS;
2133                xip_enable(map, chip, adr);
2134                printk(KERN_ERR "%s: block lock/unlock error: (status timeout)\n", map->name);
2135                goto out;
2136        }
2137
2138        xip_enable(map, chip, adr);
2139 out:   DISABLE_VPP(map);
2140        put_chip(map, chip, adr);
2141        mutex_unlock(&chip->mutex);
2142        return ret;
2143}
2144
2145static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2146{
2147        int ret;
2148
2149#ifdef DEBUG_LOCK_BITS
2150        printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08llx\n",
2151               __func__, (unsigned long long)ofs, (unsigned long long)len);
2152        cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2153                ofs, len, NULL);
2154#endif
2155
2156        ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2157                ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
2158
2159#ifdef DEBUG_LOCK_BITS
2160        printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2161               __func__, ret);
2162        cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2163                ofs, len, NULL);
2164#endif
2165
2166        return ret;
2167}
2168
2169static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2170{
2171        int ret;
2172
2173#ifdef DEBUG_LOCK_BITS
2174        printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08llx\n",
2175               __func__, (unsigned long long)ofs, (unsigned long long)len);
2176        cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2177                ofs, len, NULL);
2178#endif
2179
2180        ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2181                                        ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
2182
2183#ifdef DEBUG_LOCK_BITS
2184        printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2185               __func__, ret);
2186        cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2187                ofs, len, NULL);
2188#endif
2189
2190        return ret;
2191}
2192
2193static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
2194                                  uint64_t len)
2195{
2196        return cfi_varsize_frob(mtd, do_getlockstatus_oneblock,
2197                                ofs, len, NULL) ? 1 : 0;
2198}
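
/*
 * A hedged sketch of how the lock operations above are typically used:
 * unlock a region before programming it, then re-lock it afterwards.
 * Assumes a registered mtd_info on a block-locking part; illustrative
 * only.
 */
#if 0
static int unlock_write_relock_sketch(struct mtd_info *mtd,
                                      loff_t ofs, uint64_t len)
{
        int ret;

        if (mtd_is_locked(mtd, ofs, len) > 0) {
                ret = mtd_unlock(mtd, ofs, len);
                if (ret)
                        return ret;
        }

        /* ... program or erase the region here ... */

        return mtd_lock(mtd, ofs, len);
}
#endif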
2199
2200#ifdef CONFIG_MTD_OTP
2201
2202typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
2203                        u_long data_offset, u_char *buf, u_int size,
2204                        u_long prot_offset, u_int groupno, u_int groupsize);
2205
2206static int __xipram
2207do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
2208            u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2209{
2210        struct cfi_private *cfi = map->fldrv_priv;
2211        int ret;
2212
2213        mutex_lock(&chip->mutex);
2214        ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
2215        if (ret) {
2216                mutex_unlock(&chip->mutex);
2217                return ret;
2218        }
2219
2220        /* let's ensure we're not reading back cached data from array mode */
2221        INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2222
2223        xip_disable(map, chip, chip->start);
2224        if (chip->state != FL_JEDEC_QUERY) {
2225                map_write(map, CMD(0x90), chip->start);
2226                chip->state = FL_JEDEC_QUERY;
2227        }
2228        map_copy_from(map, buf, chip->start + offset, size);
2229        xip_enable(map, chip, chip->start);
2230
2231        /* then ensure we don't keep OTP data in the cache */
2232        INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2233
2234        put_chip(map, chip, chip->start);
2235        mutex_unlock(&chip->mutex);
2236        return 0;
2237}
2238
2239static int
2240do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
2241             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2242{
2243        int ret;
2244
2245        while (size) {
2246                unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
2247                int gap = offset - bus_ofs;
2248                int n = min_t(int, size, map_bankwidth(map)-gap);
2249                map_word datum = map_word_ff(map);
2250
2251                datum = map_word_load_partial(map, datum, buf, gap, n);
2252                ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
2253                if (ret)
2254                        return ret;
2255
2256                offset += n;
2257                buf += n;
2258                size -= n;
2259        }
2260
2261        return 0;
2262}
2263
2264static int
2265do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
2266            u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2267{
2268        struct cfi_private *cfi = map->fldrv_priv;
2269        map_word datum;
2270
2271        /* make sure area matches group boundaries */
2272        if (size != grpsz)
2273                return -EXDEV;
2274
2275        datum = map_word_ff(map);
2276        datum = map_word_clr(map, datum, CMD(1 << grpno));
2277        return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
2278}
2279
2280static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2281                                 size_t *retlen, u_char *buf,
2282                                 otp_op_t action, int user_regs)
2283{
2284        struct map_info *map = mtd->priv;
2285        struct cfi_private *cfi = map->fldrv_priv;
2286        struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2287        struct flchip *chip;
2288        struct cfi_intelext_otpinfo *otp;
2289        u_long devsize, reg_prot_offset, data_offset;
2290        u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
2291        u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
2292        int ret;
2293
2294        *retlen = 0;
2295
2296        /* Check that we actually have some OTP registers */
2297        if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
2298                return -ENODATA;
2299
2300        /* we need real chips here, not virtual ones */
2301        devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
2302        chip_step = devsize >> cfi->chipshift;
2303        chip_num = 0;
2304
2305        /* Some chips have OTP located in the _top_ partition only.
2306           For example: Intel 28F256L18T (T means top-parameter device) */
2307        if (cfi->mfr == CFI_MFR_INTEL) {
2308                switch (cfi->id) {
2309                case 0x880b:
2310                case 0x880c:
2311                case 0x880d:
2312                        chip_num = chip_step - 1;
2313                }
2314        }
2315
2316        for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
2317                chip = &cfi->chips[chip_num];
2318                otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
2319
2320                /* first OTP region */
2321                field = 0;
2322                reg_prot_offset = extp->ProtRegAddr;
2323                reg_fact_groups = 1;
2324                reg_fact_size = 1 << extp->FactProtRegSize;
2325                reg_user_groups = 1;
2326                reg_user_size = 1 << extp->UserProtRegSize;
2327
2328                while (len > 0) {
2329                        /* flash geometry fixup */
2330                        data_offset = reg_prot_offset + 1;
2331                        data_offset *= cfi->interleave * cfi->device_type;
2332                        reg_prot_offset *= cfi->interleave * cfi->device_type;
2333                        reg_fact_size *= cfi->interleave;
2334                        reg_user_size *= cfi->interleave;
2335
2336                        if (user_regs) {
2337                                groups = reg_user_groups;
2338                                groupsize = reg_user_size;
2339                                /* skip over factory reg area */
2340                                groupno = reg_fact_groups;
2341                                data_offset += reg_fact_groups * reg_fact_size;
2342                        } else {
2343                                groups = reg_fact_groups;
2344                                groupsize = reg_fact_size;
2345                                groupno = 0;
2346                        }
2347
2348                        while (len > 0 && groups > 0) {
2349                                if (!action) {
2350                                        /*
2351                                         * Special case: if action is NULL
2352                                         * we fill buf with otp_info records.
2353                                         */
2354                                        struct otp_info *otpinfo;
2355                                        map_word lockword;
2356                                        if (len <= sizeof(struct otp_info))
2357                                                return -ENOSPC;
2358                                        len -= sizeof(struct otp_info);
2359                                        ret = do_otp_read(map, chip,
2360                                                          reg_prot_offset,
2361                                                          (u_char *)&lockword,
2362                                                          map_bankwidth(map),
2363                                                          0, 0,  0);
2364                                        if (ret)
2365                                                return ret;
2366                                        otpinfo = (struct otp_info *)buf;
2367                                        otpinfo->start = from;
2368                                        otpinfo->length = groupsize;
2369                                        otpinfo->locked =
2370                                           !map_word_bitsset(map, lockword,
2371                                                             CMD(1 << groupno));
2372                                        from += groupsize;
2373                                        buf += sizeof(*otpinfo);
2374                                        *retlen += sizeof(*otpinfo);
2375                                } else if (from >= groupsize) {
2376                                        from -= groupsize;
2377                                        data_offset += groupsize;
2378                                } else {
2379                                        int size = groupsize;
2380                                        data_offset += from;
2381                                        size -= from;
2382                                        from = 0;
2383                                        if (size > len)
2384                                                size = len;
2385                                        ret = action(map, chip, data_offset,
2386                                                     buf, size, reg_prot_offset,
2387                                                     groupno, groupsize);
2388                                        if (ret < 0)
2389                                                return ret;
2390                                        buf += size;
2391                                        len -= size;
2392                                        *retlen += size;
2393                                        data_offset += size;
2394                                }
2395                                groupno++;
2396                                groups--;
2397                        }
2398
2399                        /* next OTP region */
2400                        if (++field == extp->NumProtectionFields)
2401                                break;
2402                        reg_prot_offset = otp->ProtRegAddr;
2403                        reg_fact_groups = otp->FactGroups;
2404                        reg_fact_size = 1 << otp->FactProtRegSize;
2405                        reg_user_groups = otp->UserGroups;
2406                        reg_user_size = 1 << otp->UserProtRegSize;
2407                        otp++;
2408                }
2409        }
2410
2411        return 0;
2412}
2413
2414static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
2415                                           size_t len, size_t *retlen,
2416                                            u_char *buf)
2417{
2418        return cfi_intelext_otp_walk(mtd, from, len, retlen,
2419                                     buf, do_otp_read, 0);
2420}
2421
2422static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
2423                                           size_t len, size_t *retlen,
2424                                            u_char *buf)
2425{
2426        return cfi_intelext_otp_walk(mtd, from, len, retlen,
2427                                     buf, do_otp_read, 1);
2428}
2429
2430static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
2431                                            size_t len, size_t *retlen,
2432                                             u_char *buf)
2433{
2434        return cfi_intelext_otp_walk(mtd, from, len, retlen,
2435                                     buf, do_otp_write, 1);
2436}
2437
2438static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
2439                                           loff_t from, size_t len)
2440{
2441        size_t retlen;
2442        return cfi_intelext_otp_walk(mtd, from, len, &retlen,
2443                                     NULL, do_otp_lock, 1);
2444}
2445
2446static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd, size_t len,
2447                                           size_t *retlen, struct otp_info *buf)
2449{
2450        return cfi_intelext_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
2451                                     NULL, 0);
2452}
2453
2454static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd, size_t len,
2455                                           size_t *retlen, struct otp_info *buf)
2456{
2457        return cfi_intelext_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
2458                                     NULL, 1);
2459}
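
/*
 * A hedged caller sketch for the OTP walk above: enumerate the user
 * OTP groups (the action == NULL case) and print each record.  Assumes
 * the mtd_get_user_prot_info() wrapper matching the signature used in
 * this file; illustrative only.
 */
#if 0
static int otp_dump_sketch(struct mtd_info *mtd)
{
        struct otp_info info[4];
        size_t retlen = 0;
        int ret, i;

        ret = mtd_get_user_prot_info(mtd, sizeof(info), &retlen, info);
        if (ret)
                return ret;

        for (i = 0; i < retlen / sizeof(*info); i++)
                pr_info("OTP group at 0x%x, %u bytes, %slocked\n",
                        info[i].start, info[i].length,
                        info[i].locked ? "" : "un");
        return 0;
}
#endif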
2460
2461#endif
2462
2463static void cfi_intelext_save_locks(struct mtd_info *mtd)
2464{
2465        struct mtd_erase_region_info *region;
2466        int block, status, i;
2467        unsigned long adr;
2468        size_t len;
2469
2470        for (i = 0; i < mtd->numeraseregions; i++) {
2471                region = &mtd->eraseregions[i];
2472                if (!region->lockmap)
2473                        continue;
2474
2475                for (block = 0; block < region->numblocks; block++){
2476                        len = region->erasesize;
2477                        adr = region->offset + block * len;
2478
2479                        status = cfi_varsize_frob(mtd,
2480                                        do_getlockstatus_oneblock, adr, len, NULL);
2481                        if (status)
2482                                set_bit(block, region->lockmap);
2483                        else
2484                                clear_bit(block, region->lockmap);
2485                }
2486        }
2487}
2488
2489static int cfi_intelext_suspend(struct mtd_info *mtd)
2490{
2491        struct map_info *map = mtd->priv;
2492        struct cfi_private *cfi = map->fldrv_priv;
2493        struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2494        int i;
2495        struct flchip *chip;
2496        int ret = 0;
2497
2498        if ((mtd->flags & MTD_POWERUP_LOCK)
2499            && extp && (extp->FeatureSupport & (1 << 5)))
2500                cfi_intelext_save_locks(mtd);
2501
2502        for (i = 0; !ret && i < cfi->numchips; i++) {
2503                chip = &cfi->chips[i];
2504
2505                mutex_lock(&chip->mutex);
2506
2507                switch (chip->state) {
2508                case FL_READY:
2509                case FL_STATUS:
2510                case FL_CFI_QUERY:
2511                case FL_JEDEC_QUERY:
2512                        if (chip->oldstate == FL_READY) {
2513                                /* place the chip in a known state before suspend */
2514                                map_write(map, CMD(0xFF), cfi->chips[i].start);
2515                                chip->oldstate = chip->state;
2516                                chip->state = FL_PM_SUSPENDED;
2517                                /* No need to wake_up() on this state change -
2518                                 * as the whole point is that nobody can do anything
2519                                 * with the chip now anyway.
2520                                 */
2521                        } else {
2522                                /* There seems to be an operation pending. We must wait for it. */
2523                                printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
2524                                ret = -EAGAIN;
2525                        }
2526                        break;
2527                default:
2528                        /* Should we actually wait? Once upon a time these routines weren't
2529                           allowed to. Or should we return -EAGAIN, because the upper layers
2530                           ought to have already shut down anything which was using the device
2531                           anyway? The latter for now. */
2532                        printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
2533                        ret = -EAGAIN;        /* fall through */
2534                case FL_PM_SUSPENDED:
2535                        break;
2536                }
2537                mutex_unlock(&chip->mutex);
2538        }
2539
2540        /* Unlock the chips again */
2541
2542        if (ret) {
2543                for (i--; i >= 0; i--) {
2544                        chip = &cfi->chips[i];
2545
2546                        mutex_lock(&chip->mutex);
2547
2548                        if (chip->state == FL_PM_SUSPENDED) {
2549                                /* No need to force it into a known state here,
2550                                   because we're returning failure, and it didn't
2551                                   get power cycled */
2552                                chip->state = chip->oldstate;
2553                                chip->oldstate = FL_READY;
2554                                wake_up(&chip->wq);
2555                        }
2556                        mutex_unlock(&chip->mutex);
2557                }
2558        }
2559
2560        return ret;
2561}
2562
2563static void cfi_intelext_restore_locks(struct mtd_info *mtd)
2564{
2565        struct mtd_erase_region_info *region;
2566        int block, i;
2567        unsigned long adr;
2568        size_t len;
2569
2570        for (i = 0; i < mtd->numeraseregions; i++) {
2571                region = &mtd->eraseregions[i];
2572                if (!region->lockmap)
2573                        continue;
2574
2575                for_each_clear_bit(block, region->lockmap, region->numblocks) {
2576                        len = region->erasesize;
2577                        adr = region->offset + block * len;
2578                        cfi_intelext_unlock(mtd, adr, len);
2579                }
2580        }
2581}
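
/*
 * The lockmap used above is one bit per eraseblock (set = locked).  A
 * sketch of how such a bitmap would be allocated and sized; the real
 * allocation happens in this driver's setup path, which is not shown
 * in this excerpt.  Illustrative only.
 */
#if 0
static int lockmap_alloc_sketch(struct mtd_erase_region_info *region)
{
        region->lockmap = kmalloc(BITS_TO_LONGS(region->numblocks) *
                                  sizeof(long), GFP_KERNEL);
        if (!region->lockmap)
                return -ENOMEM;

        bitmap_zero(region->lockmap, region->numblocks);
        return 0;
}
#endif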
2582
2583static void cfi_intelext_resume(struct mtd_info *mtd)
2584{
2585        struct map_info *map = mtd->priv;
2586        struct cfi_private *cfi = map->fldrv_priv;
2587        struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2588        int i;
2589        struct flchip *chip;
2590
2591        for (i = 0; i < cfi->numchips; i++) {
2592
2593                chip = &cfi->chips[i];
2594
2595                mutex_lock(&chip->mutex);
2596
2597                /* Go to known state. Chip may have been power cycled */
2598                if (chip->state == FL_PM_SUSPENDED) {
2599                        /* Refresh LH28F640BF Partition Config. Register */
2600                        fixup_LH28F640BF(mtd);
2601                        map_write(map, CMD(0xFF), cfi->chips[i].start);
2602                        chip->oldstate = chip->state = FL_READY;
2603                        wake_up(&chip->wq);
2604                }
2605
2606                mutex_unlock(&chip->mutex);
2607        }
2608
2609        if ((mtd->flags & MTD_POWERUP_LOCK)
2610            && extp && (extp->FeatureSupport & (1 << 5)))
2611                cfi_intelext_restore_locks(mtd);
2612}
2613
2614static int cfi_intelext_reset(struct mtd_info *mtd)
2615{
2616        struct map_info *map = mtd->priv;
2617        struct cfi_private *cfi = map->fldrv_priv;
2618        int i, ret;
2619
2620        for (i = 0; i < cfi->numchips; i++) {
2621                struct flchip *chip = &cfi->chips[i];
2622
2623                /* force the completion of any ongoing operation
2624                   and switch to array mode so any bootloader in
2625                   flash is accessible for soft reboot. */
2626                mutex_lock(&chip->mutex);
2627                ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
2628                if (!ret) {
2629                        map_write(map, CMD(0xff), chip->start);
2630                        chip->state = FL_SHUTDOWN;
2631                        put_chip(map, chip, chip->start);
2632                }
2633                mutex_unlock(&chip->mutex);
2634        }
2635
2636        return 0;
2637}
2638
2639static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
2640                               void *v)
2641{
2642        struct mtd_info *mtd;
2643
2644        mtd = container_of(nb, struct mtd_info, reboot_notifier);
2645        cfi_intelext_reset(mtd);
2646        return NOTIFY_DONE;
2647}
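
/*
 * The notifier above is wired up in this driver's setup path (not in
 * this excerpt).  The registration pattern, sketched for reference
 * (illustrative only):
 */
#if 0
static void reboot_hook_sketch(struct mtd_info *mtd)
{
        mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;
        register_reboot_notifier(&mtd->reboot_notifier);
}
#endif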
2648
2649static void cfi_intelext_destroy(struct mtd_info *mtd)
2650{
2651        struct map_info *map = mtd->priv;
2652        struct cfi_private *cfi = map->fldrv_priv;
2653        struct mtd_erase_region_info *region;
2654        int i;
2655        cfi_intelext_reset(mtd);
2656        unregister_reboot_notifier(&mtd->reboot_notifier);
2657        kfree(cfi->cmdset_priv);
2658        kfree(cfi->cfiq);
2659        kfree(cfi->chips[0].priv);
2660        kfree(cfi);
2661        for (i = 0; i < mtd->numeraseregions; i++) {
2662                region = &mtd->eraseregions[i];
2663                kfree(region->lockmap);
2664        }
2665        kfree(mtd->eraseregions);
2666}
2667
2668MODULE_LICENSE("GPL");
2669MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
2670MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
2671MODULE_ALIAS("cfi_cmdset_0003");
2672MODULE_ALIAS("cfi_cmdset_0200");
2673