linux/drivers/mtd/chips/cfi_cmdset_0001.c
/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 *
 * 10/10/2000   Nicolas Pitre <nico@fluxnic.net>
 *      - completely revamped method functions so they are aware and
 *        independent of the flash geometry (buswidth, interleave, etc.)
 *      - scalability vs code size is completely set at compile-time
 *        (see include/linux/mtd/cfi.h for selection)
 *      - optimized write buffer method
 * 02/05/2002   Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 *      - reworked lock/unlock/erase support for var size flash
 * 21/03/2007   Rodolfo Giometti <giometti@linux.it>
 *      - auto unlock sectors on resume for auto locking flash on power up
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/bitmap.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

/* debugging: turns off buffer write mode if set to 1 */
#define FORCE_WORD_WRITE 0

/* Intel chips */
#define I82802AB        0x00ad
#define I82802AC        0x00ac
#define PF38F4476       0x881c
#define M28F00AP30      0x8963
/* STMicroelectronics chips */
#define M50LPW080       0x002F
#define M50FLW080A      0x0080
#define M50FLW080B      0x0081
/* Atmel chips */
#define AT49BV640D      0x02de
#define AT49BV640DT     0x02db
/* Sharp chips */
#define LH28F640BFHE_PTTL90     0x00b0
#define LH28F640BFHE_PBTL90     0x00b1
#define LH28F640BFHE_PTTL70A    0x00b2
#define LH28F640BFHE_PBTL70A    0x00b3

static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
                                  uint64_t len);
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info(struct mtd_info *, size_t,
                                           size_t *, struct otp_info *);
static int cfi_intelext_get_user_prot_info(struct mtd_info *, size_t,
                                           size_t *, struct otp_info *);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
                     size_t *retlen, void **virt, resource_size_t *phys);
static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len);

static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"



/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
        .probe          = NULL, /* Not usable directly */
        .destroy        = cfi_intelext_destroy,
        .name           = "cfi_cmdset_0001",
        .module         = THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
        int i;
        printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
        printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
        printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
        printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
        printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
        printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
        printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
        printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
        printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
        printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
        printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
        printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
        printk("     - Extended Flash Array:    %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
        for (i=11; i<32; i++) {
                if (extp->FeatureSupport & (1<<i))
                        printk("     - Unknown Bit %X:      supported\n", i);
        }

        printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
        printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
        for (i=1; i<8; i++) {
                if (extp->SuspendCmdSupport & (1<<i))
                        printk("     - Unknown Bit %X:               supported\n", i);
        }

        printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
        printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
        printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
        for (i=2; i<3; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }
        printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
        printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
        for (i=6; i<16; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }

        printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
               extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
        if (extp->VppOptimal)
                printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
                       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif
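
/*
 * For reference while reading the fixups below: the FeatureSupport bits
 * decoded by cfi_tell_features() above are, from bit 0 upward: chip erase
 * (1), suspend erase (2), suspend program (4), legacy lock/unlock (8),
 * queued erase (16), instant block lock (32), protection bits (64),
 * page-mode read (128), synchronous read (256), simultaneous operations
 * (512) and extended flash array (1024).  The code below tests the raw
 * values (e.g. FeatureSupport & 512) rather than named constants.
 */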

/* Atmel chips don't use the same PRI format as Intel chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;
        struct cfi_pri_atmel atmel_pri;
        uint32_t features = 0;

        /* Reverse byteswapping */
        extp->FeatureSupport = cpu_to_le32(extp->FeatureSupport);
        extp->BlkStatusRegMask = cpu_to_le16(extp->BlkStatusRegMask);
        extp->ProtRegAddr = cpu_to_le16(extp->ProtRegAddr);

        memcpy(&atmel_pri, extp, sizeof(atmel_pri));
        memset((char *)extp + 5, 0, sizeof(*extp) - 5);

        printk(KERN_ERR "Atmel Features: %02x\n", atmel_pri.Features);

        if (atmel_pri.Features & 0x01) /* chip erase supported */
                features |= (1<<0);
        if (atmel_pri.Features & 0x02) /* erase suspend supported */
                features |= (1<<1);
        if (atmel_pri.Features & 0x04) /* program suspend supported */
                features |= (1<<2);
        if (atmel_pri.Features & 0x08) /* simultaneous operations supported */
                features |= (1<<9);
        if (atmel_pri.Features & 0x20) /* page mode read supported */
                features |= (1<<7);
        if (atmel_pri.Features & 0x40) /* queued erase supported */
                features |= (1<<4);
        if (atmel_pri.Features & 0x80) /* Protection bits supported */
                features |= (1<<6);

        extp->FeatureSupport = features;

        /* burst write mode not supported */
        cfi->cfiq->BufWriteTimeoutTyp = 0;
        cfi->cfiq->BufWriteTimeoutMax = 0;
}

static void fixup_at49bv640dx_lock(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        cfip->FeatureSupport |= (1 << 5);
        mtd->flags |= MTD_POWERUP_LOCK;
}

#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel StrataFlash chips prior to FPO revision C have bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
                            "erase on write disabled.\n");
        extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        if (cfip && (cfip->FeatureSupport&4)) {
                cfip->FeatureSupport &= ~4;
                printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
        }
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        cfi->cfiq->BufWriteTimeoutTyp = 0;      /* Not supported */
        cfi->cfiq->BufWriteTimeoutMax = 0;      /* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        /* Note this is done after the region info is endian swapped */
        cfi->cfiq->EraseRegionInfo[1] =
                (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}
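
/*
 * A note on the CFI EraseRegionInfo encoding manipulated above (worked
 * example, not driver code): the low 16 bits hold the number of erase
 * blocks minus one, and the high 16 bits hold the block size in units of
 * 256 bytes.  Forcing the low word to 0x3e therefore declares
 * 0x3e + 1 = 63 blocks in region 1, while cfi_intelext_setup() below
 * recovers the erase size as ((EraseRegionInfo >> 8) & ~0xff) * interleave,
 * i.e. (high word) * 256 bytes scaled by the interleave.
 */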

static int is_LH28F640BF(struct cfi_private *cfi)
{
        /* Sharp LH28F640BF Family */
        if (cfi->mfr == CFI_MFR_SHARP && (
            cfi->id == LH28F640BFHE_PTTL90 || cfi->id == LH28F640BFHE_PBTL90 ||
            cfi->id == LH28F640BFHE_PTTL70A || cfi->id == LH28F640BFHE_PBTL70A))
                return 1;
        return 0;
}

static void fixup_LH28F640BF(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        /* Reset the Partition Configuration Register on LH28F640BF
         * to a single partition (PCR = 0x000): PCR is embedded into A0-A15. */
        if (is_LH28F640BF(cfi)) {
                printk(KERN_INFO "Reset Partition Config. Register: 1 Partition of 4 planes\n");
                map_write(map, CMD(0x60), 0);
                map_write(map, CMD(0x04), 0);

                /* We have set one single partition thus
                 * Simultaneous Operations are not allowed */
                printk(KERN_INFO "cfi_cmdset_0001: Simultaneous Operations disabled\n");
                extp->FeatureSupport &= ~512;
        }
}

static void fixup_use_point(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        if (!mtd->_point && map_is_linear(map)) {
                mtd->_point   = cfi_intelext_point;
                mtd->_unpoint = cfi_intelext_unpoint;
        }
}

static void fixup_use_write_buffers(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        if (cfi->cfiq->BufWriteTimeoutTyp) {
                printk(KERN_INFO "Using buffer write method\n" );
                mtd->_write = cfi_intelext_write_buffers;
                mtd->_writev = cfi_intelext_writev;
        }
}

/*
 * Some chips power-up with all sectors locked by default.
 */
static void fixup_unlock_powerup_lock(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        if (cfip->FeatureSupport&32) {
                printk(KERN_INFO "Using auto-unlock on power-up/resume\n" );
                mtd->flags |= MTD_POWERUP_LOCK;
        }
}

static struct cfi_fixup cfi_fixup_table[] = {
        { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
        { CFI_MFR_ATMEL, AT49BV640D, fixup_at49bv640dx_lock },
        { CFI_MFR_ATMEL, AT49BV640DT, fixup_at49bv640dx_lock },
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend },
#endif
#if !FORCE_WORD_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
#endif
        { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct },
        { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb },
        { CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock },
        { CFI_MFR_SHARP, CFI_ID_ANY, fixup_unlock_powerup_lock },
        { CFI_MFR_SHARP, CFI_ID_ANY, fixup_LH28F640BF },
        { 0, 0, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
        { CFI_MFR_INTEL, I82802AB,   fixup_use_fwh_lock },
        { CFI_MFR_INTEL, I82802AC,   fixup_use_fwh_lock },
        { CFI_MFR_ST,    M50LPW080,  fixup_use_fwh_lock },
        { CFI_MFR_ST,    M50FLW080A, fixup_use_fwh_lock },
        { CFI_MFR_ST,    M50FLW080B, fixup_use_fwh_lock },
        { 0, 0, NULL }
};

static struct cfi_fixup fixup_table[] = {
        /* The CFI vendor IDs and the JEDEC vendor IDs appear
         * to be common.  It is likely that the device IDs are as
         * well.  This table picks all the cases where we know
         * that is the case.
         */
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point },
        { 0, 0, NULL }
};
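
/*
 * How these tables are consumed: cfi_fixup() (drivers/mtd/chips/cfi_util.c)
 * walks a table and invokes every entry whose manufacturer and device IDs
 * match the probed chip, with CFI_MFR_ANY and CFI_ID_ANY as wildcards.
 * The matching loop is essentially:
 *
 *      for (f = fixups; f->fixup; f++) {
 *              if ((f->mfr == CFI_MFR_ANY || f->mfr == cfi->mfr) &&
 *                  (f->id == CFI_ID_ANY || f->id == cfi->id))
 *                      f->fixup(mtd);
 *      }
 *
 * Entries run in table order, so e.g. fixup_convert_atmel_pri is listed
 * before any fixup that interprets the converted FeatureSupport bits.
 */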

static void cfi_fixup_major_minor(struct cfi_private *cfi,
                                                struct cfi_pri_intelext *extp)
{
        if (cfi->mfr == CFI_MFR_INTEL &&
                        cfi->id == PF38F4476 && extp->MinorVersion == '3')
                extp->MinorVersion = '1';
}

static int cfi_is_micron_28F00AP30(struct cfi_private *cfi, struct flchip *chip)
{
        /*
         * Micron (formerly Numonyx) 1Gbit bottom-boot chips are buggy w.r.t.
         * Erase Suspend for their small erase blocks (0x8000)
         */
        if (cfi->mfr == CFI_MFR_INTEL && cfi->id == M28F00AP30)
                return 1;
        return 0;
}

static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp;
        unsigned int extra_size = 0;
        unsigned int extp_size = sizeof(*extp);

 again:
        extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
        if (!extp)
                return NULL;

        cfi_fixup_major_minor(cfi, extp);

        if (extp->MajorVersion != '1' ||
            (extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
                printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
                       "version %c.%c.\n",  extp->MajorVersion,
                       extp->MinorVersion);
                kfree(extp);
                return NULL;
        }

        /* Do some byteswapping if necessary */
        extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
        extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
        extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

        if (extp->MinorVersion >= '0') {
                extra_size = 0;

                /* Protection Register info */
                if (extp->NumProtectionFields)
                        extra_size += (extp->NumProtectionFields - 1) *
                                      sizeof(struct cfi_intelext_otpinfo);
        }

        if (extp->MinorVersion >= '1') {
                /* Burst Read info */
                extra_size += 2;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                extra_size += extp->extra[extra_size - 1];
        }

        if (extp->MinorVersion >= '3') {
                int nb_parts, i;

                /* Number of hardware-partitions */
                extra_size += 1;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                nb_parts = extp->extra[extra_size - 1];

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        extra_size += 2;

                for (i = 0; i < nb_parts; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
                        extra_size += sizeof(*rinfo);
                        if (extp_size < sizeof(*extp) + extra_size)
                                goto need_more;
                        rinfo->NumIdentPartitions = le16_to_cpu(rinfo->NumIdentPartitions);
                        extra_size += (rinfo->NumBlockTypes - 1)
                                      * sizeof(struct cfi_intelext_blockinfo);
                }

                if (extp->MinorVersion >= '4')
                        extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

                if (extp_size < sizeof(*extp) + extra_size) {
                        need_more:
                        extp_size = sizeof(*extp) + extra_size;
                        kfree(extp);
                        if (extp_size > 4096) {
                                printk(KERN_ERR
                                        "%s: cfi_pri_intelext is too fat\n",
                                        __func__);
                                return NULL;
                        }
                        goto again;
                }
        }

        return extp;
}

struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct mtd_info *mtd;
        int i;

        mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
        if (!mtd)
                return NULL;
        mtd->priv = map;
        mtd->type = MTD_NORFLASH;

        /* Fill in the default mtd operations */
        mtd->_erase   = cfi_intelext_erase_varsize;
        mtd->_read    = cfi_intelext_read;
        mtd->_write   = cfi_intelext_write_words;
        mtd->_sync    = cfi_intelext_sync;
        mtd->_lock    = cfi_intelext_lock;
        mtd->_unlock  = cfi_intelext_unlock;
        mtd->_is_locked = cfi_intelext_is_locked;
        mtd->_suspend = cfi_intelext_suspend;
        mtd->_resume  = cfi_intelext_resume;
        mtd->flags   = MTD_CAP_NORFLASH;
        mtd->name    = map->name;
        mtd->writesize = 1;
        mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
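        /*
         * Worked example: two interleaved chips (interleave = 2), each
         * with a 2^5 = 32 byte write buffer (MaxBufWriteSize = 5), give
         * writebufsize = 2 << 5 = 64 bytes.
         */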

        mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

        if (cfi->cfi_mode == CFI_MODE_CFI) {
                /*
                 * It's a real CFI chip, not one for which the probe
                 * routine faked a CFI structure. So we read the feature
                 * table from it.
                 */
                __u16 adr = primary ? cfi->cfiq->P_ADR : cfi->cfiq->A_ADR;
                struct cfi_pri_intelext *extp;

                extp = read_pri_intelext(map, adr);
                if (!extp) {
                        kfree(mtd);
                        return NULL;
                }

                /* Install our own private info structure */
                cfi->cmdset_priv = extp;

                cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
                /* Tell the user about it in lots of lovely detail */
                cfi_tell_features(extp);
#endif

                if (extp->SuspendCmdSupport & 1) {
                        printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
                }
        }
        else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
                /* Apply jedec specific fixups */
                cfi_fixup(mtd, jedec_fixup_table);
        }
        /* Apply generic fixups */
        cfi_fixup(mtd, fixup_table);

        for (i=0; i< cfi->numchips; i++) {
                if (cfi->cfiq->WordWriteTimeoutTyp)
                        cfi->chips[i].word_write_time =
                                1<<cfi->cfiq->WordWriteTimeoutTyp;
                else
                        cfi->chips[i].word_write_time = 50000;

                if (cfi->cfiq->BufWriteTimeoutTyp)
                        cfi->chips[i].buffer_write_time =
                                1<<cfi->cfiq->BufWriteTimeoutTyp;
                /* No default; if it isn't specified, we won't use it */

                if (cfi->cfiq->BlockEraseTimeoutTyp)
                        cfi->chips[i].erase_time =
                                1000<<cfi->cfiq->BlockEraseTimeoutTyp;
                else
                        cfi->chips[i].erase_time = 2000000;

                if (cfi->cfiq->WordWriteTimeoutTyp &&
                    cfi->cfiq->WordWriteTimeoutMax)
                        cfi->chips[i].word_write_time_max =
                                1<<(cfi->cfiq->WordWriteTimeoutTyp +
                                    cfi->cfiq->WordWriteTimeoutMax);
                else
                        cfi->chips[i].word_write_time_max = 50000 * 8;

                if (cfi->cfiq->BufWriteTimeoutTyp &&
                    cfi->cfiq->BufWriteTimeoutMax)
                        cfi->chips[i].buffer_write_time_max =
                                1<<(cfi->cfiq->BufWriteTimeoutTyp +
                                    cfi->cfiq->BufWriteTimeoutMax);

                if (cfi->cfiq->BlockEraseTimeoutTyp &&
                    cfi->cfiq->BlockEraseTimeoutMax)
                        cfi->chips[i].erase_time_max =
                                1000<<(cfi->cfiq->BlockEraseTimeoutTyp +
                                       cfi->cfiq->BlockEraseTimeoutMax);
                else
                        cfi->chips[i].erase_time_max = 2000000 * 8;

                cfi->chips[i].ref_point_counter = 0;
                init_waitqueue_head(&(cfi->chips[i].wq));
        }

        map->fldrv = &cfi_intelext_chipdrv;

        return cfi_intelext_setup(mtd);
}
struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
EXPORT_SYMBOL_GPL(cfi_cmdset_0200);
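
/*
 * Usage sketch (hypothetical board code, not part of this driver): map
 * drivers normally reach cfi_cmdset_0001() indirectly, by probing for CFI
 * and letting the probe dispatch on the primary vendor command set ID
 * (0x0001, or 0x0003/0x0200 via the aliases above).  The physical address
 * and size below are made up:
 *
 *      static struct map_info board_map = {
 *              .name      = "board-nor",
 *              .phys      = 0x10000000,
 *              .size      = 0x00800000,
 *              .bankwidth = 2,
 *      };
 *
 *      board_map.virt = ioremap(board_map.phys, board_map.size);
 *      simple_map_init(&board_map);
 *      mtd = do_map_probe("cfi_probe", &board_map);
 *      if (mtd)
 *              mtd_device_register(mtd, NULL, 0);
 */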

static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long offset = 0;
        int i,j;
        unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

        /* printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips); */

        mtd->size = devsize * cfi->numchips;

        mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
        mtd->eraseregions = kcalloc(mtd->numeraseregions,
                                    sizeof(struct mtd_erase_region_info),
                                    GFP_KERNEL);
        if (!mtd->eraseregions)
                goto setup_err;

        for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
                unsigned long ernum, ersize;
                ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
                ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

                if (mtd->erasesize < ersize) {
                        mtd->erasesize = ersize;
                }
                for (j=0; j<cfi->numchips; j++) {
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
                        if (!mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap)
                                goto setup_err;
                }
                offset += (ersize * ernum);
        }

        if (offset != devsize) {
                /* Argh */
                printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
                goto setup_err;
        }

        for (i=0; i<mtd->numeraseregions; i++) {
                printk(KERN_DEBUG "erase region %d: offset=0x%llx,size=0x%x,blocks=%d\n",
                       i,(unsigned long long)mtd->eraseregions[i].offset,
                       mtd->eraseregions[i].erasesize,
                       mtd->eraseregions[i].numblocks);
        }

#ifdef CONFIG_MTD_OTP
        mtd->_read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
        mtd->_read_user_prot_reg = cfi_intelext_read_user_prot_reg;
        mtd->_write_user_prot_reg = cfi_intelext_write_user_prot_reg;
        mtd->_lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
        mtd->_get_fact_prot_info = cfi_intelext_get_fact_prot_info;
        mtd->_get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

        /* This function has the potential to distort the reality
           a bit and therefore should be called last. */
        if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
                goto setup_err;

        __module_get(THIS_MODULE);
        register_reboot_notifier(&mtd->reboot_notifier);
        return mtd;

 setup_err:
        if (mtd->eraseregions)
                for (i=0; i<cfi->cfiq->NumEraseRegions; i++)
                        for (j=0; j<cfi->numchips; j++)
                                kfree(mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap);
        kfree(mtd->eraseregions);
        kfree(mtd);
        kfree(cfi->cmdset_priv);
        return NULL;
}

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
                                        struct cfi_private **pcfi)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = *pcfi;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        /*
         * Probing of multi-partition flash chips.
         *
         * To support multiple partitions when available, we simply arrange
         * for each of them to have their own flchip structure even if they
         * are on the same physical chip.  This means completely recreating
         * a new cfi_private structure right here which is a blatant code
         * layering violation, but this is still the least intrusive
         * arrangement at this point. This can be rearranged in the future
         * if someone feels motivated enough.  --nico
         */
        if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
            && extp->FeatureSupport & (1 << 9)) {
                int offs = 0;
                struct cfi_private *newcfi;
                struct flchip *chip;
                struct flchip_shared *shared;
                int numregions, numparts, partshift, numvirtchips, i, j;

                /* Protection Register info */
                if (extp->NumProtectionFields)
                        offs = (extp->NumProtectionFields - 1) *
                               sizeof(struct cfi_intelext_otpinfo);

                /* Burst Read info */
                offs += extp->extra[offs+1]+2;

                /* Number of partition regions */
                numregions = extp->extra[offs];
                offs += 1;

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        offs += 2;

                /* Number of hardware partitions */
                numparts = 0;
                for (i = 0; i < numregions; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
                        numparts += rinfo->NumIdentPartitions;
                        offs += sizeof(*rinfo)
                                + (rinfo->NumBlockTypes - 1) *
                                  sizeof(struct cfi_intelext_blockinfo);
                }

                if (!numparts)
                        numparts = 1;

                /* Programming Region info */
                if (extp->MinorVersion >= '4') {
                        struct cfi_intelext_programming_regioninfo *prinfo;
                        prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
                        mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
                        mtd->flags &= ~MTD_BIT_WRITEABLE;
                        printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
                               map->name, mtd->writesize,
                               cfi->interleave * prinfo->ControlValid,
                               cfi->interleave * prinfo->ControlInvalid);
                }

                /*
                 * All functions below currently rely on all chips having
                 * the same geometry so we'll just assume that all hardware
                 * partitions are of the same size too.
                 */
                partshift = cfi->chipshift - __ffs(numparts);
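                /*
                 * Worked example (illustrative numbers): a 32 MiB chip
                 * (chipshift = 25) reporting numparts = 4 identical
                 * hardware partitions gives partshift = 25 - __ffs(4) = 23,
                 * i.e. four 8 MiB virtual chips per physical chip.
                 */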

                if ((1 << partshift) < mtd->erasesize) {
                        printk( KERN_ERR
                                "%s: bad number of hw partitions (%d)\n",
                                __func__, numparts);
                        return -EINVAL;
                }

                numvirtchips = cfi->numchips * numparts;
                newcfi = kmalloc(struct_size(newcfi, chips, numvirtchips),
                                 GFP_KERNEL);
                if (!newcfi)
                        return -ENOMEM;
                shared = kmalloc_array(cfi->numchips,
                                       sizeof(struct flchip_shared),
                                       GFP_KERNEL);
                if (!shared) {
                        kfree(newcfi);
                        return -ENOMEM;
                }
                memcpy(newcfi, cfi, sizeof(struct cfi_private));
                newcfi->numchips = numvirtchips;
                newcfi->chipshift = partshift;

                chip = &newcfi->chips[0];
                for (i = 0; i < cfi->numchips; i++) {
                        shared[i].writing = shared[i].erasing = NULL;
                        mutex_init(&shared[i].lock);
                        for (j = 0; j < numparts; j++) {
                                *chip = cfi->chips[i];
                                chip->start += j << partshift;
                                chip->priv = &shared[i];
                                /* those should be reset too since
                                   they create memory references. */
                                init_waitqueue_head(&chip->wq);
                                mutex_init(&chip->mutex);
                                chip++;
                        }
                }

                printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
                                  "--> %d partitions of %d KiB\n",
                                  map->name, cfi->numchips, cfi->interleave,
                                  newcfi->numchips, 1<<(newcfi->chipshift-10));

                map->fldrv_priv = newcfi;
                *pcfi = newcfi;
                kfree(cfi);
        }

        return 0;
}

/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */
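/*
 * Quick reference for the Intel/Sharp opcodes and status bits used below
 * (standard command-set 0x0001 values, summarized here for readability):
 *
 *   0xff read array, 0x70 read status register, 0x50 clear status,
 *   0x20 block erase setup, 0xd0 confirm/resume/unlock, 0xb0 suspend,
 *   0x40 word program, 0xe8 buffered program setup, 0x60 lock setup,
 *   0x01 set block lock bit, 0x2f set lock-down bit.
 *
 * Status register: bit 7 (0x80) = write state machine ready, bit 6 (0x40)
 * = erase suspended, bit 2 (0x04) = program suspended, bit 0 (0x01) =
 * partition write status on multi-partition parts.
 */
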
static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        DECLARE_WAITQUEUE(wait, current);
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
        unsigned long timeo = jiffies + HZ;

        /* Prevent setting state FL_SYNCING for chip in suspended state. */
        if (mode == FL_SYNCING && chip->oldstate != FL_READY)
                goto sleep;

        switch (chip->state) {

        case FL_STATUS:
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        /* At this point we're fine with write operations
                           in other partitions as they don't conflict. */
                        if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
                                break;

                        mutex_unlock(&chip->mutex);
                        cfi_udelay(1);
                        mutex_lock(&chip->mutex);
                        /* Someone else might have been playing with it. */
                        return -EAGAIN;
                }
                fallthrough;
        case FL_READY:
        case FL_CFI_QUERY:
        case FL_JEDEC_QUERY:
                return 0;

        case FL_ERASING:
                if (!cfip ||
                    !(cfip->FeatureSupport & 2) ||
                    !(mode == FL_READY || mode == FL_POINT ||
                     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
                        goto sleep;

                /* Do not allow suspend if the read/write falls in the
                   block being erased */
                if ((adr & chip->in_progress_block_mask) ==
                    chip->in_progress_block_addr)
                        goto sleep;

                /* Do not suspend small erase blocks: buggy Micron chips */
                if (cfi_is_micron_28F00AP30(cfi, chip) &&
                    (chip->in_progress_block_mask == ~(0x8000-1)))
                        goto sleep;

                /* Erase suspend */
                map_write(map, CMD(0xB0), chip->in_progress_block_addr);

                /* If the flash has finished erasing, then 'erase suspend'
                 * appears to make some (28F320) flash devices switch to
                 * 'read' mode.  Make sure that we switch to 'read status'
                 * mode so we get the right data. --rmk
                 */
                map_write(map, CMD(0x70), chip->in_progress_block_addr);
                chip->oldstate = FL_ERASING;
                chip->state = FL_ERASE_SUSPENDING;
                chip->erase_suspended = 1;
                for (;;) {
                        status = map_read(map, chip->in_progress_block_addr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        if (time_after(jiffies, timeo)) {
                                /* Urgh. Resume and pretend we weren't here.
                                 * Make sure we're in 'read status' mode if it had finished */
                                put_chip(map, chip, adr);
                                printk(KERN_ERR "%s: Chip not ready after erase "
                                       "suspended: status = 0x%lx\n", map->name, status.x[0]);
                                return -EIO;
                        }

                        mutex_unlock(&chip->mutex);
                        cfi_udelay(1);
                        mutex_lock(&chip->mutex);
                        /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
                           So we can just loop here. */
                }
                chip->state = FL_STATUS;
                return 0;

        case FL_XIP_WHILE_ERASING:
                if (mode != FL_READY && mode != FL_POINT &&
                    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
                        goto sleep;
                chip->oldstate = chip->state;
                chip->state = FL_READY;
                return 0;

        case FL_SHUTDOWN:
                /* The machine is rebooting now, so no one can get the chip anymore */
                return -EIO;
        case FL_POINT:
                /* Only if there's no operation suspended... */
                if (mode == FL_READY && chip->oldstate == FL_READY)
                        return 0;
                fallthrough;
        default:
        sleep:
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);
                mutex_unlock(&chip->mutex);
                schedule();
                remove_wait_queue(&chip->wq, &wait);
                mutex_lock(&chip->mutex);
                return -EAGAIN;
        }
}

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        int ret;
        DECLARE_WAITQUEUE(wait, current);

 retry:
        if (chip->priv &&
            (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE
            || mode == FL_SHUTDOWN) && chip->state != FL_SYNCING) {
                /*
                 * OK. We have possibility for contention on the write/erase
                 * operations which are global to the real chip and not per
                 * partition.  So let's fight it over in the partition which
                 * currently has authority on the operation.
                 *
                 * The rules are as follows:
                 *
                 * - any write operation must own shared->writing.
                 *
                 * - any erase operation must own _both_ shared->writing and
                 *   shared->erasing.
                 *
                 * - contention arbitration is handled in the owner's context.
                 *
                 * The 'shared' struct can be read and/or written only when
                 * its lock is taken.
                 */
                struct flchip_shared *shared = chip->priv;
                struct flchip *contender;
                mutex_lock(&shared->lock);
                contender = shared->writing;
                if (contender && contender != chip) {
                        /*
                         * The engine to perform desired operation on this
                         * partition is already in use by someone else.
                         * Let's fight over it in the context of the chip
                         * currently using it.  If it is possible to suspend,
                         * that other partition will do just that, otherwise
                         * it'll happily send us to sleep.  In any case, when
                         * get_chip returns success we're clear to go ahead.
                         */
                        ret = mutex_trylock(&contender->mutex);
                        mutex_unlock(&shared->lock);
                        if (!ret)
                                goto retry;
                        mutex_unlock(&chip->mutex);
                        ret = chip_ready(map, contender, contender->start, mode);
                        mutex_lock(&chip->mutex);

                        if (ret == -EAGAIN) {
                                mutex_unlock(&contender->mutex);
                                goto retry;
                        }
                        if (ret) {
                                mutex_unlock(&contender->mutex);
                                return ret;
                        }
                        mutex_lock(&shared->lock);

                        /* We should not own the chip if it is already in
                         * FL_SYNCING state. Put the contender and retry. */
                        if (chip->state == FL_SYNCING) {
                                put_chip(map, contender, contender->start);
                                mutex_unlock(&contender->mutex);
                                goto retry;
                        }
                        mutex_unlock(&contender->mutex);
                }

                /* Check if we already have suspended erase
                 * on this chip. Sleep. */
                if (mode == FL_ERASING && shared->erasing
                    && shared->erasing->oldstate == FL_ERASING) {
                        mutex_unlock(&shared->lock);
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        mutex_unlock(&chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        mutex_lock(&chip->mutex);
                        goto retry;
                }

                /* We now own it */
                shared->writing = chip;
                if (mode == FL_ERASING)
                        shared->erasing = chip;
                mutex_unlock(&shared->lock);
        }
        ret = chip_ready(map, chip, adr, mode);
        if (ret == -EAGAIN)
                goto retry;

        return ret;
}

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;

        if (chip->priv) {
                struct flchip_shared *shared = chip->priv;
                mutex_lock(&shared->lock);
                if (shared->writing == chip && chip->oldstate == FL_READY) {
                        /* We own the ability to write, but we're done */
                        shared->writing = shared->erasing;
                        if (shared->writing && shared->writing != chip) {
                                /* give back ownership to who we loaned it from */
                                struct flchip *loaner = shared->writing;
                                mutex_lock(&loaner->mutex);
                                mutex_unlock(&shared->lock);
                                mutex_unlock(&chip->mutex);
                                put_chip(map, loaner, loaner->start);
                                mutex_lock(&chip->mutex);
                                mutex_unlock(&loaner->mutex);
                                wake_up(&chip->wq);
                                return;
                        }
                        shared->erasing = NULL;
                        shared->writing = NULL;
                } else if (shared->erasing == chip && shared->writing != chip) {
                        /*
                         * We own the ability to erase without the ability
                         * to write, which means the erase was suspended
                         * and some other partition is currently writing.
                         * Don't let the switch below mess things up since
                         * we don't have ownership to resume anything.
                         */
                        mutex_unlock(&shared->lock);
                        wake_up(&chip->wq);
                        return;
                }
                mutex_unlock(&shared->lock);
        }

        switch(chip->oldstate) {
        case FL_ERASING:
                /* What if one interleaved chip has finished and the
                   other hasn't? The old code would leave the finished
                   one in READY mode. That's bad, and caused -EROFS
                   errors to be returned from do_erase_oneblock because
                   that's the only bit it checked for at the time.
                   As the state machine appears to explicitly allow
                   sending the 0x70 (Read Status) command to an erasing
                   chip and expecting it to be ignored, that's what we
                   do. */
                map_write(map, CMD(0xd0), chip->in_progress_block_addr);
                map_write(map, CMD(0x70), chip->in_progress_block_addr);
                chip->oldstate = FL_READY;
                chip->state = FL_ERASING;
                break;

        case FL_XIP_WHILE_ERASING:
                chip->state = chip->oldstate;
                chip->oldstate = FL_READY;
                break;

        case FL_READY:
        case FL_STATUS:
        case FL_JEDEC_QUERY:
                break;
        default:
                printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
        }
        wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions). Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
                        unsigned long adr)
{
        /* TODO: chips with no XIP use should ignore and return */
        (void) map_read(map, adr); /* ensure mmu mapping is up to date */
        local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
                                unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xff), adr);
                chip->state = FL_READY;
        }
        (void) map_read(map, adr);
        xip_iprefetch();
        local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_wait_for_operation() function polls for both the given timeout
 * and pending (but still masked) hardware interrupts.  Whenever there is an
 * interrupt pending then the flash erase or write operation is suspended,
 * array mode restored and interrupts unmasked.  Task scheduling might also
 * happen at that point.  The CPU eventually returns from the interrupt or
 * the call to schedule() and the suspended flash operation is resumed for
 * the remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */
1139
1140static int __xipram xip_wait_for_operation(
1141                struct map_info *map, struct flchip *chip,
1142                unsigned long adr, unsigned int chip_op_time_max)
1143{
1144        struct cfi_private *cfi = map->fldrv_priv;
1145        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
1146        map_word status, OK = CMD(0x80);
1147        unsigned long usec, suspended, start, done;
1148        flstate_t oldstate, newstate;
1149
1150        start = xip_currtime();
1151        usec = chip_op_time_max;
1152        if (usec == 0)
1153                usec = 500000;
1154        done = 0;
1155
1156        do {
1157                cpu_relax();
1158                if (xip_irqpending() && cfip &&
1159                    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
1160                     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
1161                    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
1162                        /*
1163                         * Let's suspend the erase or write operation when
1164                         * supported.  Note that we currently don't try to
1165                         * suspend interleaved chips if there is already
1166                         * another operation suspended (imagine what happens
1167                         * when one chip was already done with the current
1168                         * operation while another chip suspended it, then
1169                         * we resume the whole thing at once).  Yes, it
1170                         * can happen!
1171                         */
1172                        usec -= done;
1173                        map_write(map, CMD(0xb0), adr);
1174                        map_write(map, CMD(0x70), adr);
1175                        suspended = xip_currtime();
1176                        do {
1177                                if (xip_elapsed_since(suspended) > 100000) {
1178                                        /*
1179                                         * The chip doesn't want to suspend
1180                                         * after waiting for 100 msecs.
1181                                         * This is a critical error but there
1182                                         * is not much we can do here.
1183                                         */
1184                                        return -EIO;
1185                                }
1186                                status = map_read(map, adr);
1187                        } while (!map_word_andequal(map, status, OK, OK));
1188
1189                        /* Suspend succeeded */
1190                        oldstate = chip->state;
1191                        if (oldstate == FL_ERASING) {
1192                                if (!map_word_bitsset(map, status, CMD(0x40)))
1193                                        break;
1194                                newstate = FL_XIP_WHILE_ERASING;
1195                                chip->erase_suspended = 1;
1196                        } else {
1197                                if (!map_word_bitsset(map, status, CMD(0x04)))
1198                                        break;
1199                                newstate = FL_XIP_WHILE_WRITING;
1200                                chip->write_suspended = 1;
1201                        }
1202                        chip->state = newstate;
1203                        map_write(map, CMD(0xff), adr);
1204                        (void) map_read(map, adr);
1205                        xip_iprefetch();
1206                        local_irq_enable();
1207                        mutex_unlock(&chip->mutex);
1208                        xip_iprefetch();
1209                        cond_resched();
1210
1211                        /*
1212                         * We're back.  However someone else might have
1213                         * decided to go write to the chip if we are in
1214                         * a suspended erase state.  If so let's wait
1215                         * until it's done.
1216                         */
1217                        mutex_lock(&chip->mutex);
1218                        while (chip->state != newstate) {
1219                                DECLARE_WAITQUEUE(wait, current);
1220                                set_current_state(TASK_UNINTERRUPTIBLE);
1221                                add_wait_queue(&chip->wq, &wait);
1222                                mutex_unlock(&chip->mutex);
1223                                schedule();
1224                                remove_wait_queue(&chip->wq, &wait);
1225                                mutex_lock(&chip->mutex);
1226                        }
1227                        /* Disallow XIP again */
1228                        local_irq_disable();
1229
1230                        /* Resume the write or erase operation */
1231                        map_write(map, CMD(0xd0), adr);
1232                        map_write(map, CMD(0x70), adr);
1233                        chip->state = oldstate;
1234                        start = xip_currtime();
1235                } else if (usec >= 1000000/HZ) {
1236                        /*
1237                         * Try to save CPU power when the waiting delay
1238                         * is at least one system timer tick period.
1239                         * No need to be extremely accurate here.
1240                         */
1241                        xip_cpu_idle();
1242                }
1243                status = map_read(map, adr);
1244                done = xip_elapsed_since(start);
1245        } while (!map_word_andequal(map, status, OK, OK)
1246                 && done < usec);
1247
1248        return (done >= usec) ? -ETIME : 0;
1249}
1250
1251/*
1252 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
1253 * the flash is actively programming or erasing since we have to poll for
1254 * the operation to complete anyway.  We can't do that in a generic way with
1255 * a XIP setup so do it before the actual flash operation in this case
1256 * and stub it out from INVAL_CACHE_AND_WAIT.
1257 */
1258#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
1259        INVALIDATE_CACHED_RANGE(map, from, size)
1260
1261#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec, usec_max) \
1262        xip_wait_for_operation(map, chip, cmd_adr, usec_max)
1263
1264#else
1265
1266#define xip_disable(map, chip, adr)
1267#define xip_enable(map, chip, adr)
1268#define XIP_INVAL_CACHED_RANGE(x...)
1269#define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation
1270
1271static int inval_cache_and_wait_for_operation(
1272                struct map_info *map, struct flchip *chip,
1273                unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
1274                unsigned int chip_op_time, unsigned int chip_op_time_max)
1275{
1276        struct cfi_private *cfi = map->fldrv_priv;
1277        map_word status, status_OK = CMD(0x80);
1278        int chip_state = chip->state;
1279        unsigned int timeo, sleep_time, reset_timeo;
1280
1281        mutex_unlock(&chip->mutex);
1282        if (inval_len)
1283                INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
1284        mutex_lock(&chip->mutex);
1285
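            /* All timings here are in microseconds: fall back to a 500 ms
               cap if the chip reported no maximum, and start by sleeping
               half of the typical operation time. */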
1286        timeo = chip_op_time_max;
1287        if (!timeo)
1288                timeo = 500000;
1289        reset_timeo = timeo;
1290        sleep_time = chip_op_time / 2;
1291
1292        for (;;) {
1293                if (chip->state != chip_state) {
1294                        /* Someone's suspended the operation: sleep */
1295                        DECLARE_WAITQUEUE(wait, current);
1296                        set_current_state(TASK_UNINTERRUPTIBLE);
1297                        add_wait_queue(&chip->wq, &wait);
1298                        mutex_unlock(&chip->mutex);
1299                        schedule();
1300                        remove_wait_queue(&chip->wq, &wait);
1301                        mutex_lock(&chip->mutex);
1302                        continue;
1303                }
1304
1305                status = map_read(map, cmd_adr);
1306                if (map_word_andequal(map, status, status_OK, status_OK))
1307                        break;
1308
1309                if (chip->erase_suspended && chip_state == FL_ERASING)  {
1310                        /* Erase suspend occurred while sleeping: reset the timeout */
1311                        timeo = reset_timeo;
1312                        chip->erase_suspended = 0;
1313                }
1314                if (chip->write_suspended && chip_state == FL_WRITING)  {
1315                        /* Write suspend occurred while sleeping: reset the timeout */
1316                        timeo = reset_timeo;
1317                        chip->write_suspended = 0;
1318                }
1319                if (!timeo) {
1320                        map_write(map, CMD(0x70), cmd_adr);
1321                        chip->state = FL_STATUS;
1322                        return -ETIME;
1323                }
1324
1325                /* OK, still waiting.  Drop the lock, wait a while and retry. */
1326                mutex_unlock(&chip->mutex);
1327                if (sleep_time >= 1000000/HZ) {
1328                        /*
1329                         * Half of the normal delay may still remain;
1330                         * spend it in a sleeping delay instead of
1331                         * busy waiting.
1332                         */
1333                        msleep(sleep_time/1000);
1334                        timeo -= sleep_time;
1335                        sleep_time = 1000000/HZ;
1336                } else {
1337                        udelay(1);
1338                        cond_resched();
1339                        timeo--;
1340                }
1341                mutex_lock(&chip->mutex);
1342        }
1343
1344        /* Done and happy. */
1345        chip->state = FL_STATUS;
1346        return 0;
1347}
1348
1349#endif
1350
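/* Status-poll-only wrapper: there is no data range to invalidate. */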
1351#define WAIT_TIMEOUT(map, chip, adr, udelay, udelay_max) \
1352        INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay, udelay_max)
1353
1354
1355static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
1356{
1357        unsigned long cmd_addr;
1358        struct cfi_private *cfi = map->fldrv_priv;
1359        int ret;
1360
1361        adr += chip->start;
1362
1363        /* Ensure cmd read/writes are aligned. */
1364        cmd_addr = adr & ~(map_bankwidth(map)-1);
1365
1366        mutex_lock(&chip->mutex);
1367
1368        ret = get_chip(map, chip, cmd_addr, FL_POINT);
1369
1370        if (!ret) {
1371                if (chip->state != FL_POINT && chip->state != FL_READY)
1372                        map_write(map, CMD(0xff), cmd_addr);
1373
1374                chip->state = FL_POINT;
1375                chip->ref_point_counter++;
1376        }
1377        mutex_unlock(&chip->mutex);
1378
1379        return ret;
1380}
1381
1382static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
1383                size_t *retlen, void **virt, resource_size_t *phys)
1384{
1385        struct map_info *map = mtd->priv;
1386        struct cfi_private *cfi = map->fldrv_priv;
1387        unsigned long ofs, last_end = 0;
1388        int chipnum;
1389        int ret;
1390
1391        if (!map->virt)
1392                return -EINVAL;
1393
1394        /* Now lock the chip(s) to POINT state */
1395
1396        /* ofs: offset within the first chip that the first read should start */
1397        chipnum = (from >> cfi->chipshift);
1398        ofs = from - (chipnum << cfi->chipshift);
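            /* e.g. with 16 MiB chips (chipshift == 24), from == 0x1800000
               selects chip 1 at offset 0x800000 within that chip. */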
1399
1400        *virt = map->virt + cfi->chips[chipnum].start + ofs;
1401        if (phys)
1402                *phys = map->phys + cfi->chips[chipnum].start + ofs;
1403
1404        while (len) {
1405                unsigned long thislen;
1406
1407                if (chipnum >= cfi->numchips)
1408                        break;
1409
1410                /* We cannot point across chips that are virtually disjoint */
1411                if (!last_end)
1412                        last_end = cfi->chips[chipnum].start;
1413                else if (cfi->chips[chipnum].start != last_end)
1414                        break;
1415
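                    /* ((len + ofs - 1) >> chipshift) is non-zero when the
                       request runs past the end of this chip; clamp the
                       chunk at the chip boundary in that case. */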
1416                if ((len + ofs -1) >> cfi->chipshift)
1417                        thislen = (1<<cfi->chipshift) - ofs;
1418                else
1419                        thislen = len;
1420
1421                ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
1422                if (ret)
1423                        break;
1424
1425                *retlen += thislen;
1426                len -= thislen;
1427
1428                ofs = 0;
1429                last_end += 1 << cfi->chipshift;
1430                chipnum++;
1431        }
1432        return 0;
1433}
1434
1435static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
1436{
1437        struct map_info *map = mtd->priv;
1438        struct cfi_private *cfi = map->fldrv_priv;
1439        unsigned long ofs;
1440        int chipnum, err = 0;
1441
1442        /* Now unlock the chip(s) POINT state */
1443
1444        /* ofs: offset within the first chip that the first read should start */
1445        chipnum = (from >> cfi->chipshift);
1446        ofs = from - (chipnum << cfi->chipshift);
1447
1448        while (len && !err) {
1449                unsigned long thislen;
1450                struct flchip *chip;
1451
1452                if (chipnum >= cfi->numchips)
1453                        break;
1454                chip = &cfi->chips[chipnum];
1455
1456                if ((len + ofs -1) >> cfi->chipshift)
1457                        thislen = (1<<cfi->chipshift) - ofs;
1458                else
1459                        thislen = len;
1460
1461                mutex_lock(&chip->mutex);
1462                if (chip->state == FL_POINT) {
1463                        chip->ref_point_counter--;
1464                        if(chip->ref_point_counter == 0)
1465                                chip->state = FL_READY;
1466                } else {
1467                        printk(KERN_ERR "%s: Error: unpoint called on a non-pointed region\n", map->name);
1468                        err = -EINVAL;
1469                }
1470
1471                put_chip(map, chip, chip->start);
1472                mutex_unlock(&chip->mutex);
1473
1474                len -= thislen;
1475                ofs = 0;
1476                chipnum++;
1477        }
1478
1479        return err;
1480}
1481
1482static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
1483{
1484        unsigned long cmd_addr;
1485        struct cfi_private *cfi = map->fldrv_priv;
1486        int ret;
1487
1488        adr += chip->start;
1489
1490        /* Ensure cmd read/writes are aligned. */
1491        cmd_addr = adr & ~(map_bankwidth(map)-1);
1492
1493        mutex_lock(&chip->mutex);
1494        ret = get_chip(map, chip, cmd_addr, FL_READY);
1495        if (ret) {
1496                mutex_unlock(&chip->mutex);
1497                return ret;
1498        }
1499
1500        if (chip->state != FL_POINT && chip->state != FL_READY) {
1501                map_write(map, CMD(0xff), cmd_addr);
1502
1503                chip->state = FL_READY;
1504        }
1505
1506        map_copy_from(map, buf, adr, len);
1507
1508        put_chip(map, chip, cmd_addr);
1509
1510        mutex_unlock(&chip->mutex);
1511        return 0;
1512}
1513
1514static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1515{
1516        struct map_info *map = mtd->priv;
1517        struct cfi_private *cfi = map->fldrv_priv;
1518        unsigned long ofs;
1519        int chipnum;
1520        int ret = 0;
1521
1522        /* ofs: offset within the first chip that the first read should start */
1523        chipnum = (from >> cfi->chipshift);
1524        ofs = from - (chipnum << cfi->chipshift);
1525
1526        while (len) {
1527                unsigned long thislen;
1528
1529                if (chipnum >= cfi->numchips)
1530                        break;
1531
1532                if ((len + ofs -1) >> cfi->chipshift)
1533                        thislen = (1<<cfi->chipshift) - ofs;
1534                else
1535                        thislen = len;
1536
1537                ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1538                if (ret)
1539                        break;
1540
1541                *retlen += thislen;
1542                len -= thislen;
1543                buf += thislen;
1544
1545                ofs = 0;
1546                chipnum++;
1547        }
1548        return ret;
1549}
1550
1551static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1552                                     unsigned long adr, map_word datum, int mode)
1553{
1554        struct cfi_private *cfi = map->fldrv_priv;
1555        map_word status, write_cmd;
1556        int ret;
1557
1558        adr += chip->start;
1559
1560        switch (mode) {
1561        case FL_WRITING:
1562                write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0x40) : CMD(0x41);
1563                break;
1564        case FL_OTP_WRITE:
1565                write_cmd = CMD(0xc0);
1566                break;
1567        default:
1568                return -EINVAL;
1569        }
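            /* 0x40 is the standard Word Program command (0x41 on Performance
               command-set parts); 0xc0 programs the protection registers. */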
1570
1571        mutex_lock(&chip->mutex);
1572        ret = get_chip(map, chip, adr, mode);
1573        if (ret) {
1574                mutex_unlock(&chip->mutex);
1575                return ret;
1576        }
1577
1578        XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1579        ENABLE_VPP(map);
1580        xip_disable(map, chip, adr);
1581        map_write(map, write_cmd, adr);
1582        map_write(map, datum, adr);
1583        chip->state = mode;
1584
1585        ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1586                                   adr, map_bankwidth(map),
1587                                   chip->word_write_time,
1588                                   chip->word_write_time_max);
1589        if (ret) {
1590                xip_enable(map, chip, adr);
1591                printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
1592                goto out;
1593        }
1594
1595        /* check for errors */
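            /* 0x1a = SR.4 (program error) | SR.3 (VPP low) | SR.1 (block locked) */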
1596        status = map_read(map, adr);
1597        if (map_word_bitsset(map, status, CMD(0x1a))) {
1598                unsigned long chipstatus = MERGESTATUS(status);
1599
1600                /* reset status */
1601                map_write(map, CMD(0x50), adr);
1602                map_write(map, CMD(0x70), adr);
1603                xip_enable(map, chip, adr);
1604
1605                if (chipstatus & 0x02) {
1606                        ret = -EROFS;
1607                } else if (chipstatus & 0x08) {
1608                        printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
1609                        ret = -EIO;
1610                } else {
1611                        printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
1612                        ret = -EINVAL;
1613                }
1614
1615                goto out;
1616        }
1617
1618        xip_enable(map, chip, adr);
1619 out:   DISABLE_VPP(map);
1620        put_chip(map, chip, adr);
1621        mutex_unlock(&chip->mutex);
1622        return ret;
1623}
1624
1625
1626static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
1627{
1628        struct map_info *map = mtd->priv;
1629        struct cfi_private *cfi = map->fldrv_priv;
1630        int ret;
1631        int chipnum;
1632        unsigned long ofs;
1633
1634        chipnum = to >> cfi->chipshift;
1635        ofs = to - (chipnum << cfi->chipshift);
1636
1637        /* If it's not bus-aligned, do the first byte write */
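            /* e.g. bankwidth 4, ofs == 0x1003: bus_ofs == 0x1000, gap == 3,
               so at most one byte is merged into an 0xff-padded word;
               programming 0xff bits leaves the neighbouring bytes unchanged. */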
1638        if (ofs & (map_bankwidth(map)-1)) {
1639                unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1640                int gap = ofs - bus_ofs;
1641                int n;
1642                map_word datum;
1643
1644                n = min_t(int, len, map_bankwidth(map)-gap);
1645                datum = map_word_ff(map);
1646                datum = map_word_load_partial(map, datum, buf, gap, n);
1647
1648                ret = do_write_oneword(map, &cfi->chips[chipnum],
1649                                               bus_ofs, datum, FL_WRITING);
1650                if (ret)
1651                        return ret;
1652
1653                len -= n;
1654                ofs += n;
1655                buf += n;
1656                (*retlen) += n;
1657
1658                if (ofs >> cfi->chipshift) {
1659                        chipnum++;
1660                        ofs = 0;
1661                        if (chipnum == cfi->numchips)
1662                                return 0;
1663                }
1664        }
1665
1666        while(len >= map_bankwidth(map)) {
1667                map_word datum = map_word_load(map, buf);
1668
1669                ret = do_write_oneword(map, &cfi->chips[chipnum],
1670                                       ofs, datum, FL_WRITING);
1671                if (ret)
1672                        return ret;
1673
1674                ofs += map_bankwidth(map);
1675                buf += map_bankwidth(map);
1676                (*retlen) += map_bankwidth(map);
1677                len -= map_bankwidth(map);
1678
1679                if (ofs >> cfi->chipshift) {
1680                        chipnum++;
1681                        ofs = 0;
1682                        if (chipnum == cfi->numchips)
1683                                return 0;
1684                }
1685        }
1686
1687        if (len & (map_bankwidth(map)-1)) {
1688                map_word datum;
1689
1690                datum = map_word_ff(map);
1691                datum = map_word_load_partial(map, datum, buf, 0, len);
1692
1693                ret = do_write_oneword(map, &cfi->chips[chipnum],
1694                                       ofs, datum, FL_WRITING);
1695                if (ret)
1696                        return ret;
1697
1698                (*retlen) += len;
1699        }
1700
1701        return 0;
1702}
1703
1704
1705static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1706                                    unsigned long adr, const struct kvec **pvec,
1707                                    unsigned long *pvec_seek, int len)
1708{
1709        struct cfi_private *cfi = map->fldrv_priv;
1710        map_word status, write_cmd, datum;
1711        unsigned long cmd_adr;
1712        int ret, wbufsize, word_gap, words;
1713        const struct kvec *vec;
1714        unsigned long vec_seek;
1715        unsigned long initial_adr;
1716        int initial_len = len;
1717
1718        wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
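            /* e.g. two interleaved chips with 2^5-byte buffers each give a
               64-byte combined write buffer. */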
1719        adr += chip->start;
1720        initial_adr = adr;
1721        cmd_adr = adr & ~(wbufsize-1);
1722
1723        /* Sharp LH28F640BF chips need the first address for the
1724         * Page Buffer Program command. See Table 5 of
1725         * LH28F320BF, LH28F640BF, LH28F128BF Series (Appendix FUM00701) */
1726        if (is_LH28F640BF(cfi))
1727                cmd_adr = adr;
1728
1729        /* Let's determine this according to the interleave only once */
1730        write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0xe8) : CMD(0xe9);
1731
1732        mutex_lock(&chip->mutex);
1733        ret = get_chip(map, chip, cmd_adr, FL_WRITING);
1734        if (ret) {
1735                mutex_unlock(&chip->mutex);
1736                return ret;
1737        }
1738
1739        XIP_INVAL_CACHED_RANGE(map, initial_adr, initial_len);
1740        ENABLE_VPP(map);
1741        xip_disable(map, chip, cmd_adr);
1742
1743        /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1744           [...], the device will not accept any more Write to Buffer commands".
1745           So we must check here and reset those bits if they're set. Otherwise
1746           we're just pissing in the wind */
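            /* SR.4 (0x10) flags a program failure, SR.5 (0x20) an erase
               failure; together they form the 0x30 mask tested below. */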
1747        if (chip->state != FL_STATUS) {
1748                map_write(map, CMD(0x70), cmd_adr);
1749                chip->state = FL_STATUS;
1750        }
1751        status = map_read(map, cmd_adr);
1752        if (map_word_bitsset(map, status, CMD(0x30))) {
1753                xip_enable(map, chip, cmd_adr);
1754                printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
1755                xip_disable(map, chip, cmd_adr);
1756                map_write(map, CMD(0x50), cmd_adr);
1757                map_write(map, CMD(0x70), cmd_adr);
1758        }
1759
1760        chip->state = FL_WRITING_TO_BUFFER;
1761        map_write(map, write_cmd, cmd_adr);
1762        ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0, 0);
1763        if (ret) {
1764                /* Argh. Not ready for write to buffer */
1765                map_word Xstatus = map_read(map, cmd_adr);
1766                map_write(map, CMD(0x70), cmd_adr);
1767                chip->state = FL_STATUS;
1768                status = map_read(map, cmd_adr);
1769                map_write(map, CMD(0x50), cmd_adr);
1770                map_write(map, CMD(0x70), cmd_adr);
1771                xip_enable(map, chip, cmd_adr);
1772                printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
1773                                map->name, Xstatus.x[0], status.x[0]);
1774                goto out;
1775        }
1776
1777        /* Figure out the number of words to write */
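            /* Worked example for bankwidth 4: adr ending in ...2 with
               len == 10 spans three bus words.  word_gap ends up as adr's
               offset within its bus word (2), adr is aligned down, the
               leading gap is padded with 0xff, and "words" holds the
               chip's word count minus one. */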
1778        word_gap = (-adr & (map_bankwidth(map)-1));
1779        words = DIV_ROUND_UP(len - word_gap, map_bankwidth(map));
1780        if (!word_gap) {
1781                words--;
1782        } else {
1783                word_gap = map_bankwidth(map) - word_gap;
1784                adr -= word_gap;
1785                datum = map_word_ff(map);
1786        }
1787
1788        /* Write length of data to come */
1789        map_write(map, CMD(words), cmd_adr);
1790
1791        /* Write data */
1792        vec = *pvec;
1793        vec_seek = *pvec_seek;
1794        do {
1795                int n = map_bankwidth(map) - word_gap;
1796                if (n > vec->iov_len - vec_seek)
1797                        n = vec->iov_len - vec_seek;
1798                if (n > len)
1799                        n = len;
1800
1801                if (!word_gap && len < map_bankwidth(map))
1802                        datum = map_word_ff(map);
1803
1804                datum = map_word_load_partial(map, datum,
1805                                              vec->iov_base + vec_seek,
1806                                              word_gap, n);
1807
1808                len -= n;
1809                word_gap += n;
1810                if (!len || word_gap == map_bankwidth(map)) {
1811                        map_write(map, datum, adr);
1812                        adr += map_bankwidth(map);
1813                        word_gap = 0;
1814                }
1815
1816                vec_seek += n;
1817                if (vec_seek == vec->iov_len) {
1818                        vec++;
1819                        vec_seek = 0;
1820                }
1821        } while (len);
1822        *pvec = vec;
1823        *pvec_seek = vec_seek;
1824
1825        /* GO GO GO */
1826        map_write(map, CMD(0xd0), cmd_adr);
1827        chip->state = FL_WRITING;
1828
1829        ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
1830                                   initial_adr, initial_len,
1831                                   chip->buffer_write_time,
1832                                   chip->buffer_write_time_max);
1833        if (ret) {
1834                map_write(map, CMD(0x70), cmd_adr);
1835                chip->state = FL_STATUS;
1836                xip_enable(map, chip, cmd_adr);
1837                printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
1838                goto out;
1839        }
1840
1841        /* check for errors */
1842        status = map_read(map, cmd_adr);
1843        if (map_word_bitsset(map, status, CMD(0x1a))) {
1844                unsigned long chipstatus = MERGESTATUS(status);
1845
1846                /* reset status */
1847                map_write(map, CMD(0x50), cmd_adr);
1848                map_write(map, CMD(0x70), cmd_adr);
1849                xip_enable(map, chip, cmd_adr);
1850
1851                if (chipstatus & 0x02) {
1852                        ret = -EROFS;
1853                } else if (chipstatus & 0x08) {
1854                        printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
1855                        ret = -EIO;
1856                } else {
1857                        printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
1858                        ret = -EINVAL;
1859                }
1860
1861                goto out;
1862        }
1863
1864        xip_enable(map, chip, cmd_adr);
1865 out:   DISABLE_VPP(map);
1866        put_chip(map, chip, cmd_adr);
1867        mutex_unlock(&chip->mutex);
1868        return ret;
1869}
1870
1871static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
1872                                unsigned long count, loff_t to, size_t *retlen)
1873{
1874        struct map_info *map = mtd->priv;
1875        struct cfi_private *cfi = map->fldrv_priv;
1876        int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1877        int ret;
1878        int chipnum;
1879        unsigned long ofs, vec_seek, i;
1880        size_t len = 0;
1881
1882        for (i = 0; i < count; i++)
1883                len += vecs[i].iov_len;
1884
1885        if (!len)
1886                return 0;
1887
1888        chipnum = to >> cfi->chipshift;
1889        ofs = to - (chipnum << cfi->chipshift);
1890        vec_seek = 0;
1891
1892        do {
1893                /* We must not cross write block boundaries */
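                    /* e.g. wbufsize == 32, ofs == 0x6c: only 20 bytes remain
                       before the next write-block boundary. */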
1894                int size = wbufsize - (ofs & (wbufsize-1));
1895
1896                if (size > len)
1897                        size = len;
1898                ret = do_write_buffer(map, &cfi->chips[chipnum],
1899                                      ofs, &vecs, &vec_seek, size);
1900                if (ret)
1901                        return ret;
1902
1903                ofs += size;
1904                (*retlen) += size;
1905                len -= size;
1906
1907                if (ofs >> cfi->chipshift) {
1908                        chipnum++;
1909                        ofs = 0;
1910                        if (chipnum == cfi->numchips)
1911                                return 0;
1912                }
1913
1914                /* Be nice and reschedule with the chip in a usable state for other
1915                   processes. */
1916                cond_resched();
1917
1918        } while (len);
1919
1920        return 0;
1921}
1922
1923static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1924                                       size_t len, size_t *retlen, const u_char *buf)
1925{
1926        struct kvec vec;
1927
1928        vec.iov_base = (void *) buf;
1929        vec.iov_len = len;
1930
1931        return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
1932}
1933
1934static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1935                                      unsigned long adr, int len, void *thunk)
1936{
1937        struct cfi_private *cfi = map->fldrv_priv;
1938        map_word status;
1939        int retries = 3;
1940        int ret;
1941
1942        adr += chip->start;
1943
1944 retry:
1945        mutex_lock(&chip->mutex);
1946        ret = get_chip(map, chip, adr, FL_ERASING);
1947        if (ret) {
1948                mutex_unlock(&chip->mutex);
1949                return ret;
1950        }
1951
1952        XIP_INVAL_CACHED_RANGE(map, adr, len);
1953        ENABLE_VPP(map);
1954        xip_disable(map, chip, adr);
1955
1956        /* Clear the status register first */
1957        map_write(map, CMD(0x50), adr);
1958
1959        /* Now erase: two-cycle command, 0x20 setup then 0xd0 confirm */
1960        map_write(map, CMD(0x20), adr);
1961        map_write(map, CMD(0xD0), adr);
1962        chip->state = FL_ERASING;
1963        chip->erase_suspended = 0;
1964        chip->in_progress_block_addr = adr;
1965        chip->in_progress_block_mask = ~(len - 1);
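            /* Record the block being erased so a later erase suspend /
               resume can address it (the mask assumes a power-of-two len). */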
1966
1967        ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1968                                   adr, len,
1969                                   chip->erase_time,
1970                                   chip->erase_time_max);
1971        if (ret) {
1972                map_write(map, CMD(0x70), adr);
1973                chip->state = FL_STATUS;
1974                xip_enable(map, chip, adr);
1975                printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
1976                goto out;
1977        }
1978
1979        /* We've broken this before. It doesn't hurt to be safe */
1980        map_write(map, CMD(0x70), adr);
1981        chip->state = FL_STATUS;
1982        status = map_read(map, adr);
1983
1984        /* check for errors */
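            /* 0x3a = SR.5 (erase error) | SR.4 (program error) |
                      SR.3 (VPP low) | SR.1 (block locked) */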
1985        if (map_word_bitsset(map, status, CMD(0x3a))) {
1986                unsigned long chipstatus = MERGESTATUS(status);
1987
1988                /* Reset the error bits */
1989                map_write(map, CMD(0x50), adr);
1990                map_write(map, CMD(0x70), adr);
1991                xip_enable(map, chip, adr);
1992
1993                if ((chipstatus & 0x30) == 0x30) {
1994                        printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
1995                        ret = -EINVAL;
1996                } else if (chipstatus & 0x02) {
1997                        /* Protection bit set */
1998                        ret = -EROFS;
1999                } else if (chipstatus & 0x8) {
2000                        /* Voltage */
2001                        printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
2002                        ret = -EIO;
2003                } else if (chipstatus & 0x20 && retries--) {
2004                        printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
2005                        DISABLE_VPP(map);
2006                        put_chip(map, chip, adr);
2007                        mutex_unlock(&chip->mutex);
2008                        goto retry;
2009                } else {
2010                        printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
2011                        ret = -EIO;
2012                }
2013
2014                goto out;
2015        }
2016
2017        xip_enable(map, chip, adr);
2018 out:   DISABLE_VPP(map);
2019        put_chip(map, chip, adr);
2020        mutex_unlock(&chip->mutex);
2021        return ret;
2022}
2023
2024static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
2025{
2026        return cfi_varsize_frob(mtd, do_erase_oneblock, instr->addr,
2027                                instr->len, NULL);
2028}
2029
2030static void cfi_intelext_sync (struct mtd_info *mtd)
2031{
2032        struct map_info *map = mtd->priv;
2033        struct cfi_private *cfi = map->fldrv_priv;
2034        int i;
2035        struct flchip *chip;
2036        int ret = 0;
2037
2038        for (i=0; !ret && i<cfi->numchips; i++) {
2039                chip = &cfi->chips[i];
2040
2041                mutex_lock(&chip->mutex);
2042                ret = get_chip(map, chip, chip->start, FL_SYNCING);
2043
2044                if (!ret) {
2045                        chip->oldstate = chip->state;
2046                        chip->state = FL_SYNCING;
2047                        /* No need to wake_up() on this state change -
2048                         * as the whole point is that nobody can do anything
2049                         * with the chip now anyway.
2050                         */
2051                }
2052                mutex_unlock(&chip->mutex);
2053        }
2054
2055        /* Unlock the chips again */
2056
2057        for (i--; i >=0; i--) {
2058                chip = &cfi->chips[i];
2059
2060                mutex_lock(&chip->mutex);
2061
2062                if (chip->state == FL_SYNCING) {
2063                        chip->state = chip->oldstate;
2064                        chip->oldstate = FL_READY;
2065                        wake_up(&chip->wq);
2066                }
2067                mutex_unlock(&chip->mutex);
2068        }
2069}
2070
2071static int __xipram do_getlockstatus_oneblock(struct map_info *map,
2072                                                struct flchip *chip,
2073                                                unsigned long adr,
2074                                                int len, void *thunk)
2075{
2076        struct cfi_private *cfi = map->fldrv_priv;
2077        int status, ofs_factor = cfi->interleave * cfi->device_type;
2078
2079        adr += chip->start;
2080        xip_disable(map, chip, adr+(2*ofs_factor));
2081        map_write(map, CMD(0x90), adr+(2*ofs_factor));
2082        chip->state = FL_JEDEC_QUERY;
2083        status = cfi_read_query(map, adr+(2*ofs_factor));
2084        xip_enable(map, chip, 0);
2085        return status;
2086}
2087
2088#ifdef DEBUG_LOCK_BITS
2089static int __xipram do_printlockstatus_oneblock(struct map_info *map,
2090                                                struct flchip *chip,
2091                                                unsigned long adr,
2092                                                int len, void *thunk)
2093{
2094        printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
2095               adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
2096        return 0;
2097}
2098#endif
2099
2100#define DO_XXLOCK_ONEBLOCK_LOCK         ((void *) 1)
2101#define DO_XXLOCK_ONEBLOCK_UNLOCK       ((void *) 2)
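/*
 * Block lock bits use a two-cycle sequence: 0x60 (Set/Clear Block Lock-Bits
 * setup) followed by 0x01 to set the lock bit or 0xd0 to clear it.
 */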
2102
2103static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
2104                                       unsigned long adr, int len, void *thunk)
2105{
2106        struct cfi_private *cfi = map->fldrv_priv;
2107        struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2108        int mdelay;
2109        int ret;
2110
2111        adr += chip->start;
2112
2113        mutex_lock(&chip->mutex);
2114        ret = get_chip(map, chip, adr, FL_LOCKING);
2115        if (ret) {
2116                mutex_unlock(&chip->mutex);
2117                return ret;
2118        }
2119
2120        ENABLE_VPP(map);
2121        xip_disable(map, chip, adr);
2122
2123        map_write(map, CMD(0x60), adr);
2124        if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
2125                map_write(map, CMD(0x01), adr);
2126                chip->state = FL_LOCKING;
2127        } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
2128                map_write(map, CMD(0xD0), adr);
2129                chip->state = FL_UNLOCKING;
2130        } else
2131                BUG();
2132
2133        /*
2134         * If Instant Individual Block Locking is supported, there is
2135         * no need to delay.
2136         */
2137        /*
2138         * Unlocking may take up to 1.4 seconds on some Intel flashes.  So
2139         * let's use a maximum of 1.5 seconds (1500 ms) as the timeout.
2140         *
2141         * See "Clear Block Lock-Bits Time" on page 40 in
2142         * "3 Volt Intel StrataFlash Memory" 28F128J3,28F640J3,28F320J3 manual
2143         * from February 2003
2144         */
2145        mdelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1500 : 0;
2146
2147        ret = WAIT_TIMEOUT(map, chip, adr, mdelay, mdelay * 1000);
2148        if (ret) {
2149                map_write(map, CMD(0x70), adr);
2150                chip->state = FL_STATUS;
2151                xip_enable(map, chip, adr);
2152                printk(KERN_ERR "%s: block lock/unlock error: (status timeout)\n", map->name);
2153                goto out;
2154        }
2155
2156        xip_enable(map, chip, adr);
2157 out:   DISABLE_VPP(map);
2158        put_chip(map, chip, adr);
2159        mutex_unlock(&chip->mutex);
2160        return ret;
2161}
2162
2163static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2164{
2165        int ret;
2166
2167#ifdef DEBUG_LOCK_BITS
2168        printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08llx\n",
2169               __func__, (unsigned long long)ofs, (unsigned long long)len);
2170        cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2171                ofs, len, NULL);
2172#endif
2173
2174        ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2175                ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
2176
2177#ifdef DEBUG_LOCK_BITS
2178        printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2179               __func__, ret);
2180        cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2181                ofs, len, NULL);
2182#endif
2183
2184        return ret;
2185}
2186
2187static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2188{
2189        int ret;
2190
2191#ifdef DEBUG_LOCK_BITS
2192        printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08llx\n",
2193               __func__, (unsigned long long)ofs, (unsigned long long)len);
2194        cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2195                ofs, len, NULL);
2196#endif
2197
2198        ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2199                                        ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
2200
2201#ifdef DEBUG_LOCK_BITS
2202        printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2203               __func__, ret);
2204        cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2205                ofs, len, NULL);
2206#endif
2207
2208        return ret;
2209}
2210
2211static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
2212                                  uint64_t len)
2213{
2214        return cfi_varsize_frob(mtd, do_getlockstatus_oneblock,
2215                                ofs, len, NULL) ? 1 : 0;
2216}
2217
2218#ifdef CONFIG_MTD_OTP
2219
2220typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
2221                        u_long data_offset, u_char *buf, u_int size,
2222                        u_long prot_offset, u_int groupno, u_int groupsize);
2223
2224static int __xipram
2225do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
2226            u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2227{
2228        struct cfi_private *cfi = map->fldrv_priv;
2229        int ret;
2230
2231        mutex_lock(&chip->mutex);
2232        ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
2233        if (ret) {
2234                mutex_unlock(&chip->mutex);
2235                return ret;
2236        }
2237
2238        /* let's ensure we're not reading back cached data from array mode */
2239        INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2240
2241        xip_disable(map, chip, chip->start);
2242        if (chip->state != FL_JEDEC_QUERY) {
2243                map_write(map, CMD(0x90), chip->start);
2244                chip->state = FL_JEDEC_QUERY;
2245        }
2246        map_copy_from(map, buf, chip->start + offset, size);
2247        xip_enable(map, chip, chip->start);
2248
2249        /* then ensure we don't keep OTP data in the cache */
2250        INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2251
2252        put_chip(map, chip, chip->start);
2253        mutex_unlock(&chip->mutex);
2254        return 0;
2255}
2256
2257static int
2258do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
2259             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2260{
2261        int ret;
2262
2263        while (size) {
2264                unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
2265                int gap = offset - bus_ofs;
2266                int n = min_t(int, size, map_bankwidth(map)-gap);
2267                map_word datum = map_word_ff(map);
2268
2269                datum = map_word_load_partial(map, datum, buf, gap, n);
2270                ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
2271                if (ret)
2272                        return ret;
2273
2274                offset += n;
2275                buf += n;
2276                size -= n;
2277        }
2278
2279        return 0;
2280}
2281
2282static int
2283do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
2284            u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2285{
2286        struct cfi_private *cfi = map->fldrv_priv;
2287        map_word datum;
2288
2289        /* make sure area matches group boundaries */
2290        if (size != grpsz)
2291                return -EXDEV;
2292
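            /* Program an all-ones word with only bit <grpno> cleared: since
               programming can only clear bits, this locks exactly one
               protection-register group and leaves the rest untouched. */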
2293        datum = map_word_ff(map);
2294        datum = map_word_clr(map, datum, CMD(1 << grpno));
2295        return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
2296}
2297
2298static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2299                                 size_t *retlen, u_char *buf,
2300                                 otp_op_t action, int user_regs)
2301{
2302        struct map_info *map = mtd->priv;
2303        struct cfi_private *cfi = map->fldrv_priv;
2304        struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2305        struct flchip *chip;
2306        struct cfi_intelext_otpinfo *otp;
2307        u_long devsize, reg_prot_offset, data_offset;
2308        u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
2309        u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
2310        int ret;
2311
2312        *retlen = 0;
2313
2314        /* Check that we actually have some OTP registers */
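            /* FeatureSupport bit 6 (0x40) advertises protection registers */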
2315        if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
2316                return -ENODATA;
2317
2318        /* we need real chips here not virtual ones */
2319        devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
2320        chip_step = devsize >> cfi->chipshift;
2321        chip_num = 0;
2322
2323        /* Some chips have OTP located in the _top_ partition only.
2324           For example: Intel 28F256L18T (T means top-parameter device) */
2325        if (cfi->mfr == CFI_MFR_INTEL) {
2326                switch (cfi->id) {
2327                case 0x880b:
2328                case 0x880c:
2329                case 0x880d:
2330                        chip_num = chip_step - 1;
2331                }
2332        }
2333
2334        for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
2335                chip = &cfi->chips[chip_num];
2336                otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
2337
2338                /* first OTP region */
2339                field = 0;
2340                reg_prot_offset = extp->ProtRegAddr;
2341                reg_fact_groups = 1;
2342                reg_fact_size = 1 << extp->FactProtRegSize;
2343                reg_user_groups = 1;
2344                reg_user_size = 1 << extp->UserProtRegSize;
2345
2346                while (len > 0) {
2347                        /* flash geometry fixup */
2348                        data_offset = reg_prot_offset + 1;
2349                        data_offset *= cfi->interleave * cfi->device_type;
2350                        reg_prot_offset *= cfi->interleave * cfi->device_type;
2351                        reg_fact_size *= cfi->interleave;
2352                        reg_user_size *= cfi->interleave;
2353
2354                        if (user_regs) {
2355                                groups = reg_user_groups;
2356                                groupsize = reg_user_size;
2357                                /* skip over factory reg area */
2358                                groupno = reg_fact_groups;
2359                                data_offset += reg_fact_groups * reg_fact_size;
2360                        } else {
2361                                groups = reg_fact_groups;
2362                                groupsize = reg_fact_size;
2363                                groupno = 0;
2364                        }
2365
2366                        while (len > 0 && groups > 0) {
2367                                if (!action) {
2368                                        /*
2369                                         * Special case: if action is NULL
2370                                         * we fill buf with otp_info records.
2371                                         */
2372                                        struct otp_info *otpinfo;
2373                                        map_word lockword;
2374                                        if (len <= sizeof(struct otp_info))
2375                                                return -ENOSPC;
2376                                        len -= sizeof(struct otp_info);
2377                                        ret = do_otp_read(map, chip,
2378                                                          reg_prot_offset,
2379                                                          (u_char *)&lockword,
2380                                                          map_bankwidth(map),
2381                                                          0, 0,  0);
2382                                        if (ret)
2383                                                return ret;
2384                                        otpinfo = (struct otp_info *)buf;
2385                                        otpinfo->start = from;
2386                                        otpinfo->length = groupsize;
2387                                        otpinfo->locked =
2388                                           !map_word_bitsset(map, lockword,
2389                                                             CMD(1 << groupno));
2390                                        from += groupsize;
2391                                        buf += sizeof(*otpinfo);
2392                                        *retlen += sizeof(*otpinfo);
2393                                } else if (from >= groupsize) {
2394                                        from -= groupsize;
2395                                        data_offset += groupsize;
2396                                } else {
2397                                        int size = groupsize;
2398                                        data_offset += from;
2399                                        size -= from;
2400                                        from = 0;
2401                                        if (size > len)
2402                                                size = len;
2403                                        ret = action(map, chip, data_offset,
2404                                                     buf, size, reg_prot_offset,
2405                                                     groupno, groupsize);
2406                                        if (ret < 0)
2407                                                return ret;
2408                                        buf += size;
2409                                        len -= size;
2410                                        *retlen += size;
2411                                        data_offset += size;
2412                                }
2413                                groupno++;
2414                                groups--;
2415                        }
2416
2417                        /* next OTP region */
2418                        if (++field == extp->NumProtectionFields)
2419                                break;
2420                        reg_prot_offset = otp->ProtRegAddr;
2421                        reg_fact_groups = otp->FactGroups;
2422                        reg_fact_size = 1 << otp->FactProtRegSize;
2423                        reg_user_groups = otp->UserGroups;
2424                        reg_user_size = 1 << otp->UserProtRegSize;
2425                        otp++;
2426                }
2427        }
2428
2429        return 0;
2430}
2431
2432static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
2433                                           size_t len, size_t *retlen,
2434                                           u_char *buf)
2435{
2436        return cfi_intelext_otp_walk(mtd, from, len, retlen,
2437                                     buf, do_otp_read, 0);
2438}
2439
2440static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
2441                                           size_t len, size_t *retlen,
2442                                           u_char *buf)
2443{
2444        return cfi_intelext_otp_walk(mtd, from, len, retlen,
2445                                     buf, do_otp_read, 1);
2446}
2447
2448static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
2449                                            size_t len, size_t *retlen,
2450                                            u_char *buf)
2451{
2452        return cfi_intelext_otp_walk(mtd, from, len, retlen,
2453                                     buf, do_otp_write, 1);
2454}
2455
2456static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
2457                                           loff_t from, size_t len)
2458{
2459        size_t retlen;
2460        return cfi_intelext_otp_walk(mtd, from, len, &retlen,
2461                                     NULL, do_otp_lock, 1);
2462}
2463
2464static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd, size_t len,
2465                                           size_t *retlen, struct otp_info *buf)
2467{
2468        return cfi_intelext_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
2469                                     NULL, 0);
2470}
2471
2472static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd, size_t len,
2473                                           size_t *retlen, struct otp_info *buf)
2474{
2475        return cfi_intelext_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
2476                                     NULL, 1);
2477}
2478
2479#endif
2480
2481static void cfi_intelext_save_locks(struct mtd_info *mtd)
2482{
2483        struct mtd_erase_region_info *region;
2484        int block, status, i;
2485        unsigned long adr;
2486        size_t len;
2487
2488        for (i = 0; i < mtd->numeraseregions; i++) {
2489                region = &mtd->eraseregions[i];
2490                if (!region->lockmap)
2491                        continue;
2492
2493                for (block = 0; block < region->numblocks; block++){
2494                        len = region->erasesize;
2495                        adr = region->offset + block * len;
2496
2497                        status = cfi_varsize_frob(mtd,
2498                                        do_getlockstatus_oneblock, adr, len, NULL);
2499                        if (status)
2500                                set_bit(block, region->lockmap);
2501                        else
2502                                clear_bit(block, region->lockmap);
2503                }
2504        }
2505}
2506
2507static int cfi_intelext_suspend(struct mtd_info *mtd)
2508{
2509        struct map_info *map = mtd->priv;
2510        struct cfi_private *cfi = map->fldrv_priv;
2511        struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2512        int i;
2513        struct flchip *chip;
2514        int ret = 0;
2515
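            /* Power-up-locking parts come back with every block locked, so
               snapshot the lock state now and let resume restore it. */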
2516        if ((mtd->flags & MTD_POWERUP_LOCK)
2517            && extp && (extp->FeatureSupport & (1 << 5)))
2518                cfi_intelext_save_locks(mtd);
2519
2520        for (i=0; !ret && i<cfi->numchips; i++) {
2521                chip = &cfi->chips[i];
2522
2523                mutex_lock(&chip->mutex);
2524
2525                switch (chip->state) {
2526                case FL_READY:
2527                case FL_STATUS:
2528                case FL_CFI_QUERY:
2529                case FL_JEDEC_QUERY:
2530                        if (chip->oldstate == FL_READY) {
2531                                /* place the chip in a known state before suspend */
2532                                map_write(map, CMD(0xFF), cfi->chips[i].start);
2533                                chip->oldstate = chip->state;
2534                                chip->state = FL_PM_SUSPENDED;
2535                                /* No need to wake_up() on this state change -
2536                                 * as the whole point is that nobody can do anything
2537                                 * with the chip now anyway.
2538                                 */
2539                        } else {
2540                                /* An operation is still pending; refuse the suspend. */
2541                                printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
2542                                ret = -EAGAIN;
2543                        }
2544                        break;
2545                default:
2546                        /* Should we actually wait? Once upon a time these routines weren't
2547                           allowed to. Or should we return -EAGAIN, because the upper layers
2548                           ought to have already shut down anything which was using the device
2549                           anyway? The latter for now. */
2550                        printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
2551                        ret = -EAGAIN;  /* fall through */
2552                case FL_PM_SUSPENDED:
2553                        break;
2554                }
2555                mutex_unlock(&chip->mutex);
2556        }
2557
2558        /* Unlock the chips again */
2559
2560        if (ret) {
2561                for (i--; i >=0; i--) {
2562                        chip = &cfi->chips[i];
2563
2564                        mutex_lock(&chip->mutex);
2565
2566                        if (chip->state == FL_PM_SUSPENDED) {
2567                                /* No need to force it into a known state here,
2568                                   because we're returning failure, and it didn't
2569                                   get power cycled */
2570                                chip->state = chip->oldstate;
2571                                chip->oldstate = FL_READY;
2572                                wake_up(&chip->wq);
2573                        }
2574                        mutex_unlock(&chip->mutex);
2575                }
2576        }
2577
2578        return ret;
2579}
2580
2581static void cfi_intelext_restore_locks(struct mtd_info *mtd)
2582{
2583        struct mtd_erase_region_info *region;
2584        int block, i;
2585        unsigned long adr;
2586        size_t len;
2587
2588        for (i = 0; i < mtd->numeraseregions; i++) {
2589                region = &mtd->eraseregions[i];
2590                if (!region->lockmap)
2591                        continue;
2592
2593                for_each_clear_bit(block, region->lockmap, region->numblocks) {
2594                        len = region->erasesize;
2595                        adr = region->offset + block * len;
2596                        cfi_intelext_unlock(mtd, adr, len);
2597                }
2598        }
2599}
2600
2601static void cfi_intelext_resume(struct mtd_info *mtd)
2602{
2603        struct map_info *map = mtd->priv;
2604        struct cfi_private *cfi = map->fldrv_priv;
2605        struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2606        int i;
2607        struct flchip *chip;
2608
2609        for (i=0; i<cfi->numchips; i++) {
2610
2611                chip = &cfi->chips[i];
2612
2613                mutex_lock(&chip->mutex);
2614
2615                /* Go to known state. Chip may have been power cycled */
2616                if (chip->state == FL_PM_SUSPENDED) {
2617                        /* Refresh LH28F640BF Partition Config. Register */
2618                        fixup_LH28F640BF(mtd);
2619                        map_write(map, CMD(0xFF), cfi->chips[i].start);
2620                        chip->oldstate = chip->state = FL_READY;
2621                        wake_up(&chip->wq);
2622                }
2623
2624                mutex_unlock(&chip->mutex);
2625        }
2626
2627        if ((mtd->flags & MTD_POWERUP_LOCK)
2628            && extp && (extp->FeatureSupport & (1 << 5)))
2629                cfi_intelext_restore_locks(mtd);
2630}
2631
2632static int cfi_intelext_reset(struct mtd_info *mtd)
2633{
2634        struct map_info *map = mtd->priv;
2635        struct cfi_private *cfi = map->fldrv_priv;
2636        int i, ret;
2637
2638        for (i=0; i < cfi->numchips; i++) {
2639                struct flchip *chip = &cfi->chips[i];
2640
2641                /* force the completion of any ongoing operation
2642                   and switch to array mode so any bootloader in
2643                   flash is accessible for soft reboot. */
2644                mutex_lock(&chip->mutex);
2645                ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
2646                if (!ret) {
2647                        map_write(map, CMD(0xff), chip->start);
2648                        chip->state = FL_SHUTDOWN;
2649                        put_chip(map, chip, chip->start);
2650                }
2651                mutex_unlock(&chip->mutex);
2652        }
2653
2654        return 0;
2655}
2656
2657static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
2658                               void *v)
2659{
2660        struct mtd_info *mtd;
2661
2662        mtd = container_of(nb, struct mtd_info, reboot_notifier);
2663        cfi_intelext_reset(mtd);
2664        return NOTIFY_DONE;
2665}
2666
2667static void cfi_intelext_destroy(struct mtd_info *mtd)
2668{
2669        struct map_info *map = mtd->priv;
2670        struct cfi_private *cfi = map->fldrv_priv;
2671        struct mtd_erase_region_info *region;
2672        int i;
2673        cfi_intelext_reset(mtd);
2674        unregister_reboot_notifier(&mtd->reboot_notifier);
2675        kfree(cfi->cmdset_priv);
2676        kfree(cfi->cfiq);
2677        kfree(cfi->chips[0].priv);
2678        kfree(cfi);
2679        for (i = 0; i < mtd->numeraseregions; i++) {
2680                region = &mtd->eraseregions[i];
2681                kfree(region->lockmap);
2682        }
2683        kfree(mtd->eraseregions);
2684}
2685
2686MODULE_LICENSE("GPL");
2687MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
2688MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
2689MODULE_ALIAS("cfi_cmdset_0003");
2690MODULE_ALIAS("cfi_cmdset_0200");
2691