/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * $Id: cfi_cmdset_0001.c,v 1.186 2005/11/23 22:07:52 nico Exp $
 *
 *
 * 10/10/2000   Nicolas Pitre <nico@cam.org>
 *      - completely revamped method functions so they are aware and
 *        independent of the flash geometry (buswidth, interleave, etc.)
 *      - scalability vs code size is completely set at compile-time
 *        (see include/linux/mtd/cfi.h for selection)
 *      - optimized write buffer method
 * 02/05/2002   Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 *      - reworked lock/unlock/erase support for var size flash
 * 21/03/2007   Rodolfo Giometti <giometti@linux.it>
 *      - auto unlock sectors on resume for auto locking flash on power up
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/bitmap.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

// debugging, turns off buffer write mode if set to 1
#define FORCE_WORD_WRITE 0

#define MANUFACTURER_INTEL      0x0089
#define I82802AB        0x00ad
#define I82802AC        0x00ac
#define MANUFACTURER_ST         0x0020
#define M50LPW080       0x002F

static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
                                            struct otp_info *, size_t);
static int cfi_intelext_get_user_prot_info (struct mtd_info *,
                                            struct otp_info *, size_t);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
                     size_t *retlen, u_char **mtdbuf);
static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
                        size_t len);

static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"
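/* fwh_lock.h supplies the lock/unlock implementation (fixup_use_fwh_lock)
   used below for the firmware-hub parts in jedec_fixup_table. */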



/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
        .probe          = NULL, /* Not usable directly */
        .destroy        = cfi_intelext_destroy,
        .name           = "cfi_cmdset_0001",
        .module         = THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
        int i;
        printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
        printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
        printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
        printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
        printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
        printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
        printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
        printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
        printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
        printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
        printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
        printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
        printk("     - Extended Flash Array:    %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
        for (i=11; i<32; i++) {
                if (extp->FeatureSupport & (1<<i))
                        printk("     - Unknown Bit %X:      supported\n", i);
        }

        printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
        printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
        for (i=1; i<8; i++) {
                if (extp->SuspendCmdSupport & (1<<i))
                        printk("     - Unknown Bit %X:               supported\n", i);
        }

        printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
        printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
        printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
        for (i=2; i<3; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }
        printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
        printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
        for (i=6; i<16; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }

        printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
               extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
        if (extp->VppOptimal)
                printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
                       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel StrataFlash chips prior to FPO revision C have bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
                            "erase on write disabled.\n");
        extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        if (cfip && (cfip->FeatureSupport&4)) {
                cfip->FeatureSupport &= ~4;
                printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
        }
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        cfi->cfiq->BufWriteTimeoutTyp = 0;      /* Not supported */
        cfi->cfiq->BufWriteTimeoutMax = 0;      /* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        /* Note this is done after the region info is endian swapped */
        cfi->cfiq->EraseRegionInfo[1] =
                (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}

static void fixup_use_point(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        if (!mtd->point && map_is_linear(map)) {
                mtd->point   = cfi_intelext_point;
                mtd->unpoint = cfi_intelext_unpoint;
        }
}

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        if (cfi->cfiq->BufWriteTimeoutTyp) {
                printk(KERN_INFO "Using buffer write method\n" );
                mtd->write = cfi_intelext_write_buffers;
                mtd->writev = cfi_intelext_writev;
        }
}

/*
 * Some chips power-up with all sectors locked by default.
 */
static void fixup_use_powerup_lock(struct mtd_info *mtd, void *param)
{
        printk(KERN_INFO "Using auto-unlock on power-up/resume\n" );
        mtd->flags |= MTD_STUPID_LOCK;
}
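/* MTD_STUPID_LOCK marks parts whose blocks power up locked; the resume
   path is then expected to unlock them again (see the 21/03/2007
   changelog entry above). */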

static struct cfi_fixup cfi_fixup_table[] = {
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
#endif
#if !FORCE_WORD_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
#endif
        { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
        { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
        { MANUFACTURER_INTEL, 0x891c,         fixup_use_powerup_lock, NULL, },
        { 0, 0, NULL, NULL }
};
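/*
 * cfi_fixup() (see include/linux/mtd/cfi.h) walks one of these tables and
 * runs every entry whose manufacturer/device IDs match the probed chip,
 * with CFI_MFR_ANY/CFI_ID_ANY acting as wildcards; each table must end
 * with a NULL sentinel entry.
 */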

static struct cfi_fixup jedec_fixup_table[] = {
        { MANUFACTURER_INTEL, I82802AB,   fixup_use_fwh_lock, NULL, },
        { MANUFACTURER_INTEL, I82802AC,   fixup_use_fwh_lock, NULL, },
        { MANUFACTURER_ST,    M50LPW080,  fixup_use_fwh_lock, NULL, },
        { 0, 0, NULL, NULL }
};
static struct cfi_fixup fixup_table[] = {
        /* The CFI vendor IDs and the JEDEC vendor IDs appear
         * to be common.  It seems the device IDs are as well.
         * This table is to pick up all cases where we know
         * that is the case.
         */
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
        { 0, 0, NULL, NULL }
};

static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
        struct cfi_pri_intelext *extp;
        unsigned int extp_size = sizeof(*extp);

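        /*
         * The extended query table is variable-length and its true size
         * is only known once parsed: read with a default size first,
         * accumulate the extra space needed as fields are discovered,
         * and jump back to "again" to re-read whenever the buffer turns
         * out to be too small.
         */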
 again:
        extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
        if (!extp)
                return NULL;

        if (extp->MajorVersion != '1' ||
            (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
                printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
                       "version %c.%c.\n",  extp->MajorVersion,
                       extp->MinorVersion);
                kfree(extp);
                return NULL;
        }

        /* Do some byteswapping if necessary */
        extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
        extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
        extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

        if (extp->MajorVersion == '1' && extp->MinorVersion >= '3') {
                unsigned int extra_size = 0;
                int nb_parts, i;

                /* Protection Register info */
                extra_size += (extp->NumProtectionFields - 1) *
                              sizeof(struct cfi_intelext_otpinfo);

                /* Burst Read info */
                extra_size += 2;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                extra_size += extp->extra[extra_size-1];

                /* Number of hardware-partitions */
                extra_size += 1;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                nb_parts = extp->extra[extra_size - 1];

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        extra_size += 2;

                for (i = 0; i < nb_parts; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
                        extra_size += sizeof(*rinfo);
                        if (extp_size < sizeof(*extp) + extra_size)
                                goto need_more;
                        rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
                        extra_size += (rinfo->NumBlockTypes - 1)
                                      * sizeof(struct cfi_intelext_blockinfo);
                }

                if (extp->MinorVersion >= '4')
                        extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

                if (extp_size < sizeof(*extp) + extra_size) {
                        need_more:
                        extp_size = sizeof(*extp) + extra_size;
                        kfree(extp);
                        if (extp_size > 4096) {
                                printk(KERN_ERR
                                        "%s: cfi_pri_intelext is too fat\n",
                                        __FUNCTION__);
                                return NULL;
                        }
                        goto again;
                }
        }

        return extp;
}

struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct mtd_info *mtd;
        int i;

        mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
        if (!mtd) {
                printk(KERN_ERR "Failed to allocate memory for MTD device\n");
                return NULL;
        }
        mtd->priv = map;
        mtd->type = MTD_NORFLASH;

        /* Fill in the default mtd operations */
        mtd->erase   = cfi_intelext_erase_varsize;
        mtd->read    = cfi_intelext_read;
        mtd->write   = cfi_intelext_write_words;
        mtd->sync    = cfi_intelext_sync;
        mtd->lock    = cfi_intelext_lock;
        mtd->unlock  = cfi_intelext_unlock;
        mtd->suspend = cfi_intelext_suspend;
        mtd->resume  = cfi_intelext_resume;
        mtd->flags   = MTD_CAP_NORFLASH;
        mtd->name    = map->name;
        mtd->writesize = 1;

        mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

        if (cfi->cfi_mode == CFI_MODE_CFI) {
                /*
                 * It's a real CFI chip, not one for which the probe
                 * routine faked a CFI structure. So we read the feature
                 * table from it.
                 */
                __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
                struct cfi_pri_intelext *extp;

                extp = read_pri_intelext(map, adr);
                if (!extp) {
                        kfree(mtd);
                        return NULL;
                }

                /* Install our own private info structure */
                cfi->cmdset_priv = extp;

                cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
                /* Tell the user about it in lots of lovely detail */
                cfi_tell_features(extp);
#endif

                if(extp->SuspendCmdSupport & 1) {
                        printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
                }
        }
        else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
                /* Apply jedec specific fixups */
                cfi_fixup(mtd, jedec_fixup_table);
        }
        /* Apply generic fixups */
        cfi_fixup(mtd, fixup_table);

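        /*
         * CFI encodes typical operation times as powers of two:
         * WordWriteTimeoutTyp is log2(microseconds) and
         * BlockEraseTimeoutTyp is log2(milliseconds), hence the
         * 1 << and 1000 << conversions to microseconds below.
         */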
        for (i=0; i< cfi->numchips; i++) {
                if (cfi->cfiq->WordWriteTimeoutTyp)
                        cfi->chips[i].word_write_time =
                                1<<cfi->cfiq->WordWriteTimeoutTyp;
                else
                        cfi->chips[i].word_write_time = 50000;

                if (cfi->cfiq->BufWriteTimeoutTyp)
                        cfi->chips[i].buffer_write_time =
                                1<<cfi->cfiq->BufWriteTimeoutTyp;
                /* No default; if it isn't specified, we won't use it */

                if (cfi->cfiq->BlockEraseTimeoutTyp)
                        cfi->chips[i].erase_time =
                                1000<<cfi->cfiq->BlockEraseTimeoutTyp;
                else
                        cfi->chips[i].erase_time = 2000000;

                cfi->chips[i].ref_point_counter = 0;
                init_waitqueue_head(&(cfi->chips[i].wq));
        }

        map->fldrv = &cfi_intelext_chipdrv;

        return cfi_intelext_setup(mtd);
}
struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
EXPORT_SYMBOL_GPL(cfi_cmdset_0200);
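/*
 * Map drivers normally don't call cfi_cmdset_0001() directly; a minimal
 * sketch of the usual path (using the generic chip-probe API, not
 * anything defined in this file):
 *
 *      struct mtd_info *mtd = do_map_probe("cfi_probe", &my_map);
 *      if (mtd)
 *              add_mtd_device(mtd);
 *
 * cfi_probe reads the CFI query table and dispatches on the primary
 * vendor command set ID, which is how 0x0001/0x0003/0x0200 parts all
 * end up here via the aliases above.
 */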

static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long offset = 0;
        int i,j;
        unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

        //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

        mtd->size = devsize * cfi->numchips;

        mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
        mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
                        * mtd->numeraseregions, GFP_KERNEL);
        if (!mtd->eraseregions) {
                printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
                goto setup_err;
        }

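        /*
         * Each EraseRegionInfo word packs a region's geometry: bits 31:16
         * hold the block size in 256-byte units (hence the ">> 8 & ~0xff"
         * below, which multiplies by 256) and bits 15:0 hold the number
         * of blocks minus one.
         */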
        for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
                unsigned long ernum, ersize;
                ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
                ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

                if (mtd->erasesize < ersize) {
                        mtd->erasesize = ersize;
                }
                for (j=0; j<cfi->numchips; j++) {
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
                }
                offset += (ersize * ernum);
        }

        if (offset != devsize) {
                /* Argh */
                printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
                goto setup_err;
        }

        for (i=0; i<mtd->numeraseregions;i++){
                printk(KERN_DEBUG "erase region %d: offset=0x%x,size=0x%x,blocks=%d\n",
                       i,mtd->eraseregions[i].offset,
                       mtd->eraseregions[i].erasesize,
                       mtd->eraseregions[i].numblocks);
        }

#ifdef CONFIG_MTD_OTP
        mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
        mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
        mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
        mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
        mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
        mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

        /* This function has the potential to distort the reality
           a bit and therefore should be called last. */
        if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
                goto setup_err;

        __module_get(THIS_MODULE);
        register_reboot_notifier(&mtd->reboot_notifier);
        return mtd;

 setup_err:
        if(mtd) {
                kfree(mtd->eraseregions);
                kfree(mtd);
        }
        kfree(cfi->cmdset_priv);
        return NULL;
}

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
                                        struct cfi_private **pcfi)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = *pcfi;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        /*
         * Probing of multi-partition flash chips.
         *
         * To support multiple partitions when available, we simply arrange
         * for each of them to have their own flchip structure even if they
         * are on the same physical chip.  This means completely recreating
         * a new cfi_private structure right here which is a blatant code
         * layering violation, but this is still the least intrusive
         * arrangement at this point. This can be rearranged in the future
         * if someone feels motivated enough.  --nico
         */
        if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
            && extp->FeatureSupport & (1 << 9)) {
                struct cfi_private *newcfi;
                struct flchip *chip;
                struct flchip_shared *shared;
                int offs, numregions, numparts, partshift, numvirtchips, i, j;

                /* Protection Register info */
                offs = (extp->NumProtectionFields - 1) *
                       sizeof(struct cfi_intelext_otpinfo);

                /* Burst Read info */
                offs += extp->extra[offs+1]+2;

                /* Number of partition regions */
                numregions = extp->extra[offs];
                offs += 1;

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        offs += 2;

                /* Number of hardware partitions */
                numparts = 0;
                for (i = 0; i < numregions; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
                        numparts += rinfo->NumIdentPartitions;
                        offs += sizeof(*rinfo)
                                + (rinfo->NumBlockTypes - 1) *
                                  sizeof(struct cfi_intelext_blockinfo);
                }

                /* Programming Region info */
                if (extp->MinorVersion >= '4') {
                        struct cfi_intelext_programming_regioninfo *prinfo;
                        prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
                        mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
                        mtd->flags &= ~MTD_BIT_WRITEABLE;
                        printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
                               map->name, mtd->writesize,
                               cfi->interleave * prinfo->ControlValid,
                               cfi->interleave * prinfo->ControlInvalid);
                }

                /*
                 * All functions below currently rely on all chips having
                 * the same geometry so we'll just assume that all hardware
                 * partitions are of the same size too.
                 */
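                /*
                 * This also assumes numparts is a power of two, so that
                 * __ffs(numparts) == log2(numparts) and each virtual chip
                 * spans exactly 1 << partshift bytes.
                 */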
                partshift = cfi->chipshift - __ffs(numparts);

                if ((1 << partshift) < mtd->erasesize) {
                        printk( KERN_ERR
                                "%s: bad number of hw partitions (%d)\n",
                                __FUNCTION__, numparts);
                        return -EINVAL;
                }

                numvirtchips = cfi->numchips * numparts;
                newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
                if (!newcfi)
                        return -ENOMEM;
                shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
                if (!shared) {
                        kfree(newcfi);
                        return -ENOMEM;
                }
                memcpy(newcfi, cfi, sizeof(struct cfi_private));
                newcfi->numchips = numvirtchips;
                newcfi->chipshift = partshift;

                chip = &newcfi->chips[0];
                for (i = 0; i < cfi->numchips; i++) {
                        shared[i].writing = shared[i].erasing = NULL;
                        spin_lock_init(&shared[i].lock);
                        for (j = 0; j < numparts; j++) {
                                *chip = cfi->chips[i];
                                chip->start += j << partshift;
                                chip->priv = &shared[i];
                                /* those should be reset too since
                                   they create memory references. */
                                init_waitqueue_head(&chip->wq);
                                spin_lock_init(&chip->_spinlock);
                                chip->mutex = &chip->_spinlock;
                                chip++;
                        }
                }

                printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
                                  "--> %d partitions of %d KiB\n",
                                  map->name, cfi->numchips, cfi->interleave,
                                  newcfi->numchips, 1<<(newcfi->chipshift-10));

                map->fldrv_priv = newcfi;
                *pcfi = newcfi;
                kfree(cfi);
        }

        return 0;
}

/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */
static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        DECLARE_WAITQUEUE(wait, current);
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
        unsigned long timeo = jiffies + HZ;

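        /*
         * status_OK tests SR.7, the "WSM ready" bit of the status
         * register.  status_PWS (SR.0) is only consulted on
         * multi-partition chips (chip->priv set), where it lets us
         * proceed when the busy state belongs to another partition.
         */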
        switch (chip->state) {

        case FL_STATUS:
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        /* At this point we're fine with write operations
                           in other partitions as they don't conflict. */
                        if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
                                break;

                        spin_unlock(chip->mutex);
                        cfi_udelay(1);
                        spin_lock(chip->mutex);
                        /* Someone else might have been playing with it. */
                        return -EAGAIN;
                }
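                /* Fall through: the chip is now ready for array access */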

        case FL_READY:
        case FL_CFI_QUERY:
        case FL_JEDEC_QUERY:
                return 0;

        case FL_ERASING:
                if (!cfip ||
                    !(cfip->FeatureSupport & 2) ||
                    !(mode == FL_READY || mode == FL_POINT ||
                     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
                        goto sleep;


                /* Erase suspend */
                map_write(map, CMD(0xB0), adr);

                /* If the flash has finished erasing, then 'erase suspend'
                 * appears to make some (28F320) flash devices switch to
                 * 'read' mode.  Make sure that we switch to 'read status'
                 * mode so we get the right data. --rmk
                 */
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_ERASING;
                chip->state = FL_ERASE_SUSPENDING;
                chip->erase_suspended = 1;
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        if (time_after(jiffies, timeo)) {
                                /* Urgh. Resume and pretend we weren't here.  */
                                map_write(map, CMD(0xd0), adr);
                                /* Make sure we're in 'read status' mode if it had finished */
                                map_write(map, CMD(0x70), adr);
                                chip->state = FL_ERASING;
                                chip->oldstate = FL_READY;
                                printk(KERN_ERR "%s: Chip not ready after erase "
                                       "suspended: status = 0x%lx\n", map->name, status.x[0]);
                                return -EIO;
                        }

                        spin_unlock(chip->mutex);
                        cfi_udelay(1);
                        spin_lock(chip->mutex);
                        /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
                           So we can just loop here. */
                }
                chip->state = FL_STATUS;
                return 0;

        case FL_XIP_WHILE_ERASING:
                if (mode != FL_READY && mode != FL_POINT &&
                    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
                        goto sleep;
                chip->oldstate = chip->state;
                chip->state = FL_READY;
                return 0;

        case FL_POINT:
                /* Only if there's no operation suspended... */
                if (mode == FL_READY && chip->oldstate == FL_READY)
                        return 0;
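                /* Fall through */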

        case FL_SHUTDOWN:
                /* The machine is rebooting now, so no one can access the chip anymore */
                return -EIO;
        default:
        sleep:
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);
                spin_unlock(chip->mutex);
                schedule();
                remove_wait_queue(&chip->wq, &wait);
                spin_lock(chip->mutex);
                return -EAGAIN;
        }
}

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        int ret;

 retry:
        if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING
                           || mode == FL_OTP_WRITE || mode == FL_SHUTDOWN)) {
                /*
                 * OK. We have possibility for contention on the write/erase
                 * operations which are global to the real chip and not per
                 * partition.  So let's fight it over in the partition which
                 * currently has authority on the operation.
                 *
                 * The rules are as follows:
                 *
                 * - any write operation must own shared->writing.
                 *
                 * - any erase operation must own _both_ shared->writing and
                 *   shared->erasing.
                 *
                 * - contention arbitration is handled in the owner's context.
                 *
                 * The 'shared' struct can be read and/or written only when
                 * its lock is taken.
                 */
                struct flchip_shared *shared = chip->priv;
                struct flchip *contender;
                spin_lock(&shared->lock);
                contender = shared->writing;
                if (contender && contender != chip) {
                        /*
                         * The engine to perform desired operation on this
                         * partition is already in use by someone else.
                         * Let's fight over it in the context of the chip
                         * currently using it.  If it is possible to suspend,
                         * that other partition will do just that, otherwise
                         * it'll happily send us to sleep.  In any case, when
                         * get_chip returns success we're clear to go ahead.
                         */
                        ret = spin_trylock(contender->mutex);
                        spin_unlock(&shared->lock);
                        if (!ret)
                                goto retry;
                        spin_unlock(chip->mutex);
                        ret = chip_ready(map, contender, contender->start, mode);
                        spin_lock(chip->mutex);

                        if (ret == -EAGAIN) {
                                spin_unlock(contender->mutex);
                                goto retry;
                        }
                        if (ret) {
                                spin_unlock(contender->mutex);
                                return ret;
                        }
                        spin_lock(&shared->lock);
                        spin_unlock(contender->mutex);
                }

                /* We now own it */
                shared->writing = chip;
                if (mode == FL_ERASING)
                        shared->erasing = chip;
                spin_unlock(&shared->lock);
        }
        ret = chip_ready(map, chip, adr, mode);
        if (ret == -EAGAIN)
                goto retry;

        return ret;
}

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;

        if (chip->priv) {
                struct flchip_shared *shared = chip->priv;
                spin_lock(&shared->lock);
                if (shared->writing == chip && chip->oldstate == FL_READY) {
                        /* We own the ability to write, but we're done */
                        shared->writing = shared->erasing;
                        if (shared->writing && shared->writing != chip) {
                                /* give back ownership to who we loaned it from */
                                struct flchip *loaner = shared->writing;
                                spin_lock(loaner->mutex);
                                spin_unlock(&shared->lock);
                                spin_unlock(chip->mutex);
                                put_chip(map, loaner, loaner->start);
                                spin_lock(chip->mutex);
                                spin_unlock(loaner->mutex);
                                wake_up(&chip->wq);
                                return;
                        }
                        shared->erasing = NULL;
                        shared->writing = NULL;
                } else if (shared->erasing == chip && shared->writing != chip) {
                        /*
                         * We own the ability to erase without the ability
                         * to write, which means the erase was suspended
                         * and some other partition is currently writing.
                         * Don't let the switch below mess things up since
                         * we don't have ownership to resume anything.
                         */
                        spin_unlock(&shared->lock);
                        wake_up(&chip->wq);
                        return;
                }
                spin_unlock(&shared->lock);
        }

        switch(chip->oldstate) {
        case FL_ERASING:
                chip->state = chip->oldstate;
                /* What if one interleaved chip has finished and the
                   other hasn't? The old code would leave the finished
                   one in READY mode. That's bad, and caused -EROFS
                   errors to be returned from do_erase_oneblock because
                   that's the only bit it checked for at the time.
                   As the state machine appears to explicitly allow
                   sending the 0x70 (Read Status) command to an erasing
                   chip and expecting it to be ignored, that's what we
                   do. */
                map_write(map, CMD(0xd0), adr);
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_READY;
                chip->state = FL_ERASING;
                break;

        case FL_XIP_WHILE_ERASING:
                chip->state = chip->oldstate;
                chip->oldstate = FL_READY;
                break;

        case FL_READY:
        case FL_STATUS:
        case FL_JEDEC_QUERY:
                /* We should really make set_vpp() count, rather than doing this */
                DISABLE_VPP(map);
                break;
        default:
                printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
        }
        wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions). Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
                        unsigned long adr)
{
        /* TODO: chips with no XIP use should ignore and return */
        (void) map_read(map, adr); /* ensure mmu mapping is up to date */
        local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
                                unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xff), adr);
                chip->state = FL_READY;
        }
        (void) map_read(map, adr);
        xip_iprefetch();
        local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_wait_for_operation() function polls for both the given timeout
 * and pending (but still masked) hardware interrupts.  Whenever there is an
 * interrupt pending then the flash erase or write operation is suspended,
 * array mode restored and interrupts unmasked.  Task scheduling might also
 * happen at that point.  The CPU eventually returns from the interrupt or
 * the call to schedule() and the suspended flash operation is resumed for
 * the remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static int __xipram xip_wait_for_operation(
                struct map_info *map, struct flchip *chip,
                unsigned long adr, unsigned int chip_op_time )
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
        map_word status, OK = CMD(0x80);
        unsigned long usec, suspended, start, done;
        flstate_t oldstate, newstate;

        start = xip_currtime();
        usec = chip_op_time * 8;
        if (usec == 0)
                usec = 500000;
        done = 0;

        do {
                cpu_relax();
                if (xip_irqpending() && cfip &&
                    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
                     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
                    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
                        /*
                         * Let's suspend the erase or write operation when
                         * supported.  Note that we currently don't try to
                         * suspend interleaved chips if there is already
                         * another operation suspended (imagine what happens
                         * when one chip was already done with the current
                         * operation while another chip suspended it, then
                         * we resume the whole thing at once).  Yes, it
                         * can happen!
                         */
                        usec -= done;
                        map_write(map, CMD(0xb0), adr);
                        map_write(map, CMD(0x70), adr);
                        suspended = xip_currtime();
                        do {
                                if (xip_elapsed_since(suspended) > 100000) {
                                        /*
                                         * The chip doesn't want to suspend
                                         * after waiting for 100 msecs.
                                         * This is a critical error but there
                                         * is not much we can do here.
                                         */
                                        return -EIO;
                                }
                                status = map_read(map, adr);
                        } while (!map_word_andequal(map, status, OK, OK));

                        /* Suspend succeeded */
                        oldstate = chip->state;
                        if (oldstate == FL_ERASING) {
                                if (!map_word_bitsset(map, status, CMD(0x40)))
                                        break;
                                newstate = FL_XIP_WHILE_ERASING;
                                chip->erase_suspended = 1;
                        } else {
                                if (!map_word_bitsset(map, status, CMD(0x04)))
                                        break;
                                newstate = FL_XIP_WHILE_WRITING;
                                chip->write_suspended = 1;
                        }
                        chip->state = newstate;
                        map_write(map, CMD(0xff), adr);
                        (void) map_read(map, adr);
                        asm volatile (".rep 8; nop; .endr");
                        local_irq_enable();
                        spin_unlock(chip->mutex);
                        asm volatile (".rep 8; nop; .endr");
                        cond_resched();

                        /*
                         * We're back.  However someone else might have
                         * decided to go write to the chip if we are in
                         * a suspended erase state.  If so let's wait
                         * until it's done.
                         */
                        spin_lock(chip->mutex);
                        while (chip->state != newstate) {
                                DECLARE_WAITQUEUE(wait, current);
                                set_current_state(TASK_UNINTERRUPTIBLE);
                                add_wait_queue(&chip->wq, &wait);
                                spin_unlock(chip->mutex);
                                schedule();
                                remove_wait_queue(&chip->wq, &wait);
                                spin_lock(chip->mutex);
                        }
                        /* Disallow XIP again */
                        local_irq_disable();

                        /* Resume the write or erase operation */
                        map_write(map, CMD(0xd0), adr);
                        map_write(map, CMD(0x70), adr);
                        chip->state = oldstate;
                        start = xip_currtime();
                } else if (usec >= 1000000/HZ) {
                        /*
                         * Try to save on CPU power when waiting delay
                         * is at least a system timer tick period.
                         * No need to be extremely accurate here.
                         */
                        xip_cpu_idle();
                }
                status = map_read(map, adr);
                done = xip_elapsed_since(start);
        } while (!map_word_andequal(map, status, OK, OK)
                 && done < usec);

        return (done >= usec) ? -ETIME : 0;
}

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVAL_CACHE_AND_WAIT.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
        INVALIDATE_CACHED_RANGE(map, from, size)

#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec) \
        xip_wait_for_operation(map, chip, cmd_adr, usec)

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)
#define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation

static int inval_cache_and_wait_for_operation(
                struct map_info *map, struct flchip *chip,
                unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
                unsigned int chip_op_time)
{
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK = CMD(0x80);
        int chip_state = chip->state;
        unsigned int timeo, sleep_time;

        spin_unlock(chip->mutex);
        if (inval_len)
                INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
        spin_lock(chip->mutex);

        /* set our timeout to 8 times the expected delay */
        timeo = chip_op_time * 8;
        if (!timeo)
                timeo = 500000;
        sleep_time = chip_op_time / 2;

        for (;;) {
                status = map_read(map, cmd_adr);
                if (map_word_andequal(map, status, status_OK, status_OK))
                        break;

                if (!timeo) {
                        map_write(map, CMD(0x70), cmd_adr);
                        chip->state = FL_STATUS;
                        return -ETIME;
                }

                /* OK Still waiting. Drop the lock, wait a while and retry. */
                spin_unlock(chip->mutex);
                if (sleep_time >= 1000000/HZ) {
                        /*
                         * Half of the normal delay still remaining
                         * can be performed with a sleeping delay instead
                         * of busy waiting.
                         */
                        msleep(sleep_time/1000);
                        timeo -= sleep_time;
                        sleep_time = 1000000/HZ;
                } else {
                        udelay(1);
                        cond_resched();
                        timeo--;
                }
                spin_lock(chip->mutex);

                while (chip->state != chip_state) {
                        /* Someone's suspended the operation: sleep */
                        DECLARE_WAITQUEUE(wait, current);
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        spin_unlock(chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        spin_lock(chip->mutex);
                }
        }

        /* Done and happy. */
        chip->state = FL_STATUS;
        return 0;
}

#endif

#define WAIT_TIMEOUT(map, chip, adr, udelay) \
        INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay);
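/* Status-only wait: there is no data range to invalidate, so this just
   polls the status register until SR.7 reports ready or the timeout hits. */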


static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
{
        unsigned long cmd_addr;
        struct cfi_private *cfi = map->fldrv_priv;
        int ret = 0;

        adr += chip->start;

        /* Ensure cmd read/writes are aligned. */
        cmd_addr = adr & ~(map_bankwidth(map)-1);

        spin_lock(chip->mutex);

        ret = get_chip(map, chip, cmd_addr, FL_POINT);

        if (!ret) {
                if (chip->state != FL_POINT && chip->state != FL_READY)
                        map_write(map, CMD(0xff), cmd_addr);

                chip->state = FL_POINT;
                chip->ref_point_counter++;
        }
        spin_unlock(chip->mutex);

        return ret;
}

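/*
 * point() hands the caller a pointer straight into the memory-mapped
 * flash for zero-copy reads; ref_point_counter keeps the chip pinned in
 * array mode (FL_POINT) until every point() is balanced by an unpoint().
 */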
static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long ofs, last_end = 0;
        int chipnum;
        int ret = 0;

        if (!map->virt || (from + len > mtd->size))
                return -EINVAL;

        /* Now lock the chip(s) to POINT state */

        /* ofs: offset within the first chip that the first read should start */
        chipnum = (from >> cfi->chipshift);
        ofs = from - (chipnum << cfi->chipshift);

        *mtdbuf = (void *)map->virt + cfi->chips[chipnum].start + ofs;
        *retlen = 0;

        while (len) {
                unsigned long thislen;

                if (chipnum >= cfi->numchips)
                        break;

                /* We cannot point across chips that are virtually disjoint */
                if (!last_end)
                        last_end = cfi->chips[chipnum].start;
                else if (cfi->chips[chipnum].start != last_end)
                        break;

                if ((len + ofs -1) >> cfi->chipshift)
                        thislen = (1<<cfi->chipshift) - ofs;
                else
                        thislen = len;

                ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
                if (ret)
                        break;

                *retlen += thislen;
                len -= thislen;

                ofs = 0;
                last_end += 1 << cfi->chipshift;
                chipnum++;
        }
        return 0;
}
1226
1227static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
1228{
1229        struct map_info *map = mtd->priv;
1230        struct cfi_private *cfi = map->fldrv_priv;
1231        unsigned long ofs;
1232        int chipnum;
1233
1234        /* Now unlock the chip(s) POINT state */
1235
1236        /* ofs: offset within the first chip that the first read should start */
1237        chipnum = (from >> cfi->chipshift);
1238        ofs = from - (chipnum <<  cfi->chipshift);
1239
1240        while (len) {
1241                unsigned long thislen;
1242                struct flchip *chip;
1243
1244                chip = &cfi->chips[chipnum];
1245                if (chipnum >= cfi->numchips)
1246                        break;
1247
1248                if ((len + ofs -1) >> cfi->chipshift)
1249                        thislen = (1<<cfi->chipshift) - ofs;
1250                else
1251                        thislen = len;
1252
1253                spin_lock(chip->mutex);
1254                if (chip->state == FL_POINT) {
1255                        chip->ref_point_counter--;
1256                        if (chip->ref_point_counter == 0)
1257                                chip->state = FL_READY;
1258                } else
1259                        printk(KERN_WARNING "%s: unpoint called on a non-pointed region\n", map->name); /* Should this give an error? */
1260
1261                put_chip(map, chip, chip->start);
1262                spin_unlock(chip->mutex);
1263
1264                len -= thislen;
1265                ofs = 0;
1266                chipnum++;
1267        }
1268}
1269
1270static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
1271{
1272        unsigned long cmd_addr;
1273        struct cfi_private *cfi = map->fldrv_priv;
1274        int ret;
1275
1276        adr += chip->start;
1277
1278        /* Ensure cmd read/writes are aligned. */
1279        cmd_addr = adr & ~(map_bankwidth(map)-1);
1280
1281        spin_lock(chip->mutex);
1282        ret = get_chip(map, chip, cmd_addr, FL_READY);
1283        if (ret) {
1284                spin_unlock(chip->mutex);
1285                return ret;
1286        }
1287
1288        if (chip->state != FL_POINT && chip->state != FL_READY) {
1289                map_write(map, CMD(0xff), cmd_addr);
1290
1291                chip->state = FL_READY;
1292        }
1293
1294        map_copy_from(map, buf, adr, len);
1295
1296        put_chip(map, chip, cmd_addr);
1297
1298        spin_unlock(chip->mutex);
1299        return 0;
1300}
1301
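/*
 * A read may span several chips: split it into per-chip runs (chipshift
 * is log2 of one chip's address span) and let do_read_onechip() take the
 * chip mutex and switch each chip to array mode as needed.
 */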
1302static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1303{
1304        struct map_info *map = mtd->priv;
1305        struct cfi_private *cfi = map->fldrv_priv;
1306        unsigned long ofs;
1307        int chipnum;
1308        int ret = 0;
1309
1310        /* ofs: offset within the first chip that the first read should start */
1311        chipnum = (from >> cfi->chipshift);
1312        ofs = from - (chipnum << cfi->chipshift);
1313
1314        *retlen = 0;
1315
1316        while (len) {
1317                unsigned long thislen;
1318
1319                if (chipnum >= cfi->numchips)
1320                        break;
1321
1322                if ((len + ofs - 1) >> cfi->chipshift)
1323                        thislen = (1<<cfi->chipshift) - ofs;
1324                else
1325                        thislen = len;
1326
1327                ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1328                if (ret)
1329                        break;
1330
1331                *retlen += thislen;
1332                len -= thislen;
1333                buf += thislen;
1334
1335                ofs = 0;
1336                chipnum++;
1337        }
1338        return ret;
1339}
1340
1341static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1342                                     unsigned long adr, map_word datum, int mode)
1343{
1344        struct cfi_private *cfi = map->fldrv_priv;
1345        map_word status, write_cmd;
1346        int ret = 0;
1347
1348        adr += chip->start;
1349
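        /*
         * 0x40 is the single-word Program command (chips reporting the
         * 0x0200 primary command set want 0x41 instead); 0xC0 programs
         * the protection (OTP) registers.
         */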
1350        switch (mode) {
1351        case FL_WRITING:
1352                write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0x40) : CMD(0x41);
1353                break;
1354        case FL_OTP_WRITE:
1355                write_cmd = CMD(0xc0);
1356                break;
1357        default:
1358                return -EINVAL;
1359        }
1360
1361        spin_lock(chip->mutex);
1362        ret = get_chip(map, chip, adr, mode);
1363        if (ret) {
1364                spin_unlock(chip->mutex);
1365                return ret;
1366        }
1367
1368        XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1369        ENABLE_VPP(map);
1370        xip_disable(map, chip, adr);
1371        map_write(map, write_cmd, adr);
1372        map_write(map, datum, adr);
1373        chip->state = mode;
1374
1375        ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1376                                   adr, map_bankwidth(map),
1377                                   chip->word_write_time);
1378        if (ret) {
1379                xip_enable(map, chip, adr);
1380                printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
1381                goto out;
1382        }
1383
1384        /* check for errors */
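        /* SR.4 (0x10) = program error, SR.3 (0x08) = VPP low, SR.1 (0x02) = block locked */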
1385        status = map_read(map, adr);
1386        if (map_word_bitsset(map, status, CMD(0x1a))) {
1387                unsigned long chipstatus = MERGESTATUS(status);
1388
1389                /* reset status */
1390                map_write(map, CMD(0x50), adr);
1391                map_write(map, CMD(0x70), adr);
1392                xip_enable(map, chip, adr);
1393
1394                if (chipstatus & 0x02) {
1395                        ret = -EROFS;
1396                } else if (chipstatus & 0x08) {
1397                        printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
1398                        ret = -EIO;
1399                } else {
1400                        printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
1401                        ret = -EINVAL;
1402                }
1403
1404                goto out;
1405        }
1406
1407        xip_enable(map, chip, adr);
1408 out:   put_chip(map, chip, adr);
1409        spin_unlock(chip->mutex);
1410        return ret;
1411}
1412
1413
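/*
 * Word-at-a-time write path, used when buffer writes are unavailable or
 * disabled: unaligned head and tail fragments are padded with 0xFF
 * (programming can only clear bits, so 0xFF leaves the neighbouring
 * bytes untouched) and every bus-width word goes through
 * do_write_oneword().
 */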
1414static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
1415{
1416        struct map_info *map = mtd->priv;
1417        struct cfi_private *cfi = map->fldrv_priv;
1418        int ret = 0;
1419        int chipnum;
1420        unsigned long ofs;
1421
1422        *retlen = 0;
1423        if (!len)
1424                return 0;
1425
1426        chipnum = to >> cfi->chipshift;
1427        ofs = to - (chipnum << cfi->chipshift);
1428
1429        /* If it's not bus-aligned, do the first byte write */
1430        if (ofs & (map_bankwidth(map)-1)) {
1431                unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1432                int gap = ofs - bus_ofs;
1433                int n;
1434                map_word datum;
1435
1436                n = min_t(int, len, map_bankwidth(map)-gap);
1437                datum = map_word_ff(map);
1438                datum = map_word_load_partial(map, datum, buf, gap, n);
1439
1440                ret = do_write_oneword(map, &cfi->chips[chipnum],
1441                                               bus_ofs, datum, FL_WRITING);
1442                if (ret)
1443                        return ret;
1444
1445                len -= n;
1446                ofs += n;
1447                buf += n;
1448                (*retlen) += n;
1449
1450                if (ofs >> cfi->chipshift) {
1451                        chipnum++;
1452                        ofs = 0;
1453                        if (chipnum == cfi->numchips)
1454                                return 0;
1455                }
1456        }
1457
1458        while (len >= map_bankwidth(map)) {
1459                map_word datum = map_word_load(map, buf);
1460
1461                ret = do_write_oneword(map, &cfi->chips[chipnum],
1462                                       ofs, datum, FL_WRITING);
1463                if (ret)
1464                        return ret;
1465
1466                ofs += map_bankwidth(map);
1467                buf += map_bankwidth(map);
1468                (*retlen) += map_bankwidth(map);
1469                len -= map_bankwidth(map);
1470
1471                if (ofs >> cfi->chipshift) {
1472                        chipnum++;
1473                        ofs = 0;
1474                        if (chipnum == cfi->numchips)
1475                                return 0;
1476                }
1477        }
1478
1479        if (len & (map_bankwidth(map)-1)) {
1480                map_word datum;
1481
1482                datum = map_word_ff(map);
1483                datum = map_word_load_partial(map, datum, buf, 0, len);
1484
1485                ret = do_write_oneword(map, &cfi->chips[chipnum],
1486                                       ofs, datum, FL_WRITING);
1487                if (ret)
1488                        return ret;
1489
1490                (*retlen) += len;
1491        }
1492
1493        return 0;
1494}
1495
1496
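/*
 * Program up to one write buffer's worth (wbufsize bytes) in a single
 * operation.  The data may arrive scattered across several kvecs and is
 * assembled into bus-width words on the fly.
 */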
1497static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1498                                    unsigned long adr, const struct kvec **pvec,
1499                                    unsigned long *pvec_seek, int len)
1500{
1501        struct cfi_private *cfi = map->fldrv_priv;
1502        map_word status, write_cmd, datum;
1503        unsigned long cmd_adr;
1504        int ret, wbufsize, word_gap, words;
1505        const struct kvec *vec;
1506        unsigned long vec_seek;
1507        unsigned long initial_adr;
1508        int initial_len = len;
1509
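        /* cfiq->MaxBufWriteSize is log2(buffer bytes per chip); scale by the interleave */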
1510        wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1511        adr += chip->start;
1512        initial_adr = adr;
1513        cmd_adr = adr & ~(wbufsize-1);
1514
1515        /* Pick the Write-to-Buffer opcode (0xE8, or 0xE9 for the 0x0200 command set) just once */
1516        write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);
1517
1518        spin_lock(chip->mutex);
1519        ret = get_chip(map, chip, cmd_adr, FL_WRITING);
1520        if (ret) {
1521                spin_unlock(chip->mutex);
1522                return ret;
1523        }
1524
1525        XIP_INVAL_CACHED_RANGE(map, initial_adr, initial_len);
1526        ENABLE_VPP(map);
1527        xip_disable(map, chip, cmd_adr);
1528
1529        /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1530           [...], the device will not accept any more Write to Buffer commands".
1531           So we must check here and reset those bits if they're set. Otherwise
1532           we're just pissing in the wind */
1533        if (chip->state != FL_STATUS) {
1534                map_write(map, CMD(0x70), cmd_adr);
1535                chip->state = FL_STATUS;
1536        }
1537        status = map_read(map, cmd_adr);
1538        if (map_word_bitsset(map, status, CMD(0x30))) {
1539                xip_enable(map, chip, cmd_adr);
1540                printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
1541                xip_disable(map, chip, cmd_adr);
1542                map_write(map, CMD(0x50), cmd_adr);
1543                map_write(map, CMD(0x70), cmd_adr);
1544        }
1545
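        /*
         * 0xE8 starts a Write-to-Buffer cycle; while it is pending, reads
         * return the extended status register (XSR), whose bit 7 reports
         * whether the buffer is available.  WAIT_TIMEOUT polls for that.
         */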
1546        chip->state = FL_WRITING_TO_BUFFER;
1547        map_write(map, write_cmd, cmd_adr);
1548        ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0);
1549        if (ret) {
1550                /* Argh. Not ready for write to buffer */
1551                map_word Xstatus = map_read(map, cmd_adr);
1552                map_write(map, CMD(0x70), cmd_adr);
1553                chip->state = FL_STATUS;
1554                status = map_read(map, cmd_adr);
1555                map_write(map, CMD(0x50), cmd_adr);
1556                map_write(map, CMD(0x70), cmd_adr);
1557                xip_enable(map, chip, cmd_adr);
1558                printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
1559                                map->name, Xstatus.x[0], status.x[0]);
1560                goto out;
1561        }
1562
1563        /* Figure out the number of words to write */
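        /* The chip takes the word count in N-1 form.  If adr is unaligned,
           back up to a bus boundary and pad the leading gap with 0xFF so
           those bytes stay unprogrammed. */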
1564        word_gap = (-adr & (map_bankwidth(map)-1));
1565        words = (len - word_gap + map_bankwidth(map) - 1) / map_bankwidth(map);
1566        if (!word_gap) {
1567                words--;
1568        } else {
1569                word_gap = map_bankwidth(map) - word_gap;
1570                adr -= word_gap;
1571                datum = map_word_ff(map);
1572        }
1573
1574        /* Write length of data to come */
1575        map_write(map, CMD(words), cmd_adr);
1576
1577        /* Write data */
1578        vec = *pvec;
1579        vec_seek = *pvec_seek;
1580        do {
1581                int n = map_bankwidth(map) - word_gap;
1582                if (n > vec->iov_len - vec_seek)
1583                        n = vec->iov_len - vec_seek;
1584                if (n > len)
1585                        n = len;
1586
1587                if (!word_gap && len < map_bankwidth(map))
1588                        datum = map_word_ff(map);
1589
1590                datum = map_word_load_partial(map, datum,
1591                                              vec->iov_base + vec_seek,
1592                                              word_gap, n);
1593
1594                len -= n;
1595                word_gap += n;
1596                if (!len || word_gap == map_bankwidth(map)) {
1597                        map_write(map, datum, adr);
1598                        adr += map_bankwidth(map);
1599                        word_gap = 0;
1600                }
1601
1602                vec_seek += n;
1603                if (vec_seek == vec->iov_len) {
1604                        vec++;
1605                        vec_seek = 0;
1606                }
1607        } while (len);
1608        *pvec = vec;
1609        *pvec_seek = vec_seek;
1610
1611        /* GO GO GO */
1612        map_write(map, CMD(0xd0), cmd_adr);
1613        chip->state = FL_WRITING;
1614
1615        ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
1616                                   initial_adr, initial_len,
1617                                   chip->buffer_write_time);
1618        if (ret) {
1619                map_write(map, CMD(0x70), cmd_adr);
1620                chip->state = FL_STATUS;
1621                xip_enable(map, chip, cmd_adr);
1622                printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
1623                goto out;
1624        }
1625
1626        /* check for errors */
1627        status = map_read(map, cmd_adr);
1628        if (map_word_bitsset(map, status, CMD(0x1a))) {
1629                unsigned long chipstatus = MERGESTATUS(status);
1630
1631                /* reset status */
1632                map_write(map, CMD(0x50), cmd_adr);
1633                map_write(map, CMD(0x70), cmd_adr);
1634                xip_enable(map, chip, cmd_adr);
1635
1636                if (chipstatus & 0x02) {
1637                        ret = -EROFS;
1638                } else if (chipstatus & 0x08) {
1639                        printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
1640                        ret = -EIO;
1641                } else {
1642                        printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
1643                        ret = -EINVAL;
1644                }
1645
1646                goto out;
1647        }
1648
1649        xip_enable(map, chip, cmd_adr);
1650 out:   put_chip(map, chip, cmd_adr);
1651        spin_unlock(chip->mutex);
1652        return ret;
1653}
1654
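/*
 * writev: feed an iovec chain through the buffer programmer, making sure
 * no single do_write_buffer() call crosses a write-buffer (wbufsize)
 * boundary or a chip boundary.
 */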
1655static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
1656                                unsigned long count, loff_t to, size_t *retlen)
1657{
1658        struct map_info *map = mtd->priv;
1659        struct cfi_private *cfi = map->fldrv_priv;
1660        int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1661        int ret = 0;
1662        int chipnum;
1663        unsigned long ofs, vec_seek, i;
1664        size_t len = 0;
1665
1666        for (i = 0; i < count; i++)
1667                len += vecs[i].iov_len;
1668
1669        *retlen = 0;
1670        if (!len)
1671                return 0;
1672
1673        chipnum = to >> cfi->chipshift;
1674        ofs = to - (chipnum << cfi->chipshift);
1675        vec_seek = 0;
1676
1677        do {
1678                /* We must not cross write block boundaries */
1679                int size = wbufsize - (ofs & (wbufsize-1));
1680
1681                if (size > len)
1682                        size = len;
1683                ret = do_write_buffer(map, &cfi->chips[chipnum],
1684                                      ofs, &vecs, &vec_seek, size);
1685                if (ret)
1686                        return ret;
1687
1688                ofs += size;
1689                (*retlen) += size;
1690                len -= size;
1691
1692                if (ofs >> cfi->chipshift) {
1693                        chipnum++;
1694                        ofs = 0;
1695                        if (chipnum == cfi->numchips)
1696                                return 0;
1697                }
1698
1699                /* Be nice and reschedule with the chip in a usable state for other
1700                   processes. */
1701                cond_resched();
1702
1703        } while (len);
1704
1705        return 0;
1706}
1707
1708static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1709                                       size_t len, size_t *retlen, const u_char *buf)
1710{
1711        struct kvec vec;
1712
1713        vec.iov_base = (void *) buf;
1714        vec.iov_len = len;
1715
1716        return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
1717}
1718
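/*
 * Erase one block: 0x20 (Block Erase) followed by 0xD0 (Confirm), then
 * poll the status register.  An erase failure flagged by SR.5 is retried
 * up to three times before giving up.
 */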
1719static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1720                                      unsigned long adr, int len, void *thunk)
1721{
1722        struct cfi_private *cfi = map->fldrv_priv;
1723        map_word status;
1724        int retries = 3;
1725        int ret;
1726
1727        adr += chip->start;
1728
1729 retry:
1730        spin_lock(chip->mutex);
1731        ret = get_chip(map, chip, adr, FL_ERASING);
1732        if (ret) {
1733                spin_unlock(chip->mutex);
1734                return ret;
1735        }
1736
1737        XIP_INVAL_CACHED_RANGE(map, adr, len);
1738        ENABLE_VPP(map);
1739        xip_disable(map, chip, adr);
1740
1741        /* Clear the status register first */
1742        map_write(map, CMD(0x50), adr);
1743
1744        /* Now erase */
1745        map_write(map, CMD(0x20), adr);
1746        map_write(map, CMD(0xD0), adr);
1747        chip->state = FL_ERASING;
1748        chip->erase_suspended = 0;
1749
1750        ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1751                                   adr, len,
1752                                   chip->erase_time);
1753        if (ret) {
1754                map_write(map, CMD(0x70), adr);
1755                chip->state = FL_STATUS;
1756                xip_enable(map, chip, adr);
1757                printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
1758                goto out;
1759        }
1760
1761        /* We've broken this before. It doesn't hurt to be safe */
1762        map_write(map, CMD(0x70), adr);
1763        chip->state = FL_STATUS;
1764        status = map_read(map, adr);
1765
1766        /* check for errors */
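        /* SR.5 (0x20) = erase error, SR.4 (0x10) = program error, SR.3 (0x08) = VPP low, SR.1 (0x02) = locked */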
1767        if (map_word_bitsset(map, status, CMD(0x3a))) {
1768                unsigned long chipstatus = MERGESTATUS(status);
1769
1770                /* Reset the error bits */
1771                map_write(map, CMD(0x50), adr);
1772                map_write(map, CMD(0x70), adr);
1773                xip_enable(map, chip, adr);
1774
1775                if ((chipstatus & 0x30) == 0x30) {
1776                        printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
1777                        ret = -EINVAL;
1778                } else if (chipstatus & 0x02) {
1779                        /* Protection bit set */
1780                        ret = -EROFS;
1781                } else if (chipstatus & 0x08) {
1782                        /* Voltage */
1783                        printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
1784                        ret = -EIO;
1785                } else if (chipstatus & 0x20 && retries--) {
1786                        printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
1787                        put_chip(map, chip, adr);
1788                        spin_unlock(chip->mutex);
1789                        goto retry;
1790                } else {
1791                        printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
1792                        ret = -EIO;
1793                }
1794
1795                goto out;
1796        }
1797
1798        xip_enable(map, chip, adr);
1799 out:   put_chip(map, chip, adr);
1800        spin_unlock(chip->mutex);
1801        return ret;
1802}
1803
1804static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1805{
1806        unsigned long ofs, len;
1807        int ret;
1808
1809        ofs = instr->addr;
1810        len = instr->len;
1811
1812        ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1813        if (ret)
1814                return ret;
1815
1816        instr->state = MTD_ERASE_DONE;
1817        mtd_erase_callback(instr);
1818
1819        return 0;
1820}
1821
1822static void cfi_intelext_sync (struct mtd_info *mtd)
1823{
1824        struct map_info *map = mtd->priv;
1825        struct cfi_private *cfi = map->fldrv_priv;
1826        int i;
1827        struct flchip *chip;
1828        int ret = 0;
1829
1830        for (i=0; !ret && i<cfi->numchips; i++) {
1831                chip = &cfi->chips[i];
1832
1833                spin_lock(chip->mutex);
1834                ret = get_chip(map, chip, chip->start, FL_SYNCING);
1835
1836                if (!ret) {
1837                        chip->oldstate = chip->state;
1838                        chip->state = FL_SYNCING;
1839                        /* No need to wake_up() on this state change -
1840                         * as the whole point is that nobody can do anything
1841                         * with the chip now anyway.
1842                         */
1843                }
1844                spin_unlock(chip->mutex);
1845        }
1846
1847        /* Unlock the chips again */
1848
1849        for (i--; i >=0; i--) {
1850                chip = &cfi->chips[i];
1851
1852                spin_lock(chip->mutex);
1853
1854                if (chip->state == FL_SYNCING) {
1855                        chip->state = chip->oldstate;
1856                        chip->oldstate = FL_READY;
1857                        wake_up(&chip->wq);
1858                }
1859                spin_unlock(chip->mutex);
1860        }
1861}
1862
1863static int __xipram do_getlockstatus_oneblock(struct map_info *map,
1864                                                struct flchip *chip,
1865                                                unsigned long adr,
1866                                                int len, void *thunk)
1867{
1868        struct cfi_private *cfi = map->fldrv_priv;
1869        int status, ofs_factor = cfi->interleave * cfi->device_type;
1870
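        /* In read-identifier mode (0x90) the word at block base + 2 holds
           the block's lock status; a non-zero value means the block is
           locked (bit 0) or locked down (bit 1). */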
1871        adr += chip->start;
1872        xip_disable(map, chip, adr+(2*ofs_factor));
1873        map_write(map, CMD(0x90), adr+(2*ofs_factor));
1874        chip->state = FL_JEDEC_QUERY;
1875        status = cfi_read_query(map, adr+(2*ofs_factor));
1876        xip_enable(map, chip, 0);
1877        return status;
1878}
1879
1880#ifdef DEBUG_LOCK_BITS
1881static int __xipram do_printlockstatus_oneblock(struct map_info *map,
1882                                                struct flchip *chip,
1883                                                unsigned long adr,
1884                                                int len, void *thunk)
1885{
1886        printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
1887               adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
1888        return 0;
1889}
1890#endif
1891
1892#define DO_XXLOCK_ONEBLOCK_LOCK         ((void *) 1)
1893#define DO_XXLOCK_ONEBLOCK_UNLOCK       ((void *) 2)
1894
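/*
 * Lock or unlock one block: 0x60 (Block Lock setup) followed by 0x01
 * (Set Lock Bit) or 0xD0 (Clear Lock Bits); the thunk selects which.
 */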
1895static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
1896                                       unsigned long adr, int len, void *thunk)
1897{
1898        struct cfi_private *cfi = map->fldrv_priv;
1899        struct cfi_pri_intelext *extp = cfi->cmdset_priv;
1900        int udelay;
1901        int ret;
1902
1903        adr += chip->start;
1904
1905        spin_lock(chip->mutex);
1906        ret = get_chip(map, chip, adr, FL_LOCKING);
1907        if (ret) {
1908                spin_unlock(chip->mutex);
1909                return ret;
1910        }
1911
1912        ENABLE_VPP(map);
1913        xip_disable(map, chip, adr);
1914
1915        map_write(map, CMD(0x60), adr);
1916        if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
1917                map_write(map, CMD(0x01), adr);
1918                chip->state = FL_LOCKING;
1919        } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
1920                map_write(map, CMD(0xD0), adr);
1921                chip->state = FL_UNLOCKING;
1922        } else
1923                BUG();
1924
1925        /*
1926         * If Instant Individual Block Locking supported then no need
1927         * to delay.
1928         */
1929        udelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1000000/HZ : 0;
1930
1931        ret = WAIT_TIMEOUT(map, chip, adr, udelay);
1932        if (ret) {
1933                map_write(map, CMD(0x70), adr);
1934                chip->state = FL_STATUS;
1935                xip_enable(map, chip, adr);
1936                printk(KERN_ERR "%s: block lock/unlock error: (status timeout)\n", map->name);
1937                goto out;
1938        }
1939
1940        xip_enable(map, chip, adr);
1941out:    put_chip(map, chip, adr);
1942        spin_unlock(chip->mutex);
1943        return ret;
1944}
1945
1946static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
1947{
1948        int ret;
1949
1950#ifdef DEBUG_LOCK_BITS
1951        printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08zX\n",
1952               __FUNCTION__, ofs, len);
1953        cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1954                ofs, len, NULL);
1955#endif
1956
1957        ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
1958                ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
1959
1960#ifdef DEBUG_LOCK_BITS
1961        printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
1962               __FUNCTION__, ret);
1963        cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1964                ofs, len, NULL);
1965#endif
1966
1967        return ret;
1968}
1969
1970static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
1971{
1972        int ret;
1973
1974#ifdef DEBUG_LOCK_BITS
1975        printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08zX\n",
1976               __FUNCTION__, ofs, len);
1977        cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1978                ofs, len, NULL);
1979#endif
1980
1981        ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
1982                                        ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
1983
1984#ifdef DEBUG_LOCK_BITS
1985        printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
1986               __FUNCTION__, ret);
1987        cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1988                ofs, len, NULL);
1989#endif
1990
1991        return ret;
1992}
1993
1994#ifdef CONFIG_MTD_OTP
1995
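/*
 * The protection (OTP) registers live outside the normal array and are
 * reached through the Read Identifier command (0x90).  All the accessors
 * below funnel through cfi_intelext_otp_walk(), which walks the register
 * layout advertised in the Intel extended query table.
 */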
1996typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
1997                        u_long data_offset, u_char *buf, u_int size,
1998                        u_long prot_offset, u_int groupno, u_int groupsize);
1999
2000static int __xipram
2001do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
2002            u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2003{
2004        struct cfi_private *cfi = map->fldrv_priv;
2005        int ret;
2006
2007        spin_lock(chip->mutex);
2008        ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
2009        if (ret) {
2010                spin_unlock(chip->mutex);
2011                return ret;
2012        }
2013
2014        /* let's ensure we're not reading back cached data from array mode */
2015        INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2016
2017        xip_disable(map, chip, chip->start);
2018        if (chip->state != FL_JEDEC_QUERY) {
2019                map_write(map, CMD(0x90), chip->start);
2020                chip->state = FL_JEDEC_QUERY;
2021        }
2022        map_copy_from(map, buf, chip->start + offset, size);
2023        xip_enable(map, chip, chip->start);
2024
2025        /* then ensure we don't keep OTP data in the cache */
2026        INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2027
2028        put_chip(map, chip, chip->start);
2029        spin_unlock(chip->mutex);
2030        return 0;
2031}
2032
2033static int
2034do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
2035             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2036{
2037        int ret;
2038
2039        while (size) {
2040                unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
2041                int gap = offset - bus_ofs;
2042                int n = min_t(int, size, map_bankwidth(map)-gap);
2043                map_word datum = map_word_ff(map);
2044
2045                datum = map_word_load_partial(map, datum, buf, gap, n);
2046                ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
2047                if (ret)
2048                        return ret;
2049
2050                offset += n;
2051                buf += n;
2052                size -= n;
2053        }
2054
2055        return 0;
2056}
2057
2058static int
2059do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
2060            u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2061{
2062        struct cfi_private *cfi = map->fldrv_priv;
2063        map_word datum;
2064
2065        /* make sure area matches group boundaries */
2066        if (size != grpsz)
2067                return -EXDEV;
2068
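        /* Lock the group by programming its bit in the Protection Lock
           Register to 0; flash programming can only clear bits. */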
2069        datum = map_word_ff(map);
2070        datum = map_word_clr(map, datum, CMD(1 << grpno));
2071        return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
2072}
2073
2074static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2075                                 size_t *retlen, u_char *buf,
2076                                 otp_op_t action, int user_regs)
2077{
2078        struct map_info *map = mtd->priv;
2079        struct cfi_private *cfi = map->fldrv_priv;
2080        struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2081        struct flchip *chip;
2082        struct cfi_intelext_otpinfo *otp;
2083        u_long devsize, reg_prot_offset, data_offset;
2084        u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
2085        u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
2086        int ret;
2087
2088        *retlen = 0;
2089
2090        /* Check that we actually have some OTP registers */
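        /* FeatureSupport bit 6 (0x40) advertises protection registers */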
2091        if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
2092                return -ENODATA;
2093
2094        /* we need real chips here not virtual ones */
2095        devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
2096        chip_step = devsize >> cfi->chipshift;
2097        chip_num = 0;
2098
2099        /* Some chips have OTP located in the _top_ partition only.
2100           For example: Intel 28F256L18T (T means top-parameter device) */
2101        if (cfi->mfr == MANUFACTURER_INTEL) {
2102                switch (cfi->id) {
2103                case 0x880b:
2104                case 0x880c:
2105                case 0x880d:
2106                        chip_num = chip_step - 1;
2107                }
2108        }
2109
2110        for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
2111                chip = &cfi->chips[chip_num];
2112                otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
2113
2114                /* first OTP region */
2115                field = 0;
2116                reg_prot_offset = extp->ProtRegAddr;
2117                reg_fact_groups = 1;
2118                reg_fact_size = 1 << extp->FactProtRegSize;
2119                reg_user_groups = 1;
2120                reg_user_size = 1 << extp->UserProtRegSize;
2121
2122                while (len > 0) {
2123                        /* flash geometry fixup */
2124                        data_offset = reg_prot_offset + 1;
2125                        data_offset *= cfi->interleave * cfi->device_type;
2126                        reg_prot_offset *= cfi->interleave * cfi->device_type;
2127                        reg_fact_size *= cfi->interleave;
2128                        reg_user_size *= cfi->interleave;
2129
2130                        if (user_regs) {
2131                                groups = reg_user_groups;
2132                                groupsize = reg_user_size;
2133                                /* skip over factory reg area */
2134                                groupno = reg_fact_groups;
2135                                data_offset += reg_fact_groups * reg_fact_size;
2136                        } else {
2137                                groups = reg_fact_groups;
2138                                groupsize = reg_fact_size;
2139                                groupno = 0;
2140                        }
2141
2142                        while (len > 0 && groups > 0) {
2143                                if (!action) {
2144                                        /*
2145                                         * Special case: if action is NULL
2146                                         * we fill buf with otp_info records.
2147                                         */
2148                                        struct otp_info *otpinfo;
2149                                        map_word lockword;
2150                                        if (len < sizeof(struct otp_info))
2151                                                return -ENOSPC;
2152                                        len -= sizeof(struct otp_info);
2153                                        ret = do_otp_read(map, chip,
2154                                                          reg_prot_offset,
2155                                                          (u_char *)&lockword,
2156                                                          map_bankwidth(map),
2157                                                          0, 0,  0);
2158                                        if (ret)
2159                                                return ret;
2160                                        otpinfo = (struct otp_info *)buf;
2161                                        otpinfo->start = from;
2162                                        otpinfo->length = groupsize;
2163                                        otpinfo->locked =
2164                                           !map_word_bitsset(map, lockword,
2165                                                             CMD(1 << groupno));
2166                                        from += groupsize;
2167                                        buf += sizeof(*otpinfo);
2168                                        *retlen += sizeof(*otpinfo);
2169                                } else if (from >= groupsize) {
2170                                        from -= groupsize;
2171                                        data_offset += groupsize;
2172                                } else {
2173                                        int size = groupsize;
2174                                        data_offset += from;
2175                                        size -= from;
2176                                        from = 0;
2177                                        if (size > len)
2178                                                size = len;
2179                                        ret = action(map, chip, data_offset,
2180                                                     buf, size, reg_prot_offset,
2181                                                     groupno, groupsize);
2182                                        if (ret < 0)
2183                                                return ret;
2184                                        buf += size;
2185                                        len -= size;
2186                                        *retlen += size;
2187                                        data_offset += size;
2188                                }
2189                                groupno++;
2190                                groups--;
2191                        }
2192
2193                        /* next OTP region */
2194                        if (++field == extp->NumProtectionFields)
2195                                break;
2196                        reg_prot_offset = otp->ProtRegAddr;
2197                        reg_fact_groups = otp->FactGroups;
2198                        reg_fact_size = 1 << otp->FactProtRegSize;
2199                        reg_user_groups = otp->UserGroups;
2200                        reg_user_size = 1 << otp->UserProtRegSize;
2201                        otp++;
2202                }
2203        }
2204
2205        return 0;
2206}
2207
2208static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
2209                                           size_t len, size_t *retlen,
2210                                            u_char *buf)
2211{
2212        return cfi_intelext_otp_walk(mtd, from, len, retlen,
2213                                     buf, do_otp_read, 0);
2214}
2215
2216static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
2217                                           size_t len, size_t *retlen,
2218                                            u_char *buf)
2219{
2220        return cfi_intelext_otp_walk(mtd, from, len, retlen,
2221                                     buf, do_otp_read, 1);
2222}
2223
2224static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
2225                                            size_t len, size_t *retlen,
2226                                             u_char *buf)
2227{
2228        return cfi_intelext_otp_walk(mtd, from, len, retlen,
2229                                     buf, do_otp_write, 1);
2230}
2231
2232static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
2233                                           loff_t from, size_t len)
2234{
2235        size_t retlen;
2236        return cfi_intelext_otp_walk(mtd, from, len, &retlen,
2237                                     NULL, do_otp_lock, 1);
2238}
2239
2240static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
2241                                           struct otp_info *buf, size_t len)
2242{
2243        size_t retlen;
2244        int ret;
2245
2246        ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
2247        return ret ? : retlen;
2248}
2249
2250static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
2251                                           struct otp_info *buf, size_t len)
2252{
2253        size_t retlen;
2254        int ret;
2255
2256        ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
2257        return ret ? : retlen;
2258}
2259
2260#endif
2261
2262static void cfi_intelext_save_locks(struct mtd_info *mtd)
2263{
2264        struct mtd_erase_region_info *region;
2265        int block, status, i;
2266        unsigned long adr;
2267        size_t len;
2268
2269        for (i = 0; i < mtd->numeraseregions; i++) {
2270                region = &mtd->eraseregions[i];
2271                if (!region->lockmap)
2272                        continue;
2273
2274                for (block = 0; block < region->numblocks; block++){
2275                        len = region->erasesize;
2276                        adr = region->offset + block * len;
2277
2278                        status = cfi_varsize_frob(mtd,
2279                                        do_getlockstatus_oneblock, adr, len, NULL);
2280                        if (status)
2281                                set_bit(block, region->lockmap);
2282                        else
2283                                clear_bit(block, region->lockmap);
2284                }
2285        }
2286}
2287
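/*
 * Chips that power up with every block locked (MTD_STUPID_LOCK) lose the
 * software lock state over a power cycle, so snapshot it into each erase
 * region's lockmap before suspending and reapply the unlocks on resume.
 */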
2288static int cfi_intelext_suspend(struct mtd_info *mtd)
2289{
2290        struct map_info *map = mtd->priv;
2291        struct cfi_private *cfi = map->fldrv_priv;
2292        struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2293        int i;
2294        struct flchip *chip;
2295        int ret = 0;
2296
2297        if ((mtd->flags & MTD_STUPID_LOCK)
2298            && extp && (extp->FeatureSupport & (1 << 5)))
2299                cfi_intelext_save_locks(mtd);
2300
2301        for (i=0; !ret && i<cfi->numchips; i++) {
2302                chip = &cfi->chips[i];
2303
2304                spin_lock(chip->mutex);
2305
2306                switch (chip->state) {
2307                case FL_READY:
2308                case FL_STATUS:
2309                case FL_CFI_QUERY:
2310                case FL_JEDEC_QUERY:
2311                        if (chip->oldstate == FL_READY) {
2312                                /* place the chip in a known state before suspend */
2313                                map_write(map, CMD(0xFF), cfi->chips[i].start);
2314                                chip->oldstate = chip->state;
2315                                chip->state = FL_PM_SUSPENDED;
2316                                /* No need to wake_up() on this state change -
2317                                 * as the whole point is that nobody can do anything
2318                                 * with the chip now anyway.
2319                                 */
2320                        } else {
2321                                /* There seems to be an operation pending. We must wait for it. */
2322                                printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
2323                                ret = -EAGAIN;
2324                        }
2325                        break;
2326                default:
2327                        /* Should we actually wait? Once upon a time these routines weren't
2328                           allowed to. Or should we return -EAGAIN, because the upper layers
2329                           ought to have already shut down anything which was using the device
2330                           anyway? The latter for now. */
2331                        printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
2332                        ret = -EAGAIN;  /* fall through */
2333                case FL_PM_SUSPENDED:
2334                        break;
2335                }
2336                spin_unlock(chip->mutex);
2337        }
2338
2339        /* Unlock the chips again */
2340
2341        if (ret) {
2342                for (i--; i >=0; i--) {
2343                        chip = &cfi->chips[i];
2344
2345                        spin_lock(chip->mutex);
2346
2347                        if (chip->state == FL_PM_SUSPENDED) {
2348                                /* No need to force it into a known state here,
2349                                   because we're returning failure, and it didn't
2350                                   get power cycled */
2351                                chip->state = chip->oldstate;
2352                                chip->oldstate = FL_READY;
2353                                wake_up(&chip->wq);
2354                        }
2355                        spin_unlock(chip->mutex);
2356                }
2357        }
2358
2359        return ret;
2360}
2361
2362static void cfi_intelext_restore_locks(struct mtd_info *mtd)
2363{
2364        struct mtd_erase_region_info *region;
2365        int block, i;
2366        unsigned long adr;
2367        size_t len;
2368
2369        for (i = 0; i < mtd->numeraseregions; i++) {
2370                region = &mtd->eraseregions[i];
2371                if (!region->lockmap)
2372                        continue;
2373
2374                for (block = 0; block < region->numblocks; block++) {
2375                        len = region->erasesize;
2376                        adr = region->offset + block * len;
2377
2378                        if (!test_bit(block, region->lockmap))
2379                                cfi_intelext_unlock(mtd, adr, len);
2380                }
2381        }
2382}
2383
2384static void cfi_intelext_resume(struct mtd_info *mtd)
2385{
2386        struct map_info *map = mtd->priv;
2387        struct cfi_private *cfi = map->fldrv_priv;
2388        struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2389        int i;
2390        struct flchip *chip;
2391
2392        for (i=0; i<cfi->numchips; i++) {
2393
2394                chip = &cfi->chips[i];
2395
2396                spin_lock(chip->mutex);
2397
2398                /* Go to known state. Chip may have been power cycled */
2399                if (chip->state == FL_PM_SUSPENDED) {
2400                        map_write(map, CMD(0xFF), cfi->chips[i].start);
2401                        chip->oldstate = chip->state = FL_READY;
2402                        wake_up(&chip->wq);
2403                }
2404
2405                spin_unlock(chip->mutex);
2406        }
2407
2408        if ((mtd->flags & MTD_STUPID_LOCK)
2409            && extp && (extp->FeatureSupport & (1 << 5)))
2410                cfi_intelext_restore_locks(mtd);
2411}
2412
2413static int cfi_intelext_reset(struct mtd_info *mtd)
2414{
2415        struct map_info *map = mtd->priv;
2416        struct cfi_private *cfi = map->fldrv_priv;
2417        int i, ret;
2418
2419        for (i=0; i < cfi->numchips; i++) {
2420                struct flchip *chip = &cfi->chips[i];
2421
2422                /* force the completion of any ongoing operation
2423                   and switch to array mode so any bootloader in
2424                   flash is accessible for soft reboot. */
2425                spin_lock(chip->mutex);
2426                ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
2427                if (!ret) {
2428                        map_write(map, CMD(0xff), chip->start);
2429                        chip->state = FL_SHUTDOWN;
2430                }
2431                spin_unlock(chip->mutex);
2432        }
2433
2434        return 0;
2435}
2436
2437static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
2438                               void *v)
2439{
2440        struct mtd_info *mtd;
2441
2442        mtd = container_of(nb, struct mtd_info, reboot_notifier);
2443        cfi_intelext_reset(mtd);
2444        return NOTIFY_DONE;
2445}
2446
2447static void cfi_intelext_destroy(struct mtd_info *mtd)
2448{
2449        struct map_info *map = mtd->priv;
2450        struct cfi_private *cfi = map->fldrv_priv;
2451        struct mtd_erase_region_info *region;
2452        int i;
2453        cfi_intelext_reset(mtd);
2454        unregister_reboot_notifier(&mtd->reboot_notifier);
2455        kfree(cfi->cmdset_priv);
2456        kfree(cfi->cfiq);
2457        kfree(cfi->chips[0].priv);
2458        kfree(cfi);
2459        for (i = 0; i < mtd->numeraseregions; i++) {
2460                region = &mtd->eraseregions[i];
2461                kfree(region->lockmap);        /* kfree(NULL) is a no-op */
2463        }
2464        kfree(mtd->eraseregions);
2465}
2466
2467MODULE_LICENSE("GPL");
2468MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
2469MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
2470MODULE_ALIAS("cfi_cmdset_0003");
2471MODULE_ALIAS("cfi_cmdset_0200");
2472