linux/include/linux/mtd/cfi.h
/*
 * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org> et al.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 */

#ifndef __MTD_CFI_H__
#define __MTD_CFI_H__

#include <linux/delay.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/mtd/flashchip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/cfi_endian.h>
#include <linux/mtd/xip.h>

#ifdef CONFIG_MTD_CFI_I1
#define cfi_interleave(cfi) 1
#define cfi_interleave_is_1(cfi) (cfi_interleave(cfi) == 1)
#else
#define cfi_interleave_is_1(cfi) (0)
#endif

#ifdef CONFIG_MTD_CFI_I2
# ifdef cfi_interleave
#  undef cfi_interleave
#  define cfi_interleave(cfi) ((cfi)->interleave)
# else
#  define cfi_interleave(cfi) 2
# endif
#define cfi_interleave_is_2(cfi) (cfi_interleave(cfi) == 2)
#else
#define cfi_interleave_is_2(cfi) (0)
#endif

#ifdef CONFIG_MTD_CFI_I4
# ifdef cfi_interleave
#  undef cfi_interleave
#  define cfi_interleave(cfi) ((cfi)->interleave)
# else
#  define cfi_interleave(cfi) 4
# endif
#define cfi_interleave_is_4(cfi) (cfi_interleave(cfi) == 4)
#else
#define cfi_interleave_is_4(cfi) (0)
#endif

#ifdef CONFIG_MTD_CFI_I8
# ifdef cfi_interleave
#  undef cfi_interleave
#  define cfi_interleave(cfi) ((cfi)->interleave)
# else
#  define cfi_interleave(cfi) 8
# endif
#define cfi_interleave_is_8(cfi) (cfi_interleave(cfi) == 8)
#else
#define cfi_interleave_is_8(cfi) (0)
#endif

#ifndef cfi_interleave
#warning No CONFIG_MTD_CFI_Ix selected. No NOR chip support can work.
static inline int cfi_interleave(void *cfi)
{
	BUG();
	return 0;
}
#endif
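
/*
 * Illustrative example (not in the original source): "interleave" is the
 * number of identical chips wired in parallel on the bus.  Two x16 chips
 * side by side on a 32-bit bus give cfi_interleave(cfi) == 2 with
 * device_type == CFI_DEVICETYPE_X16.
 */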

static inline int cfi_interleave_supported(int i)
{
	switch (i) {
#ifdef CONFIG_MTD_CFI_I1
	case 1:
#endif
#ifdef CONFIG_MTD_CFI_I2
	case 2:
#endif
#ifdef CONFIG_MTD_CFI_I4
	case 4:
#endif
#ifdef CONFIG_MTD_CFI_I8
	case 8:
#endif
		return 1;

	default:
		return 0;
	}
}


/* NB: these values must represent the number of bytes needed to meet the
 *     device type (x8, x16, x32).  E.g. a 32-bit device is 4 bytes (4 x 8 bits).
 *     These numbers are used in calculations.
 */
#define CFI_DEVICETYPE_X8  (8 / 8)
#define CFI_DEVICETYPE_X16 (16 / 8)
#define CFI_DEVICETYPE_X32 (32 / 8)
#define CFI_DEVICETYPE_X64 (64 / 8)


/* Device Interface Code Assignments from the "Common Flash Memory Interface
 * Publication 100" dated December 1, 2001.
 */
#define CFI_INTERFACE_X8_ASYNC          0x0000
#define CFI_INTERFACE_X16_ASYNC         0x0001
#define CFI_INTERFACE_X8_BY_X16_ASYNC   0x0002
#define CFI_INTERFACE_X32_ASYNC         0x0003
#define CFI_INTERFACE_X16_BY_X32_ASYNC  0x0005
#define CFI_INTERFACE_NOT_ALLOWED       0xffff


/* NB: We keep these structures in memory in HOST byteorder, except
 * where individually noted.
 */

/* Basic Query Structure */
struct cfi_ident {
	uint8_t  qry[3];
	uint16_t P_ID;
	uint16_t P_ADR;
	uint16_t A_ID;
	uint16_t A_ADR;
	uint8_t  VccMin;
	uint8_t  VccMax;
	uint8_t  VppMin;
	uint8_t  VppMax;
	uint8_t  WordWriteTimeoutTyp;
	uint8_t  BufWriteTimeoutTyp;
	uint8_t  BlockEraseTimeoutTyp;
	uint8_t  ChipEraseTimeoutTyp;
	uint8_t  WordWriteTimeoutMax;
	uint8_t  BufWriteTimeoutMax;
	uint8_t  BlockEraseTimeoutMax;
	uint8_t  ChipEraseTimeoutMax;
	uint8_t  DevSize;
	uint16_t InterfaceDesc;
	uint16_t MaxBufWriteSize;
	uint8_t  NumEraseRegions;
	uint32_t EraseRegionInfo[0]; /* Not host ordered */
} __attribute__((packed));
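
/*
 * Encoding note (per the CFI specification; not in the original source):
 * DevSize is log2 of the device size in bytes, the typical write/erase
 * timeouts are log2 of a time in microseconds (writes) or milliseconds
 * (erases), the Max timeouts are log2 of a multiplier applied to the
 * typical value, and the Vcc/Vpp limits are BCD-encoded volts.
 */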

/* Extended Query Structure for both PRI and ALT */

struct cfi_extquery {
	uint8_t  pri[3];
	uint8_t  MajorVersion;
	uint8_t  MinorVersion;
} __attribute__((packed));

/* Vendor-Specific PRI for Intel/Sharp Extended Command Set (0x0001) */

struct cfi_pri_intelext {
	uint8_t  pri[3];
	uint8_t  MajorVersion;
	uint8_t  MinorVersion;
	uint32_t FeatureSupport; /* if bit 31 is set then an additional uint32_t feature
				    block follows - FIXME - not currently supported */
	uint8_t  SuspendCmdSupport;
	uint16_t BlkStatusRegMask;
	uint8_t  VccOptimal;
	uint8_t  VppOptimal;
	uint8_t  NumProtectionFields;
	uint16_t ProtRegAddr;
	uint8_t  FactProtRegSize;
	uint8_t  UserProtRegSize;
	uint8_t  extra[0];
} __attribute__((packed));

struct cfi_intelext_otpinfo {
	uint32_t ProtRegAddr;
	uint16_t FactGroups;
	uint8_t  FactProtRegSize;
	uint16_t UserGroups;
	uint8_t  UserProtRegSize;
} __attribute__((packed));

struct cfi_intelext_blockinfo {
	uint16_t NumIdentBlocks;
	uint16_t BlockSize;
	uint16_t MinBlockEraseCycles;
	uint8_t  BitsPerCell;
	uint8_t  BlockCap;
} __attribute__((packed));

struct cfi_intelext_regioninfo {
	uint16_t NumIdentPartitions;
	uint8_t  NumOpAllowed;
	uint8_t  NumOpAllowedSimProgMode;
	uint8_t  NumOpAllowedSimEraMode;
	uint8_t  NumBlockTypes;
	struct cfi_intelext_blockinfo BlockTypes[1];
} __attribute__((packed));

struct cfi_intelext_programming_regioninfo {
	uint8_t  ProgRegShift;
	uint8_t  Reserved1;
	uint8_t  ControlValid;
	uint8_t  Reserved2;
	uint8_t  ControlInvalid;
	uint8_t  Reserved3;
} __attribute__((packed));

/* Vendor-Specific PRI for AMD/Fujitsu Extended Command Set (0x0002) */

struct cfi_pri_amdstd {
	uint8_t  pri[3];
	uint8_t  MajorVersion;
	uint8_t  MinorVersion;
	uint8_t  SiliconRevision; /* bits 1-0: Address Sensitive Unlock */
	uint8_t  EraseSuspend;
	uint8_t  BlkProt;
	uint8_t  TmpBlkUnprotect;
	uint8_t  BlkProtUnprot;
	uint8_t  SimultaneousOps;
	uint8_t  BurstMode;
	uint8_t  PageMode;
	uint8_t  VppMin;
	uint8_t  VppMax;
	uint8_t  TopBottom;
} __attribute__((packed));
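
/*
 * Note on the struct above (as interpreted by the AMD/Fujitsu command-set
 * driver; not in the original source): TopBottom describes the boot-block
 * location, where a value of 2 means bottom boot and 3 means top boot.
 */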

/* Vendor-Specific PRI for Atmel chips (command set 0x0002) */

struct cfi_pri_atmel {
	uint8_t pri[3];
	uint8_t MajorVersion;
	uint8_t MinorVersion;
	uint8_t Features;
	uint8_t BottomBoot;
	uint8_t BurstMode;
	uint8_t PageMode;
} __attribute__((packed));

struct cfi_pri_query {
	uint8_t  NumFields;
	uint32_t ProtField[1]; /* Not host ordered */
} __attribute__((packed));

struct cfi_bri_query {
	uint8_t  PageModeReadCap;
	uint8_t  NumFields;
	uint32_t ConfField[1]; /* Not host ordered */
} __attribute__((packed));

#define P_ID_NONE               0x0000
#define P_ID_INTEL_EXT          0x0001
#define P_ID_AMD_STD            0x0002
#define P_ID_INTEL_STD          0x0003
#define P_ID_AMD_EXT            0x0004
#define P_ID_WINBOND            0x0006
#define P_ID_ST_ADV             0x0020
#define P_ID_MITSUBISHI_STD     0x0100
#define P_ID_MITSUBISHI_EXT     0x0101
#define P_ID_SST_PAGE           0x0102
#define P_ID_SST_OLD            0x0701
#define P_ID_INTEL_PERFORMANCE  0x0200
#define P_ID_INTEL_DATA         0x0210
#define P_ID_RESERVED           0xffff


#define CFI_MODE_CFI    1
#define CFI_MODE_JEDEC  0

struct cfi_private {
	uint16_t cmdset;
	void *cmdset_priv;
	int interleave;
	int device_type;
	int cfi_mode;		/* Are we a JEDEC device pretending to be CFI? */
	int addr_unlock1;
	int addr_unlock2;
	struct mtd_info *(*cmdset_setup)(struct map_info *);
	struct cfi_ident *cfiq; /* For now only one. We insist that all devs
				  must be of the same type. */
	int mfr, id;
	int numchips;
	map_word sector_erase_cmd;
	unsigned long chipshift; /* Because they're of the same type */
	const char *im_name;	 /* inter_module name for cmdset_setup */
	struct flchip chips[0];  /* per-chip data structure for each chip */
};

/*
 * Returns the command address according to the given geometry.
 */
static inline uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs,
				struct map_info *map, struct cfi_private *cfi)
{
	unsigned bankwidth = map_bankwidth(map);
	unsigned interleave = cfi_interleave(cfi);
	unsigned type = cfi->device_type;
	uint32_t addr;

	addr = (cmd_ofs * type) * interleave;

	/* Modify the unlock address if we are in compatibility mode.
	 * For 16bit devices on 8 bit busses
	 * and 32bit devices on 16 bit busses
	 * set the low bit of the alternating bit sequence of the address.
	 */
	if (((type * interleave) > bankwidth) && ((cmd_ofs & 0xff) == 0xaa))
		addr |= (type >> 1)*interleave;

	return addr;
}
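
/*
 * Worked example (illustrative, not in the original source): an x16 device
 * driven in 8-bit compatibility mode (type = 2, interleave = 1,
 * bankwidth = 1).  The unlock offsets 0x555 and 0x2aa scale to 0xaaa and
 * 0x554; the fixup above then sets the low bit of the 0x2aa command,
 * giving the familiar 0xaaa/0x555 byte addresses.
 */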

/*
 * Transforms the CFI command for the given geometry (bus width & interleave).
 * It looks too long to be inline, but in the common case it should almost all
 * get optimised away.
 */
static inline map_word cfi_build_cmd(u_long cmd, struct map_info *map, struct cfi_private *cfi)
{
	map_word val = { {0} };
	int wordwidth, words_per_bus, chip_mode, chips_per_word;
	unsigned long onecmd;
	int i;

	/* We do it this way to give the compiler a fighting chance
	   of optimising away all the crap for 'bankwidth' larger than
	   an unsigned long, in the common case where that support is
	   disabled */
	if (map_bankwidth_is_large(map)) {
		wordwidth = sizeof(unsigned long);
		words_per_bus = (map_bankwidth(map)) / wordwidth; // i.e. normally 1
	} else {
		wordwidth = map_bankwidth(map);
		words_per_bus = 1;
	}

	chip_mode = map_bankwidth(map) / cfi_interleave(cfi);
	chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map);

	/* First, determine what the bit-pattern should be for a single
	   device, according to chip mode and endianness... */
	switch (chip_mode) {
	default: BUG();
	case 1:
		onecmd = cmd;
		break;
	case 2:
		onecmd = cpu_to_cfi16(cmd);
		break;
	case 4:
		onecmd = cpu_to_cfi32(cmd);
		break;
	}

	/* Now replicate it across the size of an unsigned long, or
	   just to the bus width as appropriate */
	switch (chips_per_word) {
	default: BUG();
#if BITS_PER_LONG >= 64
	case 8:
		onecmd |= (onecmd << (chip_mode * 32));
#endif
	case 4:
		onecmd |= (onecmd << (chip_mode * 16));
	case 2:
		onecmd |= (onecmd << (chip_mode * 8));
	case 1:
		;
	}

	/* And finally, for the multi-word case, replicate it
	   in all words in the structure */
	for (i=0; i < words_per_bus; i++) {
		val.x[i] = onecmd;
	}

	return val;
}
#define CMD(x)  cfi_build_cmd((x), map, cfi)
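
/*
 * Example (illustrative, not in the original source): with two x16 chips
 * interleaved on a 32-bit bus (bankwidth 4, interleave 2, chip_mode 2) and
 * no CFI endian swapping, CMD(0xF0) builds the map_word 0x00F000F0, i.e.
 * the command replicated once per chip on the bus.
 */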
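
/*
 * Descriptive note (not in the original source): cfi_merge_status() ORs
 * the per-chip status fields of an interleaved bus word down into a
 * single chip-wide value, so a status bit set by any one chip is visible
 * in the merged result.
 */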
static inline unsigned long cfi_merge_status(map_word val, struct map_info *map,
					   struct cfi_private *cfi)
{
	int wordwidth, words_per_bus, chip_mode, chips_per_word;
	unsigned long onestat, res = 0;
	int i;

	/* We do it this way to give the compiler a fighting chance
	   of optimising away all the crap for 'bankwidth' larger than
	   an unsigned long, in the common case where that support is
	   disabled */
	if (map_bankwidth_is_large(map)) {
		wordwidth = sizeof(unsigned long);
		words_per_bus = (map_bankwidth(map)) / wordwidth; // i.e. normally 1
	} else {
		wordwidth = map_bankwidth(map);
		words_per_bus = 1;
	}

	chip_mode = map_bankwidth(map) / cfi_interleave(cfi);
	chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map);

	onestat = val.x[0];
	/* Or all status words together */
	for (i=1; i < words_per_bus; i++) {
		onestat |= val.x[i];
	}

	res = onestat;
	switch(chips_per_word) {
	default: BUG();
#if BITS_PER_LONG >= 64
	case 8:
		res |= (onestat >> (chip_mode * 32));
#endif
	case 4:
		res |= (onestat >> (chip_mode * 16));
	case 2:
		res |= (onestat >> (chip_mode * 8));
	case 1:
		;
	}

	/* Last, determine what the bit-pattern should be for a single
	   device, according to chip mode and endianness... */
	switch (chip_mode) {
	case 1:
		break;
	case 2:
		res = cfi16_to_cpu(res);
		break;
	case 4:
		res = cfi32_to_cpu(res);
		break;
	default: BUG();
	}
	return res;
}

#define MERGESTATUS(x) cfi_merge_status((x), map, cfi)

/*
 * Sends a CFI command to a bank of flash for the given geometry.
 *
 * Returns the offset in flash where the command was written.
 * If prev_val is non-null, it will be set to the value at the command address,
 * before the command was written.
 */
static inline uint32_t cfi_send_gen_cmd(u_char cmd, uint32_t cmd_addr, uint32_t base,
				struct map_info *map, struct cfi_private *cfi,
				int type, map_word *prev_val)
{
	map_word val;
	uint32_t addr = base + cfi_build_cmd_addr(cmd_addr, map, cfi);
	val = cfi_build_cmd(cmd, map, cfi);

	if (prev_val)
		*prev_val = map_read(map, addr);

	map_write(map, val, addr);

	return addr - base;
}
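
/*
 * Typical usage (illustrative sketch, as in the AMD/Fujitsu command-set
 * drivers): the classic two-cycle unlock preceding a program or erase
 * command.
 *
 *	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
 *			 cfi->device_type, NULL);
 *	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
 *			 cfi->device_type, NULL);
 */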

static inline uint8_t cfi_read_query(struct map_info *map, uint32_t addr)
{
	map_word val = map_read(map, addr);

	if (map_bankwidth_is_1(map)) {
		return val.x[0];
	} else if (map_bankwidth_is_2(map)) {
		return cfi16_to_cpu(val.x[0]);
	} else {
		/* No point in a 64-bit byteswap since that would just be
		   swapping the responses from different chips, and we are
		   only interested in one chip (a representative sample) */
		return cfi32_to_cpu(val.x[0]);
	}
}

static inline uint16_t cfi_read_query16(struct map_info *map, uint32_t addr)
{
	map_word val = map_read(map, addr);

	if (map_bankwidth_is_1(map)) {
		return val.x[0] & 0xff;
	} else if (map_bankwidth_is_2(map)) {
		return cfi16_to_cpu(val.x[0]);
	} else {
		/* No point in a 64-bit byteswap since that would just be
		   swapping the responses from different chips, and we are
		   only interested in one chip (a representative sample) */
		return cfi32_to_cpu(val.x[0]);
	}
}

static inline void cfi_udelay(int us)
{
	if (us >= 1000) {
		msleep((us+999)/1000);
	} else {
		udelay(us);
		cond_resched();
	}
}

int __xipram cfi_qry_present(struct map_info *map, __u32 base,
			     struct cfi_private *cfi);
int __xipram cfi_qry_mode_on(uint32_t base, struct map_info *map,
			     struct cfi_private *cfi);
void __xipram cfi_qry_mode_off(uint32_t base, struct map_info *map,
			       struct cfi_private *cfi);

struct cfi_extquery *cfi_read_pri(struct map_info *map, uint16_t adr, uint16_t size,
			     const char* name);

struct cfi_fixup {
	uint16_t mfr;
	uint16_t id;
	void (*fixup)(struct mtd_info *mtd);
};

#define CFI_MFR_ANY             0xFFFF
#define CFI_ID_ANY              0xFFFF
#define CFI_MFR_CONTINUATION    0x007F

#define CFI_MFR_AMD             0x0001
#define CFI_MFR_AMIC            0x0037
#define CFI_MFR_ATMEL           0x001F
#define CFI_MFR_EON             0x001C
#define CFI_MFR_FUJITSU         0x0004
#define CFI_MFR_HYUNDAI         0x00AD
#define CFI_MFR_INTEL           0x0089
#define CFI_MFR_MACRONIX        0x00C2
#define CFI_MFR_NEC             0x0010
#define CFI_MFR_PMC             0x009D
#define CFI_MFR_SAMSUNG         0x00EC
#define CFI_MFR_SHARP           0x00B0
#define CFI_MFR_SST             0x00BF
#define CFI_MFR_ST              0x0020 /* STMicroelectronics */
#define CFI_MFR_TOSHIBA         0x0098
#define CFI_MFR_WINBOND         0x00DA

void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup* fixups);
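
/*
 * Example (illustrative sketch, not in the original source): a fixup table
 * is an array of { mfr, id, fixup } entries terminated by a zeroed entry
 * and applied with cfi_fixup(); fixup_use_foo() below is a hypothetical
 * callback.
 *
 *	static struct cfi_fixup my_fixup_table[] = {
 *		{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_use_foo },
 *		{ 0, 0, NULL }
 *	};
 *
 *	cfi_fixup(mtd, my_fixup_table);
 */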

typedef int (*varsize_frob_t)(struct map_info *map, struct flchip *chip,
			      unsigned long adr, int len, void *thunk);

int cfi_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob,
	loff_t ofs, size_t len, void *thunk);


#endif /* __MTD_CFI_H__ */