1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23#include <linux/module.h>
24#include <linux/types.h>
25#include <linux/kernel.h>
26#include <linux/sched.h>
27#include <linux/init.h>
28#include <asm/io.h>
29#include <asm/byteorder.h>
30
31#include <linux/errno.h>
32#include <linux/slab.h>
33#include <linux/delay.h>
34#include <linux/interrupt.h>
35#include <linux/mtd/map.h>
36#include <linux/mtd/cfi.h>
37#include <linux/mtd/mtd.h>
38#include <linux/mtd/compatmac.h>
39
40
/* MTD operation implementations for the ST Advanced Architecture
 * (cfi_cmdset_0020) command set; wired into mtd_info in cfi_staa_setup(). */
static int cfi_staa_read(struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_staa_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
		unsigned long count, loff_t to, size_t *retlen);
static int cfi_staa_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_staa_sync (struct mtd_info *);
static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_staa_suspend (struct mtd_info *);
static void cfi_staa_resume (struct mtd_info *);

static void cfi_staa_destroy(struct mtd_info *);

/* Public entry point: the CFI probe layer calls this to attach the
 * 0020 command set to a freshly probed map. */
struct mtd_info *cfi_cmdset_0020(struct map_info *, int);

static struct mtd_info *cfi_staa_setup (struct map_info *);
57
/* Chip-driver record installed into map->fldrv by cfi_staa_setup().
 * probe is NULL: attachment happens via cfi_cmdset_0020(), not via a
 * generic chip-driver probe. destroy releases the command-set data. */
static struct mtd_chip_driver cfi_staa_chipdrv = {
	.probe = NULL,
	.destroy = cfi_staa_destroy,
	.name = "cfi_cmdset_0020",
	.module = THIS_MODULE
};
64
65
66
67
68#ifdef DEBUG_CFI_FEATURES
69static void cfi_tell_features(struct cfi_pri_intelext *extp)
70{
71 int i;
72 printk(" Feature/Command Support: %4.4X\n", extp->FeatureSupport);
73 printk(" - Chip Erase: %s\n", extp->FeatureSupport&1?"supported":"unsupported");
74 printk(" - Suspend Erase: %s\n", extp->FeatureSupport&2?"supported":"unsupported");
75 printk(" - Suspend Program: %s\n", extp->FeatureSupport&4?"supported":"unsupported");
76 printk(" - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
77 printk(" - Queued Erase: %s\n", extp->FeatureSupport&16?"supported":"unsupported");
78 printk(" - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
79 printk(" - Protection Bits: %s\n", extp->FeatureSupport&64?"supported":"unsupported");
80 printk(" - Page-mode read: %s\n", extp->FeatureSupport&128?"supported":"unsupported");
81 printk(" - Synchronous read: %s\n", extp->FeatureSupport&256?"supported":"unsupported");
82 for (i=9; i<32; i++) {
83 if (extp->FeatureSupport & (1<<i))
84 printk(" - Unknown Bit %X: supported\n", i);
85 }
86
87 printk(" Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
88 printk(" - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
89 for (i=1; i<8; i++) {
90 if (extp->SuspendCmdSupport & (1<<i))
91 printk(" - Unknown Bit %X: supported\n", i);
92 }
93
94 printk(" Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
95 printk(" - Lock Bit Active: %s\n", extp->BlkStatusRegMask&1?"yes":"no");
96 printk(" - Valid Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
97 for (i=2; i<16; i++) {
98 if (extp->BlkStatusRegMask & (1<<i))
99 printk(" - Unknown Bit %X Active: yes\n",i);
100 }
101
102 printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
103 extp->VccOptimal >> 8, extp->VccOptimal & 0xf);
104 if (extp->VppOptimal)
105 printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
106 extp->VppOptimal >> 8, extp->VppOptimal & 0xf);
107}
108#endif
109
110
111
112
113
114
115
116
/*
 * Attach the ST Advanced Architecture (0020) command set to a probed map.
 * 'primary' selects which extended-query address to use (P_ADR vs A_ADR).
 * Reads and validates the vendor extended query (versions 1.0 - 1.3),
 * installs it as cfi->cmdset_priv, seeds per-chip timing defaults, and
 * hands over to cfi_staa_setup().  Returns NULL on any failure.
 */
struct mtd_info *cfi_cmdset_0020(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int i;

	if (cfi->cfi_mode) {
		/* It's a real CFI chip, so read the extended query table
		 * from the address advertised by the query structure. */
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_intelext *extp;

		extp = (struct cfi_pri_intelext*)cfi_read_pri(map, adr, sizeof(*extp), "ST Microelectronics");
		if (!extp)
			return NULL;

		/* Only extended-query versions 1.0 through 1.3 are handled. */
		if (extp->MajorVersion != '1' ||
		    (extp->MinorVersion < '0' || extp->MinorVersion > '3')) {
			printk(KERN_ERR " Unknown ST Microelectronics"
			       " Extended Query version %c.%c.\n",
			       extp->MajorVersion, extp->MinorVersion);
			kfree(extp);
			return NULL;
		}

		/* Do some byteswapping if necessary */
		extp->FeatureSupport = cfi32_to_cpu(extp->FeatureSupport);
		extp->BlkStatusRegMask = cfi32_to_cpu(extp->BlkStatusRegMask);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		cfi->cmdset_priv = extp;
	}

	/* Default timing seeds; consumed by cfi_udelay() in the write and
	 * read paths, i.e. microseconds. */
	for (i=0; i< cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 128;
		cfi->chips[i].buffer_write_time = 128;
		cfi->chips[i].erase_time = 1024;
		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	return cfi_staa_setup(map);
}
EXPORT_SYMBOL_GPL(cfi_cmdset_0020);
168
/*
 * Build the mtd_info for the probed chips: compute total size, decode the
 * CFI erase-region table into mtd->eraseregions (replicated once per
 * chip), sanity-check that the regions sum to the device size, and wire
 * up the cfi_staa_* operation callbacks.  Frees cfi->cmdset_priv and
 * returns NULL on any failure.
 */
static struct mtd_info *cfi_staa_setup(struct map_info *map)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	unsigned long offset = 0;
	int i,j;
	/* Size of one interleaved set of chips, in bytes. */
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);

	if (!mtd) {
		printk(KERN_ERR "Failed to allocate memory for MTD device\n");
		kfree(cfi->cmdset_priv);
		return NULL;
	}

	mtd->priv = map;
	mtd->type = MTD_NORFLASH;
	mtd->size = devsize * cfi->numchips;

	/* One erase-region descriptor per CFI region per chip. */
	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
			* mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
		kfree(cfi->cmdset_priv);
		kfree(mtd);
		return NULL;
	}

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		/* CFI packs region info as: low 16 bits = block count - 1,
		 * upper bits = block size in units of 256 bytes. */
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		/* mtd->erasesize reports the largest region's block size. */
		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		/* Replicate this region's layout for every chip. */
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}

	/* The regions must exactly tile one device. */
	if (offset != devsize) {
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		kfree(mtd->eraseregions);
		kfree(cfi->cmdset_priv);
		kfree(mtd);
		return NULL;
	}

	for (i=0; i<mtd->numeraseregions;i++){
		printk(KERN_DEBUG "%d: offset=0x%x,size=0x%x,blocks=%d\n",
		       i,mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}

	/* Install the operation callbacks. */
	mtd->erase = cfi_staa_erase_varsize;
	mtd->read = cfi_staa_read;
	mtd->write = cfi_staa_write_buffers;
	mtd->writev = cfi_staa_writev;
	mtd->sync = cfi_staa_sync;
	mtd->lock = cfi_staa_lock;
	mtd->unlock = cfi_staa_unlock;
	mtd->suspend = cfi_staa_suspend;
	mtd->resume = cfi_staa_resume;
	/* Writes must go through the buffer path; no bit-level writes. */
	mtd->flags = MTD_CAP_NORFLASH & ~MTD_BIT_WRITEABLE;
	/* ECC unit size; also used as ECCBUF_SIZE by cfi_staa_writev(). */
	mtd->writesize = 8;
	map->fldrv = &cfi_staa_chipdrv;
	__module_get(THIS_MODULE);
	mtd->name = map->name;
	return mtd;
}
249
250
/*
 * Read 'len' bytes at 'adr' (chip-relative) from one chip into 'buf'.
 * If the chip is erasing and advertises erase-suspend (FeatureSupport
 * bit 1), the erase is suspended for the duration of the read and
 * resumed afterwards; otherwise the caller sleeps until the chip is
 * idle.  Returns 0 on success, -EIO if the chip never becomes ready.
 * Takes and releases chip->mutex internally.
 */
static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	map_word status, status_OK;
	unsigned long timeo;
	DECLARE_WAITQUEUE(wait, current);
	int suspended = 0;
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned to the bus width. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	/* Status-register "ready" bit, replicated across the interleave. */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
 retry:
	spin_lock_bh(chip->mutex);

	/* Check that the chip is ready to talk to us.  If it is erasing,
	 * try to suspend the erase so we can read now. */
	switch (chip->state) {
	case FL_ERASING:
		/* Erase-suspend only if the chip advertises it. */
		if (!(((struct cfi_pri_intelext *)cfi->cmdset_priv)->FeatureSupport & 2))
			goto sleep; /* no erase suspend: wait for idle */

		map_write (map, CMD(0xb0), cmd_addr); /* Erase Suspend */

		/* Force Read Status mode: some chips switch straight to
		 * read-array mode if the erase had already finished. */
		map_write(map, CMD(0x70), cmd_addr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;

		/* Poll until the suspend takes effect. */
		for (;;) {
			status = map_read(map, cmd_addr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			if (time_after(jiffies, timeo)) {
				/* Urgh.  Resume the erase and bail out. */
				map_write(map, CMD(0xd0), cmd_addr); /* Erase Resume */
				map_write(map, CMD(0x70), cmd_addr); /* Read Status */
				chip->state = FL_ERASING;
				spin_unlock_bh(chip->mutex);
				printk(KERN_ERR "Chip not ready after erase "
				       "suspended: status = 0x%lx\n", status.x[0]);
				return -EIO;
			}

			spin_unlock_bh(chip->mutex);
			cfi_udelay(1);
			spin_lock_bh(chip->mutex);
		}

		suspended = 1;
		map_write(map, CMD(0xff), cmd_addr); /* Read Array */
		chip->state = FL_READY;
		break;

#if 0
	case FL_WRITING:
		/* Write suspend not implemented. */
#endif

	case FL_READY:
		break;

	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		/* Kick the chip out of query mode into Read Status. */
		map_write(map, CMD(0x70), cmd_addr);
		chip->state = FL_STATUS;
		/* fall through */

	case FL_STATUS:
		status = map_read(map, cmd_addr);
		if (map_word_andequal(map, status, status_OK, status_OK)) {
			map_write(map, CMD(0xff), cmd_addr); /* back to Read Array */
			chip->state = FL_READY;
			break;
		}

		/* Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			spin_unlock_bh(chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in read. WSM status = %lx\n", status.x[0]);
			return -EIO;
		}

		/* Latency issues: drop the lock, wait a while, retry. */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
	sleep:
		/* Park on the wait queue until someone changes the state. */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock_bh(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	map_copy_from(map, buf, adr, len);

	if (suspended) {
		/* Resume the suspended erase and return the chip to
		 * Read Status mode; the 0x70 is harmless if the erase
		 * had in fact already completed. */
		chip->state = chip->oldstate;
		map_write(map, CMD(0xd0), cmd_addr); /* Erase Resume */
		map_write(map, CMD(0x70), cmd_addr); /* Read Status */
	}

	wake_up(&chip->wq);
	spin_unlock_bh(chip->mutex);
	return 0;
}
384
385static int cfi_staa_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
386{
387 struct map_info *map = mtd->priv;
388 struct cfi_private *cfi = map->fldrv_priv;
389 unsigned long ofs;
390 int chipnum;
391 int ret = 0;
392
393
394 chipnum = (from >> cfi->chipshift);
395 ofs = from - (chipnum << cfi->chipshift);
396
397 *retlen = 0;
398
399 while (len) {
400 unsigned long thislen;
401
402 if (chipnum >= cfi->numchips)
403 break;
404
405 if ((len + ofs -1) >> cfi->chipshift)
406 thislen = (1<<cfi->chipshift) - ofs;
407 else
408 thislen = len;
409
410 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
411 if (ret)
412 break;
413
414 *retlen += thislen;
415 len -= thislen;
416 buf += thislen;
417
418 ofs = 0;
419 chipnum++;
420 }
421 return ret;
422}
423
/*
 * Program up to one write-buffer's worth of data at 'adr' on one chip:
 * wait for the chip to go ready, issue Write-to-Buffer (0xE8), load the
 * word count and data, confirm (0xD0), then poll for completion while
 * adapting chip->buffer_write_time.  Returns 0 on success, -EROFS if
 * the status shows the block is protected, -EIO on other errors or
 * timeouts.  'adr' must be bus-width aligned.
 */
static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
				  unsigned long adr, const u_char *buf, int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long cmd_adr, timeo;
	DECLARE_WAITQUEUE(wait, current);
	int wbufsize, z;

	/* Writes must be aligned to the bus width. */
	if (adr & (map_bankwidth(map)-1))
		return -EINVAL;

	wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	adr += chip->start;
	/* Commands go to the base of the write buffer containing adr. */
	cmd_adr = adr & ~(wbufsize-1);

	/* Status-register "ready" bit, replicated across the interleave. */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
 retry:

#ifdef DEBUG_CFI_FEATURES
	printk("%s: chip->state[%d]\n", __FUNCTION__, chip->state);
#endif
	spin_lock_bh(chip->mutex);

	/* Check that the chip is ready to talk to us. */
	switch (chip->state) {
	case FL_READY:
		break;

	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		/* Kick the chip out of query mode into Read Status. */
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
#ifdef DEBUG_CFI_FEATURES
		printk("%s: 1 status[%x]\n", __FUNCTION__, map_read(map, cmd_adr));
#endif
		/* fall through */

	case FL_STATUS:
		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			spin_unlock_bh(chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in buffer write Xstatus = %lx, status = %lx\n",
			       status.x[0], map_read(map, cmd_adr).x[0]);
			return -EIO;
		}

		/* Latency issues: drop the lock, wait a while, retry. */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Park on the wait queue until someone changes the state. */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock_bh(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	ENABLE_VPP(map);
	map_write(map, CMD(0xe8), cmd_adr); /* Write to Buffer */
	chip->state = FL_WRITING_TO_BUFFER;

	/* Poll (up to ~100us) for buffer-available status. */
	z = 0;
	for (;;) {
		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		spin_lock_bh(chip->mutex);

		if (++z > 100) {
			/* Argh. Not ready for write to buffer. */
			DISABLE_VPP(map);
			map_write(map, CMD(0x70), cmd_adr);
			chip->state = FL_STATUS;
			spin_unlock_bh(chip->mutex);
			printk(KERN_ERR "Chip not ready for buffer write. Xstatus = %lx\n", status.x[0]);
			return -EIO;
		}
	}

	/* Write the word count (0-based, in bus-width units). */
	map_write(map, CMD(len/map_bankwidth(map)-1), cmd_adr );

	/* Load the data into the write buffer, one bus word at a time. */
	for (z = 0; z < len;
	     z += map_bankwidth(map), buf += map_bankwidth(map)) {
		map_word d;
		d = map_word_load(map, buf);
		map_write(map, d, adr+z);
	}
	/* Confirm: start the actual program operation. */
	map_write(map, CMD(0xd0), cmd_adr);
	chip->state = FL_WRITING;

	/* Sleep for the expected program time before polling. */
	spin_unlock_bh(chip->mutex);
	cfi_udelay(chip->buffer_write_time);
	spin_lock_bh(chip->mutex);

	timeo = jiffies + (HZ/2);
	z = 0;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone suspended the write.  Sleep. */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock_bh(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2);
			spin_lock_bh(chip->mutex);
			continue;
		}

		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK, still waiting. */
		if (time_after(jiffies, timeo)) {
			/* Clear status, return to Read Status mode. */
			map_write(map, CMD(0x50), cmd_adr);
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			DISABLE_VPP(map);
			spin_unlock_bh(chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
			return -EIO;
		}

		/* Latency issues: drop the lock, wait a while, retry. */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		z++;
		spin_lock_bh(chip->mutex);
	}
	/* Adapt the sleep time: shrink it if we never had to poll,
	 * grow it if we polled more than once. */
	if (!z) {
		chip->buffer_write_time--;
		if (!chip->buffer_write_time)
			chip->buffer_write_time++;
	}
	if (z > 1)
		chip->buffer_write_time++;

	/* Done and happy. */
	DISABLE_VPP(map);
	chip->state = FL_STATUS;

	/* Check the error bits (0x3a: lock, VPP, program, erase errors). */
	if (map_word_bitsset(map, status, CMD(0x3a))) {
#ifdef DEBUG_CFI_FEATURES
		printk("%s: 2 status[%lx]\n", __FUNCTION__, status.x[0]);
#endif
		/* Clear status, return to Read Status mode. */
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), adr);
		wake_up(&chip->wq);
		spin_unlock_bh(chip->mutex);
		/* Bit 0x02 = block protected -> -EROFS. */
		return map_word_bitsset(map, status, CMD(0x02)) ? -EROFS : -EIO;
	}
	wake_up(&chip->wq);
	spin_unlock_bh(chip->mutex);

	return 0;
}
609
610static int cfi_staa_write_buffers (struct mtd_info *mtd, loff_t to,
611 size_t len, size_t *retlen, const u_char *buf)
612{
613 struct map_info *map = mtd->priv;
614 struct cfi_private *cfi = map->fldrv_priv;
615 int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
616 int ret = 0;
617 int chipnum;
618 unsigned long ofs;
619
620 *retlen = 0;
621 if (!len)
622 return 0;
623
624 chipnum = to >> cfi->chipshift;
625 ofs = to - (chipnum << cfi->chipshift);
626
627#ifdef DEBUG_CFI_FEATURES
628 printk("%s: map_bankwidth(map)[%x]\n", __FUNCTION__, map_bankwidth(map));
629 printk("%s: chipnum[%x] wbufsize[%x]\n", __FUNCTION__, chipnum, wbufsize);
630 printk("%s: ofs[%x] len[%x]\n", __FUNCTION__, ofs, len);
631#endif
632
633
634 while (len > 0) {
635
636 int size = wbufsize - (ofs & (wbufsize-1));
637
638 if (size > len)
639 size = len;
640
641 ret = do_write_buffer(map, &cfi->chips[chipnum],
642 ofs, buf, size);
643 if (ret)
644 return ret;
645
646 ofs += size;
647 buf += size;
648 (*retlen) += size;
649 len -= size;
650
651 if (ofs >> cfi->chipshift) {
652 chipnum ++;
653 ofs = 0;
654 if (chipnum == cfi->numchips)
655 return 0;
656 }
657 }
658
659 return 0;
660}
661
662
663
664
665
666
667#define ECCBUF_SIZE (mtd->writesize)
668#define ECCBUF_DIV(x) ((x) & ~(ECCBUF_SIZE - 1))
669#define ECCBUF_MOD(x) ((x) & (ECCBUF_SIZE - 1))
670static int
671cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
672 unsigned long count, loff_t to, size_t *retlen)
673{
674 unsigned long i;
675 size_t totlen = 0, thislen;
676 int ret = 0;
677 size_t buflen = 0;
678 static char *buffer;
679
680 if (!ECCBUF_SIZE) {
681
682
683
684 return -EIO;
685 }
686 buffer = kmalloc(ECCBUF_SIZE, GFP_KERNEL);
687 if (!buffer)
688 return -ENOMEM;
689
690 for (i=0; i<count; i++) {
691 size_t elem_len = vecs[i].iov_len;
692 void *elem_base = vecs[i].iov_base;
693 if (!elem_len)
694 continue;
695 if (buflen) {
696 if (buflen + elem_len < ECCBUF_SIZE) {
697 memcpy(buffer+buflen, elem_base, elem_len);
698 buflen += elem_len;
699 continue;
700 }
701 memcpy(buffer+buflen, elem_base, ECCBUF_SIZE-buflen);
702 ret = mtd->write(mtd, to, ECCBUF_SIZE, &thislen, buffer);
703 totlen += thislen;
704 if (ret || thislen != ECCBUF_SIZE)
705 goto write_error;
706 elem_len -= thislen-buflen;
707 elem_base += thislen-buflen;
708 to += ECCBUF_SIZE;
709 }
710 if (ECCBUF_DIV(elem_len)) {
711 ret = mtd->write(mtd, to, ECCBUF_DIV(elem_len), &thislen, elem_base);
712 totlen += thislen;
713 if (ret || thislen != ECCBUF_DIV(elem_len))
714 goto write_error;
715 to += thislen;
716 }
717 buflen = ECCBUF_MOD(elem_len);
718 if (buflen) {
719 memset(buffer, 0xff, ECCBUF_SIZE);
720 memcpy(buffer, elem_base + thislen, buflen);
721 }
722 }
723 if (buflen) {
724
725 ret = mtd->write(mtd, to, buflen, &thislen, buffer);
726 totlen += thislen;
727 if (ret || thislen != ECCBUF_SIZE)
728 goto write_error;
729 }
730write_error:
731 if (retlen)
732 *retlen = totlen;
733 kfree(buffer);
734 return ret;
735}
736
737
/*
 * Erase one block at 'adr' (chip-relative) on one chip: wait for ready,
 * clear status, issue Block Erase (0x20/0xD0), sleep ~1s, then poll for
 * up to 20s (tolerating suspend/resume by other threads).  On failure
 * status bits, decode them; a failed-erase status (0x20) is retried up
 * to 3 times.  Returns 0, -EROFS (block protected) or -EIO.
 */
static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo;
	int retries = 3;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr += chip->start;

	/* Status-register "ready" bit, replicated across the interleave. */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
retry:
	spin_lock_bh(chip->mutex);

	/* Check that the chip is ready to talk to us. */
	switch (chip->state) {
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
	case FL_READY:
		map_write(map, CMD(0x70), adr); /* Read Status */
		chip->state = FL_STATUS;
		/* fall through */

	case FL_STATUS:
		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			spin_unlock_bh(chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in erase\n");
			return -EIO;
		}

		/* Latency issues: drop the lock, wait a while, retry. */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Park on the wait queue until someone changes the state. */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock_bh(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	ENABLE_VPP(map);
	/* Clear the status register first. */
	map_write(map, CMD(0x50), adr);

	/* Now erase: Block Erase setup + confirm. */
	map_write(map, CMD(0x20), adr);
	map_write(map, CMD(0xD0), adr);
	chip->state = FL_ERASING;

	/* Block erase is slow; sleep a whole second before polling. */
	spin_unlock_bh(chip->mutex);
	msleep(1000);
	spin_lock_bh(chip->mutex);

	timeo = jiffies + (HZ*20);
	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone suspended the erase.  Sleep. */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock_bh(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ*20);
			spin_lock_bh(chip->mutex);
			continue;
		}

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK, still waiting. */
		if (time_after(jiffies, timeo)) {
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			printk(KERN_ERR "waiting for erase to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
			DISABLE_VPP(map);
			spin_unlock_bh(chip->mutex);
			return -EIO;
		}

		/* Latency issues: drop the lock, wait a while, retry. */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		spin_lock_bh(chip->mutex);
	}

	DISABLE_VPP(map);
	ret = 0;

	/* Re-read the status explicitly; doesn't hurt to be safe. */
	map_write(map, CMD(0x70), adr);
	chip->state = FL_STATUS;
	status = map_read(map, adr);

	/* Check the error bits (0x3a: lock, VPP, program, erase errors). */
	if (map_word_bitsset(map, status, CMD(0x3a))) {
		unsigned char chipstatus = status.x[0];
		if (!map_word_equal(map, status, CMD(chipstatus))) {
			int i, w;
			/* Merge the per-chip status bytes into one.
			 * NOTE(review): the shift does not involve the loop
			 * indices i/w, so every iteration ORs in the same
			 * byte - looks like it was meant to scale with
			 * i and/or w; confirm against interleaved setups. */
			for (w=0; w<map_words(map); w++) {
				for (i = 0; i<cfi_interleave(cfi); i++) {
					chipstatus |= status.x[w] >> (cfi->device_type * 8);
				}
			}
			printk(KERN_WARNING "Status is not identical for all chips: 0x%lx. Merging to give 0x%02x\n",
			       status.x[0], chipstatus);
		}
		/* Reset the error bits, back to Read Status mode. */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);

		if ((chipstatus & 0x30) == 0x30) {
			printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%x\n", chipstatus);
			ret = -EIO;
		} else if (chipstatus & 0x02) {
			/* Protection bit set: block is locked. */
			ret = -EROFS;
		} else if (chipstatus & 0x8) {
			/* VPP low. */
			printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%x\n", chipstatus);
			ret = -EIO;
		} else if (chipstatus & 0x20) {
			/* Erase failure: retry the whole erase a few times. */
			if (retries--) {
				printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr, chipstatus);
				timeo = jiffies + HZ;
				chip->state = FL_STATUS;
				spin_unlock_bh(chip->mutex);
				goto retry;
			}
			printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x\n", adr, chipstatus);
			ret = -EIO;
		}
	}

	wake_up(&chip->wq);
	spin_unlock_bh(chip->mutex);
	return ret;
}
895
/*
 * Erase the range [instr->addr, instr->addr + instr->len).  Both ends
 * must fall on erase-block boundaries of the regions they lie in
 * (block sizes can differ per region); otherwise -EINVAL.  On success
 * the erase_info is marked MTD_ERASE_DONE and its callback is invoked.
 * Note: the forward declaration above is 'static', giving this
 * definition internal linkage despite the missing keyword here.
 */
int cfi_staa_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr, len;
	int chipnum, ret = 0;
	int i, first;
	struct mtd_erase_region_info *regions = mtd->eraseregions;

	/* Range-check the request against the device size. */
	if (instr->addr > mtd->size)
		return -EINVAL;

	if ((instr->len + instr->addr) > mtd->size)
		return -EINVAL;

	i = 0;

	/* Find the region containing instr->addr: skip to the first
	 * region starting after it, then step back one. */
	while (i < mtd->numeraseregions && instr->addr >= regions[i].offset)
		i++;
	i--;

	/* The start must be aligned to that region's block size. */
	if (instr->addr & (regions[i].erasesize-1))
		return -EINVAL;

	/* Remember the region the erase starts in. */
	first = i;

	/* Likewise find the region containing the end of the range... */
	while (i<mtd->numeraseregions && (instr->addr + instr->len) >= regions[i].offset)
		i++;

	i--;

	/* ...and check the end against that region's block size. */
	if ((instr->addr + instr->len) & (regions[i].erasesize-1))
		return -EINVAL;

	chipnum = instr->addr >> cfi->chipshift;
	adr = instr->addr - (chipnum << cfi->chipshift);
	len = instr->len;

	i=first;

	while(len) {
		ret = do_erase_oneblock(map, &cfi->chips[chipnum], adr);

		if (ret)
			return ret;

		adr += regions[i].erasesize;
		len -= regions[i].erasesize;

		/* Advance to the next region when we cross its end
		 * (offsets compared modulo the chip size). */
		if (adr % (1<< cfi->chipshift) == ((regions[i].offset + (regions[i].erasesize * regions[i].numblocks)) %( 1<< cfi->chipshift)))
			i++;

		/* Wrap to the next chip when we run off this one. */
		if (adr >> cfi->chipshift) {
			adr = 0;
			chipnum++;

			if (chipnum >= cfi->numchips)
				break;
		}
	}

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}
985
986static void cfi_staa_sync (struct mtd_info *mtd)
987{
988 struct map_info *map = mtd->priv;
989 struct cfi_private *cfi = map->fldrv_priv;
990 int i;
991 struct flchip *chip;
992 int ret = 0;
993 DECLARE_WAITQUEUE(wait, current);
994
995 for (i=0; !ret && i<cfi->numchips; i++) {
996 chip = &cfi->chips[i];
997
998 retry:
999 spin_lock_bh(chip->mutex);
1000
1001 switch(chip->state) {
1002 case FL_READY:
1003 case FL_STATUS:
1004 case FL_CFI_QUERY:
1005 case FL_JEDEC_QUERY:
1006 chip->oldstate = chip->state;
1007 chip->state = FL_SYNCING;
1008
1009
1010
1011
1012 case FL_SYNCING:
1013 spin_unlock_bh(chip->mutex);
1014 break;
1015
1016 default:
1017
1018 add_wait_queue(&chip->wq, &wait);
1019
1020 spin_unlock_bh(chip->mutex);
1021 schedule();
1022 remove_wait_queue(&chip->wq, &wait);
1023
1024 goto retry;
1025 }
1026 }
1027
1028
1029
1030 for (i--; i >=0; i--) {
1031 chip = &cfi->chips[i];
1032
1033 spin_lock_bh(chip->mutex);
1034
1035 if (chip->state == FL_SYNCING) {
1036 chip->state = chip->oldstate;
1037 wake_up(&chip->wq);
1038 }
1039 spin_unlock_bh(chip->mutex);
1040 }
1041}
1042
/*
 * Lock (write-protect) the erase block at 'adr' (chip-relative) on one
 * chip: wait for ready, issue Set Block Lock (0x60/0x01), sleep ~1s,
 * then poll for up to 2s.  Leaves the chip in FL_STATUS.  Returns 0 on
 * success, -EIO on timeout.
 */
static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);

	adr += chip->start;

	/* Status-register "ready" bit, replicated across the interleave. */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
retry:
	spin_lock_bh(chip->mutex);

	/* Check that the chip is ready to talk to us. */
	switch (chip->state) {
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
	case FL_READY:
		map_write(map, CMD(0x70), adr); /* Read Status */
		chip->state = FL_STATUS;
		/* fall through */

	case FL_STATUS:
		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			spin_unlock_bh(chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in lock\n");
			return -EIO;
		}

		/* Latency issues: drop the lock, wait a while, retry. */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Park on the wait queue until someone changes the state. */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock_bh(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	ENABLE_VPP(map);
	map_write(map, CMD(0x60), adr); /* Block Lock setup */
	map_write(map, CMD(0x01), adr); /* Set Block Lock confirm */
	chip->state = FL_LOCKING;

	/* Locking is slow; sleep a whole second before polling. */
	spin_unlock_bh(chip->mutex);
	msleep(1000);
	spin_lock_bh(chip->mutex);

	timeo = jiffies + (HZ*2);
	for (;;) {

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK, still waiting. */
		if (time_after(jiffies, timeo)) {
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			printk(KERN_ERR "waiting for lock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
			DISABLE_VPP(map);
			spin_unlock_bh(chip->mutex);
			return -EIO;
		}

		/* Latency issues: drop the lock, wait a while, retry. */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		spin_lock_bh(chip->mutex);
	}

	/* Done and happy. */
	chip->state = FL_STATUS;
	DISABLE_VPP(map);
	wake_up(&chip->wq);
	spin_unlock_bh(chip->mutex);
	return 0;
}
1138static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
1139{
1140 struct map_info *map = mtd->priv;
1141 struct cfi_private *cfi = map->fldrv_priv;
1142 unsigned long adr;
1143 int chipnum, ret = 0;
1144#ifdef DEBUG_LOCK_BITS
1145 int ofs_factor = cfi->interleave * cfi->device_type;
1146#endif
1147
1148 if (ofs & (mtd->erasesize - 1))
1149 return -EINVAL;
1150
1151 if (len & (mtd->erasesize -1))
1152 return -EINVAL;
1153
1154 if ((len + ofs) > mtd->size)
1155 return -EINVAL;
1156
1157 chipnum = ofs >> cfi->chipshift;
1158 adr = ofs - (chipnum << cfi->chipshift);
1159
1160 while(len) {
1161
1162#ifdef DEBUG_LOCK_BITS
1163 cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
1164 printk("before lock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
1165 cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
1166#endif
1167
1168 ret = do_lock_oneblock(map, &cfi->chips[chipnum], adr);
1169
1170#ifdef DEBUG_LOCK_BITS
1171 cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
1172 printk("after lock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
1173 cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
1174#endif
1175
1176 if (ret)
1177 return ret;
1178
1179 adr += mtd->erasesize;
1180 len -= mtd->erasesize;
1181
1182 if (adr >> cfi->chipshift) {
1183 adr = 0;
1184 chipnum++;
1185
1186 if (chipnum >= cfi->numchips)
1187 break;
1188 }
1189 }
1190 return 0;
1191}
/*
 * Unlock the erase block at 'adr' (chip-relative) on one chip: wait for
 * ready, issue Clear Block Lock (0x60/0xD0), sleep ~1s, then poll for
 * up to 2s.  Leaves the chip in FL_STATUS.  Returns 0 on success, -EIO
 * on timeout.
 */
static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);

	adr += chip->start;

	/* Status-register "ready" bit, replicated across the interleave. */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
retry:
	spin_lock_bh(chip->mutex);

	/* Check that the chip is ready to talk to us. */
	switch (chip->state) {
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
	case FL_READY:
		map_write(map, CMD(0x70), adr); /* Read Status */
		chip->state = FL_STATUS;
		/* fall through */

	case FL_STATUS:
		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			spin_unlock_bh(chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in unlock\n");
			return -EIO;
		}

		/* Latency issues: drop the lock, wait a while, retry. */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Park on the wait queue until someone changes the state. */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock_bh(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	ENABLE_VPP(map);
	map_write(map, CMD(0x60), adr); /* Block Lock setup */
	map_write(map, CMD(0xD0), adr); /* Clear Block Lock confirm */
	chip->state = FL_UNLOCKING;

	/* Unlocking is slow; sleep a whole second before polling. */
	spin_unlock_bh(chip->mutex);
	msleep(1000);
	spin_lock_bh(chip->mutex);

	timeo = jiffies + (HZ*2);
	for (;;) {

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK, still waiting. */
		if (time_after(jiffies, timeo)) {
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			printk(KERN_ERR "waiting for unlock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
			DISABLE_VPP(map);
			spin_unlock_bh(chip->mutex);
			return -EIO;
		}

		/* Latency issues: drop the lock, wait a while, retry. */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		spin_lock_bh(chip->mutex);
	}

	/* Done and happy. */
	chip->state = FL_STATUS;
	DISABLE_VPP(map);
	wake_up(&chip->wq);
	spin_unlock_bh(chip->mutex);
	return 0;
}
/*
 * MTD unlock entry point.  Unlike cfi_staa_lock(), this issues a single
 * do_unlock_oneblock() for the first block only, regardless of 'len'
 * (only the DEBUG code walks the whole range).  NOTE(review): presumably
 * clearing one block's lock releases the whole chip on these devices -
 * confirm against the chip datasheet.  Also note there is no alignment
 * or range validation here, unlike the lock path.
 */
static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr;
	int chipnum, ret = 0;
#ifdef DEBUG_LOCK_BITS
	int ofs_factor = cfi->interleave * cfi->device_type;
#endif

	chipnum = ofs >> cfi->chipshift;
	adr = ofs - (chipnum << cfi->chipshift);

#ifdef DEBUG_LOCK_BITS
	{
		/* Dump the lock status of every block in the range. */
		unsigned long temp_adr = adr;
		unsigned long temp_len = len;

		cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
		while (temp_len) {
			printk("before unlock %x: block status register is %x\n",temp_adr,cfi_read_query(map, temp_adr+(2*ofs_factor)));
			temp_adr += mtd->erasesize;
			temp_len -= mtd->erasesize;
		}
		cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
	}
#endif

	ret = do_unlock_oneblock(map, &cfi->chips[chipnum], adr);

#ifdef DEBUG_LOCK_BITS
	cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
	printk("after unlock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
	cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
#endif

	return ret;
}
1325
/*
 * Power-management suspend: park every idle chip in FL_PM_SUSPENDED.
 * If any chip is busy (erasing, writing, ...), return -EAGAIN and roll
 * back every chip already suspended to its previous state.
 */
static int cfi_staa_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock_bh(chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_PM_SUSPENDED;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway. */
			/* fall through */
		case FL_PM_SUSPENDED:
			break;

		default:
			/* Chip is busy; abort and roll back below. */
			ret = -EAGAIN;
			break;
		}
		spin_unlock_bh(chip->mutex);
	}

	/* Unlock the chips again. */
	if (ret) {
		for (i--; i >=0; i--) {
			chip = &cfi->chips[i];

			spin_lock_bh(chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				/* No need to force a known state: we are
				 * returning failure and the chip was never
				 * power-cycled. */
				chip->state = chip->oldstate;
				wake_up(&chip->wq);
			}
			spin_unlock_bh(chip->mutex);
		}
	}

	return ret;
}
1381
1382static void cfi_staa_resume(struct mtd_info *mtd)
1383{
1384 struct map_info *map = mtd->priv;
1385 struct cfi_private *cfi = map->fldrv_priv;
1386 int i;
1387 struct flchip *chip;
1388
1389 for (i=0; i<cfi->numchips; i++) {
1390
1391 chip = &cfi->chips[i];
1392
1393 spin_lock_bh(chip->mutex);
1394
1395
1396 if (chip->state == FL_PM_SUSPENDED) {
1397 map_write(map, CMD(0xFF), 0);
1398 chip->state = FL_READY;
1399 wake_up(&chip->wq);
1400 }
1401
1402 spin_unlock_bh(chip->mutex);
1403 }
1404}
1405
1406static void cfi_staa_destroy(struct mtd_info *mtd)
1407{
1408 struct map_info *map = mtd->priv;
1409 struct cfi_private *cfi = map->fldrv_priv;
1410 kfree(cfi->cmdset_priv);
1411 kfree(cfi);
1412}
1413
1414MODULE_LICENSE("GPL");
1415