1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21#include <linux/module.h>
22#include <linux/types.h>
23#include <linux/kernel.h>
24#include <linux/sched.h>
25#include <linux/init.h>
26#include <asm/io.h>
27#include <asm/byteorder.h>
28
29#include <linux/errno.h>
30#include <linux/slab.h>
31#include <linux/delay.h>
32#include <linux/interrupt.h>
33#include <linux/mtd/map.h>
34#include <linux/mtd/cfi.h>
35#include <linux/mtd/mtd.h>
36
37
38static int cfi_staa_read(struct mtd_info *, loff_t, size_t, size_t *, u_char *);
39static int cfi_staa_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
40static int cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
41 unsigned long count, loff_t to, size_t *retlen);
42static int cfi_staa_erase_varsize(struct mtd_info *, struct erase_info *);
43static void cfi_staa_sync (struct mtd_info *);
44static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
45static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
46static int cfi_staa_suspend (struct mtd_info *);
47static void cfi_staa_resume (struct mtd_info *);
48
49static void cfi_staa_destroy(struct mtd_info *);
50
51struct mtd_info *cfi_cmdset_0020(struct map_info *, int);
52
53static struct mtd_info *cfi_staa_setup (struct map_info *);
54
/*
 * Chip-driver registration record for CFI command set 0x0020.
 * .probe is NULL: chips are matched by the generic CFI probe naming
 * "cfi_cmdset_0020"; only the destroy hook is provided here.
 */
static struct mtd_chip_driver cfi_staa_chipdrv = {
	.probe		= NULL, /* matched by name via the generic CFI probe */
	.destroy	= cfi_staa_destroy,
	.name		= "cfi_cmdset_0020",
	.module		= THIS_MODULE
};
61
62
63
64
65#ifdef DEBUG_CFI_FEATURES
66static void cfi_tell_features(struct cfi_pri_intelext *extp)
67{
68 int i;
69 printk(" Feature/Command Support: %4.4X\n", extp->FeatureSupport);
70 printk(" - Chip Erase: %s\n", extp->FeatureSupport&1?"supported":"unsupported");
71 printk(" - Suspend Erase: %s\n", extp->FeatureSupport&2?"supported":"unsupported");
72 printk(" - Suspend Program: %s\n", extp->FeatureSupport&4?"supported":"unsupported");
73 printk(" - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
74 printk(" - Queued Erase: %s\n", extp->FeatureSupport&16?"supported":"unsupported");
75 printk(" - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
76 printk(" - Protection Bits: %s\n", extp->FeatureSupport&64?"supported":"unsupported");
77 printk(" - Page-mode read: %s\n", extp->FeatureSupport&128?"supported":"unsupported");
78 printk(" - Synchronous read: %s\n", extp->FeatureSupport&256?"supported":"unsupported");
79 for (i=9; i<32; i++) {
80 if (extp->FeatureSupport & (1<<i))
81 printk(" - Unknown Bit %X: supported\n", i);
82 }
83
84 printk(" Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
85 printk(" - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
86 for (i=1; i<8; i++) {
87 if (extp->SuspendCmdSupport & (1<<i))
88 printk(" - Unknown Bit %X: supported\n", i);
89 }
90
91 printk(" Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
92 printk(" - Lock Bit Active: %s\n", extp->BlkStatusRegMask&1?"yes":"no");
93 printk(" - Valid Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
94 for (i=2; i<16; i++) {
95 if (extp->BlkStatusRegMask & (1<<i))
96 printk(" - Unknown Bit %X Active: yes\n",i);
97 }
98
99 printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
100 extp->VccOptimal >> 8, extp->VccOptimal & 0xf);
101 if (extp->VppOptimal)
102 printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
103 extp->VppOptimal >> 8, extp->VppOptimal & 0xf);
104}
105#endif
106
107
108
109
110
111
112
113
/*
 * Entry point for chips advertising CFI command set 0x0020 (ST Advanced
 * Architecture).  Reads and validates the extended query table (when a
 * real CFI query is available), stores it in cfi->cmdset_priv, seeds the
 * per-chip timing estimates and wait queues, and hands off to
 * cfi_staa_setup() to build the mtd_info.  Returns NULL on failure.
 */
struct mtd_info *cfi_cmdset_0020(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int i;

	if (cfi->cfi_mode) {
		/* A real CFI query is available (as opposed to JEDEC
		 * identification): fetch the primary or alternate
		 * vendor-specific extended query table.
		 * NOTE(review): cfi_mode semantics presumed — confirm. */
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_intelext *extp;

		extp = (struct cfi_pri_intelext*)cfi_read_pri(map, adr, sizeof(*extp), "ST Microelectronics");
		if (!extp)
			return NULL;

		/* Only Extended Query table versions 1.0 - 1.3 are known. */
		if (extp->MajorVersion != '1' ||
		    (extp->MinorVersion < '0' || extp->MinorVersion > '3')) {
			printk(KERN_ERR " Unknown ST Microelectronics"
			       " Extended Query version %c.%c.\n",
			       extp->MajorVersion, extp->MinorVersion);
			kfree(extp);
			return NULL;
		}

		/* The multi-byte fields of the query arrive little-endian;
		 * byteswap to host order where necessary. */
		extp->FeatureSupport = cfi32_to_cpu(extp->FeatureSupport);
		extp->BlkStatusRegMask = cfi32_to_cpu(extp->BlkStatusRegMask);

#ifdef DEBUG_CFI_FEATURES
		/* Dump the advertised feature bits. */
		cfi_tell_features(extp);
#endif

		/* Install the validated extended query for later use
		 * (e.g. the erase-suspend check in do_read_onechip). */
		cfi->cmdset_priv = extp;
	}

	/* Initial per-chip timing estimates (buffer_write_time is fed to
	 * cfi_udelay() in do_write_buffer and adaptively tuned there;
	 * NOTE(review): units presumed microseconds — confirm) and the
	 * wait queues used by the chip state machines. */
	for (i=0; i< cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 128;
		cfi->chips[i].buffer_write_time = 128;
		cfi->chips[i].erase_time = 1024;
		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	return cfi_staa_setup(map);
}
EXPORT_SYMBOL_GPL(cfi_cmdset_0020);
165
/*
 * Build the mtd_info for the probed set of interleaved chips: total
 * size, the flattened per-chip erase-region table, and the cfi_staa_*
 * operation vectors.  On any failure cfi->cmdset_priv is freed and
 * NULL is returned.
 */
static struct mtd_info *cfi_staa_setup(struct map_info *map)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	unsigned long offset = 0;
	int i,j;
	/* CFI DevSize is log2 of one chip's size in bytes. */
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

	/* kzalloc matters: the erasesize maximum scan below relies on
	 * mtd->erasesize starting at 0. */
	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);

	if (!mtd) {
		printk(KERN_ERR "Failed to allocate memory for MTD device\n");
		kfree(cfi->cmdset_priv);
		return NULL;
	}

	mtd->priv = map;
	mtd->type = MTD_NORFLASH;
	mtd->size = devsize * cfi->numchips;

	/* One region entry per (chip, CFI erase region) pair. */
	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
			* mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
		kfree(cfi->cmdset_priv);
		kfree(mtd);
		return NULL;
	}

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		/* CFI erase-region word: low 16 bits = block count - 1,
		 * high 16 bits = block size / 256.  "(x >> 8) & ~0xff"
		 * therefore yields the block size in bytes. */
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		/* mtd->erasesize reports the largest region's block size. */
		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		/* Replicate this region's geometry for every chip. */
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}

	/* Sanity check: the regions must exactly tile one chip. */
	if (offset != devsize) {
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		kfree(mtd->eraseregions);
		kfree(cfi->cmdset_priv);
		kfree(mtd);
		return NULL;
	}

	for (i=0; i<mtd->numeraseregions;i++){
		printk(KERN_DEBUG "%d: offset=0x%llx,size=0x%x,blocks=%d\n",
		       i, (unsigned long long)mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}

	/* Install the operation vectors. */
	mtd->erase = cfi_staa_erase_varsize;
	mtd->read = cfi_staa_read;
	mtd->write = cfi_staa_write_buffers;
	mtd->writev = cfi_staa_writev;
	mtd->sync = cfi_staa_sync;
	mtd->lock = cfi_staa_lock;
	mtd->unlock = cfi_staa_unlock;
	mtd->suspend = cfi_staa_suspend;
	mtd->resume = cfi_staa_resume;
	/* Bit-level rewrites are not possible on these parts. */
	mtd->flags = MTD_CAP_NORFLASH & ~MTD_BIT_WRITEABLE;
	mtd->writesize = 8; /* ECC unit size; also used by cfi_staa_writev */
	mtd->writebufsize = 1 << cfi->cfiq->MaxBufWriteSize;
	map->fldrv = &cfi_staa_chipdrv;
	__module_get(THIS_MODULE);
	mtd->name = map->name;
	return mtd;
}
247
248
/*
 * Read 'len' bytes at offset 'adr' of one chip into 'buf', waiting for
 * the chip to become ready first.  An in-progress erase is suspended
 * for the duration of the read when the chip advertises erase-suspend
 * support (extended query FeatureSupport bit 1), and resumed after the
 * copy.  Returns 0 on success, -EIO if the chip never reports ready.
 */
static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	map_word status, status_OK;
	unsigned long timeo;
	DECLARE_WAITQUEUE(wait, current);
	int suspended = 0;
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;

	adr += chip->start;

	/* Commands must be issued at a bus-aligned address. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	/* Status "WSM ready" bit (0x80), replicated across the interleave. */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
 retry:
	mutex_lock(&chip->mutex);

	/* Check that the chip is ready to talk to us; if it is erasing
	 * and can suspend, suspend it so we can read now. */
	switch (chip->state) {
	case FL_ERASING:
		if (!(((struct cfi_pri_intelext *)cfi->cmdset_priv)->FeatureSupport & 2))
			goto sleep; /* no erase-suspend support: just wait */

		map_write (map, CMD(0xb0), cmd_addr);	/* Erase Suspend */

		/* Force read-status mode so the ready poll below reads the
		 * status register rather than array data.
		 * NOTE(review): presumed rationale — confirm. */
		map_write(map, CMD(0x70), cmd_addr);	/* Read Status */
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;

		/* Poll until the suspend has taken effect. */
		for (;;) {
			status = map_read(map, cmd_addr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			if (time_after(jiffies, timeo)) {
				/* Give up: resume the erase and bail out. */
				map_write(map, CMD(0xd0), cmd_addr);	/* Erase Resume */
				/* make sure we're in 'read status' mode */
				map_write(map, CMD(0x70), cmd_addr);
				chip->state = FL_ERASING;
				mutex_unlock(&chip->mutex);
				printk(KERN_ERR "Chip not ready after erase "
				       "suspended: status = 0x%lx\n", status.x[0]);
				return -EIO;
			}

			/* Drop the lock while busy-waiting. */
			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
		}

		suspended = 1;
		map_write(map, CMD(0xff), cmd_addr);	/* Read Array for the copy */
		chip->state = FL_READY;
		break;

#if 0
	case FL_WRITING:
		/* Not quite yet */
#endif

	case FL_READY:
		break;

	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		/* Leave query mode via Read Status, then poll below. */
		map_write(map, CMD(0x70), cmd_addr);
		chip->state = FL_STATUS;
		/* Fall through */
	case FL_STATUS:
		status = map_read(map, cmd_addr);
		if (map_word_andequal(map, status, status_OK, status_OK)) {
			map_write(map, CMD(0xff), cmd_addr);	/* Read Array */
			chip->state = FL_READY;
			break;
		}

		/* Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in read. WSM status = %lx\n", status.x[0]);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry. */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
	sleep:
		/* Another operation is in progress: sleep until woken and
		 * then re-evaluate the chip state from scratch. */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	map_copy_from(map, buf, adr, len);

	if (suspended) {
		/* Resume the suspended erase and return to read-status
		 * mode, restoring the pre-suspend chip state. */
		chip->state = chip->oldstate;
		map_write(map, CMD(0xd0), cmd_addr);	/* Erase Resume */
		map_write(map, CMD(0x70), cmd_addr);	/* Read Status */
	}

	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);
	return 0;
}
382
383static int cfi_staa_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
384{
385 struct map_info *map = mtd->priv;
386 struct cfi_private *cfi = map->fldrv_priv;
387 unsigned long ofs;
388 int chipnum;
389 int ret = 0;
390
391
392 chipnum = (from >> cfi->chipshift);
393 ofs = from - (chipnum << cfi->chipshift);
394
395 *retlen = 0;
396
397 while (len) {
398 unsigned long thislen;
399
400 if (chipnum >= cfi->numchips)
401 break;
402
403 if ((len + ofs -1) >> cfi->chipshift)
404 thislen = (1<<cfi->chipshift) - ofs;
405 else
406 thislen = len;
407
408 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
409 if (ret)
410 break;
411
412 *retlen += thislen;
413 len -= thislen;
414 buf += thislen;
415
416 ofs = 0;
417 chipnum++;
418 }
419 return ret;
420}
421
/*
 * Program 'len' bytes at 'adr' on one chip using the Write-to-Buffer
 * (0xe8) command sequence.  The data must lie within one write-buffer
 * region (caller guarantees this by splitting at wbufsize boundaries).
 * Returns 0 on success, -EINVAL on misaligned 'adr', -EROFS if the
 * block is locked, -EIO on timeout or other error status.
 */
static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
				  unsigned long adr, const u_char *buf, int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long cmd_adr, timeo;
	DECLARE_WAITQUEUE(wait, current);
	int wbufsize, z;

	/* Buffer writes must be bus-aligned. */
	if (adr & (map_bankwidth(map)-1))
		return -EINVAL;

	wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	adr += chip->start;
	/* Commands are issued at the base of the write-buffer region. */
	cmd_adr = adr & ~(wbufsize-1);

	/* Status "WSM ready" bit across the interleave. */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
 retry:

#ifdef DEBUG_CFI_FEATURES
	printk("%s: chip->state[%d]\n", __func__, chip->state);
#endif
	mutex_lock(&chip->mutex);

	/* Check that the chip is ready to talk to us. */
	switch (chip->state) {
	case FL_READY:
		break;

	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		/* Leave query mode via Read Status, then poll below. */
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
#ifdef DEBUG_CFI_FEATURES
		printk("%s: 1 status[%x]\n", __func__, map_read(map, cmd_adr));
#endif
		/* Fall through */
	case FL_STATUS:
		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in buffer write Xstatus = %lx, status = %lx\n",
			       status.x[0], map_read(map, cmd_adr).x[0]);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry. */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Another operation is in progress: sleep until woken. */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	ENABLE_VPP(map);
	map_write(map, CMD(0xe8), cmd_adr);	/* Write to Buffer */
	chip->state = FL_WRITING_TO_BUFFER;

	/* Bounded poll for the chip to grant the write buffer. */
	z = 0;
	for (;;) {
		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		mutex_lock(&chip->mutex);

		if (++z > 100) {
			/* Buffer never became available: abort cleanly. */
			DISABLE_VPP(map);
			map_write(map, CMD(0x70), cmd_adr);
			chip->state = FL_STATUS;
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "Chip not ready for buffer write. Xstatus = %lx\n", status.x[0]);
			return -EIO;
		}
	}

	/* Write length of data to come: word count minus one. */
	map_write(map, CMD(len/map_bankwidth(map)-1), cmd_adr );

	/* Fill the buffer one bus word at a time. */
	for (z = 0; z < len;
	     z += map_bankwidth(map), buf += map_bankwidth(map)) {
		map_word d;
		d = map_word_load(map, buf);
		map_write(map, d, adr+z);
	}

	map_write(map, CMD(0xd0), cmd_adr);	/* Confirm: start programming */
	chip->state = FL_WRITING;

	/* Give programming a head start before we poll for completion. */
	mutex_unlock(&chip->mutex);
	cfi_udelay(chip->buffer_write_time);
	mutex_lock(&chip->mutex);

	timeo = jiffies + (HZ/2);
	z = 0;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep. */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2);
			mutex_lock(&chip->mutex);
			continue;
		}

		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* Still waiting. */
		if (time_after(jiffies, timeo)) {
			/* Clear status and go back to read-status mode. */
			map_write(map, CMD(0x50), cmd_adr);
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			DISABLE_VPP(map);
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry. */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		z++;
		mutex_lock(&chip->mutex);
	}

	/* Adaptively tune buffer_write_time: shrink (floor 1) when we
	 * never had to poll, grow when we polled more than once. */
	if (!z) {
		chip->buffer_write_time--;
		if (!chip->buffer_write_time)
			chip->buffer_write_time++;
	}
	if (z > 1)
		chip->buffer_write_time++;

	/* Done and happy. */
	DISABLE_VPP(map);
	chip->state = FL_STATUS;

	/* 0x3a covers the error bits also tested individually in
	 * do_erase_oneblock (erase/program error, VPP low, block locked). */
	if (map_word_bitsset(map, status, CMD(0x3a))) {
#ifdef DEBUG_CFI_FEATURES
		printk("%s: 2 status[%lx]\n", __func__, status.x[0]);
#endif
		/* Clear status and go back to read-status mode. */
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), adr);
		wake_up(&chip->wq);
		mutex_unlock(&chip->mutex);
		/* 0x02 = block-locked bit -> write-protected. */
		return map_word_bitsset(map, status, CMD(0x02)) ? -EROFS : -EIO;
	}
	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);

	return 0;
}
607
608static int cfi_staa_write_buffers (struct mtd_info *mtd, loff_t to,
609 size_t len, size_t *retlen, const u_char *buf)
610{
611 struct map_info *map = mtd->priv;
612 struct cfi_private *cfi = map->fldrv_priv;
613 int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
614 int ret = 0;
615 int chipnum;
616 unsigned long ofs;
617
618 *retlen = 0;
619 if (!len)
620 return 0;
621
622 chipnum = to >> cfi->chipshift;
623 ofs = to - (chipnum << cfi->chipshift);
624
625#ifdef DEBUG_CFI_FEATURES
626 printk("%s: map_bankwidth(map)[%x]\n", __func__, map_bankwidth(map));
627 printk("%s: chipnum[%x] wbufsize[%x]\n", __func__, chipnum, wbufsize);
628 printk("%s: ofs[%x] len[%x]\n", __func__, ofs, len);
629#endif
630
631
632 while (len > 0) {
633
634 int size = wbufsize - (ofs & (wbufsize-1));
635
636 if (size > len)
637 size = len;
638
639 ret = do_write_buffer(map, &cfi->chips[chipnum],
640 ofs, buf, size);
641 if (ret)
642 return ret;
643
644 ofs += size;
645 buf += size;
646 (*retlen) += size;
647 len -= size;
648
649 if (ofs >> cfi->chipshift) {
650 chipnum ++;
651 ofs = 0;
652 if (chipnum == cfi->numchips)
653 return 0;
654 }
655 }
656
657 return 0;
658}
659
660
661
662
663
664
665#define ECCBUF_SIZE (mtd->writesize)
666#define ECCBUF_DIV(x) ((x) & ~(ECCBUF_SIZE - 1))
667#define ECCBUF_MOD(x) ((x) & (ECCBUF_SIZE - 1))
668static int
669cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
670 unsigned long count, loff_t to, size_t *retlen)
671{
672 unsigned long i;
673 size_t totlen = 0, thislen;
674 int ret = 0;
675 size_t buflen = 0;
676 static char *buffer;
677
678 if (!ECCBUF_SIZE) {
679
680
681
682 return -EIO;
683 }
684 buffer = kmalloc(ECCBUF_SIZE, GFP_KERNEL);
685 if (!buffer)
686 return -ENOMEM;
687
688 for (i=0; i<count; i++) {
689 size_t elem_len = vecs[i].iov_len;
690 void *elem_base = vecs[i].iov_base;
691 if (!elem_len)
692 continue;
693 if (buflen) {
694 if (buflen + elem_len < ECCBUF_SIZE) {
695 memcpy(buffer+buflen, elem_base, elem_len);
696 buflen += elem_len;
697 continue;
698 }
699 memcpy(buffer+buflen, elem_base, ECCBUF_SIZE-buflen);
700 ret = mtd->write(mtd, to, ECCBUF_SIZE, &thislen, buffer);
701 totlen += thislen;
702 if (ret || thislen != ECCBUF_SIZE)
703 goto write_error;
704 elem_len -= thislen-buflen;
705 elem_base += thislen-buflen;
706 to += ECCBUF_SIZE;
707 }
708 if (ECCBUF_DIV(elem_len)) {
709 ret = mtd->write(mtd, to, ECCBUF_DIV(elem_len), &thislen, elem_base);
710 totlen += thislen;
711 if (ret || thislen != ECCBUF_DIV(elem_len))
712 goto write_error;
713 to += thislen;
714 }
715 buflen = ECCBUF_MOD(elem_len);
716 if (buflen) {
717 memset(buffer, 0xff, ECCBUF_SIZE);
718 memcpy(buffer, elem_base + thislen, buflen);
719 }
720 }
721 if (buflen) {
722
723 ret = mtd->write(mtd, to, buflen, &thislen, buffer);
724 totlen += thislen;
725 if (ret || thislen != ECCBUF_SIZE)
726 goto write_error;
727 }
728write_error:
729 if (retlen)
730 *retlen = totlen;
731 kfree(buffer);
732 return ret;
733}
734
735
/*
 * Erase the single block at 'adr' on one chip (0x20 setup + 0xD0
 * confirm) and wait for completion, retrying up to three times on an
 * erase-error status.  Returns 0 on success, -EROFS if the block is
 * locked, -EIO on timeout, bad command sequence or VPP error.
 */
static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo;
	int retries = 3;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr += chip->start;

	/* Status "WSM ready" bit across the interleave. */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
retry:
	mutex_lock(&chip->mutex);

	/* Check that the chip is ready to talk to us. */
	switch (chip->state) {
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
	case FL_READY:
		map_write(map, CMD(0x70), adr);	/* Read Status */
		chip->state = FL_STATUS;
		/* Fall through */
	case FL_STATUS:
		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in erase\n");
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry. */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Another operation is in progress: sleep until woken. */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	ENABLE_VPP(map);
	/* Clear the status register first. */
	map_write(map, CMD(0x50), adr);

	/* Block Erase setup + confirm. */
	map_write(map, CMD(0x20), adr);
	map_write(map, CMD(0xD0), adr);
	chip->state = FL_ERASING;

	/* Erases are slow: sleep a whole second before polling. */
	mutex_unlock(&chip->mutex);
	msleep(1000);
	mutex_lock(&chip->mutex);

	/* Poll for completion, with a generous 20 second budget. */
	timeo = jiffies + (HZ*20);
	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep. */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ*20);
			mutex_lock(&chip->mutex);
			continue;
		}

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* Still waiting. */
		if (time_after(jiffies, timeo)) {
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			printk(KERN_ERR "waiting for erase to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
			DISABLE_VPP(map);
			mutex_unlock(&chip->mutex);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry. */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		mutex_lock(&chip->mutex);
	}

	DISABLE_VPP(map);
	ret = 0;

	/* Re-enter read-status mode and fetch the final status. */
	map_write(map, CMD(0x70), adr);
	chip->state = FL_STATUS;
	status = map_read(map, adr);

	/* 0x3a = erase/program error, VPP low, block locked. */
	if (map_word_bitsset(map, status, CMD(0x3a))) {
		unsigned char chipstatus = status.x[0];
		if (!map_word_equal(map, status, CMD(chipstatus))) {
			/* Interleaved chips disagree: OR all per-lane status
			 * bytes so no error bit is missed.
			 * NOTE(review): the shift uses cfi->device_type*8
			 * for every lane rather than a lane index — looks
			 * suspicious but kept as-is. */
			int i, w;
			for (w=0; w<map_words(map); w++) {
				for (i = 0; i<cfi_interleave(cfi); i++) {
					chipstatus |= status.x[w] >> (cfi->device_type * 8);
				}
			}
			printk(KERN_WARNING "Status is not identical for all chips: 0x%lx. Merging to give 0x%02x\n",
			       status.x[0], chipstatus);
		}
		/* Reset the error bits and stay in read-status mode. */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);

		if ((chipstatus & 0x30) == 0x30) {
			/* Program and erase error both set: bad sequence. */
			printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%x\n", chipstatus);
			ret = -EIO;
		} else if (chipstatus & 0x02) {
			/* Protection bit set: block is locked. */
			ret = -EROFS;
		} else if (chipstatus & 0x8) {
			/* VPP below required level. */
			printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%x\n", chipstatus);
			ret = -EIO;
		} else if (chipstatus & 0x20) {
			/* Erase error: retry the whole block a few times. */
			if (retries--) {
				printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr, chipstatus);
				timeo = jiffies + HZ;
				chip->state = FL_STATUS;
				mutex_unlock(&chip->mutex);
				goto retry;
			}
			printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x\n", adr, chipstatus);
			ret = -EIO;
		}
	}

	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);
	return ret;
}
893
/*
 * MTD erase entry point.  Validates that [addr, addr+len) fits the
 * device and that both endpoints are aligned to the erase-block size
 * of the (possibly different-sized) region they fall in, then erases
 * block by block via do_erase_oneblock().
 */
static int cfi_staa_erase_varsize(struct mtd_info *mtd,
				  struct erase_info *instr)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr, len;
	int chipnum, ret = 0;
	int i, first;
	struct mtd_erase_region_info *regions = mtd->eraseregions;

	if (instr->addr > mtd->size)
		return -EINVAL;

	if ((instr->len + instr->addr) > mtd->size)
		return -EINVAL;

	/* Check that both start and end of the requested erase are
	 * aligned with the erasesize at the appropriate addresses. */
	i = 0;

	/* Skip to the first erase region which starts after the start
	 * of the requested erase, then step back one so 'i' indexes
	 * the region containing instr->addr. */
	while (i < mtd->numeraseregions && instr->addr >= regions[i].offset)
		i++;
	i--;

	/* The start must be aligned to that region's block size. */
	if (instr->addr & (regions[i].erasesize-1))
		return -EINVAL;

	/* Remember the erase region we start in. */
	first = i;

	/* Same dance for the end address: find the region containing
	 * instr->addr + instr->len. */
	while (i<mtd->numeraseregions && (instr->addr + instr->len) >= regions[i].offset)
		i++;

	i--;

	/* The end must be aligned to that region's block size. */
	if ((instr->addr + instr->len) & (regions[i].erasesize-1))
		return -EINVAL;

	/* Convert to (chip, in-chip offset) coordinates. */
	chipnum = instr->addr >> cfi->chipshift;
	adr = instr->addr - (chipnum << cfi->chipshift);
	len = instr->len;

	i=first;

	while(len) {
		ret = do_erase_oneblock(map, &cfi->chips[chipnum], adr);

		if (ret)
			return ret;

		adr += regions[i].erasesize;
		len -= regions[i].erasesize;

		/* Advance to the next region table entry when we reach
		 * the end of the current region (modulo chip size, since
		 * 'adr' is a per-chip offset but region offsets are
		 * absolute). */
		if (adr % (1<< cfi->chipshift) == (((unsigned long)regions[i].offset + (regions[i].erasesize * regions[i].numblocks)) %( 1<< cfi->chipshift)))
			i++;

		/* Crossed into the next chip: restart at its base. */
		if (adr >> cfi->chipshift) {
			adr = 0;
			chipnum++;

			if (chipnum >= cfi->numchips)
				break;
		}
	}

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}
984
/*
 * MTD sync handler: wait until every chip is idle.  Each idle chip is
 * parked in FL_SYNCING so nothing new starts on it; once all chips are
 * parked, they are restored to their previous state and waiters woken.
 */
static void cfi_staa_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

	retry:
		mutex_lock(&chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			/* Idle: park it.  No wake_up() needed for this
			 * transition — nobody can use the chip now anyway. */
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* Fall through */
		case FL_SYNCING:
			mutex_unlock(&chip->mutex);
			break;

		default:
			/* Busy: sleep until the current operation wakes us,
			 * then look at this chip again. */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);

			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);

			goto retry;
		}
	}

	/* All chips quiesced: release them again in reverse order. */
	for (i--; i >=0; i--) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}
		mutex_unlock(&chip->mutex);
	}
}
1042
/*
 * Set the lock bit for the block at 'adr' on one chip (0x60 lock setup
 * + 0x01 set-lock confirm) and wait for completion.
 * Returns 0 on success, -EIO on timeout.
 */
static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);

	adr += chip->start;

	/* Status "WSM ready" bit across the interleave. */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
retry:
	mutex_lock(&chip->mutex);

	/* Check that the chip is ready to talk to us. */
	switch (chip->state) {
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
	case FL_READY:
		map_write(map, CMD(0x70), adr);	/* Read Status */
		chip->state = FL_STATUS;
		/* Fall through */
	case FL_STATUS:
		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in lock\n");
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry. */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Another operation is in progress: sleep until woken. */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	ENABLE_VPP(map);
	map_write(map, CMD(0x60), adr);	/* Lock/unlock setup */
	map_write(map, CMD(0x01), adr);	/* Set lock bit */
	chip->state = FL_LOCKING;

	/* Locking is slow: sleep a second before polling. */
	mutex_unlock(&chip->mutex);
	msleep(1000);
	mutex_lock(&chip->mutex);

	/* Poll for completion, 2 second budget. */
	timeo = jiffies + (HZ*2);
	for (;;) {

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* Still waiting. */
		if (time_after(jiffies, timeo)) {
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			printk(KERN_ERR "waiting for lock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
			DISABLE_VPP(map);
			mutex_unlock(&chip->mutex);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry. */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		mutex_lock(&chip->mutex);
	}

	/* Done and happy. */
	chip->state = FL_STATUS;
	DISABLE_VPP(map);
	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);
	return 0;
}
/*
 * MTD lock entry point: lock 'len' bytes starting at 'ofs'.  Both must
 * be erase-block aligned; each covered block is locked in turn with
 * do_lock_oneblock().
 */
static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr;
	int chipnum, ret = 0;
#ifdef DEBUG_LOCK_BITS
	int ofs_factor = cfi->interleave * cfi->device_type;
#endif

	/* Start, length and end must all be erase-block aligned and
	 * inside the device. */
	if (ofs & (mtd->erasesize - 1))
		return -EINVAL;

	if (len & (mtd->erasesize -1))
		return -EINVAL;

	if ((len + ofs) > mtd->size)
		return -EINVAL;

	/* Convert to (chip, in-chip offset) coordinates. */
	chipnum = ofs >> cfi->chipshift;
	adr = ofs - (chipnum << cfi->chipshift);

	while(len) {

#ifdef DEBUG_LOCK_BITS
		cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
		printk("before lock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
		cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
#endif

		ret = do_lock_oneblock(map, &cfi->chips[chipnum], adr);

#ifdef DEBUG_LOCK_BITS
		cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
		printk("after lock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
		cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
#endif

		if (ret)
			return ret;

		adr += mtd->erasesize;
		len -= mtd->erasesize;

		/* Crossed into the next chip: restart at its base. */
		if (adr >> cfi->chipshift) {
			adr = 0;
			chipnum++;

			if (chipnum >= cfi->numchips)
				break;
		}
	}
	return 0;
}
/*
 * Clear the lock bit for the block at 'adr' on one chip (0x60 lock
 * setup + 0xD0 clear-lock confirm) and wait for completion.
 * Returns 0 on success, -EIO on timeout.
 */
static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);

	adr += chip->start;

	/* Status "WSM ready" bit across the interleave. */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
retry:
	mutex_lock(&chip->mutex);

	/* Check that the chip is ready to talk to us. */
	switch (chip->state) {
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
	case FL_READY:
		map_write(map, CMD(0x70), adr);	/* Read Status */
		chip->state = FL_STATUS;
		/* Fall through */
	case FL_STATUS:
		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in unlock\n");
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry. */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Another operation is in progress: sleep until woken. */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	ENABLE_VPP(map);
	map_write(map, CMD(0x60), adr);	/* Lock/unlock setup */
	map_write(map, CMD(0xD0), adr);	/* Clear lock bit */
	chip->state = FL_UNLOCKING;

	/* Unlocking is slow: sleep a second before polling. */
	mutex_unlock(&chip->mutex);
	msleep(1000);
	mutex_lock(&chip->mutex);

	/* Poll for completion, 2 second budget. */
	timeo = jiffies + (HZ*2);
	for (;;) {

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* Still waiting. */
		if (time_after(jiffies, timeo)) {
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			printk(KERN_ERR "waiting for unlock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
			DISABLE_VPP(map);
			mutex_unlock(&chip->mutex);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry. */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		mutex_lock(&chip->mutex);
	}

	/* Done and happy. */
	chip->state = FL_STATUS;
	DISABLE_VPP(map);
	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);
	return 0;
}
/*
 * MTD unlock entry point.  Unlike cfi_staa_lock(), only a single
 * do_unlock_oneblock() is issued for the first block even when 'len'
 * spans several blocks — presumably clearing one lock bit unlocks more
 * than just that block on these parts.  NOTE(review): confirm against
 * the chip datasheet; also note there is no alignment/range validation
 * here, unlike the lock path.
 */
static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr;
	int chipnum, ret = 0;
#ifdef DEBUG_LOCK_BITS
	int ofs_factor = cfi->interleave * cfi->device_type;
#endif

	/* Convert to (chip, in-chip offset) coordinates. */
	chipnum = ofs >> cfi->chipshift;
	adr = ofs - (chipnum << cfi->chipshift);

#ifdef DEBUG_LOCK_BITS
	{
		/* Dump the lock status of every block in the range. */
		unsigned long temp_adr = adr;
		unsigned long temp_len = len;

		cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
                while (temp_len) {
			printk("before unlock %x: block status register is %x\n",temp_adr,cfi_read_query(map, temp_adr+(2*ofs_factor)));
			temp_adr += mtd->erasesize;
			temp_len -= mtd->erasesize;
		}
		cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
	}
#endif

	ret = do_unlock_oneblock(map, &cfi->chips[chipnum], adr);

#ifdef DEBUG_LOCK_BITS
	cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
	printk("after unlock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
	cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
#endif

	return ret;
}
1325
/*
 * MTD power-management suspend handler: park every idle chip in
 * FL_PM_SUSPENDED.  If any chip is busy, return -EAGAIN and roll back
 * every chip already suspended to its previous state.
 */
static int cfi_staa_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			/* Idle: remember the state and suspend. */
			chip->oldstate = chip->state;
			chip->state = FL_PM_SUSPENDED;
			/* No wake_up() needed — nobody can make use of the
			 * chip while it is suspended anyway. */
			/* Fall through */
		case FL_PM_SUSPENDED:
			break;

		default:
			/* Busy: refuse to suspend. */
			ret = -EAGAIN;
			break;
		}
		mutex_unlock(&chip->mutex);
	}

	/* Unlock the chips again on failure. */
	if (ret) {
		for (i--; i >=0; i--) {
			chip = &cfi->chips[i];

			mutex_lock(&chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				/* Restore the pre-suspend state and wake
				 * anyone queued on the chip. */
				chip->state = chip->oldstate;
				wake_up(&chip->wq);
			}
			mutex_unlock(&chip->mutex);
		}
	}

	return ret;
}
1381
/*
 * MTD power-management resume handler: put every suspended chip back
 * into read-array mode (0xFF — the chip may have been power cycled),
 * mark it FL_READY and wake any waiters.
 */
static void cfi_staa_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		/* Go to a known state: Read Array at the chip base. */
		if (chip->state == FL_PM_SUSPENDED) {
			map_write(map, CMD(0xFF), 0);
			chip->state = FL_READY;
			wake_up(&chip->wq);
		}

		mutex_unlock(&chip->mutex);
	}
}
1405
/*
 * Chip-driver destroy hook: release the extended query copy installed
 * by cfi_cmdset_0020() and the cfi_private structure itself.
 * (kfree(NULL) is safe if no extended query was read.)
 */
static void cfi_staa_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	kfree(cfi->cmdset_priv);
	kfree(cfi);
}
1413
1414MODULE_LICENSE("GPL");
1415