/*
 * Common Flash Interface support:
 *   ST Advanced Architecture Command Set (ID 0x0020)
 *
 * Driver for NOR flash chips that implement the ST Microelectronics
 * extended CFI command set (cfi_cmdset_0020).
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mtd/map.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/mtd.h>

static int cfi_staa_read(struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_staa_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
		unsigned long count, loff_t to, size_t *retlen);
static int cfi_staa_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_staa_sync (struct mtd_info *);
static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_staa_suspend (struct mtd_info *);
static void cfi_staa_resume (struct mtd_info *);

static void cfi_staa_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0020(struct map_info *, int);

static struct mtd_info *cfi_staa_setup (struct map_info *);

static struct mtd_chip_driver cfi_staa_chipdrv = {
	.probe		= NULL,
	.destroy	= cfi_staa_destroy,
	.name		= "cfi_cmdset_0020",
	.module		= THIS_MODULE
};

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
	int i;
	printk(" Feature/Command Support: %4.4X\n", extp->FeatureSupport);
	printk(" - Chip Erase: %s\n", extp->FeatureSupport&1?"supported":"unsupported");
	printk(" - Suspend Erase: %s\n", extp->FeatureSupport&2?"supported":"unsupported");
	printk(" - Suspend Program: %s\n", extp->FeatureSupport&4?"supported":"unsupported");
	printk(" - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
	printk(" - Queued Erase: %s\n", extp->FeatureSupport&16?"supported":"unsupported");
	printk(" - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
	printk(" - Protection Bits: %s\n", extp->FeatureSupport&64?"supported":"unsupported");
	printk(" - Page-mode read: %s\n", extp->FeatureSupport&128?"supported":"unsupported");
	printk(" - Synchronous read: %s\n", extp->FeatureSupport&256?"supported":"unsupported");
	for (i=9; i<32; i++) {
		if (extp->FeatureSupport & (1<<i))
			printk(" - Unknown Bit %X: supported\n", i);
	}

	printk(" Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
	printk(" - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
	for (i=1; i<8; i++) {
		if (extp->SuspendCmdSupport & (1<<i))
			printk(" - Unknown Bit %X: supported\n", i);
	}

	printk(" Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
	printk(" - Lock Bit Active: %s\n", extp->BlkStatusRegMask&1?"yes":"no");
	printk(" - Valid Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
	for (i=2; i<16; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk(" - Unknown Bit %X Active: yes\n",i);
	}

	/* VccOptimal/VppOptimal are BCD: high nibble = volts, low nibble = tenths */
	printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
	       extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
	if (extp->VppOptimal)
		printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
		       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

struct mtd_info *cfi_cmdset_0020(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int i;

	if (cfi->cfi_mode) {
		/*
		 * A real CFI chip: read the extended query table (primary
		 * or alternate) and check that we understand its version.
		 */
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_intelext *extp;

		extp = (struct cfi_pri_intelext*)cfi_read_pri(map, adr, sizeof(*extp), "ST Microelectronics");
		if (!extp)
			return NULL;

		if (extp->MajorVersion != '1' ||
		    (extp->MinorVersion < '0' || extp->MinorVersion > '3')) {
			printk(KERN_ERR " Unknown ST Microelectronics"
			       " Extended Query version %c.%c.\n",
			       extp->MajorVersion, extp->MinorVersion);
			kfree(extp);
			return NULL;
		}

		/* Do some byteswapping if necessary */
		extp->FeatureSupport = cfi32_to_cpu(map, extp->FeatureSupport);
		extp->BlkStatusRegMask = cfi32_to_cpu(map,
						extp->BlkStatusRegMask);

#ifdef DEBUG_CFI_FEATURES
		/* Dump the feature table for debugging */
		cfi_tell_features(extp);
#endif

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;
	}

	for (i=0; i< cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 128;
		cfi->chips[i].buffer_write_time = 128;
		cfi->chips[i].erase_time = 1024;
		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	return cfi_staa_setup(map);
}
EXPORT_SYMBOL_GPL(cfi_cmdset_0020);

static struct mtd_info *cfi_staa_setup(struct map_info *map)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	unsigned long offset = 0;
	int i,j;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);

	if (!mtd) {
		kfree(cfi->cmdset_priv);
		return NULL;
	}

	mtd->priv = map;
	mtd->type = MTD_NORFLASH;
	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
			* mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		kfree(cfi->cmdset_priv);
		kfree(mtd);
		return NULL;
	}

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}

	if (offset != devsize) {
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		kfree(mtd->eraseregions);
		kfree(cfi->cmdset_priv);
		kfree(mtd);
		return NULL;
	}

	for (i=0; i<mtd->numeraseregions;i++){
		printk(KERN_DEBUG "%d: offset=0x%llx,size=0x%x,blocks=%d\n",
		       i, (unsigned long long)mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}

	mtd->_erase = cfi_staa_erase_varsize;
	mtd->_read = cfi_staa_read;
	mtd->_write = cfi_staa_write_buffers;
	mtd->_writev = cfi_staa_writev;
	mtd->_sync = cfi_staa_sync;
	mtd->_lock = cfi_staa_lock;
	mtd->_unlock = cfi_staa_unlock;
	mtd->_suspend = cfi_staa_suspend;
	mtd->_resume = cfi_staa_resume;
	mtd->flags = MTD_CAP_NORFLASH & ~MTD_BIT_WRITEABLE;
	mtd->writesize = 8;
	mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	map->fldrv = &cfi_staa_chipdrv;
	__module_get(THIS_MODULE);
	mtd->name = map->name;
	return mtd;
}

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	map_word status, status_OK;
	unsigned long timeo;
	DECLARE_WAITQUEUE(wait, current);
	int suspended = 0;
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;

	adr += chip->start;

	/* Ensure command reads/writes are bus-width aligned */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	/* Status register "ready" bit (SR.7), replicated for each interleaved chip */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
 retry:
	mutex_lock(&chip->mutex);

	/* Check that the chip is ready to talk to us.
	 * If it is erasing, suspend the erase so we can read.
	 */
	switch (chip->state) {
	case FL_ERASING:
		if (!(((struct cfi_pri_intelext *)cfi->cmdset_priv)->FeatureSupport & 2))
			goto sleep; /* erase suspend not supported */

		/* Erase suspend (0xB0), then poll status until the chip is ready */
		map_write (map, CMD(0xb0), cmd_addr);

		map_write(map, CMD(0x70), cmd_addr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;

		for (;;) {
			status = map_read(map, cmd_addr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			if (time_after(jiffies, timeo)) {
				/* Give up: resume the erase and go back to status mode */
				map_write(map, CMD(0xd0), cmd_addr);

				map_write(map, CMD(0x70), cmd_addr);
				chip->state = FL_ERASING;
				wake_up(&chip->wq);
				mutex_unlock(&chip->mutex);
				printk(KERN_ERR "Chip not ready after erase "
				       "suspended: status = 0x%lx\n", status.x[0]);
				return -EIO;
			}

			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
		}

		suspended = 1;
		/* Switch to read array mode while we copy the data */
		map_write(map, CMD(0xff), cmd_addr);
		chip->state = FL_READY;
		break;

#if 0
	case FL_WRITING:
		/* Not quite yet */
#endif

	case FL_READY:
		break;

	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		map_write(map, CMD(0x70), cmd_addr);
		chip->state = FL_STATUS;
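		/* fall through: now read the status register */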

	case FL_STATUS:
		status = map_read(map, cmd_addr);
		if (map_word_andequal(map, status, status_OK, status_OK)) {
			map_write(map, CMD(0xff), cmd_addr);
			chip->state = FL_READY;
			break;
		}

		/* Chip not yet ready to talk to us; check for timeout */
		if (time_after(jiffies, timeo)) {
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in read. WSM status = %lx\n", status.x[0]);
			return -EIO;
		}

		/* Latency issues: drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
	sleep:
		/* Sleep on the chip's wait queue until someone changes its state */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	map_copy_from(map, buf, adr, len);

	if (suspended) {
		chip->state = chip->oldstate;
		/* Resume the suspended erase (0xD0) and return to status mode */
		map_write(map, CMD(0xd0), cmd_addr);
		map_write(map, CMD(0x70), cmd_addr);
	}

	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);
	return 0;
}

static int cfi_staa_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the read starts at */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

static int do_write_buffer(struct map_info *map, struct flchip *chip,
			   unsigned long adr, const u_char *buf, int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long cmd_adr, timeo;
	DECLARE_WAITQUEUE(wait, current);
	int wbufsize, z;

	/* Buffer writes must be aligned to the bus width */
	if (adr & (map_bankwidth(map)-1))
		return -EINVAL;

	wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	adr += chip->start;
	cmd_adr = adr & ~(wbufsize-1);

	/* Status register "ready" bit (SR.7), replicated for each interleaved chip */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
 retry:

#ifdef DEBUG_CFI_FEATURES
	printk("%s: chip->state[%d]\n", __func__, chip->state);
#endif
	mutex_lock(&chip->mutex);

	/* Check that the chip is ready to accept commands */
	switch (chip->state) {
	case FL_READY:
		break;

	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
#ifdef DEBUG_CFI_FEATURES
	printk("%s: 1 status[%x]\n", __func__, map_read(map, cmd_adr));
#endif
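		/* fall through */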

	case FL_STATUS:
		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		if (time_after(jiffies, timeo)) {
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in buffer write Xstatus = %lx, status = %lx\n",
			       status.x[0], map_read(map, cmd_adr).x[0]);
			return -EIO;
		}

		/* Latency issues: drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Sleep until someone changes the chip state */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	ENABLE_VPP(map);
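	/*
	 * Buffer program sequence: 0xE8 (write to buffer), wait for the
	 * buffer to become available, write the word count and the data,
	 * then 0xD0 to confirm.
	 */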
	map_write(map, CMD(0xe8), cmd_adr);
	chip->state = FL_WRITING_TO_BUFFER;

	z = 0;
	for (;;) {
		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		mutex_lock(&chip->mutex);

		if (++z > 100) {
			/* Chip never offered us the write buffer */
			DISABLE_VPP(map);
			map_write(map, CMD(0x70), cmd_adr);
			chip->state = FL_STATUS;
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "Chip not ready for buffer write. Xstatus = %lx\n", status.x[0]);
			return -EIO;
		}
	}

	/* Write the word count (encoded as count - 1) */
	map_write(map, CMD(len/map_bankwidth(map)-1), cmd_adr);

	/* Write the data */
	for (z = 0; z < len;
	     z += map_bankwidth(map), buf += map_bankwidth(map)) {
		map_word d;
		d = map_word_load(map, buf);
		map_write(map, d, adr+z);
	}
	/* Confirm the buffer program */
	map_write(map, CMD(0xd0), cmd_adr);
	chip->state = FL_WRITING;

	mutex_unlock(&chip->mutex);
	cfi_udelay(chip->buffer_write_time);
	mutex_lock(&chip->mutex);

	timeo = jiffies + (HZ/2);
	z = 0;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone has suspended the write; sleep until resumed */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2);
			mutex_lock(&chip->mutex);
			continue;
		}

		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* Still waiting; check for timeout */
		if (time_after(jiffies, timeo)) {
			/* Clear the status register and return to status mode */
			map_write(map, CMD(0x50), cmd_adr);

			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			DISABLE_VPP(map);
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
			return -EIO;
		}

		/* Latency issues: drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		z++;
		mutex_lock(&chip->mutex);
	}
	if (!z) {
		chip->buffer_write_time--;
		if (!chip->buffer_write_time)
			chip->buffer_write_time++;
	}
	if (z > 1)
		chip->buffer_write_time++;

	/* Done and happy */
	DISABLE_VPP(map);
	chip->state = FL_STATUS;

	/* Check error bits: lock (SR.1), VPP low (SR.3), program/erase failure (SR.4/SR.5) */
	if (map_word_bitsset(map, status, CMD(0x3a))) {
#ifdef DEBUG_CFI_FEATURES
		printk("%s: 2 status[%lx]\n", __func__, status.x[0]);
#endif
		/* Clear the status register and return to status mode */
		map_write(map, CMD(0x50), cmd_adr);

		map_write(map, CMD(0x70), adr);
		wake_up(&chip->wq);
		mutex_unlock(&chip->mutex);
		return map_word_bitsset(map, status, CMD(0x02)) ? -EROFS : -EIO;
	}
	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);

	return 0;
}

static int cfi_staa_write_buffers (struct mtd_info *mtd, loff_t to,
				   size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

#ifdef DEBUG_CFI_FEATURES
	printk("%s: map_bankwidth(map)[%x]\n", __func__, map_bankwidth(map));
	printk("%s: chipnum[%x] wbufsize[%x]\n", __func__, chipnum, wbufsize);
	printk("%s: ofs[%x] len[%x]\n", __func__, ofs, len);
#endif

	while (len > 0) {
		/* We must not cross write buffer boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;

		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, buf, size);
		if (ret)
			return ret;

		ofs += size;
		buf += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	return 0;
}
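
/*
 * Writev path: writes go out in whole ECCBUF_SIZE (mtd->writesize)
 * chunks; any tail shorter than a chunk is buffered and padded with
 * 0xff before being flushed.
 */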
#define ECCBUF_SIZE (mtd->writesize)
#define ECCBUF_DIV(x) ((x) & ~(ECCBUF_SIZE - 1))
#define ECCBUF_MOD(x) ((x) & (ECCBUF_SIZE - 1))
static int
cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
		unsigned long count, loff_t to, size_t *retlen)
{
	unsigned long i;
	size_t totlen = 0, thislen;
	int ret = 0;
	size_t buflen = 0;
	static char *buffer;

	if (!ECCBUF_SIZE) {
		/* We should fall back to a general writev implementation.
		 * Until that is written, just break.
		 */
		return -EIO;
	}
	buffer = kmalloc(ECCBUF_SIZE, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	for (i=0; i<count; i++) {
		size_t elem_len = vecs[i].iov_len;
		void *elem_base = vecs[i].iov_base;
		if (!elem_len) /* null entries are ok */
			continue;
		if (buflen) { /* flush the partial chunk carried over from the previous segment */
			if (buflen + elem_len < ECCBUF_SIZE) { /* just accumulate it */
				memcpy(buffer+buflen, elem_base, elem_len);
				buflen += elem_len;
				continue;
			}
			memcpy(buffer+buflen, elem_base, ECCBUF_SIZE-buflen);
			ret = mtd_write(mtd, to, ECCBUF_SIZE, &thislen,
					buffer);
			totlen += thislen;
			if (ret || thislen != ECCBUF_SIZE)
				goto write_error;
			elem_len -= thislen-buflen;
			elem_base += thislen-buflen;
			to += ECCBUF_SIZE;
		}
		if (ECCBUF_DIV(elem_len)) { /* write the aligned, chunk-sized part directly */
			ret = mtd_write(mtd, to, ECCBUF_DIV(elem_len),
					&thislen, elem_base);
			totlen += thislen;
			if (ret || thislen != ECCBUF_DIV(elem_len))
				goto write_error;
			to += thislen;
		}
		buflen = ECCBUF_MOD(elem_len); /* keep the tail for the next round */
		if (buflen) {
			memset(buffer, 0xff, ECCBUF_SIZE);
			memcpy(buffer, elem_base + thislen, buflen);
		}
	}
	if (buflen) { /* flush the last chunk, even if not full */
		ret = mtd_write(mtd, to, buflen, &thislen, buffer);
		totlen += thislen;
		if (ret || thislen != ECCBUF_SIZE)
			goto write_error;
	}
write_error:
	if (retlen)
		*retlen = totlen;
	kfree(buffer);
	return ret;
}

static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo;
	int retries = 3;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr += chip->start;

	/* Status register "ready" bit (SR.7), replicated for each interleaved chip */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
retry:
	mutex_lock(&chip->mutex);

	/* Check that the chip is ready to talk to us */
	switch (chip->state) {
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
	case FL_READY:
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
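		/* fall through */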

	case FL_STATUS:
		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* Chip not yet ready to talk to us; check for timeout */
		if (time_after(jiffies, timeo)) {
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in erase\n");
			return -EIO;
		}

		/* Latency issues: drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Sleep until someone changes the chip state */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	ENABLE_VPP(map);
	/* Clear the status register first */
	map_write(map, CMD(0x50), adr);

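	/* Block Erase sequence: 0x20 setup at the block address, then 0xD0 confirm */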
	map_write(map, CMD(0x20), adr);
	map_write(map, CMD(0xD0), adr);
	chip->state = FL_ERASING;

	mutex_unlock(&chip->mutex);
	msleep(1000);
	mutex_lock(&chip->mutex);

	/* Poll for completion; a block erase can take many seconds */
	timeo = jiffies + (HZ*20);
	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone has suspended the erase; sleep until resumed */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ*20);
			mutex_lock(&chip->mutex);
			continue;
		}

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* Still waiting; check for timeout */
		if (time_after(jiffies, timeo)) {
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			printk(KERN_ERR "waiting for erase to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
			DISABLE_VPP(map);
			mutex_unlock(&chip->mutex);
			return -EIO;
		}

		/* Latency issues: drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		mutex_lock(&chip->mutex);
	}

	DISABLE_VPP(map);
	ret = 0;

	/* Re-read the status register to check for errors */
	map_write(map, CMD(0x70), adr);
	chip->state = FL_STATUS;
	status = map_read(map, adr);

	/* Error bits: lock (SR.1), VPP low (SR.3), program/erase failure (SR.4/SR.5) */
	if (map_word_bitsset(map, status, CMD(0x3a))) {
		unsigned char chipstatus = status.x[0];
		if (!map_word_equal(map, status, CMD(chipstatus))) {
			int i, w;
			for (w=0; w<map_words(map); w++) {
				for (i = 0; i<cfi_interleave(cfi); i++) {
					chipstatus |= status.x[w] >> (cfi->device_type * 8);
				}
			}
			printk(KERN_WARNING "Status is not identical for all chips: 0x%lx. Merging to give 0x%02x\n",
			       status.x[0], chipstatus);
		}
		/* Clear the status register and return to status mode */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);

		if ((chipstatus & 0x30) == 0x30) {
			printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%x\n", chipstatus);
			ret = -EIO;
		} else if (chipstatus & 0x02) {
			/* Protection (lock) bit set */
			ret = -EROFS;
		} else if (chipstatus & 0x8) {
			/* VPP low */
			printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%x\n", chipstatus);
			ret = -EIO;
		} else if (chipstatus & 0x20) {
			if (retries--) {
				printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr, chipstatus);
				timeo = jiffies + HZ;
				chip->state = FL_STATUS;
				mutex_unlock(&chip->mutex);
				goto retry;
			}
			printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x\n", adr, chipstatus);
			ret = -EIO;
		}
	}

	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);
	return ret;
}

static int cfi_staa_erase_varsize(struct mtd_info *mtd,
				  struct erase_info *instr)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr, len;
	int chipnum, ret = 0;
	int i, first;
	struct mtd_erase_region_info *regions = mtd->eraseregions;

	/* Check that both start and end of the requested erase are
	 * aligned with the erase size of the regions they fall in.
	 */
	i = 0;

	/* Skip to the first erase region that starts after the start of
	 * the requested erase, then step back one so i points at the
	 * region containing the start address.
	 */
	while (i < mtd->numeraseregions && instr->addr >= regions[i].offset)
		i++;
	i--;

	/* Check the start of the requested erase range is aligned with
	 * the erase size of that region.
	 */
	if (instr->addr & (regions[i].erasesize-1))
		return -EINVAL;

	/* Remember the erase region we start on */
	first = i;

	/* Do the same for the end of the requested erase */
	while (i<mtd->numeraseregions && (instr->addr + instr->len) >= regions[i].offset)
		i++;

	i--;

	if ((instr->addr + instr->len) & (regions[i].erasesize-1))
		return -EINVAL;

	chipnum = instr->addr >> cfi->chipshift;
	adr = instr->addr - (chipnum << cfi->chipshift);
	len = instr->len;

	i=first;

	while(len) {
		ret = do_erase_oneblock(map, &cfi->chips[chipnum], adr);

		if (ret)
			return ret;

		adr += regions[i].erasesize;
		len -= regions[i].erasesize;

		if (adr % (1<< cfi->chipshift) == (((unsigned long)regions[i].offset + (regions[i].erasesize * regions[i].numblocks)) %( 1<< cfi->chipshift)))
			i++;

		if (adr >> cfi->chipshift) {
			adr = 0;
			chipnum++;

			if (chipnum >= cfi->numchips)
				break;
		}
	}

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}

static void cfi_staa_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

	retry:
		mutex_lock(&chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
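			/*
			 * Fall through: no wake_up() needed here, since nobody
			 * may touch the chip while it is FL_SYNCING anyway.
			 */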

		case FL_SYNCING:
			mutex_unlock(&chip->mutex);
			break;

		default:
			/* Not an idle state: wait for the operation to finish */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);

			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);

			goto retry;
		}
	}

	/* Unlock the chips again */
	for (i--; i >=0; i--) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}
		mutex_unlock(&chip->mutex);
	}
}

static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);

	adr += chip->start;

	/* Status register "ready" bit (SR.7), replicated for each interleaved chip */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
retry:
	mutex_lock(&chip->mutex);

	/* Check that the chip is ready to talk to us */
	switch (chip->state) {
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
	case FL_READY:
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
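		/* fall through */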

	case FL_STATUS:
		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* Chip not yet ready to talk to us; check for timeout */
		if (time_after(jiffies, timeo)) {
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in lock\n");
			return -EIO;
		}

		/* Latency issues: drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Sleep until someone changes the chip state */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	ENABLE_VPP(map);
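	/* Set Block Lock Bit sequence: 0x60 setup followed by 0x01 confirm */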
	map_write(map, CMD(0x60), adr);
	map_write(map, CMD(0x01), adr);
	chip->state = FL_LOCKING;

	mutex_unlock(&chip->mutex);
	msleep(1000);
	mutex_lock(&chip->mutex);

	/* Poll the status register until the lock operation finishes */
	timeo = jiffies + (HZ*2);
	for (;;) {

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* Still waiting; check for timeout */
		if (time_after(jiffies, timeo)) {
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			printk(KERN_ERR "waiting for lock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
			DISABLE_VPP(map);
			mutex_unlock(&chip->mutex);
			return -EIO;
		}

		/* Latency issues: drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		mutex_lock(&chip->mutex);
	}

	/* Done and happy */
	chip->state = FL_STATUS;
	DISABLE_VPP(map);
	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);
	return 0;
}
static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr;
	int chipnum, ret = 0;
#ifdef DEBUG_LOCK_BITS
	int ofs_factor = cfi->interleave * cfi->device_type;
#endif

	if (ofs & (mtd->erasesize - 1))
		return -EINVAL;

	if (len & (mtd->erasesize -1))
		return -EINVAL;

	chipnum = ofs >> cfi->chipshift;
	adr = ofs - (chipnum << cfi->chipshift);

	while(len) {

#ifdef DEBUG_LOCK_BITS
		cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
		printk("before lock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
		cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
#endif

		ret = do_lock_oneblock(map, &cfi->chips[chipnum], adr);

#ifdef DEBUG_LOCK_BITS
		cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
		printk("after lock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
		cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
#endif

		if (ret)
			return ret;

		adr += mtd->erasesize;
		len -= mtd->erasesize;

		if (adr >> cfi->chipshift) {
			adr = 0;
			chipnum++;

			if (chipnum >= cfi->numchips)
				break;
		}
	}
	return 0;
}
static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);

	adr += chip->start;

	/* Status register "ready" bit (SR.7), replicated for each interleaved chip */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
retry:
	mutex_lock(&chip->mutex);

	/* Check that the chip is ready to talk to us */
	switch (chip->state) {
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
	case FL_READY:
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
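		/* fall through */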

	case FL_STATUS:
		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* Chip not yet ready to talk to us; check for timeout */
		if (time_after(jiffies, timeo)) {
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in unlock\n");
			return -EIO;
		}

		/* Latency issues: drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Sleep until someone changes the chip state */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	ENABLE_VPP(map);
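	/* Clear Block Lock Bits sequence: 0x60 setup followed by 0xD0 confirm */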
	map_write(map, CMD(0x60), adr);
	map_write(map, CMD(0xD0), adr);
	chip->state = FL_UNLOCKING;

	mutex_unlock(&chip->mutex);
	msleep(1000);
	mutex_lock(&chip->mutex);

	/* Poll the status register until the unlock operation finishes */
	timeo = jiffies + (HZ*2);
	for (;;) {

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* Still waiting; check for timeout */
		if (time_after(jiffies, timeo)) {
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			printk(KERN_ERR "waiting for unlock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
			DISABLE_VPP(map);
			mutex_unlock(&chip->mutex);
			return -EIO;
		}

		/* Latency issues: drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		mutex_lock(&chip->mutex);
	}

	/* Done and happy */
	chip->state = FL_STATUS;
	DISABLE_VPP(map);
	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);
	return 0;
}
static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr;
	int chipnum, ret = 0;
#ifdef DEBUG_LOCK_BITS
	int ofs_factor = cfi->interleave * cfi->device_type;
#endif

	chipnum = ofs >> cfi->chipshift;
	adr = ofs - (chipnum << cfi->chipshift);

#ifdef DEBUG_LOCK_BITS
	{
		unsigned long temp_adr = adr;
		unsigned long temp_len = len;

		cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
		while (temp_len) {
			printk("before unlock %x: block status register is %x\n",temp_adr,cfi_read_query(map, temp_adr+(2*ofs_factor)));
			temp_adr += mtd->erasesize;
			temp_len -= mtd->erasesize;
		}
		cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
	}
#endif

	ret = do_unlock_oneblock(map, &cfi->chips[chipnum], adr);

#ifdef DEBUG_LOCK_BITS
	cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
	printk("after unlock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
	cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
#endif

	return ret;
}

static int cfi_staa_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_PM_SUSPENDED;
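			/*
			 * Fall through: no wake_up() needed here, since nobody
			 * may touch the chip while it is FL_PM_SUSPENDED.
			 */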

		case FL_PM_SUSPENDED:
			break;

		default:
			ret = -EAGAIN;
			break;
		}
		mutex_unlock(&chip->mutex);
	}

	/* On failure, back out: wake any chips we already suspended */
	if (ret) {
		for (i--; i >=0; i--) {
			chip = &cfi->chips[i];

			mutex_lock(&chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				/* We are returning failure, so restore the previous state */
				chip->state = chip->oldstate;
				wake_up(&chip->wq);
			}
			mutex_unlock(&chip->mutex);
		}
	}

	return ret;
}

static void cfi_staa_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		/* Go to a known state: 0xFF puts the chip back into read array mode */
		if (chip->state == FL_PM_SUSPENDED) {
			map_write(map, CMD(0xFF), 0);
			chip->state = FL_READY;
			wake_up(&chip->wq);
		}

		mutex_unlock(&chip->mutex);
	}
}

static void cfi_staa_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	kfree(cfi->cmdset_priv);
	kfree(cfi);
}

MODULE_LICENSE("GPL");