1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21#include <linux/module.h>
22#include <linux/types.h>
23#include <linux/kernel.h>
24#include <linux/sched.h>
25#include <linux/init.h>
26#include <asm/io.h>
27#include <asm/byteorder.h>
28
29#include <linux/errno.h>
30#include <linux/slab.h>
31#include <linux/delay.h>
32#include <linux/interrupt.h>
33#include <linux/mtd/map.h>
34#include <linux/mtd/cfi.h>
35#include <linux/mtd/mtd.h>
36
37
/* mtd_info operations implemented below. */
static int cfi_staa_read(struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_staa_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
		unsigned long count, loff_t to, size_t *retlen);
static int cfi_staa_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_staa_sync (struct mtd_info *);
static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_staa_suspend (struct mtd_info *);
static void cfi_staa_resume (struct mtd_info *);

/* Teardown hook, reached through cfi_staa_chipdrv.destroy. */
static void cfi_staa_destroy(struct mtd_info *);

/* Entry point for CFI command set 0x0020 (ST Advanced Architecture). */
struct mtd_info *cfi_cmdset_0020(struct map_info *, int);

static struct mtd_info *cfi_staa_setup (struct map_info *);
54
/*
 * Chip driver registered with the map layer.  Only teardown is
 * provided; .probe is NULL (this driver is not probed directly).
 */
static struct mtd_chip_driver cfi_staa_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_staa_destroy,
	.name		= "cfi_cmdset_0020",
	.module		= THIS_MODULE
};
61
62
63
64
65#ifdef DEBUG_CFI_FEATURES
66static void cfi_tell_features(struct cfi_pri_intelext *extp)
67{
68 int i;
69 printk(" Feature/Command Support: %4.4X\n", extp->FeatureSupport);
70 printk(" - Chip Erase: %s\n", extp->FeatureSupport&1?"supported":"unsupported");
71 printk(" - Suspend Erase: %s\n", extp->FeatureSupport&2?"supported":"unsupported");
72 printk(" - Suspend Program: %s\n", extp->FeatureSupport&4?"supported":"unsupported");
73 printk(" - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
74 printk(" - Queued Erase: %s\n", extp->FeatureSupport&16?"supported":"unsupported");
75 printk(" - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
76 printk(" - Protection Bits: %s\n", extp->FeatureSupport&64?"supported":"unsupported");
77 printk(" - Page-mode read: %s\n", extp->FeatureSupport&128?"supported":"unsupported");
78 printk(" - Synchronous read: %s\n", extp->FeatureSupport&256?"supported":"unsupported");
79 for (i=9; i<32; i++) {
80 if (extp->FeatureSupport & (1<<i))
81 printk(" - Unknown Bit %X: supported\n", i);
82 }
83
84 printk(" Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
85 printk(" - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
86 for (i=1; i<8; i++) {
87 if (extp->SuspendCmdSupport & (1<<i))
88 printk(" - Unknown Bit %X: supported\n", i);
89 }
90
91 printk(" Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
92 printk(" - Lock Bit Active: %s\n", extp->BlkStatusRegMask&1?"yes":"no");
93 printk(" - Valid Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
94 for (i=2; i<16; i++) {
95 if (extp->BlkStatusRegMask & (1<<i))
96 printk(" - Unknown Bit %X Active: yes\n",i);
97 }
98
99 printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
100 extp->VccOptimal >> 8, extp->VccOptimal & 0xf);
101 if (extp->VppOptimal)
102 printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
103 extp->VppOptimal >> 8, extp->VppOptimal & 0xf);
104}
105#endif
106
107
108
109
110
111
112
113
/*
 * Probe entry point for the ST Microelectronics "Advanced Architecture"
 * command set (0x0020).
 *
 * When the chip was detected via a real CFI query, read and validate
 * the vendor extended query table, fix its endianness and stash it in
 * cfi->cmdset_priv.  Then seed the per-chip timing defaults and hand
 * over to cfi_staa_setup() to build the mtd_info.  Returns NULL on
 * failure.
 */
struct mtd_info *cfi_cmdset_0020(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int i;

	if (cfi->cfi_mode) {
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure, so the extended query
		 * table can be read from the device.
		 */
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_intelext *extp;

		extp = (struct cfi_pri_intelext*)cfi_read_pri(map, adr, sizeof(*extp), "ST Microelectronics");
		if (!extp)
			return NULL;

		/* Only extended-query versions 1.0 .. 1.3 are understood. */
		if (extp->MajorVersion != '1' ||
		    (extp->MinorVersion < '0' || extp->MinorVersion > '3')) {
			printk(KERN_ERR " Unknown ST Microelectronics"
			       " Extended Query version %c.%c.\n",
			       extp->MajorVersion, extp->MinorVersion);
			kfree(extp);
			return NULL;
		}

		/* Do some byteswapping if necessary */
		extp->FeatureSupport = cfi32_to_cpu(extp->FeatureSupport);
		extp->BlkStatusRegMask = cfi32_to_cpu(extp->BlkStatusRegMask);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;
	}

	for (i=0; i< cfi->numchips; i++) {
		/* Initial poll-delay values; buffer_write_time is used
		 * as a cfi_udelay() count by the write path and adapted
		 * at runtime. */
		cfi->chips[i].word_write_time = 128;
		cfi->chips[i].buffer_write_time = 128;
		cfi->chips[i].erase_time = 1024;
		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	return cfi_staa_setup(map);
}
EXPORT_SYMBOL_GPL(cfi_cmdset_0020);
165
166static struct mtd_info *cfi_staa_setup(struct map_info *map)
167{
168 struct cfi_private *cfi = map->fldrv_priv;
169 struct mtd_info *mtd;
170 unsigned long offset = 0;
171 int i,j;
172 unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
173
174 mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
175
176
177 if (!mtd) {
178 printk(KERN_ERR "Failed to allocate memory for MTD device\n");
179 kfree(cfi->cmdset_priv);
180 return NULL;
181 }
182
183 mtd->priv = map;
184 mtd->type = MTD_NORFLASH;
185 mtd->size = devsize * cfi->numchips;
186
187 mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
188 mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
189 * mtd->numeraseregions, GFP_KERNEL);
190 if (!mtd->eraseregions) {
191 printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
192 kfree(cfi->cmdset_priv);
193 kfree(mtd);
194 return NULL;
195 }
196
197 for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
198 unsigned long ernum, ersize;
199 ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
200 ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
201
202 if (mtd->erasesize < ersize) {
203 mtd->erasesize = ersize;
204 }
205 for (j=0; j<cfi->numchips; j++) {
206 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
207 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
208 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
209 }
210 offset += (ersize * ernum);
211 }
212
213 if (offset != devsize) {
214
215 printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
216 kfree(mtd->eraseregions);
217 kfree(cfi->cmdset_priv);
218 kfree(mtd);
219 return NULL;
220 }
221
222 for (i=0; i<mtd->numeraseregions;i++){
223 printk(KERN_DEBUG "%d: offset=0x%llx,size=0x%x,blocks=%d\n",
224 i, (unsigned long long)mtd->eraseregions[i].offset,
225 mtd->eraseregions[i].erasesize,
226 mtd->eraseregions[i].numblocks);
227 }
228
229
230 mtd->erase = cfi_staa_erase_varsize;
231 mtd->read = cfi_staa_read;
232 mtd->write = cfi_staa_write_buffers;
233 mtd->writev = cfi_staa_writev;
234 mtd->sync = cfi_staa_sync;
235 mtd->lock = cfi_staa_lock;
236 mtd->unlock = cfi_staa_unlock;
237 mtd->suspend = cfi_staa_suspend;
238 mtd->resume = cfi_staa_resume;
239 mtd->flags = MTD_CAP_NORFLASH & ~MTD_BIT_WRITEABLE;
240 mtd->writesize = 8;
241 mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
242 map->fldrv = &cfi_staa_chipdrv;
243 __module_get(THIS_MODULE);
244 mtd->name = map->name;
245 return mtd;
246}
247
248
/*
 * Read 'len' bytes at device offset 'adr' from one chip into 'buf'.
 *
 * If the chip is busy erasing and advertises erase-suspend support
 * (FeatureSupport bit 1), the erase is suspended for the duration of
 * the read and resumed afterwards; in any other busy state we sleep on
 * the chip's wait queue until woken.  Returns 0 on success or -EIO on
 * timeout.  Called with the chip mutex NOT held.
 */
static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	map_word status, status_OK;
	unsigned long timeo;
	DECLARE_WAITQUEUE(wait, current);
	int suspended = 0;
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned to the bus width. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	/* Build the "WSM ready" status pattern (bit 7) once. */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
 retry:
	mutex_lock(&chip->mutex);

	/* Check that the chip's ready to talk to us.
	 * If it's in FL_ERASING state, suspend it and make it talk now.
	 */
	switch (chip->state) {
	case FL_ERASING:
		if (!(((struct cfi_pri_intelext *)cfi->cmdset_priv)->FeatureSupport & 2))
			goto sleep; /* We don't support erase suspend */

		/* 0xb0: Erase Suspend */
		map_write (map, CMD(0xb0), cmd_addr);
		/* Make sure we're in 'read status' mode before polling. */
		map_write(map, CMD(0x70), cmd_addr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;

		/* Poll until the WSM reports ready. */
		for (;;) {
			status = map_read(map, cmd_addr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			if (time_after(jiffies, timeo)) {
				/* Urgh: resume the erase and give up. */
				map_write(map, CMD(0xd0), cmd_addr);
				/* make sure we're in 'read status' mode */
				map_write(map, CMD(0x70), cmd_addr);
				chip->state = FL_ERASING;
				wake_up(&chip->wq);
				mutex_unlock(&chip->mutex);
				printk(KERN_ERR "Chip not ready after erase "
				       "suspended: status = 0x%lx\n", status.x[0]);
				return -EIO;
			}

			/* Drop the lock while we busy-wait. */
			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
		}

		suspended = 1;
		/* 0xff: back to read-array mode for the actual read. */
		map_write(map, CMD(0xff), cmd_addr);
		chip->state = FL_READY;
		break;

#if 0
	case FL_WRITING:
		/* Not quite yet */
#endif

	case FL_READY:
		break;

	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		map_write(map, CMD(0x70), cmd_addr);
		chip->state = FL_STATUS;
		/* fall through */

	case FL_STATUS:
		status = map_read(map, cmd_addr);
		if (map_word_andequal(map, status, status_OK, status_OK)) {
			map_write(map, CMD(0xff), cmd_addr);
			chip->state = FL_READY;
			break;
		}

		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in read. WSM status = %lx\n", status.x[0]);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
	sleep:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	map_copy_from(map, buf, adr, len);

	if (suspended) {
		chip->state = chip->oldstate;
		/* Resume the erase (0xd0) and return to read-status
		 * mode (0x70) so the erase path can continue polling. */
		map_write(map, CMD(0xd0), cmd_addr);
		map_write(map, CMD(0x70), cmd_addr);
	}

	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);
	return 0;
}
383
384static int cfi_staa_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
385{
386 struct map_info *map = mtd->priv;
387 struct cfi_private *cfi = map->fldrv_priv;
388 unsigned long ofs;
389 int chipnum;
390 int ret = 0;
391
392
393 chipnum = (from >> cfi->chipshift);
394 ofs = from - (chipnum << cfi->chipshift);
395
396 *retlen = 0;
397
398 while (len) {
399 unsigned long thislen;
400
401 if (chipnum >= cfi->numchips)
402 break;
403
404 if ((len + ofs -1) >> cfi->chipshift)
405 thislen = (1<<cfi->chipshift) - ofs;
406 else
407 thislen = len;
408
409 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
410 if (ret)
411 break;
412
413 *retlen += thislen;
414 len -= thislen;
415 buf += thislen;
416
417 ofs = 0;
418 chipnum++;
419 }
420 return ret;
421}
422
/*
 * Program one write-buffer's worth of data ('len' bytes, already
 * clipped by the caller so it does not cross a write-buffer boundary)
 * at device offset 'adr'.
 *
 * Returns 0 on success, -EINVAL if 'adr' is not bus-width aligned,
 * -EROFS if the chip reports the block locked, or -EIO on other chip
 * errors / timeouts.  Called with the chip mutex NOT held.
 */
static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
				  unsigned long adr, const u_char *buf, int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long cmd_adr, timeo;
	DECLARE_WAITQUEUE(wait, current);
	int wbufsize, z;

	/* Writes must start on a bus-width boundary. */
	if (adr & (map_bankwidth(map)-1))
		return -EINVAL;

	wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	adr += chip->start;
	/* Commands are issued at the write-buffer-aligned address. */
	cmd_adr = adr & ~(wbufsize-1);

	/* Build the "WSM ready" status pattern (bit 7) once. */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
 retry:

#ifdef DEBUG_CFI_FEATURES
	printk("%s: chip->state[%d]\n", __func__, chip->state);
#endif
	mutex_lock(&chip->mutex);

	/* Check that the chip's ready to talk to us.
	 * Later, we can actually think about interrupting it
	 * if it's in FL_ERASING state.
	 * Not just yet, though.
	 */
	switch (chip->state) {
	case FL_READY:
		break;

	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
#ifdef DEBUG_CFI_FEATURES
		printk("%s: 1 status[%x]\n", __func__, map_read(map, cmd_adr));
#endif
		/* fall through */

	case FL_STATUS:
		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in buffer write Xstatus = %lx, status = %lx\n",
			       status.x[0], map_read(map, cmd_adr).x[0]);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	ENABLE_VPP(map);
	/* 0xe8: Write To Buffer; the chip then reports buffer status. */
	map_write(map, CMD(0xe8), cmd_adr);
	chip->state = FL_WRITING_TO_BUFFER;

	/* Wait (bounded busy-loop) for the buffer to become available. */
	z = 0;
	for (;;) {
		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		mutex_lock(&chip->mutex);

		if (++z > 100) {
			/* Argh. Not ready for write to buffer */
			DISABLE_VPP(map);
			map_write(map, CMD(0x70), cmd_adr);
			chip->state = FL_STATUS;
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "Chip not ready for buffer write. Xstatus = %lx\n", status.x[0]);
			return -EIO;
		}
	}

	/* Write length (in bus words, minus one) of data to come */
	map_write(map, CMD(len/map_bankwidth(map)-1), cmd_adr );

	/* Write data into the buffer */
	for (z = 0; z < len;
	     z += map_bankwidth(map), buf += map_bankwidth(map)) {
		map_word d;
		d = map_word_load(map, buf);
		map_write(map, d, adr+z);
	}
	/* 0xd0: Confirm -- start the actual programming. */
	map_write(map, CMD(0xd0), cmd_adr);
	chip->state = FL_WRITING;

	mutex_unlock(&chip->mutex);
	cfi_udelay(chip->buffer_write_time);
	mutex_lock(&chip->mutex);

	timeo = jiffies + (HZ/2);
	z = 0;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			mutex_lock(&chip->mutex);
			continue;
		}

		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			/* clear status */
			map_write(map, CMD(0x50), cmd_adr);
			/* put back into read status register mode */
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			DISABLE_VPP(map);
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		z++;
		mutex_lock(&chip->mutex);
	}
	/* Adapt the initial poll delay to how long the chip took:
	 * z == 0 means we waited too long up-front, z > 1 not enough. */
	if (!z) {
		chip->buffer_write_time--;
		if (!chip->buffer_write_time)
			chip->buffer_write_time++;
	}
	if (z > 1)
		chip->buffer_write_time++;

	/* Done and happy. */
	DISABLE_VPP(map);
	chip->state = FL_STATUS;

	/* Check status bits 1,3,4,5 (mask 0x3a) for error conditions. */
	if (map_word_bitsset(map, status, CMD(0x3a))) {
#ifdef DEBUG_CFI_FEATURES
		printk("%s: 2 status[%lx]\n", __func__, status.x[0]);
#endif
		/* clear status */
		map_write(map, CMD(0x50), cmd_adr);
		/* put back into read status register mode */
		map_write(map, CMD(0x70), adr);
		wake_up(&chip->wq);
		mutex_unlock(&chip->mutex);
		/* Bit 1 set means block-lock error -> read-only. */
		return map_word_bitsset(map, status, CMD(0x02)) ? -EROFS : -EIO;
	}
	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);

	return 0;
}
608
609static int cfi_staa_write_buffers (struct mtd_info *mtd, loff_t to,
610 size_t len, size_t *retlen, const u_char *buf)
611{
612 struct map_info *map = mtd->priv;
613 struct cfi_private *cfi = map->fldrv_priv;
614 int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
615 int ret = 0;
616 int chipnum;
617 unsigned long ofs;
618
619 *retlen = 0;
620 if (!len)
621 return 0;
622
623 chipnum = to >> cfi->chipshift;
624 ofs = to - (chipnum << cfi->chipshift);
625
626#ifdef DEBUG_CFI_FEATURES
627 printk("%s: map_bankwidth(map)[%x]\n", __func__, map_bankwidth(map));
628 printk("%s: chipnum[%x] wbufsize[%x]\n", __func__, chipnum, wbufsize);
629 printk("%s: ofs[%x] len[%x]\n", __func__, ofs, len);
630#endif
631
632
633 while (len > 0) {
634
635 int size = wbufsize - (ofs & (wbufsize-1));
636
637 if (size > len)
638 size = len;
639
640 ret = do_write_buffer(map, &cfi->chips[chipnum],
641 ofs, buf, size);
642 if (ret)
643 return ret;
644
645 ofs += size;
646 buf += size;
647 (*retlen) += size;
648 len -= size;
649
650 if (ofs >> cfi->chipshift) {
651 chipnum ++;
652 ofs = 0;
653 if (chipnum == cfi->numchips)
654 return 0;
655 }
656 }
657
658 return 0;
659}
660
661
662
663
664
665
666#define ECCBUF_SIZE (mtd->writesize)
667#define ECCBUF_DIV(x) ((x) & ~(ECCBUF_SIZE - 1))
668#define ECCBUF_MOD(x) ((x) & (ECCBUF_SIZE - 1))
669static int
670cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
671 unsigned long count, loff_t to, size_t *retlen)
672{
673 unsigned long i;
674 size_t totlen = 0, thislen;
675 int ret = 0;
676 size_t buflen = 0;
677 static char *buffer;
678
679 if (!ECCBUF_SIZE) {
680
681
682
683 return -EIO;
684 }
685 buffer = kmalloc(ECCBUF_SIZE, GFP_KERNEL);
686 if (!buffer)
687 return -ENOMEM;
688
689 for (i=0; i<count; i++) {
690 size_t elem_len = vecs[i].iov_len;
691 void *elem_base = vecs[i].iov_base;
692 if (!elem_len)
693 continue;
694 if (buflen) {
695 if (buflen + elem_len < ECCBUF_SIZE) {
696 memcpy(buffer+buflen, elem_base, elem_len);
697 buflen += elem_len;
698 continue;
699 }
700 memcpy(buffer+buflen, elem_base, ECCBUF_SIZE-buflen);
701 ret = mtd->write(mtd, to, ECCBUF_SIZE, &thislen, buffer);
702 totlen += thislen;
703 if (ret || thislen != ECCBUF_SIZE)
704 goto write_error;
705 elem_len -= thislen-buflen;
706 elem_base += thislen-buflen;
707 to += ECCBUF_SIZE;
708 }
709 if (ECCBUF_DIV(elem_len)) {
710 ret = mtd->write(mtd, to, ECCBUF_DIV(elem_len), &thislen, elem_base);
711 totlen += thislen;
712 if (ret || thislen != ECCBUF_DIV(elem_len))
713 goto write_error;
714 to += thislen;
715 }
716 buflen = ECCBUF_MOD(elem_len);
717 if (buflen) {
718 memset(buffer, 0xff, ECCBUF_SIZE);
719 memcpy(buffer, elem_base + thislen, buflen);
720 }
721 }
722 if (buflen) {
723
724 ret = mtd->write(mtd, to, buflen, &thislen, buffer);
725 totlen += thislen;
726 if (ret || thislen != ECCBUF_SIZE)
727 goto write_error;
728 }
729write_error:
730 if (retlen)
731 *retlen = totlen;
732 kfree(buffer);
733 return ret;
734}
735
736
/*
 * Erase the single erase block at device offset 'adr' on one chip.
 *
 * Waits for the chip to become free, issues the block-erase sequence
 * (0x20, 0xD0), polls for completion (tolerating erase suspension by
 * readers), then decodes the status register.  A failed erase is
 * retried up to 3 times.  Returns 0 or a negative errno.
 */
static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo;
	int retries = 3;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr += chip->start;

	/* Build the "WSM ready" status pattern (bit 7) once. */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
retry:
	mutex_lock(&chip->mutex);

	/* Check that the chip's ready to talk to us. */
	switch (chip->state) {
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
	case FL_READY:
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
		/* fall through */

	case FL_STATUS:
		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in erase\n");
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	ENABLE_VPP(map);
	/* Clear the status register first */
	map_write(map, CMD(0x50), adr);

	/* Now erase: 0x20 = Block Erase, 0xD0 = Confirm. */
	map_write(map, CMD(0x20), adr);
	map_write(map, CMD(0xD0), adr);
	chip->state = FL_ERASING;

	/* Give the erase a head start before polling. */
	mutex_unlock(&chip->mutex);
	msleep(1000);
	mutex_lock(&chip->mutex);

	/* Poll for completion; a concurrent reader may suspend the
	 * erase (state leaves FL_ERASING), in which case we sleep
	 * until it is resumed. */
	timeo = jiffies + (HZ*20);
	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ*20); /* FIXME */
			mutex_lock(&chip->mutex);
			continue;
		}

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			printk(KERN_ERR "waiting for erase to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
			DISABLE_VPP(map);
			mutex_unlock(&chip->mutex);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		mutex_lock(&chip->mutex);
	}

	DISABLE_VPP(map);
	ret = 0;

	/* Re-read the status to decode any error bits. */
	map_write(map, CMD(0x70), adr);
	chip->state = FL_STATUS;
	status = map_read(map, adr);

	/* Error bits set (mask 0x3a: lock/VPP/erase/program errors)? */
	if (map_word_bitsset(map, status, CMD(0x3a))) {
		unsigned char chipstatus = status.x[0];
		if (!map_word_equal(map, status, CMD(chipstatus))) {
			/* Interleaved chips disagree: OR all their
			 * status bytes together before decoding. */
			int i, w;
			for (w=0; w<map_words(map); w++) {
				for (i = 0; i<cfi_interleave(cfi); i++) {
					chipstatus |= status.x[w] >> (cfi->device_type * 8);
				}
			}
			printk(KERN_WARNING "Status is not identical for all chips: 0x%lx. Merging to give 0x%02x\n",
			       status.x[0], chipstatus);
		}
		/* Reset the error bits */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);

		if ((chipstatus & 0x30) == 0x30) {
			printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%x\n", chipstatus);
			ret = -EIO;
		} else if (chipstatus & 0x02) {
			/* Protection bit set: block is locked. */
			ret = -EROFS;
		} else if (chipstatus & 0x8) {
			/* VPP voltage error. */
			printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%x\n", chipstatus);
			ret = -EIO;
		} else if (chipstatus & 0x20) {
			/* Erase failure: retry a few times. */
			if (retries--) {
				printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr, chipstatus);
				timeo = jiffies + HZ;
				chip->state = FL_STATUS;
				mutex_unlock(&chip->mutex);
				goto retry;
			}
			printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x\n", adr, chipstatus);
			ret = -EIO;
		}
	}

	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);
	return ret;
}
894
/*
 * mtd erase entry point.  Validates that [addr, addr+len) starts and
 * ends on erase-block boundaries of the (possibly differently sized)
 * regions it touches, then erases block by block across chips.
 */
static int cfi_staa_erase_varsize(struct mtd_info *mtd,
				  struct erase_info *instr)
{	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr, len;
	int chipnum, ret = 0;
	int i, first;
	struct mtd_erase_region_info *regions = mtd->eraseregions;

	if (instr->addr > mtd->size)
		return -EINVAL;

	if ((instr->len + instr->addr) > mtd->size)
		return -EINVAL;

	/* Check that both start and end of the requested erase are
	 * aligned with the erasesize at the appropriate addresses.
	 */

	i = 0;

	/* Skip all erase regions which are ended before the start of
	   the requested erase. Actually, to save on the calculations,
	   we skip to the first erase region which starts after the
	   start of the requested erase, and then go back one.
	*/
	while (i < mtd->numeraseregions && instr->addr >= regions[i].offset)
		i++;
	i--;

	/* OK, now i is pointing at the erase region in which this
	   erase request starts. Check the start of the requested
	   erase range is aligned with the erase size which is in
	   effect here.
	*/
	if (instr->addr & (regions[i].erasesize-1))
		return -EINVAL;

	/* Remember the erase region we start on */
	first = i;

	/* Next, check that the end of the requested erase is aligned
	 * with the erase region at that address.
	 */
	while (i<mtd->numeraseregions && (instr->addr + instr->len) >= regions[i].offset)
		i++;

	/* As before, drop back one to point at the region in which
	   the address actually falls
	*/
	i--;

	if ((instr->addr + instr->len) & (regions[i].erasesize-1))
		return -EINVAL;

	chipnum = instr->addr >> cfi->chipshift;
	adr = instr->addr - (chipnum << cfi->chipshift);
	len = instr->len;

	i=first;

	while(len) {
		ret = do_erase_oneblock(map, &cfi->chips[chipnum], adr);

		if (ret)
			return ret;

		adr += regions[i].erasesize;
		len -= regions[i].erasesize;

		/* Advance to the next region when we reach its end
		 * (offsets compared modulo the per-chip size). */
		if (adr % (1<< cfi->chipshift) == (((unsigned long)regions[i].offset + (regions[i].erasesize * regions[i].numblocks)) %( 1<< cfi->chipshift)))
			i++;

		/* Crossed into the next chip? */
		if (adr >> cfi->chipshift) {
			adr = 0;
			chipnum++;

			if (chipnum >= cfi->numchips)
				break;
		}
	}

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}
985
/*
 * mtd sync entry point: wait until every chip is idle and park each
 * one in FL_SYNCING so nothing new starts, then release them all.
 */
static void cfi_staa_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

	retry:
		mutex_lock(&chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
			/* fall through */
		case FL_SYNCING:
			mutex_unlock(&chip->mutex);
			break;

		default:
			/* Not an idle state: sleep until woken, then
			 * re-examine this chip. */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);

			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);

			goto retry;
		}
	}

	/* Unlock the chips again */
	for (i--; i >=0; i--) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}
		mutex_unlock(&chip->mutex);
	}
}
1043
/*
 * Set the lock bit of the erase block at device offset 'adr'
 * (command sequence 0x60, 0x01) and poll for completion.
 * Returns 0 on success or -EIO on timeout.
 */
static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);

	adr += chip->start;

	/* Build the "WSM ready" status pattern (bit 7) once. */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
retry:
	mutex_lock(&chip->mutex);

	/* Check that the chip's ready to talk to us. */
	switch (chip->state) {
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
	case FL_READY:
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
		/* fall through */

	case FL_STATUS:
		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in lock\n");
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	ENABLE_VPP(map);
	/* 0x60 = block lock setup, 0x01 = set lock bit. */
	map_write(map, CMD(0x60), adr);
	map_write(map, CMD(0x01), adr);
	chip->state = FL_LOCKING;

	/* Give the operation a head start before polling. */
	mutex_unlock(&chip->mutex);
	msleep(1000);
	mutex_lock(&chip->mutex);

	timeo = jiffies + (HZ*2);
	for (;;) {

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			printk(KERN_ERR "waiting for lock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
			DISABLE_VPP(map);
			mutex_unlock(&chip->mutex);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		mutex_lock(&chip->mutex);
	}

	/* Done and happy. */
	chip->state = FL_STATUS;
	DISABLE_VPP(map);
	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);
	return 0;
}
1139static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1140{
1141 struct map_info *map = mtd->priv;
1142 struct cfi_private *cfi = map->fldrv_priv;
1143 unsigned long adr;
1144 int chipnum, ret = 0;
1145#ifdef DEBUG_LOCK_BITS
1146 int ofs_factor = cfi->interleave * cfi->device_type;
1147#endif
1148
1149 if (ofs & (mtd->erasesize - 1))
1150 return -EINVAL;
1151
1152 if (len & (mtd->erasesize -1))
1153 return -EINVAL;
1154
1155 if ((len + ofs) > mtd->size)
1156 return -EINVAL;
1157
1158 chipnum = ofs >> cfi->chipshift;
1159 adr = ofs - (chipnum << cfi->chipshift);
1160
1161 while(len) {
1162
1163#ifdef DEBUG_LOCK_BITS
1164 cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
1165 printk("before lock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
1166 cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
1167#endif
1168
1169 ret = do_lock_oneblock(map, &cfi->chips[chipnum], adr);
1170
1171#ifdef DEBUG_LOCK_BITS
1172 cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
1173 printk("after lock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
1174 cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
1175#endif
1176
1177 if (ret)
1178 return ret;
1179
1180 adr += mtd->erasesize;
1181 len -= mtd->erasesize;
1182
1183 if (adr >> cfi->chipshift) {
1184 adr = 0;
1185 chipnum++;
1186
1187 if (chipnum >= cfi->numchips)
1188 break;
1189 }
1190 }
1191 return 0;
1192}
/*
 * Clear lock bits via the erase block at device offset 'adr'
 * (command sequence 0x60, 0xD0) and poll for completion.
 * Returns 0 on success or -EIO on timeout.
 */
static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);

	adr += chip->start;

	/* Build the "WSM ready" status pattern (bit 7) once. */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
retry:
	mutex_lock(&chip->mutex);

	/* Check that the chip's ready to talk to us. */
	switch (chip->state) {
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
	case FL_READY:
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
		/* fall through */

	case FL_STATUS:
		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in unlock\n");
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	ENABLE_VPP(map);
	/* 0x60 = block lock setup, 0xD0 = clear lock bits. */
	map_write(map, CMD(0x60), adr);
	map_write(map, CMD(0xD0), adr);
	chip->state = FL_UNLOCKING;

	/* Give the operation a head start before polling. */
	mutex_unlock(&chip->mutex);
	msleep(1000);
	mutex_lock(&chip->mutex);

	timeo = jiffies + (HZ*2);
	for (;;) {

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			printk(KERN_ERR "waiting for unlock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
			DISABLE_VPP(map);
			mutex_unlock(&chip->mutex);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		mutex_lock(&chip->mutex);
	}

	/* Done and happy. */
	chip->state = FL_STATUS;
	DISABLE_VPP(map);
	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);
	return 0;
}
/*
 * mtd unlock entry point.
 *
 * NOTE(review): unlike cfi_staa_lock() this issues a single
 * do_unlock_oneblock() call and never loops over 'len' -- presumably
 * the chip's clear-lock-bits command affects more than one block;
 * confirm against the flash data sheet.
 */
static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr;
	int chipnum, ret = 0;
#ifdef DEBUG_LOCK_BITS
	int ofs_factor = cfi->interleave * cfi->device_type;
#endif

	/* Locate the chip and the offset within it. */
	chipnum = ofs >> cfi->chipshift;
	adr = ofs - (chipnum << cfi->chipshift);

#ifdef DEBUG_LOCK_BITS
	{
		/* Dump the block status registers over the whole range. */
		unsigned long temp_adr = adr;
		unsigned long temp_len = len;

		cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
		while (temp_len) {
			printk("before unlock %x: block status register is %x\n",temp_adr,cfi_read_query(map, temp_adr+(2*ofs_factor)));
			temp_adr += mtd->erasesize;
			temp_len -= mtd->erasesize;
		}
		cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
	}
#endif

	ret = do_unlock_oneblock(map, &cfi->chips[chipnum], adr);

#ifdef DEBUG_LOCK_BITS
	cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
	printk("after unlock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
	cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
#endif

	return ret;
}
1326
/*
 * Power-management suspend hook: park every idle chip in
 * FL_PM_SUSPENDED.  If any chip is busy, return -EAGAIN and release
 * the chips already suspended.
 */
static int cfi_staa_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_PM_SUSPENDED;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
			/* fall through */
		case FL_PM_SUSPENDED:
			break;

		default:
			/* Chip is busy: abort the whole suspend. */
			ret = -EAGAIN;
			break;
		}
		mutex_unlock(&chip->mutex);
	}

	/* Unlock the chips again on failure */
	if (ret) {
		for (i--; i >=0; i--) {
			chip = &cfi->chips[i];

			mutex_lock(&chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				/* No need to force it into a known
				   state here, because we're returning
				   failure, and it didn't get power
				   cycled */
				chip->state = chip->oldstate;
				wake_up(&chip->wq);
			}
			mutex_unlock(&chip->mutex);
		}
	}

	return ret;
}
1382
1383static void cfi_staa_resume(struct mtd_info *mtd)
1384{
1385 struct map_info *map = mtd->priv;
1386 struct cfi_private *cfi = map->fldrv_priv;
1387 int i;
1388 struct flchip *chip;
1389
1390 for (i=0; i<cfi->numchips; i++) {
1391
1392 chip = &cfi->chips[i];
1393
1394 mutex_lock(&chip->mutex);
1395
1396
1397 if (chip->state == FL_PM_SUSPENDED) {
1398 map_write(map, CMD(0xFF), 0);
1399 chip->state = FL_READY;
1400 wake_up(&chip->wq);
1401 }
1402
1403 mutex_unlock(&chip->mutex);
1404 }
1405}
1406
/* Map-driver destroy hook: free the extended query table
 * (cfi->cmdset_priv) and the chip-private structure. */
static void cfi_staa_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	kfree(cfi->cmdset_priv);
	kfree(cfi);
}
1414
1415MODULE_LICENSE("GPL");
1416