/*
 * Common Flash Interface support:
 *   AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
 *
 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
 * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com>
 *
 * 2_by_8 routines added by Simon Munton
 *
 * 4_by_16 work by Carolyn J. Smith
 *
 * XIP support hooks by Vitaly Wool (based on code for Intel flash
 * by Nicolas Pitre)
 *
 * 25/09/2008 Christopher Moore: TopBottom fixup for many Macronix with CFI V1.0
 *
 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
 *
 * This code is GPL
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/xip.h>

#define AMD_BOOTLOC_BUG
#define FORCE_WORD_WRITE 0

#define MAX_RETRIES 3

#define SST49LF004B		0x0060
#define SST49LF040B		0x0050
#define SST49LF008A		0x005a
#define AT49BV6416		0x00d6

/*
 * Status Register bit description. Used by flash devices that don't
 * support DQ polling (e.g. HyperFlash)
 */
#define CFI_SR_DRB		BIT(7)
#define CFI_SR_ESB		BIT(5)
#define CFI_SR_PSB		BIT(4)
#define CFI_SR_WBASB		BIT(3)
#define CFI_SR_SLSB		BIT(1)
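
/*
 * As used by cfi_check_err_status() below: DRB flags the device ready,
 * ESB a failed erase, PSB a failed program, WBASB an aborted
 * write-buffer command, and SLSB a write-protected (locked) sector.
 */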

static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
#if !FORCE_WORD_WRITE
static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
#endif
static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_amdstd_sync (struct mtd_info *);
static int cfi_amdstd_suspend (struct mtd_info *);
static void cfi_amdstd_resume (struct mtd_info *);
static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *);
static int cfi_amdstd_get_fact_prot_info(struct mtd_info *, size_t,
					 size_t *, struct otp_info *);
static int cfi_amdstd_get_user_prot_info(struct mtd_info *, size_t,
					 size_t *, struct otp_info *);
static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_read_fact_prot_reg(struct mtd_info *, loff_t, size_t,
					 size_t *, u_char *);
static int cfi_amdstd_read_user_prot_reg(struct mtd_info *, loff_t, size_t,
					 size_t *, u_char *);
static int cfi_amdstd_write_user_prot_reg(struct mtd_info *, loff_t, size_t,
					  size_t *, u_char *);
static int cfi_amdstd_lock_user_prot_reg(struct mtd_info *, loff_t, size_t);

static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
				  size_t *retlen, const u_char *buf);

static void cfi_amdstd_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"

static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);

static int cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len);

static struct mtd_chip_driver cfi_amdstd_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_amdstd_destroy,
	.name		= "cfi_cmdset_0002",
	.module		= THIS_MODULE
};

/*
 * Use status register to poll for Erase/write completion when DQ is not
 * supported. This is indicated by Bit[1:0] of SoftwareFeatures field in
 * CFI Primary Vendor-Specific Extended Query.
 */
static int cfi_use_status_reg(struct cfi_private *cfi)
{
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	u8 poll_mask = CFI_POLL_STATUS_REG | CFI_POLL_DQ;

	return extp->MinorVersion >= '5' &&
		(extp->SoftwareFeatures & poll_mask) == CFI_POLL_STATUS_REG;
}

static int cfi_check_err_status(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status;

	if (!cfi_use_status_reg(cfi))
		return 0;

	cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	status = map_read(map, adr);

	/* The error bits are invalid while the chip's busy */
	if (!map_word_bitsset(map, status, CMD(CFI_SR_DRB)))
		return 0;

	if (map_word_bitsset(map, status, CMD(0x3a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		if (chipstatus & CFI_SR_ESB)
			pr_err("%s erase operation failed, status %lx\n",
			       map->name, chipstatus);
		if (chipstatus & CFI_SR_PSB)
			pr_err("%s program operation failed, status %lx\n",
			       map->name, chipstatus);
		if (chipstatus & CFI_SR_WBASB)
			pr_err("%s buffer program command aborted, status %lx\n",
			       map->name, chipstatus);
		if (chipstatus & CFI_SR_SLSB)
			pr_err("%s sector write protected, status %lx\n",
			       map->name, chipstatus);

		/* Erase/Program status bits are set on the operation failure */
		if (chipstatus & (CFI_SR_ESB | CFI_SR_PSB))
			return 1;
	}
	return 0;
}
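
/*
 * The error bits checked above are only meaningful once the device is no
 * longer busy, so the callers below (do_write_oneword_once(),
 * do_write_buffer_wait(), do_erase_chip()) poll with chip_good() or
 * chip_ready() first and then turn a non-zero return from
 * cfi_check_err_status() into -EIO.
 */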

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_amdstd *extp)
{
	const char* erase_suspend[3] = {
		"Not supported", "Read only", "Read/write"
	};
	const char* top_bottom[6] = {
		"No WP", "8x8KiB sectors at top & bottom, no WP",
		"Bottom boot", "Top boot",
		"Uniform, Bottom WP", "Uniform, Top WP"
	};

	printk("  Silicon revision: %d\n", extp->SiliconRevision >> 1);
	printk("  Address sensitive unlock: %s\n",
	       (extp->SiliconRevision & 1) ? "Not required" : "Required");

	if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
		printk("  Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
	else
		printk("  Erase Suspend: Unknown value %d\n", extp->EraseSuspend);

	if (extp->BlkProt == 0)
		printk("  Block protection: Not supported\n");
	else
		printk("  Block protection: %d sectors per group\n", extp->BlkProt);

	printk("  Temporary block unprotect: %s\n",
	       extp->TmpBlkUnprotect ? "Supported" : "Not supported");
	printk("  Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
	printk("  Number of simultaneous operations: %d\n", extp->SimultaneousOps);
	printk("  Burst mode: %s\n",
	       extp->BurstMode ? "Supported" : "Not supported");
	if (extp->PageMode == 0)
		printk("  Page mode: Not supported\n");
	else
		printk("  Page mode: %d word page\n", extp->PageMode << 2);

	printk("  Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMin >> 4, extp->VppMin & 0xf);
	printk("  Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMax >> 4, extp->VppMax & 0xf);

	if (extp->TopBottom < ARRAY_SIZE(top_bottom))
		printk("  Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
	else
		printk("  Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
}
#endif

#ifdef AMD_BOOTLOC_BUG
/* Wheee. Bring me the head of someone who coded this table lookup. */
static void fixup_amd_bootblock(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	__u8 major = extp->MajorVersion;
	__u8 minor = extp->MinorVersion;

	if (((major << 8) | minor) < 0x3131) {
		/* CFI version 1.0 => don't trust bootloc */

		pr_debug("%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n",
			map->name, cfi->mfr, cfi->id);

		/* AFAICS all 29LV400 with a bottom boot block have a device ID
		 * of 0x22BA in 16-bit mode and 0xBA in 8-bit mode.
		 * These were badly detected as they have the 0x80 bit set
		 * so treat them as a special case.
		 */
		if (((cfi->id == 0xBA) || (cfi->id == 0x22BA)) &&

			/* Macronix added CFI to their 2nd generation
			 * MX29LV400C B/T but AFAICS no other 29LV400 (AMD,
			 * Fujitsu, Spansion, EON, ESI and older Macronix)
			 * has CFI.
			 *
			 * Therefore also check the manufacturer.
			 * This reduces the risk of false detection due to
			 * the 8-bit device ID.
			 */
			(cfi->mfr == CFI_MFR_MACRONIX)) {
			pr_debug("%s: Macronix MX29LV400C with bottom boot block"
				" detected\n", map->name);
			extp->TopBottom = 2;	/* bottom boot */
		} else
		if (cfi->id & 0x80) {
			printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
			extp->TopBottom = 3;	/* top boot */
		} else {
			extp->TopBottom = 2;	/* bottom boot */
		}

		pr_debug("%s: AMD CFI PRI V%c.%c has no boot block field;"
			" deduced %s from Device ID\n", map->name, major, minor,
			extp->TopBottom == 2 ? "bottom" : "top");
	}
}
#endif

#if !FORCE_WORD_WRITE
static void fixup_use_write_buffers(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		pr_debug("Using buffer write method\n");
		mtd->_write = cfi_amdstd_write_buffers;
	}
}
#endif /* !FORCE_WORD_WRITE */

/* Atmel chips don't use the same PRI format as AMD chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	struct cfi_pri_atmel atmel_pri;

	memcpy(&atmel_pri, extp, sizeof(atmel_pri));
	memset((char *)extp + 5, 0, sizeof(*extp) - 5);

	if (atmel_pri.Features & 0x02)
		extp->EraseSuspend = 2;

	/* Some chips got it backwards... */
	if (cfi->id == AT49BV6416) {
		if (atmel_pri.BottomBoot)
			extp->TopBottom = 3;
		else
			extp->TopBottom = 2;
	} else {
		if (atmel_pri.BottomBoot)
			extp->TopBottom = 2;
		else
			extp->TopBottom = 3;
	}

	/* burst write mode not supported */
	cfi->cfiq->BufWriteTimeoutTyp = 0;
	cfi->cfiq->BufWriteTimeoutMax = 0;
}

static void fixup_use_secsi(struct mtd_info *mtd)
{
	/* Setup for chips with a secsi area */
	mtd->_read_user_prot_reg = cfi_amdstd_secsi_read;
	mtd->_read_fact_prot_reg = cfi_amdstd_secsi_read;
}

static void fixup_use_erase_chip(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if ((cfi->cfiq->NumEraseRegions == 1) &&
	    ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
		mtd->_erase = cfi_amdstd_erase_chip;
	}

}

/*
 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors
 * locked by default.
 */
static void fixup_use_atmel_lock(struct mtd_info *mtd)
{
	mtd->_lock = cfi_atmel_lock;
	mtd->_unlock = cfi_atmel_unlock;
	mtd->flags |= MTD_POWERUP_LOCK;
}

static void fixup_old_sst_eraseregion(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/*
	 * These flashes report two separate eraseblock regions based on the
	 * sector_erase-size and block_erase-size, although they both operate on the
	 * same memory. This is not allowed according to CFI, so we just pick the
	 * sector_erase-size.
	 */
	cfi->cfiq->NumEraseRegions = 1;
}

static void fixup_sst39vf(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	fixup_old_sst_eraseregion(mtd);

	cfi->addr_unlock1 = 0x5555;
	cfi->addr_unlock2 = 0x2AAA;
}

static void fixup_sst39vf_rev_b(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	fixup_old_sst_eraseregion(mtd);

	cfi->addr_unlock1 = 0x555;
	cfi->addr_unlock2 = 0x2AA;

	cfi->sector_erase_cmd = CMD(0x50);
}

static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	fixup_sst39vf_rev_b(mtd);

	/*
	 * CFI reports 1024 sectors (0x03ff+1) of 64KBytes (0x0100*256) where
	 * it should report a size of 8KBytes (0x0020*256).
	 */
	cfi->cfiq->EraseRegionInfo[0] = 0x002003ff;
	pr_warn("%s: Bad 38VF640x CFI data; adjusting sector size from 64 to 8KiB\n",
		mtd->name);
}

static void fixup_s29gl064n_sectors(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
		cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
		pr_warn("%s: Bad S29GL064N CFI data; adjust from 64 to 128 sectors\n",
			mtd->name);
	}
}

static void fixup_s29gl032n_sectors(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
		cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
		pr_warn("%s: Bad S29GL032N CFI data; adjust from 127 to 63 sectors\n",
			mtd->name);
	}
}

static void fixup_s29ns512p_sectors(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/*
	 *  S29NS512P flash uses more than 8bits to report number of sectors,
	 * which is not permitted by CFI.
	 */
	cfi->cfiq->EraseRegionInfo[0] = 0x020001ff;
	pr_warn("%s: Bad S29NS512P CFI data; adjust to 512 sectors\n",
		mtd->name);
}

/* Used to fix CFI-Tables of chips without Extended Query Tables */
static struct cfi_fixup cfi_nopri_fixup_table[] = {
	{ CFI_MFR_SST, 0x234a, fixup_sst39vf }, /* SST39VF1602 */
	{ CFI_MFR_SST, 0x234b, fixup_sst39vf }, /* SST39VF1601 */
	{ CFI_MFR_SST, 0x235a, fixup_sst39vf }, /* SST39VF3202 */
	{ CFI_MFR_SST, 0x235b, fixup_sst39vf }, /* SST39VF3201 */
	{ CFI_MFR_SST, 0x235c, fixup_sst39vf_rev_b }, /* SST39VF3202B */
	{ CFI_MFR_SST, 0x235d, fixup_sst39vf_rev_b }, /* SST39VF3201B */
	{ CFI_MFR_SST, 0x236c, fixup_sst39vf_rev_b }, /* SST39VF6402B */
	{ CFI_MFR_SST, 0x236d, fixup_sst39vf_rev_b }, /* SST39VF6401B */
	{ 0, 0, NULL }
};

static struct cfi_fixup cfi_fixup_table[] = {
	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
#ifdef AMD_BOOTLOC_BUG
	{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock },
	{ CFI_MFR_AMIC, CFI_ID_ANY, fixup_amd_bootblock },
	{ CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock },
#endif
	{ CFI_MFR_AMD, 0x0050, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0053, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0055, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0056, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x005C, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x005F, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors },
	{ CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors },
	{ CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors },
	{ CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors },
	{ CFI_MFR_AMD, 0x3f00, fixup_s29ns512p_sectors },
	{ CFI_MFR_SST, 0x536a, fixup_sst38vf640x_sectorsize }, /* SST38VF6402 */
	{ CFI_MFR_SST, 0x536b, fixup_sst38vf640x_sectorsize }, /* SST38VF6401 */
	{ CFI_MFR_SST, 0x536c, fixup_sst38vf640x_sectorsize }, /* SST38VF6404 */
	{ CFI_MFR_SST, 0x536d, fixup_sst38vf640x_sectorsize }, /* SST38VF6403 */
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
#endif
	{ 0, 0, NULL }
};
static struct cfi_fixup jedec_fixup_table[] = {
	{ CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock },
	{ CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock },
	{ CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock },
	{ 0, 0, NULL }
};

static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor ids and the JEDEC vendor IDs appear
	 * to be common.  It is like the devices id's are as
	 * well.  This table is to pick all cases where
	 * we know that is the case.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip },
	{ CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock },
	{ 0, 0, NULL }
};


static void cfi_fixup_major_minor(struct cfi_private *cfi,
				  struct cfi_pri_amdstd *extp)
{
	if (cfi->mfr == CFI_MFR_SAMSUNG) {
		if ((extp->MajorVersion == '0' && extp->MinorVersion == '0') ||
		    (extp->MajorVersion == '3' && extp->MinorVersion == '3')) {
			/*
			 * Samsung K8P2815UQB and K8D6x16UxM chips
			 * report major=0 / minor=0.
			 * K8D3x16UxC chips report major=3 / minor=3.
			 */
			printk(KERN_NOTICE "  Fixing Samsung's Amd/Fujitsu"
			       " Extended Query version to 1.%c\n",
			       extp->MinorVersion);
			extp->MajorVersion = '1';
		}
	}

	/*
	 * SST 38VF640x chips report major=0xFF / minor=0xFF.
	 */
	if (cfi->mfr == CFI_MFR_SST && (cfi->id >> 4) == 0x0536) {
		extp->MajorVersion = '1';
		extp->MinorVersion = '0';
	}
}

static int is_m29ew(struct cfi_private *cfi)
{
	if (cfi->mfr == CFI_MFR_INTEL &&
	    ((cfi->device_type == CFI_DEVICETYPE_X8 && (cfi->id & 0xff) == 0x7e) ||
	     (cfi->device_type == CFI_DEVICETYPE_X16 && cfi->id == 0x227e)))
		return 1;
	return 0;
}

/*
 * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 20:
 * Some revisions of the M29EW suffer from erase suspend hang ups. In
 * particular, it can occur when the sequence
 * Erase Confirm -> Suspend -> Program -> Resume
 * causes a lockup due to internal timing issues. The consequence is that the
 * erase cannot be resumed without inserting a dummy command after programming
 * and prior to resuming. The work-around is to issue a dummy write cycle
 * that writes an "FF" command code before the RESUME command.
 */
static void cfi_fixup_m29ew_erase_suspend(struct map_info *map,
					  unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	/* before resume, insert a dummy 0xF0 cycle for Micron M29EW devices */
	if (is_m29ew(cfi))
		map_write(map, CMD(0xF0), adr);
}

/*
 * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 22:
 *
 * Some revisions of the M29EW (for example, A1 and A2 step revisions) can
 * hang up when an ERASE SUSPEND command is issued after an ERASE RESUME
 * operation without waiting for a minimum delay. The result is that once
 * the ERASE seems to be completed (no bits are toggling), the contents of
 * the flash block on which the erase was ongoing could be inconsistent with
 * the expected values, causing the ERASE operation to fail. The problem
 * disappears when a delay is inserted after the RESUME command; a worst
 * case delay of 500us fixes the problem in all cases, which also matches
 * the M29EW datasheet constraint.
 */
static void cfi_fixup_m29ew_delay_after_resume(struct cfi_private *cfi)
{
	/*
	 * Resolving the Delay After Resume Issue, see Micron TN-13-07.
	 * Worst case delay must be 500us but 30-50us should be ok as well.
	 */
	if (is_m29ew(cfi))
		cfi_udelay(500);
}

struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct device_node __maybe_unused *np = map->device_node;
	struct mtd_info *mtd;
	int i;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd)
		return NULL;
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->_erase   = cfi_amdstd_erase_varsize;
	mtd->_write   = cfi_amdstd_write_words;
	mtd->_read    = cfi_amdstd_read;
	mtd->_sync    = cfi_amdstd_sync;
	mtd->_suspend = cfi_amdstd_suspend;
	mtd->_resume  = cfi_amdstd_resume;
	mtd->_read_user_prot_reg = cfi_amdstd_read_user_prot_reg;
	mtd->_read_fact_prot_reg = cfi_amdstd_read_fact_prot_reg;
	mtd->_get_fact_prot_info = cfi_amdstd_get_fact_prot_info;
	mtd->_get_user_prot_info = cfi_amdstd_get_user_prot_info;
	mtd->_write_user_prot_reg = cfi_amdstd_write_user_prot_reg;
	mtd->_lock_user_prot_reg = cfi_amdstd_lock_user_prot_reg;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;
	mtd->writesize = 1;
	mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;

	pr_debug("MTD %s(): write buffer size %d\n", __func__,
			mtd->writebufsize);

	mtd->_panic_write = cfi_amdstd_panic_write;
	mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;

	if (cfi->cfi_mode==CFI_MODE_CFI){
		unsigned char bootloc;
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_amdstd *extp;

		extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
		if (extp) {
			/*
			 * It's a real CFI chip, not one for which the probe
			 * routine faked a CFI structure.
			 */
			cfi_fixup_major_minor(cfi, extp);

			/*
			 * Valid primary extension versions are: 1.0, 1.1,
			 * 1.2, 1.3, 1.4 and 1.5.
			 */
			if (extp->MajorVersion != '1' ||
			    (extp->MajorVersion == '1' && (extp->MinorVersion < '0' || extp->MinorVersion > '5'))) {
				printk(KERN_ERR "  Unknown Amd/Fujitsu Extended Query "
				       "version %c.%c (%#02x/%#02x).\n",
				       extp->MajorVersion, extp->MinorVersion,
				       extp->MajorVersion, extp->MinorVersion);
				kfree(extp);
				kfree(mtd);
				return NULL;
			}

			printk(KERN_INFO "  Amd/Fujitsu Extended Query version %c.%c.\n",
			       extp->MajorVersion, extp->MinorVersion);

			/* Install our own private info structure */
			cfi->cmdset_priv = extp;

			/* Apply cfi device specific fixups */
			cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
			/* Tell the user about it in lots of lovely detail */
			cfi_tell_features(extp);
#endif

#ifdef CONFIG_OF
			if (np && of_property_read_bool(
				    np, "use-advanced-sector-protection")
			    && extp->BlkProtUnprot == 8) {
				printk(KERN_INFO "  Advanced Sector Protection (PPB Locking) supported\n");
				mtd->_lock = cfi_ppb_lock;
				mtd->_unlock = cfi_ppb_unlock;
				mtd->_is_locked = cfi_ppb_is_locked;
			}
#endif

			bootloc = extp->TopBottom;
			if ((bootloc < 2) || (bootloc > 5)) {
				printk(KERN_WARNING "%s: CFI contains unrecognised boot "
				       "bank location (%d). Assuming bottom.\n",
				       map->name, bootloc);
				bootloc = 2;
			}

			if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
				printk(KERN_WARNING "%s: Swapping erase regions for top-boot CFI table.\n", map->name);

				for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
					int j = (cfi->cfiq->NumEraseRegions-1)-i;

					swap(cfi->cfiq->EraseRegionInfo[i],
					     cfi->cfiq->EraseRegionInfo[j]);
				}
			}
			/* Set the default CFI lock/unlock addresses */
			cfi->addr_unlock1 = 0x555;
			cfi->addr_unlock2 = 0x2aa;
		}
		cfi_fixup(mtd, cfi_nopri_fixup_table);

		if (!cfi->addr_unlock1 || !cfi->addr_unlock2) {
			kfree(mtd);
			return NULL;
		}

	} /* CFI mode */
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	for (i=0; i< cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
		/*
		 * First calculate the timeout max according to timeout field
		 * of struct cfi_ident that probed from chip's CFI area, if
		 * available. Specify a minimum of 2000us, in case the CFI
		 * data is wrong.
		 */
		if (cfi->cfiq->BufWriteTimeoutTyp &&
		    cfi->cfiq->BufWriteTimeoutMax)
			cfi->chips[i].buffer_write_time_max =
				1 << (cfi->cfiq->BufWriteTimeoutTyp +
				      cfi->cfiq->BufWriteTimeoutMax);
		else
			cfi->chips[i].buffer_write_time_max = 0;

		cfi->chips[i].buffer_write_time_max =
			max(cfi->chips[i].buffer_write_time_max, 2000);

		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	map->fldrv = &cfi_amdstd_chipdrv;

	return cfi_amdstd_setup(mtd);
}
struct mtd_info *cfi_cmdset_0006(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
struct mtd_info *cfi_cmdset_0701(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0002);
EXPORT_SYMBOL_GPL(cfi_cmdset_0006);
EXPORT_SYMBOL_GPL(cfi_cmdset_0701);
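
/*
 * Usage sketch: a map driver typically reaches this command-set handler
 * indirectly through the CFI probe, e.g.
 *
 *	struct mtd_info *mtd = do_map_probe("cfi_probe", &my_map);
 *	if (mtd)
 *		mtd_device_register(mtd, NULL, 0);
 *
 * where "my_map" is a caller-provided struct map_info. For chips that
 * report primary command set 0x0002 (or the 0x0006/0x0701 aliases above),
 * the probe ends up calling cfi_cmdset_0002().
 */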

static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
	unsigned long offset = 0;
	int i,j;

	printk(KERN_NOTICE "number of %s chips: %d\n",
	       (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);

	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc_array(mtd->numeraseregions,
					  sizeof(struct mtd_erase_region_info),
					  GFP_KERNEL);
	if (!mtd->eraseregions)
		goto setup_err;

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}
	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}

	__module_get(THIS_MODULE);
	register_reboot_notifier(&mtd->reboot_notifier);
	return mtd;

 setup_err:
	kfree(mtd->eraseregions);
	kfree(mtd);
	kfree(cfi->cmdset_priv);
	return NULL;
}

/*
 * Return true if the chip is ready.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and is indicated by no toggle bits toggling.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_ready(struct map_info *map, struct flchip *chip,
			       unsigned long addr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word d, t;

	if (cfi_use_status_reg(cfi)) {
		map_word ready = CMD(CFI_SR_DRB);
		/*
		 * For chips that support status register, check device
		 * ready bit
		 */
		cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi,
				 cfi->device_type, NULL);
		d = map_read(map, addr);

		return map_word_andequal(map, d, ready, ready);
	}

	d = map_read(map, addr);
	t = map_read(map, addr);

	return map_word_equal(map, d, t);
}

/*
 * Return true if the chip is ready and has the correct value.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and it is indicated by no bits toggling.
 *
 * Errors are indicated by toggling bits or bits held with the wrong value.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_good(struct map_info *map, struct flchip *chip,
			      unsigned long addr, map_word expected)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word oldd, curd;

	if (cfi_use_status_reg(cfi)) {
		map_word ready = CMD(CFI_SR_DRB);

		/*
		 * For chips that support status register, check device
		 * ready bit
		 */
		cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi,
				 cfi->device_type, NULL);
		curd = map_read(map, addr);

		return map_word_andequal(map, curd, ready, ready);
	}

	oldd = map_read(map, addr);
	curd = map_read(map, addr);

	return	map_word_equal(map, oldd, curd) &&
		map_word_equal(map, curd, expected);
}

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo;
	struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			if (chip_ready(map, chip, adr))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
				return -EIO;
			}
			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}
		fallthrough;
	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		    (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
			goto sleep;

		/* Do not allow suspend iff read/write to EB address */
		if ((adr & chip->in_progress_block_mask) ==
		    chip->in_progress_block_addr)
			goto sleep;

		/* Erase suspend */
		/* It's harmless to issue the Erase-Suspend and Erase-Resume
		 * commands when the erase algorithm isn't in progress. */
		map_write(map, CMD(0xB0), chip->in_progress_block_addr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			if (chip_ready(map, chip, adr))
				break;

			if (time_after(jiffies, timeo)) {
				/* Should have suspended the erase by now.
				 * Send an Erase-Resume command as either
				 * there was an error (so leave the erase
				 * in progress) or we risk paranoia of
				 * calling code with our internal state */
				put_chip(map, chip, adr);
				printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
				return -EIO;
			}

			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Nobody will touch it while it's in state
			   FL_ERASE_SUSPENDING, so we can just loop here. */
		}
		chip->state = FL_READY;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (!cfip || !(cfip->EraseSuspend&2)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_SHUTDOWN:
		/* The machine is rebooting */
		return -EIO;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;
		fallthrough;
	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		mutex_lock(&chip->mutex);
		goto resettime;
	}
}
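
/*
 * get_chip() is called with chip->mutex held and, on success, leaves the
 * chip in a state where the caller may issue commands for 'mode'
 * (suspending an in-progress erase if necessary). Every successful
 * get_chip() must be balanced by put_chip(), which resumes a suspended
 * erase and wakes up any sleeping waiters.
 */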


static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	switch(chip->oldstate) {
	case FL_ERASING:
		cfi_fixup_m29ew_erase_suspend(map,
			chip->in_progress_block_addr);
		map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr);
		cfi_fixup_m29ew_delay_after_resume(cfi);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
		break;
	default:
		printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
	}
	wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt what so ever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions). Also configuring MTD CFI
 * support to a single buswidth and a single interleave is also recommended.
 */
static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function is polling for both the given timeout and pending
 * (but still masked) hardware interrupts.  Whenever there is an interrupt
 * pending then the flash erase operation is suspended, array mode restored
 * and interrupts unmasked.  Task scheduling might also happen at that
 * point.  The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the remaining
 * of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
				unsigned long adr, int usec)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long suspended, start = xip_currtime();
	flstate_t oldstate;

	do {
		cpu_relax();
		if (xip_irqpending() && extp &&
		    ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase operation when supported.
			 * Note that we currently don't try to suspend
			 * interleaved chips if there is already another
			 * operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			map_write(map, CMD(0xb0), adr);
			usec -= xip_elapsed_since(start);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical timeout error.
					 * But it isn't fatal: the chip is
					 * still hopefully functional, more
					 * luck next time.
					 */
					return;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (!map_word_bitsset(map, status, CMD(0x40)))
				break;
			chip->state = FL_XIP_WHILE_ERASING;
			chip->erase_suspended = 1;
			map_write(map, CMD(0xf0), adr);
			(void) map_read(map, adr);
			xip_iprefetch();
			local_irq_enable();
			mutex_unlock(&chip->mutex);
			xip_iprefetch();
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			mutex_lock(&chip->mutex);
			while (chip->state != FL_XIP_WHILE_ERASING) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				mutex_unlock(&chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				mutex_lock(&chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Correct Erase Suspend Hangups for M29EW */
			cfi_fixup_m29ew_erase_suspend(map, adr);
			/* Resume the write or erase operation */
			map_write(map, cfi->sector_erase_cmd, adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, OK, OK)
		 && xip_elapsed_since(start) < usec);
}

#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVALIDATE_CACHE_UDELAY.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
	UDELAY(map, chip, adr, usec)

/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit.  For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with the
 * same chip state as it was entered with.  This is why there is no care for
 * the presence of add_wait_queue() or schedule() calls from within a couple
 * xip_disable()'d areas of code, like in do_erase_oneblock for example.
 * The queueing and scheduling are always happening within xip_udelay().
 *
 * Similarly, get_chip and put_chip just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state
 * is in array mode, therefore never executing many cases therein and not
 * causing any problem with XIP.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)

#define UDELAY(map, chip, adr, usec)  \
do {  \
	mutex_unlock(&chip->mutex);  \
	cfi_udelay(usec);  \
	mutex_lock(&chip->mutex);  \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
do {  \
	mutex_unlock(&chip->mutex);  \
	INVALIDATE_CACHED_RANGE(map, adr, len);  \
	cfi_udelay(usec);  \
	mutex_lock(&chip->mutex);  \
} while (0)

#endif
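
/*
 * In the non-XIP build above, both delay macros drop chip->mutex around
 * cfi_udelay(), so another thread may grab the chip and, for example,
 * suspend an in-progress erase; every caller therefore re-checks
 * chip->state after the delay rather than assuming it is unchanged.
 */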

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), cmd_addr);
		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	mutex_unlock(&chip->mutex);
	return 0;
}


static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
			loff_t adr, size_t len, u_char *buf, size_t grouplen);

static inline void otp_enter(struct map_info *map, struct flchip *chip,
			     loff_t adr, size_t len)
{
	struct cfi_private *cfi = map->fldrv_priv;

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);

	INVALIDATE_CACHED_RANGE(map, chip->start + adr, len);
}

static inline void otp_exit(struct map_info *map, struct flchip *chip,
			    loff_t adr, size_t len)
{
	struct cfi_private *cfi = map->fldrv_priv;

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);

	INVALIDATE_CACHED_RANGE(map, chip->start + adr, len);
}
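
/*
 * The sequences above follow the standard AMD unlock cycling: 0xAA to
 * addr_unlock1 and 0x55 to addr_unlock2, then 0x88 to enter the SecSi
 * (OTP) sector, or 0x90 followed by 0x00 to leave it again. While the
 * SecSi sector is mapped in, ordinary array reads are redirected to it,
 * hence the cache invalidation on both entry and exit.
 */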

static inline int do_read_secsi_onechip(struct map_info *map,
					struct flchip *chip, loff_t adr,
					size_t len, u_char *buf,
					size_t grouplen)
{
	DECLARE_WAITQUEUE(wait, current);

 retry:
	mutex_lock(&chip->mutex);

	if (chip->state != FL_READY){
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		mutex_unlock(&chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);

		goto retry;
	}

	adr += chip->start;

	chip->state = FL_READY;

	otp_enter(map, chip, adr, len);
	map_copy_from(map, buf, adr, len);
	otp_exit(map, chip, adr, len);

	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);

	return 0;
}

static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	/* 8 secsi bytes per chip */
	chipnum=from>>3;
	ofs=from & 7;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> 3)
			thislen = (1<<3) - ofs;
		else
			thislen = len;

		ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs,
					    thislen, buf, 0);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
				     unsigned long adr, map_word datum,
				     int mode);

static int do_otp_write(struct map_info *map, struct flchip *chip, loff_t adr,
			size_t len, u_char *buf, size_t grouplen)
{
	int ret;
	while (len) {
		unsigned long bus_ofs = adr & ~(map_bankwidth(map)-1);
		int gap = adr - bus_ofs;
		int n = min_t(int, len, map_bankwidth(map) - gap);
		map_word datum = map_word_ff(map);

		if (n != map_bankwidth(map)) {
			/* partial write of a word, load old contents */
			otp_enter(map, chip, bus_ofs, map_bankwidth(map));
			datum = map_read(map, bus_ofs);
			otp_exit(map, chip, bus_ofs, map_bankwidth(map));
		}

		datum = map_word_load_partial(map, datum, buf, gap, n);
		ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
		if (ret)
			return ret;

		adr += n;
		buf += n;
		len -= n;
	}

	return 0;
}

static int do_otp_lock(struct map_info *map, struct flchip *chip, loff_t adr,
		       size_t len, u_char *buf, size_t grouplen)
{
	struct cfi_private *cfi = map->fldrv_priv;
	uint8_t lockreg;
	unsigned long timeo;
	int ret;

	/* make sure area matches group boundaries */
	if ((adr != 0) || (len != grouplen))
		return -EINVAL;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, chip->start, FL_LOCKING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}
	chip->state = FL_LOCKING;

	/* Enter lock register command */
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x40, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);

	/* read lock register */
	lockreg = cfi_read_query(map, 0);

	/* clear bit 0 to protect extended memory block */
	lockreg &= ~0x01;

	/* write lock register */
	map_write(map, CMD(0xA0), chip->start);
	map_write(map, CMD(lockreg), chip->start);

	/* wait for chip to become ready */
	timeo = jiffies + msecs_to_jiffies(2);
	for (;;) {
		if (chip_ready(map, chip, adr))
			break;

		if (time_after(jiffies, timeo)) {
			pr_err("Waiting for chip to be ready timed out.\n");
			ret = -EIO;
			break;
		}
		UDELAY(map, chip, 0, 1);
	}

	/* exit protection commands */
	map_write(map, CMD(0x90), chip->start);
	map_write(map, CMD(0x00), chip->start);

	chip->state = FL_READY;
	put_chip(map, chip, chip->start);
	mutex_unlock(&chip->mutex);

	return ret;
}

static int cfi_amdstd_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
			       size_t *retlen, u_char *buf,
			       otp_op_t action, int user_regs)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ofs_factor = cfi->interleave * cfi->device_type;
	unsigned long base;
	int chipnum;
	struct flchip *chip;
	uint8_t otp, lockreg;
	int ret;

	size_t user_size, factory_size, otpsize;
	loff_t user_offset, factory_offset, otpoffset;
	int user_locked = 0, otplocked;

	*retlen = 0;

	for (chipnum = 0; chipnum < cfi->numchips; chipnum++) {
		chip = &cfi->chips[chipnum];
		factory_size = 0;
		user_size = 0;

		/* Micron M29EW family */
		if (is_m29ew(cfi)) {
			base = chip->start;

			/* check whether secsi area is factory locked
			   or user lockable */
			mutex_lock(&chip->mutex);
			ret = get_chip(map, chip, base, FL_CFI_QUERY);
			if (ret) {
				mutex_unlock(&chip->mutex);
				return ret;
			}
			cfi_qry_mode_on(base, map, cfi);
			otp = cfi_read_query(map, base + 0x3 * ofs_factor);
			cfi_qry_mode_off(base, map, cfi);
			put_chip(map, chip, base);
			mutex_unlock(&chip->mutex);

			if (otp & 0x80) {
				/* factory locked */
				factory_offset = 0;
				factory_size = 0x100;
			} else {
				/* customer lockable */
				user_offset = 0;
				user_size = 0x100;

				mutex_lock(&chip->mutex);
				ret = get_chip(map, chip, base, FL_LOCKING);
				if (ret) {
					mutex_unlock(&chip->mutex);
					return ret;
				}

				/* Enter lock register command */
				cfi_send_gen_cmd(0xAA, cfi->addr_unlock1,
						 chip->start, map, cfi,
						 cfi->device_type, NULL);
				cfi_send_gen_cmd(0x55, cfi->addr_unlock2,
						 chip->start, map, cfi,
						 cfi->device_type, NULL);
				cfi_send_gen_cmd(0x40, cfi->addr_unlock1,
						 chip->start, map, cfi,
						 cfi->device_type, NULL);
				/* read lock register */
				lockreg = cfi_read_query(map, 0);
				/* exit protection commands */
				map_write(map, CMD(0x90), chip->start);
				map_write(map, CMD(0x00), chip->start);
				put_chip(map, chip, chip->start);
				mutex_unlock(&chip->mutex);

				user_locked = ((lockreg & 0x01) == 0x00);
			}
		}

		otpsize = user_regs ? user_size : factory_size;
		if (!otpsize)
			continue;
		otpoffset = user_regs ? user_offset : factory_offset;
		otplocked = user_regs ? user_locked : 1;

		if (!action) {
			/* return otpinfo */
			struct otp_info *otpinfo;
			len -= sizeof(*otpinfo);
			if (len <= 0)
				return -ENOSPC;
			otpinfo = (struct otp_info *)buf;
			otpinfo->start = from;
			otpinfo->length = otpsize;
			otpinfo->locked = otplocked;
			buf += sizeof(*otpinfo);
			*retlen += sizeof(*otpinfo);
			from += otpsize;
		} else if ((from < otpsize) && (len > 0)) {
			size_t size;
			size = (len < otpsize - from) ? len : otpsize - from;
			ret = action(map, chip, otpoffset + from, size, buf,
				     otpsize);
			if (ret < 0)
				return ret;

			buf += size;
			len -= size;
			*retlen += size;
			from = 0;
		} else {
			from -= otpsize;
		}
	}
	return 0;
}
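
/*
 * cfi_amdstd_otp_walk() drives all of the OTP entry points below: with
 * action == NULL it only enumerates the regions into struct otp_info
 * records, otherwise it applies 'action' (read, write or lock) to the
 * slice of each chip's OTP region selected by 'from' and 'len',
 * decrementing 'from' as it walks past earlier chips.
 */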

static int cfi_amdstd_get_fact_prot_info(struct mtd_info *mtd, size_t len,
					 size_t *retlen, struct otp_info *buf)
{
	return cfi_amdstd_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
				   NULL, 0);
}

static int cfi_amdstd_get_user_prot_info(struct mtd_info *mtd, size_t len,
					 size_t *retlen, struct otp_info *buf)
{
	return cfi_amdstd_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
				   NULL, 1);
}

static int cfi_amdstd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
					 size_t len, size_t *retlen,
					 u_char *buf)
{
	return cfi_amdstd_otp_walk(mtd, from, len, retlen,
				   buf, do_read_secsi_onechip, 0);
}

static int cfi_amdstd_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
					 size_t len, size_t *retlen,
					 u_char *buf)
{
	return cfi_amdstd_otp_walk(mtd, from, len, retlen,
				   buf, do_read_secsi_onechip, 1);
}

static int cfi_amdstd_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
					  size_t len, size_t *retlen,
					  u_char *buf)
{
	return cfi_amdstd_otp_walk(mtd, from, len, retlen, buf,
				   do_otp_write, 1);
}

static int cfi_amdstd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
					 size_t len)
{
	size_t retlen;
	return cfi_amdstd_otp_walk(mtd, from, len, &retlen, NULL,
				   do_otp_lock, 1);
}

static int __xipram do_write_oneword_once(struct map_info *map,
					  struct flchip *chip,
					  unsigned long adr, map_word datum,
					  int mode, struct cfi_private *cfi)
{
	unsigned long timeo = jiffies + HZ;
	/*
	 * We use a 1ms + 1 jiffies generic timeout for writes (most devices
	 * have a max write time of a few hundreds usec). However, we should
	 * use the maximum timeout value given by the chip at probe time
	 * instead.  Unfortunately, struct flchip has no field for a maximum
	 * timeout, only for the typical one, which can be far too short
	 * depending on the conditions.  The ' + 1' is to avoid having a
	 * timeout of 0 jiffies if HZ is smaller than 1000.
	 */
	unsigned long uWriteTimeout = (HZ / 1000) + 1;
	int ret = 0;

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, datum, adr);
	chip->state = mode;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	/* See comment above for timeout value. */
	timeo = jiffies + uWriteTimeout;
	for (;;) {
		if (chip->state != mode) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			mutex_lock(&chip->mutex);
			continue;
		}

		/*
		 * We check "time_after" and "!chip_good" before checking
		 * "chip_good" to avoid the failure due to scheduling.
		 */
		if (time_after(jiffies, timeo) &&
		    !chip_good(map, chip, adr, datum)) {
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
			xip_disable(map, chip, adr);
			ret = -EIO;
			break;
		}

		if (chip_good(map, chip, adr, datum)) {
			if (cfi_check_err_status(map, chip, adr))
				ret = -EIO;
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}

	return ret;
}

static int __xipram do_write_oneword_start(struct map_info *map,
					   struct flchip *chip,
					   unsigned long adr, int mode)
{
	int ret;

	mutex_lock(&chip->mutex);

	ret = get_chip(map, chip, adr, mode);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	if (mode == FL_OTP_WRITE)
		otp_enter(map, chip, adr, map_bankwidth(map));

	return ret;
}

static void __xipram do_write_oneword_done(struct map_info *map,
					   struct flchip *chip,
					   unsigned long adr, int mode)
{
	if (mode == FL_OTP_WRITE)
		otp_exit(map, chip, adr, map_bankwidth(map));

	chip->state = FL_READY;
	DISABLE_VPP(map);
	put_chip(map, chip, adr);

	mutex_unlock(&chip->mutex);
}

static int __xipram do_write_oneword_retry(struct map_info *map,
					   struct flchip *chip,
					   unsigned long adr, map_word datum,
					   int mode)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	map_word oldd;
	int retry_cnt = 0;

	/*
	 * Check for a NOP for the case when the datum to write is already
	 * present - it saves time and works around buggy chips that corrupt
	 * data at other locations when 0xff is written to a location that
	 * already contains 0xff.
	 */
	oldd = map_read(map, adr);
	if (map_word_equal(map, oldd, datum)) {
		pr_debug("MTD %s(): NOP\n", __func__);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

 retry:
	ret = do_write_oneword_once(map, chip, adr, datum, mode, cfi);
	if (ret) {
		/* reset on all failures. */
		map_write(map, CMD(0xF0), chip->start);
		/* FIXME - should have reset delay before continuing */

		if (++retry_cnt <= MAX_RETRIES) {
			ret = 0;
			goto retry;
		}
	}
	xip_enable(map, chip, adr);

	return ret;
}

static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
				     unsigned long adr, map_word datum,
				     int mode)
{
	int ret;

	adr += chip->start;

	pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n", __func__, adr,
		 datum.x[0]);

	ret = do_write_oneword_start(map, chip, adr, mode);
	if (ret)
		return ret;

	ret = do_write_oneword_retry(map, chip, adr, datum, mode);

	do_write_oneword_done(map, chip, adr, mode);

	return ret;
}


static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
				  size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;
	int chipnum;
	unsigned long ofs, chipstart;
	DECLARE_WAITQUEUE(wait, current);

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);
	chipstart = cfi->chips[chipnum].start;

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int i = ofs - bus_ofs;
		int n = 0;
		map_word tmp_buf;

 retry:
		mutex_lock(&cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			mutex_unlock(&cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
			goto retry;
		}

		/* Load 'tmp_buf' with old contents of flash */
		tmp_buf = map_read(map, bus_ofs+chipstart);

		mutex_unlock(&cfi->chips[chipnum].mutex);

		/* Number of bytes to copy from buffer */
		n = min_t(int, len, map_bankwidth(map)-i);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       bus_ofs, tmp_buf, FL_WRITING);
		if (ret)
			return ret;

		ofs += n;
		buf += n;
		(*retlen) += n;
		len -= n;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* We are now aligned, write as much as possible */
	while(len >= map_bankwidth(map)) {
		map_word datum;

		datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
			chipstart = cfi->chips[chipnum].start;
		}
	}

	/* Write the trailing bytes if any */
	if (len & (map_bankwidth(map)-1)) {
		map_word tmp_buf;

 retry1:
		mutex_lock(&cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			mutex_unlock(&cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
			goto retry1;
		}

		tmp_buf = map_read(map, ofs + chipstart);

		mutex_unlock(&cfi->chips[chipnum].mutex);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, tmp_buf, FL_WRITING);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}
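
/*
 * Worked example for the splitting above: with a 16-bit bus
 * (map_bankwidth == 2), a 6-byte write to offset 1 is performed as a
 * read-modify-write of the word at offset 0 (1 byte), full word writes
 * at offsets 2 and 4, and a final read-modify-write of the word at
 * offset 6 (1 byte), so flash words are always programmed whole.
 */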

#if !FORCE_WORD_WRITE
static int __xipram do_write_buffer_wait(struct map_info *map,
					 struct flchip *chip, unsigned long adr,
					 map_word datum)
{
	unsigned long timeo;
	unsigned long u_write_timeout;
	int ret = 0;

	/*
	 * Timeout is calculated according to CFI data, if available.
	 * See more comments in cfi_cmdset_0002().
	 */
	u_write_timeout = usecs_to_jiffies(chip->buffer_write_time_max);
	timeo = jiffies + u_write_timeout;

	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			mutex_lock(&chip->mutex);
			continue;
		}

		/*
		 * We check "time_after" and "!chip_good" before checking
		 * "chip_good" to avoid the failure due to scheduling.
		 */
		if (time_after(jiffies, timeo) &&
		    !chip_good(map, chip, adr, datum)) {
			pr_err("MTD %s(): software timeout, address:0x%.8lx.\n",
			       __func__, adr);
			ret = -EIO;
			break;
		}

		if (chip_good(map, chip, adr, datum)) {
			if (cfi_check_err_status(map, chip, adr))
				ret = -EIO;
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}

	return ret;
}

static void __xipram do_write_buffer_reset(struct map_info *map,
					   struct flchip *chip,
					   struct cfi_private *cfi)
{
	/*
	 * Recovery from write-buffer programming failures requires
	 * the write-to-buffer-reset sequence.  Since the last part of
	 * the sequence also works as a normal reset, the same command
	 * sequence can be run regardless of why we are here.
	 */
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);

	/* FIXME - should have reset delay before continuing */
}

/*
 * FIXME: interleaved mode not tested, and probably not supported!
 */
static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const u_char *buf,
				    int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;
	unsigned long cmd_adr;
	int z, words;
	map_word datum;

	adr += chip->start;
	cmd_adr = adr;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	datum = map_word_load(map, buf);

	pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
		 __func__, adr, datum.x[0]);

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);

	/* Write Buffer Load */
	map_write(map, CMD(0x25), cmd_adr);

	chip->state = FL_WRITING_TO_BUFFER;

	/* Write length of data to come */
	words = len / map_bankwidth(map);
	map_write(map, CMD(words - 1), cmd_adr);
	/* Write data */
	z = 0;
	while(z < words * map_bankwidth(map)) {
		datum = map_word_load(map, buf);
		map_write(map, datum, adr + z);

		z += map_bankwidth(map);
		buf += map_bankwidth(map);
	}
	z -= map_bankwidth(map);

	adr += z;

	/* Write Buffer Program Confirm: GO GO GO */
	map_write(map, CMD(0x29), cmd_adr);
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	ret = do_write_buffer_wait(map, chip, adr, datum);
	if (ret)
		do_write_buffer_reset(map, chip, cfi);

	xip_enable(map, chip, adr);

	chip->state = FL_READY;
	DISABLE_VPP(map);
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);

	return ret;
}
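
/*
 * Command flow implemented above (AMD/Fujitsu write-buffer programming):
 * unlock (0xAA/0x55), 0x25 "Write to Buffer" at the sector address, a
 * word count of (words - 1), the data words themselves, then 0x29
 * "Write Buffer Program Confirm". Completion is polled on the last word
 * written, which is why 'adr' is advanced to the final word before
 * do_write_buffer_wait() is called.
 */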


static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
				    size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret;
	int chipnum;
	unsigned long ofs;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first word write */
	if (ofs & (map_bankwidth(map)-1)) {
		size_t local_len = (-ofs)&(map_bankwidth(map)-1);
		if (local_len > len)
			local_len = len;
		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
					     local_len, retlen, buf);
		if (ret)
			return ret;
		ofs += local_len;
		buf += local_len;
		len -= local_len;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* Write buffer is worth it only if more than one word to write... */
	while (len >= map_bankwidth(map) * 2) {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;
		if (size % map_bankwidth(map))
			size -= size % map_bankwidth(map);

		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, buf, size);
		if (ret)
			return ret;

		ofs += size;
		buf += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len) {
		size_t retlen_dregs = 0;

		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
					     len, &retlen_dregs, buf);

		*retlen += retlen_dregs;
		return ret;
	}

	return 0;
}
#endif /* !FORCE_WORD_WRITE */

/*
 * Wait for the flash chip to become ready to write data
 *
 * This is only called during the panic_write() path. When panic_write()
 * is called, the kernel is in the process of a panic, and will soon be
 * dead. Therefore we don't take any locks, and attempt to get access
 * to the chip as soon as possible.
 */
static int cfi_amdstd_panic_wait(struct map_info *map, struct flchip *chip,
				 unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int retries = 10;
	int i;

	/*
	 * If the driver thinks the chip is idle, and no toggle bits
	 * are changing, then the chip is actually idle for sure.
	 */
	if (chip->state == FL_READY && chip_ready(map, chip, adr))
		return 0;

	/*
	 * Try several times to reset the chip and then wait for it
	 * to become idle. The upper limit of a few milliseconds of
	 * delay isn't a big problem: the kernel is dying anyway. It
	 * is more important to save the messages.
	 */
	while (retries > 0) {
		const unsigned long timeo = (HZ / 1000) + 1;

		/* send the reset command */
		map_write(map, CMD(0xF0), chip->start);

		/* wait for the chip to become ready */
		for (i = 0; i < jiffies_to_usecs(timeo); i++) {
			if (chip_ready(map, chip, adr))
				return 0;

			udelay(1);
		}

		retries--;
	}

	/* the chip never became ready */
	return -EBUSY;
}

/*
 * Write one word to the flash
 *
 * This is only called during the panic_write() path. When panic_write()
 * is called, the kernel is in the process of a panic, and will soon be
 * dead. Therefore we don't take any locks, and attempt to get access
 * to the chip as soon as possible.
 *
 * The implementation of this routine is intentionally similar to
 * do_write_oneword(), in order to ease code maintenance.
 */
static int do_panic_write_oneword(struct map_info *map, struct flchip *chip,
				  unsigned long adr, map_word datum)
{
	const unsigned long uWriteTimeout = (HZ / 1000) + 1;
	struct cfi_private *cfi = map->fldrv_priv;
	int retry_cnt = 0;
	map_word oldd;
	int ret;
	int i;

	adr += chip->start;

	ret = cfi_amdstd_panic_wait(map, chip, adr);
	if (ret)
		return ret;

	pr_debug("MTD %s(): PANIC WRITE 0x%.8lx(0x%.8lx)\n",
		 __func__, adr, datum.x[0]);

	/*
	 * Check for a NOP for the case when the datum to write is already
	 * present - it saves time and works around buggy chips that corrupt
	 * data at other locations when 0xff is written to a location that
	 * already contains 0xff.
	 */
	oldd = map_read(map, adr);
	if (map_word_equal(map, oldd, datum)) {
		pr_debug("MTD %s(): NOP\n", __func__);
		goto op_done;
	}

	ENABLE_VPP(map);

retry:
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, datum, adr);

	for (i = 0; i < jiffies_to_usecs(uWriteTimeout); i++) {
		if (chip_ready(map, chip, adr))
			break;

		udelay(1);
	}

	if (!chip_good(map, chip, adr, datum) ||
	    cfi_check_err_status(map, chip, adr)) {
		/* reset on all failures. */
		map_write(map, CMD(0xF0), chip->start);
		/* FIXME - should have reset delay before continuing */

		if (++retry_cnt <= MAX_RETRIES)
			goto retry;

		ret = -EIO;
	}

op_done:
	DISABLE_VPP(map);
	return ret;
}

/*
 * Write out some data during a kernel panic
 *
 * This is used by the mtdoops driver to save the dying messages from a
 * kernel which has panic'd.
 *
 * This routine ignores all of the locking used throughout the rest of the
 * driver, in order to ensure that the data gets written out no matter what
 * state this driver (and the flash chip itself) was in when the kernel
 * crashed.
 *
 * The implementation of this routine is intentionally similar to
 * cfi_amdstd_write_words(), in order to ease code maintenance.
 */
static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
				  size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs, chipstart;
	int ret;
	int chipnum;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);
	chipstart = cfi->chips[chipnum].start;

	/* If it's not bus aligned, do the first byte write */
	if (ofs & (map_bankwidth(map) - 1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map) - 1);
		int i = ofs - bus_ofs;
		int n = 0;
		map_word tmp_buf;

		ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], bus_ofs);
		if (ret)
			return ret;

		/* Load 'tmp_buf' with old contents of flash */
		tmp_buf = map_read(map, bus_ofs + chipstart);

		/* Number of bytes to copy from buffer */
		n = min_t(int, len, map_bankwidth(map) - i);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);

		ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
					     bus_ofs, tmp_buf);
		if (ret)
			return ret;

		ofs += n;
		buf += n;
		(*retlen) += n;
		len -= n;

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* We are now aligned, write as much as possible */
	while (len >= map_bankwidth(map)) {
		map_word datum;

		datum = map_word_load(map, buf);

		ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
					     ofs, datum);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;

			chipstart = cfi->chips[chipnum].start;
		}
	}

	/* Write the trailing bytes if any */
	if (len & (map_bankwidth(map) - 1)) {
		map_word tmp_buf;

		ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], ofs);
		if (ret)
			return ret;

		tmp_buf = map_read(map, ofs + chipstart);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);

		ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
					     ofs, tmp_buf);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}
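/*
 * Erase a whole chip: issue the two unlock cycles, the erase setup
 * command and the chip-erase command, then poll until the erase
 * completes, retrying up to MAX_RETRIES times on failure.
 */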
static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	unsigned long int adr;
	DECLARE_WAITQUEUE(wait, current);
	int ret;
	int retry_cnt = 0;

	adr = cfi->addr_unlock1;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	pr_debug("MTD %s(): ERASE 0x%.8lx\n",
		 __func__, chip->start);

	XIP_INVAL_CACHED_RANGE(map, adr, map->size);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

 retry:
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;
	chip->in_progress_block_mask = ~(map->size - 1);

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map->size,
				chip->erase_time*500);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20);
			chip->erase_suspended = 0;
		}

		if (chip_good(map, chip, adr, map_word_ff(map))) {
			if (cfi_check_err_status(map, chip, adr))
				ret = -EIO;
			break;
		}

		if (time_after(jiffies, timeo)) {
			printk(KERN_WARNING "MTD %s(): software timeout\n",
			       __func__);
			ret = -EIO;
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1000000/HZ);
	}

	if (ret) {
		/* reset on all failures */
		map_write(map, CMD(0xF0), chip->start);
		/* FIXME - should have a reset delay before continuing */

		if (++retry_cnt <= MAX_RETRIES) {
			ret = 0;
			goto retry;
		}
	}

	chip->state = FL_READY;
	xip_enable(map, chip, adr);
	DISABLE_VPP(map);
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);

	return ret;
}
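/*
 * Erase one sector. The command sequence matches the chip erase above,
 * except that the final cycle writes the sector-erase opcode to the
 * address of the sector itself.
 */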
static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
				      unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);
	int ret;
	int retry_cnt = 0;

	adr += chip->start;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	pr_debug("MTD %s(): ERASE 0x%.8lx\n",
		 __func__, adr);

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

 retry:
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, cfi->sector_erase_cmd, adr);

	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;
	chip->in_progress_block_mask = ~(len - 1);

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, len,
				chip->erase_time*500);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20);
			chip->erase_suspended = 0;
		}

		if (chip_good(map, chip, adr, map_word_ff(map))) {
			if (cfi_check_err_status(map, chip, adr))
				ret = -EIO;
			break;
		}

		if (time_after(jiffies, timeo)) {
			printk(KERN_WARNING "MTD %s(): software timeout\n",
			       __func__);
			ret = -EIO;
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1000000/HZ);
	}

	if (ret) {
		/* reset on all failures */
		map_write(map, CMD(0xF0), chip->start);
		/* FIXME - should have a reset delay before continuing */

		if (++retry_cnt <= MAX_RETRIES) {
			ret = 0;
			goto retry;
		}
	}

	chip->state = FL_READY;
	xip_enable(map, chip, adr);
	DISABLE_VPP(map);
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);
	return ret;
}
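/* Erase a range of sectors, one block at a time. */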
static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	return cfi_varsize_frob(mtd, do_erase_oneblock, instr->addr,
				instr->len, NULL);
}
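/* Full-chip erase: only a request covering the entire device is valid. */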
static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if (instr->addr != 0)
		return -EINVAL;

	if (instr->len != mtd->size)
		return -EINVAL;

	return do_erase_chip(map, &cfi->chips[0]);
}

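/*
 * Sector locking for Atmel chips (e.g. AT49BV6416): the standard unlock
 * and erase-setup cycles followed by the 0x40 softlock command written
 * to the sector address.
 */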
static int do_atmel_lock(struct map_info *map, struct flchip *chip,
			 unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
	if (ret)
		goto out_unlock;
	chip->state = FL_LOCKING;

	pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	map_write(map, CMD(0x40), chip->start + adr);

	chip->state = FL_READY;
	put_chip(map, chip, adr + chip->start);
	ret = 0;

out_unlock:
	mutex_unlock(&chip->mutex);
	return ret;
}

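/*
 * Sector unlock for the same Atmel chips: a single 0xAA unlock cycle
 * followed by the 0x70 unlock command at the sector address.
 */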
static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
			   unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
	if (ret)
		goto out_unlock;
	chip->state = FL_UNLOCKING;

	pr_debug("MTD %s(): UNLOCK 0x%08lx len %d\n", __func__, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	map_write(map, CMD(0x70), adr);

	chip->state = FL_READY;
	put_chip(map, chip, adr + chip->start);
	ret = 0;

out_unlock:
	mutex_unlock(&chip->mutex);
	return ret;
}

static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
}

static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
}
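/*
 * Advanced Sector Protection - PPB (Persistent Protection Bit) locking.
 */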
struct ppb_lock {
	struct flchip *chip;
	unsigned long adr;
	int locked;
};

#define DO_XXLOCK_ONEBLOCK_LOCK		((void *)1)
#define DO_XXLOCK_ONEBLOCK_UNLOCK	((void *)2)
#define DO_XXLOCK_ONEBLOCK_GETLOCK	((void *)3)

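/*
 * Lock, unlock or query the PPB of one sector, selected by the thunk
 * argument. Note that on these chips a PPB unlock always operates on
 * the whole device, not on a single sector.
 */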
static int __maybe_unused do_ppb_xxlock(struct map_info *map,
					struct flchip *chip,
					unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo;
	int ret;

	adr += chip->start;
	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_LOCKING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	pr_debug("MTD %s(): XXLOCK 0x%08lx len %d\n", __func__, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	/* PPB entry command */
	cfi_send_gen_cmd(0xC0, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);

	if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
		chip->state = FL_LOCKING;
		map_write(map, CMD(0xA0), adr);
		map_write(map, CMD(0x00), adr);
	} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
		/*
		 * Unlocking of one specific sector is not supported, so we
		 * have to unlock all sectors of this device instead.
		 */
		chip->state = FL_UNLOCKING;
		map_write(map, CMD(0x80), chip->start);
		map_write(map, CMD(0x30), chip->start);
	} else if (thunk == DO_XXLOCK_ONEBLOCK_GETLOCK) {
		chip->state = FL_JEDEC_QUERY;
		/* DQ0 reads 0 for a protected sector; report locked as 1 */
		ret = !cfi_read_query(map, adr);
	} else
		BUG();

	/*
	 * Wait for the chip to become ready; unlocking all sectors can
	 * take quite a while.
	 */
	timeo = jiffies + msecs_to_jiffies(2000);	/* 2s max (un)locking */
	for (;;) {
		if (chip_ready(map, chip, adr))
			break;

		if (time_after(jiffies, timeo)) {
			printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
			ret = -EIO;
			break;
		}

		UDELAY(map, chip, adr, 1);
	}

	/* Exit the PPB command set */
	map_write(map, CMD(0x90), chip->start);
	map_write(map, CMD(0x00), chip->start);

	chip->state = FL_READY;
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);

	return ret;
}
2792
2793static int __maybe_unused cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs,
2794 uint64_t len)
2795{
2796 return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
2797 DO_XXLOCK_ONEBLOCK_LOCK);
2798}
2799
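/*
 * Because PPB unlocking clears the protection of every sector on the
 * chip, the lock state of all sectors outside the requested range is
 * sampled first and re-applied after the chip-wide unlock.
 */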
static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
					 uint64_t len)
{
	struct mtd_erase_region_info *regions = mtd->eraseregions;
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct ppb_lock *sect;
	unsigned long adr;
	loff_t offset;
	uint64_t length;
	int chipnum;
	int i;
	int sectors;
	int ret;
	int max_sectors;

	/*
	 * PPB unlocking always unlocks all sectors of the flash chip.
	 * We need to re-lock all previously locked sectors, so first
	 * check the locking status of every sector and save it for
	 * the final re-locking.
	 */
	max_sectors = 0;
	for (i = 0; i < mtd->numeraseregions; i++)
		max_sectors += regions[i].numblocks;

	sect = kcalloc(max_sectors, sizeof(struct ppb_lock), GFP_KERNEL);
	if (!sect)
		return -ENOMEM;

	/*
	 * This walk over all sectors is a slightly modified version
	 * of the cfi_varsize_frob() code.
	 */
	i = 0;
	chipnum = 0;
	adr = 0;
	sectors = 0;
	offset = 0;
	length = mtd->size;

	while (length) {
		int size = regions[i].erasesize;

		/*
		 * Only test sectors that shall not be unlocked. The other
		 * sectors shall be unlocked, so keep their locking status
		 * at "unlocked" (locked=0) for the final re-locking.
		 */
		if ((offset < ofs) || (offset >= (ofs + len))) {
			sect[sectors].chip = &cfi->chips[chipnum];
			sect[sectors].adr = adr;
			sect[sectors].locked = do_ppb_xxlock(
				map, &cfi->chips[chipnum], adr, 0,
				DO_XXLOCK_ONEBLOCK_GETLOCK);
		}

		adr += size;
		offset += size;
		length -= size;

		if (offset == regions[i].offset + size * regions[i].numblocks)
			i++;

		if (adr >> cfi->chipshift) {
			if (offset >= (ofs + len))
				break;
			adr = 0;
			chipnum++;

			if (chipnum >= cfi->numchips)
				break;
		}

		sectors++;
		if (sectors >= max_sectors) {
			printk(KERN_ERR "Only %d sectors for PPB locking supported!\n",
			       max_sectors);
			kfree(sect);
			return -EINVAL;
		}
	}

	/* Now unlock the whole chip */
	ret = cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
			       DO_XXLOCK_ONEBLOCK_UNLOCK);
	if (ret) {
		kfree(sect);
		return ret;
	}

	/* Re-lock all sectors that were locked before the chip-wide unlock */
	for (i = 0; i < sectors; i++) {
		if (sect[i].locked)
			do_ppb_xxlock(map, sect[i].chip, sect[i].adr, 0,
				      DO_XXLOCK_ONEBLOCK_LOCK);
	}

	kfree(sect);
	return ret;
}

static int __maybe_unused cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs,
					    uint64_t len)
{
	return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
				DO_XXLOCK_ONEBLOCK_GETLOCK) ? 1 : 0;
}

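/*
 * Wait for every chip to become idle and park it in FL_SYNCING, then
 * restore the previous state once all chips have been visited.
 */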
static void cfi_amdstd_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	for (i = 0; !ret && i < cfi->numchips; i++) {
		chip = &cfi->chips[i];

	retry:
		mutex_lock(&chip->mutex);

		switch (chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/*
			 * No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
			fallthrough;
		case FL_SYNCING:
			mutex_unlock(&chip->mutex);
			break;

		default:
			/* Not an idle state */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);

			mutex_unlock(&chip->mutex);

			schedule();

			remove_wait_queue(&chip->wq, &wait);

			goto retry;
		}
	}

	/* Unlock the chips again */
	for (i--; i >= 0; i--) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}
		mutex_unlock(&chip->mutex);
	}
}
static int cfi_amdstd_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i = 0; !ret && i < cfi->numchips; i++) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		switch (chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_PM_SUSPENDED;
			/*
			 * No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
			fallthrough;
		case FL_PM_SUSPENDED:
			break;

		default:
			ret = -EAGAIN;
			break;
		}
		mutex_unlock(&chip->mutex);
	}

	/* Unlock the chips again */
	if (ret) {
		for (i--; i >= 0; i--) {
			chip = &cfi->chips[i];

			mutex_lock(&chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				chip->state = chip->oldstate;
				wake_up(&chip->wq);
			}
			mutex_unlock(&chip->mutex);
		}
	}

	return ret;
}
static void cfi_amdstd_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;

	for (i = 0; i < cfi->numchips; i++) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		if (chip->state == FL_PM_SUSPENDED) {
			/* Put the chip back into read array mode */
			chip->state = FL_READY;
			map_write(map, CMD(0xF0), chip->start);
			wake_up(&chip->wq);
		} else {
			printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");
		}

		mutex_unlock(&chip->mutex);
	}
}
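/*
 * Ensure the flash device is put back into read array mode before
 * rebooting or unloading the driver. On some systems, rebooting while
 * the flash is left in query/program/erase mode can prevent the CPU
 * from fetching the bootloader, requiring a hard reset or power cycle.
 */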
static int cfi_amdstd_reset(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i, ret;
	struct flchip *chip;

	for (i = 0; i < cfi->numchips; i++) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
		if (!ret) {
			map_write(map, CMD(0xF0), chip->start);
			chip->state = FL_SHUTDOWN;
			put_chip(map, chip, chip->start);
		}

		mutex_unlock(&chip->mutex);
	}

	return 0;
}
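/* Reboot notifier: put the chips back into read array mode on shutdown. */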
static int cfi_amdstd_reboot(struct notifier_block *nb, unsigned long val,
			     void *v)
{
	struct mtd_info *mtd;

	mtd = container_of(nb, struct mtd_info, reboot_notifier);
	cfi_amdstd_reset(mtd);
	return NOTIFY_DONE;
}
static void cfi_amdstd_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	cfi_amdstd_reset(mtd);
	unregister_reboot_notifier(&mtd->reboot_notifier);
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi);
	kfree(mtd->eraseregions);
}
3110
3111MODULE_LICENSE("GPL");
3112MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
3113MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");
3114MODULE_ALIAS("cfi_cmdset_0006");
3115MODULE_ALIAS("cfi_cmdset_0701");
3116