1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23#include <linux/module.h>
24#include <linux/types.h>
25#include <linux/kernel.h>
26#include <linux/sched.h>
27#include <asm/io.h>
28#include <asm/byteorder.h>
29
30#include <linux/errno.h>
31#include <linux/slab.h>
32#include <linux/delay.h>
33#include <linux/interrupt.h>
34#include <linux/reboot.h>
35#include <linux/of.h>
36#include <linux/of_platform.h>
37#include <linux/mtd/map.h>
38#include <linux/mtd/mtd.h>
39#include <linux/mtd/cfi.h>
40#include <linux/mtd/xip.h>
41
42#define AMD_BOOTLOC_BUG
43#define FORCE_WORD_WRITE 0
44
45#define MAX_RETRIES 3
46
47#define SST49LF004B 0x0060
48#define SST49LF040B 0x0050
49#define SST49LF008A 0x005a
50#define AT49BV6416 0x00d6
51
52
53
54
55
56#define CFI_SR_DRB BIT(7)
57#define CFI_SR_ESB BIT(5)
58#define CFI_SR_PSB BIT(4)
59#define CFI_SR_WBASB BIT(3)
60#define CFI_SR_SLSB BIT(1)
61
62static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
63static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
64static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
65static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
66static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
67static void cfi_amdstd_sync (struct mtd_info *);
68static int cfi_amdstd_suspend (struct mtd_info *);
69static void cfi_amdstd_resume (struct mtd_info *);
70static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *);
71static int cfi_amdstd_get_fact_prot_info(struct mtd_info *, size_t,
72 size_t *, struct otp_info *);
73static int cfi_amdstd_get_user_prot_info(struct mtd_info *, size_t,
74 size_t *, struct otp_info *);
75static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
76static int cfi_amdstd_read_fact_prot_reg(struct mtd_info *, loff_t, size_t,
77 size_t *, u_char *);
78static int cfi_amdstd_read_user_prot_reg(struct mtd_info *, loff_t, size_t,
79 size_t *, u_char *);
80static int cfi_amdstd_write_user_prot_reg(struct mtd_info *, loff_t, size_t,
81 size_t *, u_char *);
82static int cfi_amdstd_lock_user_prot_reg(struct mtd_info *, loff_t, size_t);
83
84static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
85 size_t *retlen, const u_char *buf);
86
87static void cfi_amdstd_destroy(struct mtd_info *);
88
89struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
90static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);
91
92static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
93static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
94#include "fwh_lock.h"
95
96static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
97static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
98
99static int cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
100static int cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
101static int cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len);
102
/*
 * Chip-driver registration record for the AMD/Fujitsu (0002) command set.
 * .probe is NULL because probing is handled by the generic CFI/JEDEC probe
 * code; only destroy/teardown goes through this structure.
 */
static struct mtd_chip_driver cfi_amdstd_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_amdstd_destroy,
	.name		= "cfi_cmdset_0002",
	.module		= THIS_MODULE
};
109
110
111
112
113
114
115static int cfi_use_status_reg(struct cfi_private *cfi)
116{
117 struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
118 u8 poll_mask = CFI_POLL_STATUS_REG | CFI_POLL_DQ;
119
120 return extp->MinorVersion >= '5' &&
121 (extp->SoftwareFeatures & poll_mask) == CFI_POLL_STATUS_REG;
122}
123
/*
 * Read the status register after an operation and log any error bits.
 * No-op unless the chip supports status-register polling (see
 * cfi_use_status_reg()).  CMD(0x3a) is the mask of all error bits:
 * CFI_SR_ESB | CFI_SR_PSB | CFI_SR_WBASB | CFI_SR_SLSB.
 */
static void cfi_check_err_status(struct map_info *map, struct flchip *chip,
				 unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status;

	if (!cfi_use_status_reg(cfi))
		return;

	/* Issue the Status Register Read command (0x70), then read it back. */
	cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	status = map_read(map, adr);

	if (map_word_bitsset(map, status, CMD(0x3a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		if (chipstatus & CFI_SR_ESB)
			pr_err("%s erase operation failed, status %lx\n",
			       map->name, chipstatus);
		if (chipstatus & CFI_SR_PSB)
			pr_err("%s program operation failed, status %lx\n",
			       map->name, chipstatus);
		if (chipstatus & CFI_SR_WBASB)
			pr_err("%s buffer program command aborted, status %lx\n",
			       map->name, chipstatus);
		if (chipstatus & CFI_SR_SLSB)
			pr_err("%s sector write protected, status %lx\n",
			       map->name, chipstatus);
	}
}
154
155
156
157
158#ifdef DEBUG_CFI_FEATURES
/* Dump the feature fields of the Amd/Fujitsu extended query table
 * (debug aid, compiled only with DEBUG_CFI_FEATURES). */
static void cfi_tell_features(struct cfi_pri_amdstd *extp)
{
	const char* erase_suspend[3] = {
		"Not supported", "Read only", "Read/write"
	};
	const char* top_bottom[6] = {
		"No WP", "8x8KiB sectors at top & bottom, no WP",
		"Bottom boot", "Top boot",
		"Uniform, Bottom WP", "Uniform, Top WP"
	};

	printk("  Silicon revision: %d\n", extp->SiliconRevision >> 1);
	printk("  Address sensitive unlock: %s\n",
	       (extp->SiliconRevision & 1) ? "Not required" : "Required");

	if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
		printk("  Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
	else
		printk("  Erase Suspend: Unknown value %d\n", extp->EraseSuspend);

	if (extp->BlkProt == 0)
		printk("  Block protection: Not supported\n");
	else
		printk("  Block protection: %d sectors per group\n", extp->BlkProt);

	printk("  Temporary block unprotect: %s\n",
	       extp->TmpBlkUnprotect ? "Supported" : "Not supported");
	printk("  Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
	printk("  Number of simultaneous operations: %d\n", extp->SimultaneousOps);
	printk("  Burst mode: %s\n",
	       extp->BurstMode ? "Supported" : "Not supported");
	if (extp->PageMode == 0)
		printk("  Page mode: Not supported\n");
	else
		/* PageMode encodes the page size as (value << 2) words. */
		printk("  Page mode: %d word page\n", extp->PageMode << 2);

	printk("  Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMin >> 4, extp->VppMin & 0xf);
	printk("  Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMax >> 4, extp->VppMax & 0xf);

	if (extp->TopBottom < ARRAY_SIZE(top_bottom))
		printk("  Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
	else
		printk("  Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
}
206#endif
207
208#ifdef AMD_BOOTLOC_BUG
209
/*
 * Deduce the boot-block location (TopBottom) from the JEDEC device ID on
 * old AMD-compatible chips whose extended query table predates the
 * TopBottom field (PRI version < 1.1).
 */
static void fixup_amd_bootblock(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	__u8 major = extp->MajorVersion;
	__u8 minor = extp->MinorVersion;

	if (((major << 8) | minor) < 0x3131) {
		/* CFI version 1.0 => don't trust bootloc */

		pr_debug("%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n",
			map->name, cfi->mfr, cfi->id);

		/*
		 * 29LV400-family chips with a bottom boot block use device
		 * ID 0x22BA in 16-bit mode and 0xBA in 8-bit mode.  Only
		 * Macronix's 2nd-generation MX29LV400C carries CFI, so also
		 * check the manufacturer to reduce the risk of a false hit
		 * on the short 8-bit ID.
		 */
		if (((cfi->id == 0xBA) || (cfi->id == 0x22BA)) &&
			(cfi->mfr == CFI_MFR_MACRONIX)) {
			pr_debug("%s: Macronix MX29LV400C with bottom boot block"
				" detected\n", map->name);
			extp->TopBottom = 2;	/* bottom boot */
		} else
		if (cfi->id & 0x80) {
			printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
			extp->TopBottom = 3;	/* top boot */
		} else {
			extp->TopBottom = 2;	/* bottom boot */
		}

		pr_debug("%s: AMD CFI PRI V%c.%c has no boot block field;"
			" deduced %s from Device ID\n", map->name, major, minor,
			extp->TopBottom == 2 ? "bottom" : "top");
	}
}
257#endif
258
259static void fixup_use_write_buffers(struct mtd_info *mtd)
260{
261 struct map_info *map = mtd->priv;
262 struct cfi_private *cfi = map->fldrv_priv;
263 if (cfi->cfiq->BufWriteTimeoutTyp) {
264 pr_debug("Using buffer write method\n");
265 mtd->_write = cfi_amdstd_write_buffers;
266 }
267}
268
269
270static void fixup_convert_atmel_pri(struct mtd_info *mtd)
271{
272 struct map_info *map = mtd->priv;
273 struct cfi_private *cfi = map->fldrv_priv;
274 struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
275 struct cfi_pri_atmel atmel_pri;
276
277 memcpy(&atmel_pri, extp, sizeof(atmel_pri));
278 memset((char *)extp + 5, 0, sizeof(*extp) - 5);
279
280 if (atmel_pri.Features & 0x02)
281 extp->EraseSuspend = 2;
282
283
284 if (cfi->id == AT49BV6416) {
285 if (atmel_pri.BottomBoot)
286 extp->TopBottom = 3;
287 else
288 extp->TopBottom = 2;
289 } else {
290 if (atmel_pri.BottomBoot)
291 extp->TopBottom = 2;
292 else
293 extp->TopBottom = 3;
294 }
295
296
297 cfi->cfiq->BufWriteTimeoutTyp = 0;
298 cfi->cfiq->BufWriteTimeoutMax = 0;
299}
300
301static void fixup_use_secsi(struct mtd_info *mtd)
302{
303
304 mtd->_read_user_prot_reg = cfi_amdstd_secsi_read;
305 mtd->_read_fact_prot_reg = cfi_amdstd_secsi_read;
306}
307
308static void fixup_use_erase_chip(struct mtd_info *mtd)
309{
310 struct map_info *map = mtd->priv;
311 struct cfi_private *cfi = map->fldrv_priv;
312 if ((cfi->cfiq->NumEraseRegions == 1) &&
313 ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
314 mtd->_erase = cfi_amdstd_erase_chip;
315 }
316
317}
318
319
320
321
322
323static void fixup_use_atmel_lock(struct mtd_info *mtd)
324{
325 mtd->_lock = cfi_atmel_lock;
326 mtd->_unlock = cfi_atmel_unlock;
327 mtd->flags |= MTD_POWERUP_LOCK;
328}
329
static void fixup_old_sst_eraseregion(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/*
	 * Keep only the first erase region on these old SST parts.
	 * NOTE(review): the chips appear to report overlapping sector- and
	 * block-erase regions for the same memory, which CFI does not allow;
	 * confirm against the SST data sheets.
	 */
	cfi->cfiq->NumEraseRegions = 1;
}
343
344static void fixup_sst39vf(struct mtd_info *mtd)
345{
346 struct map_info *map = mtd->priv;
347 struct cfi_private *cfi = map->fldrv_priv;
348
349 fixup_old_sst_eraseregion(mtd);
350
351 cfi->addr_unlock1 = 0x5555;
352 cfi->addr_unlock2 = 0x2AAA;
353}
354
355static void fixup_sst39vf_rev_b(struct mtd_info *mtd)
356{
357 struct map_info *map = mtd->priv;
358 struct cfi_private *cfi = map->fldrv_priv;
359
360 fixup_old_sst_eraseregion(mtd);
361
362 cfi->addr_unlock1 = 0x555;
363 cfi->addr_unlock2 = 0x2AA;
364
365 cfi->sector_erase_cmd = CMD(0x50);
366}
367
static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	fixup_sst39vf_rev_b(mtd);

	/*
	 * Override the chip's bogus erase-region descriptor:
	 * 0x002003ff decodes (per cfi_amdstd_setup()) as 0x3ff+1 = 1024
	 * sectors of (0x2003 >> 8-bits masked) 0x2000 = 8KiB each.
	 */
	cfi->cfiq->EraseRegionInfo[0] = 0x002003ff;
	pr_warn("%s: Bad 38VF640x CFI data; adjusting sector size from 64 to 8KiB\n",
		mtd->name);
}
383
384static void fixup_s29gl064n_sectors(struct mtd_info *mtd)
385{
386 struct map_info *map = mtd->priv;
387 struct cfi_private *cfi = map->fldrv_priv;
388
389 if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
390 cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
391 pr_warn("%s: Bad S29GL064N CFI data; adjust from 64 to 128 sectors\n",
392 mtd->name);
393 }
394}
395
396static void fixup_s29gl032n_sectors(struct mtd_info *mtd)
397{
398 struct map_info *map = mtd->priv;
399 struct cfi_private *cfi = map->fldrv_priv;
400
401 if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
402 cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
403 pr_warn("%s: Bad S29GL032N CFI data; adjust from 127 to 63 sectors\n",
404 mtd->name);
405 }
406}
407
static void fixup_s29ns512p_sectors(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/*
	 * Replace the broken erase-region descriptor: 0x020001ff decodes
	 * (per cfi_amdstd_setup()) as 0x1ff+1 = 512 sectors of
	 * 0x020000 = 128KiB each.
	 */
	cfi->cfiq->EraseRegionInfo[0] = 0x020001ff;
	pr_warn("%s: Bad S29NS512P CFI data; adjust to 512 sectors\n",
		mtd->name);
}
421
422
/* Fixups for chips that lack a primary extended query table (applied in
 * CFI mode after the PRI-based fixups, see cfi_cmdset_0002()). */
static struct cfi_fixup cfi_nopri_fixup_table[] = {
	{ CFI_MFR_SST, 0x234a, fixup_sst39vf },
	{ CFI_MFR_SST, 0x234b, fixup_sst39vf },
	{ CFI_MFR_SST, 0x235a, fixup_sst39vf },
	{ CFI_MFR_SST, 0x235b, fixup_sst39vf },
	{ CFI_MFR_SST, 0x235c, fixup_sst39vf_rev_b },
	{ CFI_MFR_SST, 0x235d, fixup_sst39vf_rev_b },
	{ CFI_MFR_SST, 0x236c, fixup_sst39vf_rev_b },
	{ CFI_MFR_SST, 0x236d, fixup_sst39vf_rev_b },
	{ 0, 0, NULL }
};
434
/* Fixups applied to chips probed in CFI mode with a valid Amd/Fujitsu
 * primary extended query table. */
static struct cfi_fixup cfi_fixup_table[] = {
	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
#ifdef AMD_BOOTLOC_BUG
	{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock },
	{ CFI_MFR_AMIC, CFI_ID_ANY, fixup_amd_bootblock },
	{ CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock },
#endif
	{ CFI_MFR_AMD, 0x0050, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0053, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0055, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0056, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x005C, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x005F, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors },
	{ CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors },
	{ CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors },
	{ CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors },
	{ CFI_MFR_AMD, 0x3f00, fixup_s29ns512p_sectors },
	{ CFI_MFR_SST, 0x536a, fixup_sst38vf640x_sectorsize },
	{ CFI_MFR_SST, 0x536b, fixup_sst38vf640x_sectorsize },
	{ CFI_MFR_SST, 0x536c, fixup_sst38vf640x_sectorsize },
	{ CFI_MFR_SST, 0x536d, fixup_sst38vf640x_sectorsize },
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
#endif
	{ 0, 0, NULL }
};
/* Fixups applied only to chips identified via JEDEC probing. */
static struct cfi_fixup jedec_fixup_table[] = {
	{ CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock },
	{ CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock },
	{ CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock },
	{ 0, 0, NULL }
};
468
/* Generic fixups applied to every chip, regardless of probe mode.
 * The CFI vendor/device IDs and the JEDEC ones appear to be common,
 * so a single table covers both cases. */
static struct cfi_fixup fixup_table[] = {
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip },
	{ CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock },
	{ 0, 0, NULL }
};
479
480
/*
 * Normalise bogus extended-query version numbers before the version
 * sanity check in cfi_cmdset_0002() runs.
 */
static void cfi_fixup_major_minor(struct cfi_private *cfi,
				  struct cfi_pri_amdstd *extp)
{
	if (cfi->mfr == CFI_MFR_SAMSUNG) {
		if ((extp->MajorVersion == '0' && extp->MinorVersion == '0') ||
		    (extp->MajorVersion == '3' && extp->MinorVersion == '3')) {
			/* Some Samsung chips report version 0.0 or 3.3;
			 * treat them as 1.<minor>. */
			printk(KERN_NOTICE "  Fixing Samsung's Amd/Fujitsu"
			       " Extended Query version to 1.%c\n",
			       extp->MinorVersion);
			extp->MajorVersion = '1';
		}
	}

	/* SST 38VF640x chips (device IDs 0x536x) report an invalid
	 * version; force 1.0. */
	if (cfi->mfr == CFI_MFR_SST && (cfi->id >> 4) == 0x0536) {
		extp->MajorVersion = '1';
		extp->MinorVersion = '0';
	}
}
507
508static int is_m29ew(struct cfi_private *cfi)
509{
510 if (cfi->mfr == CFI_MFR_INTEL &&
511 ((cfi->device_type == CFI_DEVICETYPE_X8 && (cfi->id & 0xff) == 0x7e) ||
512 (cfi->device_type == CFI_DEVICETYPE_X16 && cfi->id == 0x227e)))
513 return 1;
514 return 0;
515}
516
517
518
519
520
521
522
523
524
525
526
527static void cfi_fixup_m29ew_erase_suspend(struct map_info *map,
528 unsigned long adr)
529{
530 struct cfi_private *cfi = map->fldrv_priv;
531
532 if (is_m29ew(cfi))
533 map_write(map, CMD(0xF0), adr);
534}
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
/* M29EW workaround: wait 500us after resuming an operation before
 * anything else is done to the chip.  No-op on other chips. */
static void cfi_fixup_m29ew_delay_after_resume(struct cfi_private *cfi)
{
	if (!is_m29ew(cfi))
		return;

	cfi_udelay(500);
}
569
/*
 * cfi_cmdset_0002() - set up an mtd_info for an AMD/Fujitsu command-set chip
 * @map:     the mapping the chip(s) live in
 * @primary: non-zero to use the primary extended query address (P_ADR),
 *           zero for the alternate one (A_ADR)
 *
 * Allocates and populates the mtd_info, reads and validates the extended
 * query table (CFI mode only), applies the per-vendor fixup tables and
 * per-chip timing defaults, then hands off to cfi_amdstd_setup().
 * Returns the mtd_info on success or NULL on failure.
 */
struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct device_node __maybe_unused *np = map->device_node;
	struct mtd_info *mtd;
	int i;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd)
		return NULL;
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations; fixups may override some. */
	mtd->_erase = cfi_amdstd_erase_varsize;
	mtd->_write = cfi_amdstd_write_words;
	mtd->_read = cfi_amdstd_read;
	mtd->_sync = cfi_amdstd_sync;
	mtd->_suspend = cfi_amdstd_suspend;
	mtd->_resume = cfi_amdstd_resume;
	mtd->_read_user_prot_reg = cfi_amdstd_read_user_prot_reg;
	mtd->_read_fact_prot_reg = cfi_amdstd_read_fact_prot_reg;
	mtd->_get_fact_prot_info = cfi_amdstd_get_fact_prot_info;
	mtd->_get_user_prot_info = cfi_amdstd_get_user_prot_info;
	mtd->_write_user_prot_reg = cfi_amdstd_write_user_prot_reg;
	mtd->_lock_user_prot_reg = cfi_amdstd_lock_user_prot_reg;
	mtd->flags = MTD_CAP_NORFLASH;
	mtd->name = map->name;
	mtd->writesize = 1;
	mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;

	pr_debug("MTD %s(): write buffer size %d\n", __func__,
			mtd->writebufsize);

	mtd->_panic_write = cfi_amdstd_panic_write;
	mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;

	if (cfi->cfi_mode==CFI_MODE_CFI){
		unsigned char bootloc;
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_amdstd *extp;

		extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
		if (extp) {
			/* Normalise bogus version numbers before validating. */
			cfi_fixup_major_minor(cfi, extp);

			/* Only versions 1.0 through 1.5 are understood here. */
			if (extp->MajorVersion != '1' ||
			    (extp->MajorVersion == '1' && (extp->MinorVersion < '0' || extp->MinorVersion > '5'))) {
				printk(KERN_ERR "  Unknown Amd/Fujitsu Extended Query "
				       "version %c.%c (%#02x/%#02x).\n",
				       extp->MajorVersion, extp->MinorVersion,
				       extp->MajorVersion, extp->MinorVersion);
				kfree(extp);
				kfree(mtd);
				return NULL;
			}

			printk(KERN_INFO "  Amd/Fujitsu Extended Query version %c.%c.\n",
			       extp->MajorVersion, extp->MinorVersion);

			/* Install our own private info structure */
			cfi->cmdset_priv = extp;

			/* Apply cfi device specific fixups */
			cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
			/* Tell the user about it in lots of lovely detail */
			cfi_tell_features(extp);
#endif

#ifdef CONFIG_OF
			/* Optional DT-enabled PPB sector locking support. */
			if (np && of_property_read_bool(
				    np, "use-advanced-sector-protection")
			    && extp->BlkProtUnprot == 8) {
				printk(KERN_INFO "  Advanced Sector Protection (PPB Locking) supported\n");
				mtd->_lock = cfi_ppb_lock;
				mtd->_unlock = cfi_ppb_unlock;
				mtd->_is_locked = cfi_ppb_is_locked;
			}
#endif

			bootloc = extp->TopBottom;
			if ((bootloc < 2) || (bootloc > 5)) {
				printk(KERN_WARNING "%s: CFI contains unrecognised boot "
				       "bank location (%d). Assuming bottom.\n",
				       map->name, bootloc);
				bootloc = 2;
			}

			/* Top-boot chips list their erase regions in reverse;
			 * mirror the table so region 0 is at offset 0. */
			if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
				printk(KERN_WARNING "%s: Swapping erase regions for top-boot CFI table.\n", map->name);

				for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
					int j = (cfi->cfiq->NumEraseRegions-1)-i;

					swap(cfi->cfiq->EraseRegionInfo[i],
					     cfi->cfiq->EraseRegionInfo[j]);
				}
			}
			/* Set the default CFI lock/unlock addresses */
			cfi->addr_unlock1 = 0x555;
			cfi->addr_unlock2 = 0x2aa;
		}
		cfi_fixup(mtd, cfi_nopri_fixup_table);

		/* Without unlock addresses we cannot drive the chip at all. */
		if (!cfi->addr_unlock1 || !cfi->addr_unlock2) {
			kfree(mtd);
			return NULL;
		}

	} /* CFI mode */
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	/* Derive per-chip timeouts from the CFI timing exponents. */
	for (i=0; i< cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
		/* Maximum buffer-write time is typ << max; if either field
		 * is absent fall back to 0 and clamp to at least 2ms. */
		if (cfi->cfiq->BufWriteTimeoutTyp &&
		    cfi->cfiq->BufWriteTimeoutMax)
			cfi->chips[i].buffer_write_time_max =
				1 << (cfi->cfiq->BufWriteTimeoutTyp +
				      cfi->cfiq->BufWriteTimeoutMax);
		else
			cfi->chips[i].buffer_write_time_max = 0;

		cfi->chips[i].buffer_write_time_max =
			max(cfi->chips[i].buffer_write_time_max, 2000);

		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	map->fldrv = &cfi_amdstd_chipdrv;

	return cfi_amdstd_setup(mtd);
}
/* Command-set IDs 0x0006 and 0x0701 use the same protocol as 0x0002,
 * so their entry points are plain aliases of cfi_cmdset_0002(). */
struct mtd_info *cfi_cmdset_0006(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
struct mtd_info *cfi_cmdset_0701(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0002);
EXPORT_SYMBOL_GPL(cfi_cmdset_0006);
EXPORT_SYMBOL_GPL(cfi_cmdset_0701);
734
735static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
736{
737 struct map_info *map = mtd->priv;
738 struct cfi_private *cfi = map->fldrv_priv;
739 unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
740 unsigned long offset = 0;
741 int i,j;
742
743 printk(KERN_NOTICE "number of %s chips: %d\n",
744 (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);
745
746 mtd->size = devsize * cfi->numchips;
747
748 mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
749 mtd->eraseregions = kmalloc_array(mtd->numeraseregions,
750 sizeof(struct mtd_erase_region_info),
751 GFP_KERNEL);
752 if (!mtd->eraseregions)
753 goto setup_err;
754
755 for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
756 unsigned long ernum, ersize;
757 ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
758 ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
759
760 if (mtd->erasesize < ersize) {
761 mtd->erasesize = ersize;
762 }
763 for (j=0; j<cfi->numchips; j++) {
764 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
765 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
766 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
767 }
768 offset += (ersize * ernum);
769 }
770 if (offset != devsize) {
771
772 printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
773 goto setup_err;
774 }
775
776 __module_get(THIS_MODULE);
777 register_reboot_notifier(&mtd->reboot_notifier);
778 return mtd;
779
780 setup_err:
781 kfree(mtd->eraseregions);
782 kfree(mtd);
783 kfree(cfi->cmdset_priv);
784 kfree(cfi->cfiq);
785 return NULL;
786}
787
788
789
790
791
792
793
794
795
796
797
798
/*
 * Return true if the chip is ready.
 *
 * On chips with a usable status register, ready is indicated by the
 * Device Ready Bit (DQ7 / CFI_SR_DRB) being set after a Status Register
 * Read (0x70) command.  Otherwise, ready is detected by the classic
 * method: two consecutive reads returning the same value, i.e. no
 * toggle bits toggling.
 */
static int __xipram chip_ready(struct map_info *map, struct flchip *chip,
			       unsigned long addr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word d, t;

	if (cfi_use_status_reg(cfi)) {
		map_word ready = CMD(CFI_SR_DRB);

		/* Issue the status-register read command, then read it. */
		cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi,
				 cfi->device_type, NULL);
		d = map_read(map, addr);

		return map_word_andequal(map, d, ready, ready);
	}

	/* Toggle-bit polling: equal back-to-back reads mean ready. */
	d = map_read(map, addr);
	t = map_read(map, addr);

	return map_word_equal(map, d, t);
}
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
/*
 * Return true if the chip is ready and the operation succeeded.
 *
 * With a status register: ready (DRB set) and neither the program nor
 * the erase error bit set.  Without one: two consecutive reads match
 * each other AND match @expected, proving the array holds the data.
 */
static int __xipram chip_good(struct map_info *map, struct flchip *chip,
			      unsigned long addr, map_word expected)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word oldd, curd;

	if (cfi_use_status_reg(cfi)) {
		map_word ready = CMD(CFI_SR_DRB);
		map_word err = CMD(CFI_SR_PSB | CFI_SR_ESB);

		/* Read the status register; @expected is unused here. */
		cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi,
				 cfi->device_type, NULL);
		curd = map_read(map, addr);

		if (map_word_andequal(map, curd, ready, ready))
			return !map_word_bitsset(map, curd, err);

		return 0;
	}

	oldd = map_read(map, addr);
	curd = map_read(map, addr);

	return map_word_equal(map, oldd, curd) &&
	       map_word_equal(map, curd, expected);
}
869
/*
 * get_chip() - gain access to a flash chip for an operation of type @mode
 *
 * Called with chip->mutex held.  Blocks (dropping the mutex while
 * sleeping) until the chip's state allows @mode to proceed, suspending
 * an in-progress erase when the extended table says the chip supports
 * it.  Returns 0 with the chip usable, or -EIO on timeout/shutdown.
 */
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo;
	struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	switch (chip->state) {

	case FL_STATUS:
		/* Busy-wait (1us steps, mutex dropped) until ready. */
		for (;;) {
			if (chip_ready(map, chip, adr))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
				return -EIO;
			}
			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Someone else might have changed the state; recheck. */
			goto retry;
		}
		/* fall through — chip is now ready */
	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		/* Only suspend the erase if the chip supports it and the
		 * requested mode can run during erase-suspend. */
		if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		    (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
			goto sleep;

		/* Never suspend to access the very block being erased. */
		if ((adr & chip->in_progress_block_mask) ==
		    chip->in_progress_block_addr)
			goto sleep;

		/* Erase suspend: issue 0xB0 and wait for the chip. */
		map_write(map, CMD(0xB0), chip->in_progress_block_addr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			if (chip_ready(map, chip, adr))
				break;

			if (time_after(jiffies, timeo)) {
				/* Suspend failed: resume the erase via
				 * put_chip() and report the error. */
				put_chip(map, chip, adr);
				printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
				return -EIO;
			}

			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Nobody will touch it while it's in state
			 * FL_ERASE_SUSPENDING, so no need to recheck. */
		}
		chip->state = FL_READY;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (!cfip || !(cfip->EraseSuspend&2)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_SHUTDOWN:
		/* The machine is rebooting. */
		return -EIO;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;
		/* fall through */
	default:
	sleep:
		/* Wait on the chip's queue until woken, then start over. */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		mutex_lock(&chip->mutex);
		goto resettime;
	}
}
974
975
/*
 * put_chip() - release the chip after an operation, resuming any erase
 * that get_chip() suspended, and wake up anyone waiting for it.
 * Called with chip->mutex held.
 */
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	switch(chip->oldstate) {
	case FL_ERASING:
		/* M29EW erratum workaround, then the resume command. */
		cfi_fixup_m29ew_erase_suspend(map,
			chip->in_progress_block_addr);
		map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr);
		cfi_fixup_m29ew_delay_after_resume(cfi);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
		break;
	default:
		printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
	}
	wake_up(&chip->wq);
}
1003
1004#ifdef CONFIG_MTD_XIP
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
/* Prevent XIP code fetches from the flash while a command sequence is
 * in progress: flush the last read and mask interrupts.  Must be paired
 * with xip_enable(). */
static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should be xipped out of here */
	(void) map_read(map, adr);
	local_irq_disable();
}
1024
/* Re-enable XIP execution from the flash: reset the chip to read-array
 * mode if needed, prime the prefetch, and unmask interrupts. */
static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		/* 0xF0 = read/reset: back to array mode for code fetches. */
		map_write(map, CMD(0xf0), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
/*
 * xip_udelay() - wait up to @usec for the operation at @adr to complete,
 * while keeping the system responsive under XIP.
 *
 * If an interrupt becomes pending during an erase and the chip supports
 * write-during-erase-suspend, the erase is suspended, XIP/interrupts are
 * re-enabled so other code can run (and other flash users can jump in via
 * FL_XIP_WHILE_ERASING), and the erase is resumed afterwards.  Polls
 * DQ7 (CMD(0x80)) for completion.  Must run from RAM (__xipram).
 */
static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
				unsigned long adr, int usec)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long suspended, start = xip_currtime();
	flstate_t oldstate;

	do {
		cpu_relax();
		if (xip_irqpending() && extp &&
		    ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase operation when supported.
			 * Note that we currently don't try to suspend
			 * interleaved chips if there is already another
			 * operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			map_write(map, CMD(0xb0), adr);
			usec -= xip_elapsed_since(start);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded if DQ6 (0x40) is set; otherwise
			 * the operation already completed — we're done. */
			oldstate = chip->state;
			if (!map_word_bitsset(map, status, CMD(0x40)))
				break;
			chip->state = FL_XIP_WHILE_ERASING;
			chip->erase_suspended = 1;
			map_write(map, CMD(0xf0), adr);
			(void) map_read(map, adr);
			xip_iprefetch();
			local_irq_enable();
			mutex_unlock(&chip->mutex);
			xip_iprefetch();
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			mutex_lock(&chip->mutex);
			while (chip->state != FL_XIP_WHILE_ERASING) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				mutex_unlock(&chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				mutex_lock(&chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Correct Erase Suspend Hangups for M29EW */
			cfi_fixup_m29ew_erase_suspend(map, adr);
			/* Resume the write or erase operation */
			map_write(map, cfi->sector_erase_cmd, adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save power by putting the CPU in idle mode
			 * when the delay is at least a timer tick long.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, OK, OK)
		 && xip_elapsed_since(start) < usec);
}
1143
/* XIP build: delays go through xip_udelay() so the suspend dance above
 * keeps interrupts serviceable while polling. */
#define UDELAY(map, chip, adr, usec) xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is always used together with a
 * following delay, so invalidate first, then delay via UDELAY().
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size) \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \
	UDELAY(map, chip, adr, usec)
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176#else
1177
/* Non-XIP build: XIP hooks become no-ops. */
#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)

/* Drop the chip mutex around the busy-wait so other users can proceed. */
#define UDELAY(map, chip, adr, usec) \
do { \
	mutex_unlock(&chip->mutex); \
	cfi_udelay(usec); \
	mutex_lock(&chip->mutex); \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \
do { \
	mutex_unlock(&chip->mutex); \
	INVALIDATE_CACHED_RANGE(map, adr, len); \
	cfi_udelay(usec); \
	mutex_lock(&chip->mutex); \
} while (0)
1196
1197#endif
1198
/*
 * Read @len bytes from one chip into @buf.  Acquires the chip via
 * get_chip(), resets it to read-array mode if necessary, copies the
 * data, then releases the chip.  Returns 0 or a get_chip() error.
 */
static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned to the bus width. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		/* 0xF0 = read/reset: put the chip back in array mode. */
		map_write(map, CMD(0xf0), cmd_addr);
		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	mutex_unlock(&chip->mutex);
	return 0;
}
1229
1230
1231static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1232{
1233 struct map_info *map = mtd->priv;
1234 struct cfi_private *cfi = map->fldrv_priv;
1235 unsigned long ofs;
1236 int chipnum;
1237 int ret = 0;
1238
1239
1240 chipnum = (from >> cfi->chipshift);
1241 ofs = from - (chipnum << cfi->chipshift);
1242
1243 while (len) {
1244 unsigned long thislen;
1245
1246 if (chipnum >= cfi->numchips)
1247 break;
1248
1249 if ((len + ofs -1) >> cfi->chipshift)
1250 thislen = (1<<cfi->chipshift) - ofs;
1251 else
1252 thislen = len;
1253
1254 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1255 if (ret)
1256 break;
1257
1258 *retlen += thislen;
1259 len -= thislen;
1260 buf += thislen;
1261
1262 ofs = 0;
1263 chipnum++;
1264 }
1265 return ret;
1266}
1267
1268typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
1269 loff_t adr, size_t len, u_char *buf, size_t grouplen);
1270
/* Enter the SecSi (OTP) sector: standard unlock cycles followed by the
 * 0x88 entry command.  Subsequent array reads hit the OTP area, so the
 * cached range is invalidated. */
static inline void otp_enter(struct map_info *map, struct flchip *chip,
			     loff_t adr, size_t len)
{
	struct cfi_private *cfi = map->fldrv_priv;

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);

	INVALIDATE_CACHED_RANGE(map, chip->start + adr, len);
}
1285
/*
 * Leave SecSi (OTP) access mode: unlock cycles plus the 0x90/0x00
 * "SecSi Sector Exit" sequence, restoring normal array reads.
 */
static inline void otp_exit(struct map_info *map, struct flchip *chip,
			    loff_t adr, size_t len)
{
	struct cfi_private *cfi = map->fldrv_priv;

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);

	/* Array contents are visible again: drop any cached OTP data. */
	INVALIDATE_CACHED_RANGE(map, chip->start + adr, len);
}
1302
/*
 * Read @len bytes of SecSi (OTP) data from one chip at offset @adr.
 * Waits (uninterruptibly) until the chip is idle, then brackets a plain
 * map_copy_from() with otp_enter()/otp_exit(). @grouplen is unused here;
 * it exists to match the otp_op_t callback signature.
 */
static inline int do_read_secsi_onechip(struct map_info *map,
					struct flchip *chip, loff_t adr,
					size_t len, u_char *buf,
					size_t grouplen)
{
	DECLARE_WAITQUEUE(wait, current);

 retry:
	mutex_lock(&chip->mutex);

	if (chip->state != FL_READY){
		/* Someone else is using the chip: sleep until woken,
		   then re-check the state from the top. */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		mutex_unlock(&chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);

		goto retry;
	}

	adr += chip->start;

	/* We own the chip for the duration of the copy. */
	chip->state = FL_READY;

	otp_enter(map, chip, adr, len);
	map_copy_from(map, buf, adr, len);
	otp_exit(map, chip, adr, len);

	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);

	return 0;
}
1338
1339static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1340{
1341 struct map_info *map = mtd->priv;
1342 struct cfi_private *cfi = map->fldrv_priv;
1343 unsigned long ofs;
1344 int chipnum;
1345 int ret = 0;
1346
1347
1348
1349 chipnum=from>>3;
1350 ofs=from & 7;
1351
1352 while (len) {
1353 unsigned long thislen;
1354
1355 if (chipnum >= cfi->numchips)
1356 break;
1357
1358 if ((len + ofs -1) >> 3)
1359 thislen = (1<<3) - ofs;
1360 else
1361 thislen = len;
1362
1363 ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs,
1364 thislen, buf, 0);
1365 if (ret)
1366 break;
1367
1368 *retlen += thislen;
1369 len -= thislen;
1370 buf += thislen;
1371
1372 ofs = 0;
1373 chipnum++;
1374 }
1375 return ret;
1376}
1377
1378static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1379 unsigned long adr, map_word datum,
1380 int mode);
1381
1382static int do_otp_write(struct map_info *map, struct flchip *chip, loff_t adr,
1383 size_t len, u_char *buf, size_t grouplen)
1384{
1385 int ret;
1386 while (len) {
1387 unsigned long bus_ofs = adr & ~(map_bankwidth(map)-1);
1388 int gap = adr - bus_ofs;
1389 int n = min_t(int, len, map_bankwidth(map) - gap);
1390 map_word datum = map_word_ff(map);
1391
1392 if (n != map_bankwidth(map)) {
1393
1394 otp_enter(map, chip, bus_ofs, map_bankwidth(map));
1395 datum = map_read(map, bus_ofs);
1396 otp_exit(map, chip, bus_ofs, map_bankwidth(map));
1397 }
1398
1399 datum = map_word_load_partial(map, datum, buf, gap, n);
1400 ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
1401 if (ret)
1402 return ret;
1403
1404 adr += n;
1405 buf += n;
1406 len -= n;
1407 }
1408
1409 return 0;
1410}
1411
/*
 * Permanently lock a chip's OTP region by clearing DQ0 of the lock
 * register. Only whole-group locking is supported, so @adr must be 0 and
 * @len must equal @grouplen. @buf is unused (otp_op_t signature).
 */
static int do_otp_lock(struct map_info *map, struct flchip *chip, loff_t adr,
		       size_t len, u_char *buf, size_t grouplen)
{
	struct cfi_private *cfi = map->fldrv_priv;
	uint8_t lockreg;
	unsigned long timeo;
	int ret;

	/* We can only lock the whole register group at once. */
	if ((adr != 0) || (len != grouplen))
		return -EINVAL;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, chip->start, FL_LOCKING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}
	chip->state = FL_LOCKING;

	/* Enter lock register command mode. */
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x40, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);

	/* Read the current lock register value. */
	lockreg = cfi_read_query(map, 0);

	/* Clearing bit 0 marks the OTP region as locked. */
	lockreg &= ~0x01;

	/* Program the updated lock register (0xA0 = program command). */
	map_write(map, CMD(0xA0), chip->start);
	map_write(map, CMD(lockreg), chip->start);

	/* Wait for the program operation to complete (~2ms budget). */
	timeo = jiffies + msecs_to_jiffies(2);
	for (;;) {
		if (chip_ready(map, chip, adr))
			break;

		if (time_after(jiffies, timeo)) {
			pr_err("Waiting for chip to be ready timed out.\n");
			ret = -EIO;
			break;
		}
		UDELAY(map, chip, 0, 1);
	}

	/* Exit lock register command mode. */
	map_write(map, CMD(0x90), chip->start);
	map_write(map, CMD(0x00), chip->start);

	chip->state = FL_READY;
	put_chip(map, chip, chip->start);
	mutex_unlock(&chip->mutex);

	return ret;
}
1475
1476static int cfi_amdstd_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
1477 size_t *retlen, u_char *buf,
1478 otp_op_t action, int user_regs)
1479{
1480 struct map_info *map = mtd->priv;
1481 struct cfi_private *cfi = map->fldrv_priv;
1482 int ofs_factor = cfi->interleave * cfi->device_type;
1483 unsigned long base;
1484 int chipnum;
1485 struct flchip *chip;
1486 uint8_t otp, lockreg;
1487 int ret;
1488
1489 size_t user_size, factory_size, otpsize;
1490 loff_t user_offset, factory_offset, otpoffset;
1491 int user_locked = 0, otplocked;
1492
1493 *retlen = 0;
1494
1495 for (chipnum = 0; chipnum < cfi->numchips; chipnum++) {
1496 chip = &cfi->chips[chipnum];
1497 factory_size = 0;
1498 user_size = 0;
1499
1500
1501 if (is_m29ew(cfi)) {
1502 base = chip->start;
1503
1504
1505
1506 mutex_lock(&chip->mutex);
1507 ret = get_chip(map, chip, base, FL_CFI_QUERY);
1508 if (ret) {
1509 mutex_unlock(&chip->mutex);
1510 return ret;
1511 }
1512 cfi_qry_mode_on(base, map, cfi);
1513 otp = cfi_read_query(map, base + 0x3 * ofs_factor);
1514 cfi_qry_mode_off(base, map, cfi);
1515 put_chip(map, chip, base);
1516 mutex_unlock(&chip->mutex);
1517
1518 if (otp & 0x80) {
1519
1520 factory_offset = 0;
1521 factory_size = 0x100;
1522 } else {
1523
1524 user_offset = 0;
1525 user_size = 0x100;
1526
1527 mutex_lock(&chip->mutex);
1528 ret = get_chip(map, chip, base, FL_LOCKING);
1529 if (ret) {
1530 mutex_unlock(&chip->mutex);
1531 return ret;
1532 }
1533
1534
1535 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1,
1536 chip->start, map, cfi,
1537 cfi->device_type, NULL);
1538 cfi_send_gen_cmd(0x55, cfi->addr_unlock2,
1539 chip->start, map, cfi,
1540 cfi->device_type, NULL);
1541 cfi_send_gen_cmd(0x40, cfi->addr_unlock1,
1542 chip->start, map, cfi,
1543 cfi->device_type, NULL);
1544
1545 lockreg = cfi_read_query(map, 0);
1546
1547 map_write(map, CMD(0x90), chip->start);
1548 map_write(map, CMD(0x00), chip->start);
1549 put_chip(map, chip, chip->start);
1550 mutex_unlock(&chip->mutex);
1551
1552 user_locked = ((lockreg & 0x01) == 0x00);
1553 }
1554 }
1555
1556 otpsize = user_regs ? user_size : factory_size;
1557 if (!otpsize)
1558 continue;
1559 otpoffset = user_regs ? user_offset : factory_offset;
1560 otplocked = user_regs ? user_locked : 1;
1561
1562 if (!action) {
1563
1564 struct otp_info *otpinfo;
1565 len -= sizeof(*otpinfo);
1566 if (len <= 0)
1567 return -ENOSPC;
1568 otpinfo = (struct otp_info *)buf;
1569 otpinfo->start = from;
1570 otpinfo->length = otpsize;
1571 otpinfo->locked = otplocked;
1572 buf += sizeof(*otpinfo);
1573 *retlen += sizeof(*otpinfo);
1574 from += otpsize;
1575 } else if ((from < otpsize) && (len > 0)) {
1576 size_t size;
1577 size = (len < otpsize - from) ? len : otpsize - from;
1578 ret = action(map, chip, otpoffset + from, size, buf,
1579 otpsize);
1580 if (ret < 0)
1581 return ret;
1582
1583 buf += size;
1584 len -= size;
1585 *retlen += size;
1586 from = 0;
1587 } else {
1588 from -= otpsize;
1589 }
1590 }
1591 return 0;
1592}
1593
1594static int cfi_amdstd_get_fact_prot_info(struct mtd_info *mtd, size_t len,
1595 size_t *retlen, struct otp_info *buf)
1596{
1597 return cfi_amdstd_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
1598 NULL, 0);
1599}
1600
1601static int cfi_amdstd_get_user_prot_info(struct mtd_info *mtd, size_t len,
1602 size_t *retlen, struct otp_info *buf)
1603{
1604 return cfi_amdstd_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
1605 NULL, 1);
1606}
1607
1608static int cfi_amdstd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
1609 size_t len, size_t *retlen,
1610 u_char *buf)
1611{
1612 return cfi_amdstd_otp_walk(mtd, from, len, retlen,
1613 buf, do_read_secsi_onechip, 0);
1614}
1615
1616static int cfi_amdstd_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
1617 size_t len, size_t *retlen,
1618 u_char *buf)
1619{
1620 return cfi_amdstd_otp_walk(mtd, from, len, retlen,
1621 buf, do_read_secsi_onechip, 1);
1622}
1623
1624static int cfi_amdstd_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
1625 size_t len, size_t *retlen,
1626 u_char *buf)
1627{
1628 return cfi_amdstd_otp_walk(mtd, from, len, retlen, buf,
1629 do_otp_write, 1);
1630}
1631
1632static int cfi_amdstd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
1633 size_t len)
1634{
1635 size_t retlen;
1636 return cfi_amdstd_otp_walk(mtd, from, len, &retlen, NULL,
1637 do_otp_lock, 1);
1638}
1639
/*
 * Program a single bus-width word at @adr on one chip. @mode is either
 * FL_WRITING (normal array write) or FL_OTP_WRITE (SecSi write, bracketed
 * by otp_enter/otp_exit). Retries up to MAX_RETRIES times on failure.
 */
static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
				     unsigned long adr, map_word datum,
				     int mode)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/*
	 * 1ms (+1 jiffy so the value is never 0 when HZ < 1000) generic
	 * word-program timeout; typical devices complete in a few
	 * hundred microseconds.
	 */
	unsigned long uWriteTimeout = (HZ / 1000) + 1;
	int ret = 0;
	map_word oldd;
	int retry_cnt = 0;

	adr += chip->start;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, mode);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
		 __func__, adr, datum.x[0]);

	if (mode == FL_OTP_WRITE)
		otp_enter(map, chip, adr, map_bankwidth(map));

	/*
	 * Check for a NOP: flash programming can only clear bits, so if
	 * the target word already holds the wanted value, skip the
	 * program cycle entirely (also avoids spurious status errors).
	 */
	oldd = map_read(map, adr);
	if (map_word_equal(map, oldd, datum)) {
		pr_debug("MTD %s(): NOP\n",
			 __func__);
		goto op_done;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

 retry:
	/* Unlock cycles + 0xA0 program command, then the data itself. */
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, datum, adr);
	chip->state = mode;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	/* Poll for completion. */
	timeo = jiffies + uWriteTimeout;
	for (;;) {
		if (chip->state != mode) {
			/* Someone suspended the write: sleep until the
			   operation is resumed, then extend the timeout. */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			mutex_lock(&chip->mutex);
			continue;
		}

		/* Timed out and still not ready: give up polling. */
		if (time_after(jiffies, timeo) &&
		    !chip_ready(map, chip, adr)) {
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
			xip_disable(map, chip, adr);
			break;
		}

		if (chip_ready(map, chip, adr))
			break;

		/* Latency issues. Drop the lock, wait a while and retry. */
		UDELAY(map, chip, adr, 1);
	}

	/* Did we succeed? chip_good() verifies the data actually stuck. */
	if (!chip_good(map, chip, adr, datum)) {
		/* reset on all failures. */
		cfi_check_err_status(map, chip, adr);
		map_write(map, CMD(0xF0), chip->start);
		/* FIXME - should have reset delay before continuing */

		if (++retry_cnt <= MAX_RETRIES)
			goto retry;

		ret = -EIO;
	}
	xip_enable(map, chip, adr);
 op_done:
	if (mode == FL_OTP_WRITE)
		otp_exit(map, chip, adr, map_bankwidth(map));
	chip->state = FL_READY;
	DISABLE_VPP(map);
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);

	return ret;
}
1757
1758
/*
 * Word-at-a-time write path: handles an unaligned head, a run of aligned
 * full words (possibly crossing chip boundaries), and an unaligned tail.
 * Unaligned pieces are done read-modify-write on the containing word.
 */
static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
				  size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs, chipstart;
	DECLARE_WAITQUEUE(wait, current);

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);
	chipstart = cfi->chips[chipnum].start;

	/* If it's not bus-aligned, do the first partial word. */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int i = ofs - bus_ofs;
		int n = 0;
		map_word tmp_buf;

 retry:
		mutex_lock(&cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
			/* Chip busy: sleep until woken and re-check. */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			mutex_unlock(&cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
			goto retry;
		}

		/* Load 'tmp_buf' with the word's current contents. */
		tmp_buf = map_read(map, bus_ofs+chipstart);

		mutex_unlock(&cfi->chips[chipnum].mutex);

		/* Number of bytes to copy from the caller's buffer. */
		n = min_t(int, len, map_bankwidth(map)-i);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       bus_ofs, tmp_buf, FL_WRITING);
		if (ret)
			return ret;

		ofs += n;
		buf += n;
		(*retlen) += n;
		len -= n;

		/* Crossed into the next chip? */
		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* We are now aligned: write full bus-width words. */
	while(len >= map_bankwidth(map)) {
		map_word datum;

		datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
			chipstart = cfi->chips[chipnum].start;
		}
	}

	/* Write the trailing partial word, if any. */
	if (len & (map_bankwidth(map)-1)) {
		map_word tmp_buf;

 retry1:
		mutex_lock(&cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
			/* Chip busy: sleep until woken and re-check. */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			mutex_unlock(&cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
			goto retry1;
		}

		tmp_buf = map_read(map, ofs + chipstart);

		mutex_unlock(&cfi->chips[chipnum].mutex);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, tmp_buf, FL_WRITING);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}
1881
1882
1883
1884
1885
/*
 * Program @len bytes (a multiple of the bus width, within one write
 * buffer) at @adr on one chip using the Write-to-Buffer (0x25/0x29)
 * command sequence. Returns 0 on success, -EIO on timeout.
 */
static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const u_char *buf,
				    int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/*
	 * Timeout is calculated according to CFI data, if available.
	 * See more comments in cfi_cmdset_0001.c
	 */
	unsigned long uWriteTimeout =
				usecs_to_jiffies(chip->buffer_write_time_max);
	int ret = -EIO;
	unsigned long cmd_adr;
	int z, words;
	map_word datum;

	adr += chip->start;
	cmd_adr = adr;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	datum = map_word_load(map, buf);

	pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
		 __func__, adr, datum.x[0]);

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);

	/* Write Buffer Load command. */
	map_write(map, CMD(0x25), cmd_adr);

	chip->state = FL_WRITING_TO_BUFFER;

	/* Word count: the device expects (number of words - 1). */
	words = len / map_bankwidth(map);
	map_write(map, CMD(words - 1), cmd_adr);

	/* Fill the device's write buffer word by word. */
	z = 0;
	while(z < words * map_bankwidth(map)) {
		datum = map_word_load(map, buf);
		map_write(map, datum, adr + z);

		z += map_bankwidth(map);
		buf += map_bankwidth(map);
	}
	z -= map_bankwidth(map);

	/* 'adr' now points at the last word written — that is where the
	   status polling must happen. */
	adr += z;

	/* Write Buffer Program Confirm: GO GO GO */
	map_write(map, CMD(0x29), cmd_adr);
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	timeo = jiffies + uWriteTimeout;

	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep until the
			   operation is resumed, then extend the timeout. */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			mutex_lock(&chip->mutex);
			continue;
		}

		/*
		 * We check "time_after" and "!chip_good" before checking
		 * "chip_good" to avoid the failure due to scheduling:
		 * the operation may have finished between the two checks.
		 */
		if (time_after(jiffies, timeo) &&
		    !chip_good(map, chip, adr, datum))
			break;

		if (chip_good(map, chip, adr, datum)) {
			xip_enable(map, chip, adr);
			goto op_done;
		}

		/* Latency issues. Drop the lock, wait a while and retry. */
		UDELAY(map, chip, adr, 1);
	}

	/*
	 * Recovery from timeout: the write-to-buffer sequence cannot be
	 * aborted by a plain reset; issue the dedicated
	 * Write-Buffer-Abort-Reset sequence (unlock cycles + 0xF0).
	 */
	cfi_check_err_status(map, chip, adr);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	xip_enable(map, chip, adr);
	/* FIXME - should have reset delay before continuing */

	printk(KERN_WARNING "MTD %s(): software timeout, address:0x%.8lx.\n",
	       __func__, adr);

	ret = -EIO;
 op_done:
	chip->state = FL_READY;
	DISABLE_VPP(map);
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);

	return ret;
}
2018
2019
/*
 * Buffered write entry point: delegates an unaligned head and a short
 * tail to cfi_amdstd_write_words(), and pushes everything in between
 * through do_write_buffer() in write-buffer-sized chunks.
 */
static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
				    size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	/* Device write buffer size, scaled by interleave. */
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first word write. */
	if (ofs & (map_bankwidth(map)-1)) {
		size_t local_len = (-ofs)&(map_bankwidth(map)-1);
		if (local_len > len)
			local_len = len;
		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
					     local_len, retlen, buf);
		if (ret)
			return ret;
		ofs += local_len;
		buf += local_len;
		len -= local_len;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* Write buffer is worth it only if more than one word to write... */
	while (len >= map_bankwidth(map) * 2) {
		/* We must not cross write-buffer boundaries. */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;
		/* do_write_buffer() requires a whole number of words. */
		if (size % map_bankwidth(map))
			size -= size % map_bankwidth(map);

		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, buf, size);
		if (ret)
			return ret;

		ofs += size;
		buf += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* Hand the short remainder back to the word-write path. */
	if (len) {
		size_t retlen_dregs = 0;

		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
					     len, &retlen_dregs, buf);

		*retlen += retlen_dregs;
		return ret;
	}

	return 0;
}
2094
2095
2096
2097
2098
2099
2100
2101
2102
/*
 * Wait for the flash chip to become ready without sleeping or taking
 * locks — used only from the panic-write path where the rest of the
 * kernel (and any in-flight flash operation) has been stopped.
 */
static int cfi_amdstd_panic_wait(struct map_info *map, struct flchip *chip,
				 unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int retries = 10;
	int i;

	/*
	 * If the driver thought the chip was idle, and no panicked
	 * operation interrupted one in progress, it is ready as-is.
	 */
	if (chip->state == FL_READY && chip_ready(map, chip, adr))
		return 0;

	/*
	 * Try to reset the chip out of whatever state it was left in,
	 * then busy-wait (udelay only — no scheduling) for it to come
	 * ready. Repeat a bounded number of times.
	 */
	while (retries > 0) {
		const unsigned long timeo = (HZ / 1000) + 1;

		/* Send the reset command. */
		map_write(map, CMD(0xF0), chip->start);

		/* Wait for the chip to become ready. */
		for (i = 0; i < jiffies_to_usecs(timeo); i++) {
			if (chip_ready(map, chip, adr))
				return 0;

			udelay(1);
		}

		retries--;
	}

	/* The chip never became ready. */
	return -EBUSY;
}
2143
2144
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154
/*
 * Program one bus-width word during a kernel panic: no locks, no
 * sleeping, only busy-wait polling. The chip must already have been
 * brought to the ready state (see cfi_amdstd_panic_wait()).
 */
static int do_panic_write_oneword(struct map_info *map, struct flchip *chip,
				  unsigned long adr, map_word datum)
{
	const unsigned long uWriteTimeout = (HZ / 1000) + 1;
	struct cfi_private *cfi = map->fldrv_priv;
	int retry_cnt = 0;
	map_word oldd;
	int ret = 0;
	int i;

	adr += chip->start;

	ret = cfi_amdstd_panic_wait(map, chip, adr);
	if (ret)
		return ret;

	pr_debug("MTD %s(): PANIC WRITE 0x%.8lx(0x%.8lx)\n",
		 __func__, adr, datum.x[0]);

	/*
	 * Check for a NOP: programming can only clear bits, so if the
	 * word already holds the wanted value, skip the program cycle
	 * (also avoids spurious status errors).
	 */
	oldd = map_read(map, adr);
	if (map_word_equal(map, oldd, datum)) {
		pr_debug("MTD %s(): NOP\n", __func__);
		goto op_done;
	}

	ENABLE_VPP(map);

retry:
	/* Unlock cycles + 0xA0 program command, then the data itself. */
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, datum, adr);

	/* Busy-wait for completion — scheduling is not allowed here. */
	for (i = 0; i < jiffies_to_usecs(uWriteTimeout); i++) {
		if (chip_ready(map, chip, adr))
			break;

		udelay(1);
	}

	/* Verify the data actually stuck; reset and retry otherwise. */
	if (!chip_good(map, chip, adr, datum)) {
		/* reset on all failures. */
		cfi_check_err_status(map, chip, adr);
		map_write(map, CMD(0xF0), chip->start);
		/* FIXME - should have reset delay before continuing */

		if (++retry_cnt <= MAX_RETRIES)
			goto retry;

		ret = -EIO;
	}

op_done:
	DISABLE_VPP(map);
	return ret;
}
2217
2218
2219
2220
2221
2222
2223
2224
2225
2226
2227
2228
2229
2230
/*
 * Panic-time write entry point (e.g. mtdoops): same head/body/tail
 * structure as cfi_amdstd_write_words(), but every step uses the
 * lockless, non-sleeping panic helpers.
 */
static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
				  size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs, chipstart;
	int ret = 0;
	int chipnum;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);
	chipstart = cfi->chips[chipnum].start;

	/* If it's not bus-aligned, do the first partial word. */
	if (ofs & (map_bankwidth(map) - 1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map) - 1);
		int i = ofs - bus_ofs;
		int n = 0;
		map_word tmp_buf;

		ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], bus_ofs);
		if (ret)
			return ret;

		/* Load 'tmp_buf' with the word's current contents. */
		tmp_buf = map_read(map, bus_ofs + chipstart);

		/* Number of bytes to copy from the caller's buffer. */
		n = min_t(int, len, map_bankwidth(map) - i);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);

		ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
					     bus_ofs, tmp_buf);
		if (ret)
			return ret;

		ofs += n;
		buf += n;
		(*retlen) += n;
		len -= n;

		/* Crossed into the next chip? */
		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* We are now aligned: write full bus-width words. */
	while (len >= map_bankwidth(map)) {
		map_word datum;

		datum = map_word_load(map, buf);

		ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
					     ofs, datum);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;

			chipstart = cfi->chips[chipnum].start;
		}
	}

	/* Write the trailing partial word, if any. */
	if (len & (map_bankwidth(map) - 1)) {
		map_word tmp_buf;

		ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], ofs);
		if (ret)
			return ret;

		tmp_buf = map_read(map, ofs + chipstart);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);

		ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
					     ofs, tmp_buf);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}
2329
2330
2331
2332
2333
2334
/*
 * Erase an entire chip using the six-cycle Chip Erase command
 * (0xAA/0x55/0x80/0xAA/0x55/0x10). Supports erase-suspend and retries
 * the whole erase up to MAX_RETRIES times on failure.
 */
static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	unsigned long int adr;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;
	int retry_cnt = 0;

	adr = cfi->addr_unlock1;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	pr_debug("MTD %s(): ERASE 0x%.8lx\n",
	       __func__, chip->start);

	XIP_INVAL_CACHED_RANGE(map, adr, map->size);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

 retry:
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;
	chip->in_progress_block_mask = ~(map->size - 1);

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map->size,
				chip->erase_time*500);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep until it
			   is resumed, then re-check from the top. */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* The erase was suspended and resumed: restart
			   the timeout from scratch. */
			timeo = jiffies + (HZ*20);
			chip->erase_suspended = 0;
		}

		/* Erase done when the array reads back all-ones. */
		if (chip_good(map, chip, adr, map_word_ff(map)))
			break;

		if (time_after(jiffies, timeo)) {
			printk(KERN_WARNING "MTD %s(): software timeout\n",
			       __func__);
			ret = -EIO;
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry. */
		UDELAY(map, chip, adr, 1000000/HZ);
	}

	/* Did we succeed? */
	if (ret) {
		/* reset on all failures. */
		cfi_check_err_status(map, chip, adr);
		map_write(map, CMD(0xF0), chip->start);
		/* FIXME - should have reset delay before continuing */

		if (++retry_cnt <= MAX_RETRIES) {
			ret = 0;
			goto retry;
		}
	}

	chip->state = FL_READY;
	xip_enable(map, chip, adr);
	DISABLE_VPP(map);
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);

	return ret;
}
2431
2432
/*
 * Erase a single block of @len bytes at @adr using the Sector Erase
 * command sequence. Supports erase-suspend and retries up to
 * MAX_RETRIES times on failure. @thunk is unused (frob callback ABI).
 */
static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;
	int retry_cnt = 0;

	adr += chip->start;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	pr_debug("MTD %s(): ERASE 0x%.8lx\n",
		 __func__, adr);

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

 retry:
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, cfi->sector_erase_cmd, adr);

	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;
	chip->in_progress_block_mask = ~(len - 1);

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, len,
				chip->erase_time*500);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep until it
			   is resumed, then re-check from the top. */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* The erase was suspended and resumed: restart
			   the timeout from scratch. */
			timeo = jiffies + (HZ*20);
			chip->erase_suspended = 0;
		}

		/* Erase done when the block reads back all-ones. */
		if (chip_good(map, chip, adr, map_word_ff(map)))
			break;

		if (time_after(jiffies, timeo)) {
			printk(KERN_WARNING "MTD %s(): software timeout\n",
			       __func__);
			ret = -EIO;
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry. */
		UDELAY(map, chip, adr, 1000000/HZ);
	}

	/* Did we succeed? */
	if (ret) {
		/* reset on all failures. */
		cfi_check_err_status(map, chip, adr);
		map_write(map, CMD(0xF0), chip->start);
		/* FIXME - should have reset delay before continuing */

		if (++retry_cnt <= MAX_RETRIES) {
			ret = 0;
			goto retry;
		}
	}

	chip->state = FL_READY;
	xip_enable(map, chip, adr);
	DISABLE_VPP(map);
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);
	return ret;
}
2527
2528
2529static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
2530{
2531 return cfi_varsize_frob(mtd, do_erase_oneblock, instr->addr,
2532 instr->len, NULL);
2533}
2534
2535
2536static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
2537{
2538 struct map_info *map = mtd->priv;
2539 struct cfi_private *cfi = map->fldrv_priv;
2540
2541 if (instr->addr != 0)
2542 return -EINVAL;
2543
2544 if (instr->len != mtd->size)
2545 return -EINVAL;
2546
2547 return do_erase_chip(map, &cfi->chips[0]);
2548}
2549
/*
 * Lock one sector on an Atmel chip: five-cycle entry sequence followed
 * by the 0x40 Sector Lockdown command at the sector address.
 * @thunk is unused (cfi_varsize_frob callback ABI).
 */
static int do_atmel_lock(struct map_info *map, struct flchip *chip,
			 unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
	if (ret)
		goto out_unlock;
	chip->state = FL_LOCKING;

	pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	/* Sector Lockdown command at the target sector. */
	map_write(map, CMD(0x40), chip->start + adr);

	chip->state = FL_READY;
	put_chip(map, chip, adr + chip->start);
	ret = 0;

out_unlock:
	mutex_unlock(&chip->mutex);
	return ret;
}
2584
2585static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
2586 unsigned long adr, int len, void *thunk)
2587{
2588 struct cfi_private *cfi = map->fldrv_priv;
2589 int ret;
2590
2591 mutex_lock(&chip->mutex);
2592 ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
2593 if (ret)
2594 goto out_unlock;
2595 chip->state = FL_UNLOCKING;
2596
2597 pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len);
2598
2599 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2600 cfi->device_type, NULL);
2601 map_write(map, CMD(0x70), adr);
2602
2603 chip->state = FL_READY;
2604 put_chip(map, chip, adr + chip->start);
2605 ret = 0;
2606
2607out_unlock:
2608 mutex_unlock(&chip->mutex);
2609 return ret;
2610}
2611
2612static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2613{
2614 return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
2615}
2616
2617static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2618{
2619 return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
2620}
2621
2622
2623
2624
2625
/*
 * Per-sector snapshot used by cfi_ppb_unlock() to restore PPB (persistent
 * protection bit) locks after a bulk unlock.
 */
struct ppb_lock {
	struct flchip *chip;	/* chip this sector belongs to */
	unsigned long adr;	/* sector address relative to chip start */
	int locked;		/* non-zero if the sector was locked before */
};

/* thunk values selecting the operation performed by do_ppb_xxlock() */
#define DO_XXLOCK_ONEBLOCK_LOCK ((void *)1)
#define DO_XXLOCK_ONEBLOCK_UNLOCK ((void *)2)
#define DO_XXLOCK_ONEBLOCK_GETLOCK ((void *)3)
2635
/*
 * Lock, unlock or query the persistent protection bit (PPB) state of one
 * sector.  Invoked through cfi_varsize_frob(); @thunk selects the
 * operation (one of the DO_XXLOCK_ONEBLOCK_* values).
 *
 * Returns 0 on success, the lock state (non-zero == locked) for
 * DO_XXLOCK_ONEBLOCK_GETLOCK, or a negative errno on failure.
 */
static int __maybe_unused do_ppb_xxlock(struct map_info *map,
					struct flchip *chip,
					unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo;
	int ret;

	adr += chip->start;
	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_LOCKING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	pr_debug("MTD %s(): XXLOCK 0x%08lx len %d\n", __func__, adr, len);

	/* Enter the PPB command mode (unlock cycles followed by 0xC0). */
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);

	cfi_send_gen_cmd(0xC0, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);

	if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
		chip->state = FL_LOCKING;
		/* Program the PPB of this one sector. */
		map_write(map, CMD(0xA0), adr);
		map_write(map, CMD(0x00), adr);
	} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
		/*
		 * The erase-PPB command is written to chip->start, not to
		 * the sector address: clearing PPBs acts on the whole chip,
		 * which is why cfi_ppb_unlock() has to save and re-lock all
		 * other sectors afterwards.
		 */
		chip->state = FL_UNLOCKING;
		map_write(map, CMD(0x80), chip->start);
		map_write(map, CMD(0x30), chip->start);
	} else if (thunk == DO_XXLOCK_ONEBLOCK_GETLOCK) {
		chip->state = FL_JEDEC_QUERY;
		/*
		 * The device reports 0 for a protected sector; invert so the
		 * return value is non-zero when locked (matches
		 * cfi_ppb_is_locked()).
		 */
		ret = !cfi_read_query(map, adr);
	} else
		BUG();

	/*
	 * Wait for the operation to complete.  Erasing all PPBs can take
	 * a while, so allow up to 2 seconds before giving up.
	 */
	timeo = jiffies + msecs_to_jiffies(2000);
	for (;;) {
		if (chip_ready(map, chip, adr))
			break;

		if (time_after(jiffies, timeo)) {
			printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
			ret = -EIO;
			break;
		}

		UDELAY(map, chip, adr, 1);
	}

	/* Leave PPB command mode and return the chip to read mode. */
	map_write(map, CMD(0x90), chip->start);
	map_write(map, CMD(0x00), chip->start);

	chip->state = FL_READY;
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);

	return ret;
}
2708
2709static int __maybe_unused cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs,
2710 uint64_t len)
2711{
2712 return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
2713 DO_XXLOCK_ONEBLOCK_LOCK);
2714}
2715
/*
 * Clear the persistent protection bits for [ofs, ofs + len).
 *
 * The PPB-erase command clears the PPBs of ALL sectors of a chip (see
 * do_ppb_xxlock()), so this routine first records the lock state of every
 * sector outside the requested range, performs the bulk unlock, and then
 * re-locks the sectors that were locked before.
 */
static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
					 uint64_t len)
{
	struct mtd_erase_region_info *regions = mtd->eraseregions;
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct ppb_lock *sect;
	unsigned long adr;
	loff_t offset;
	uint64_t length;
	int chipnum;
	int i;
	int sectors;
	int ret;
	int max_sectors;

	/*
	 * Allocate one ppb_lock entry per sector of the whole device so
	 * the pre-unlock state of every sector can be saved.
	 */
	max_sectors = 0;
	for (i = 0; i < mtd->numeraseregions; i++)
		max_sectors += regions[i].numblocks;

	sect = kcalloc(max_sectors, sizeof(struct ppb_lock), GFP_KERNEL);
	if (!sect)
		return -ENOMEM;

	/*
	 * Walk all sectors of the device (a slightly modified version of
	 * the cfi_varsize_frob() loop) and query the lock state of each
	 * sector that is NOT part of the range to be unlocked.
	 */
	i = 0;
	chipnum = 0;
	adr = 0;
	sectors = 0;
	offset = 0;
	length = mtd->size;

	while (length) {
		int size = regions[i].erasesize;

		/*
		 * Only sectors outside [ofs, ofs + len) need their state
		 * saved; they are the ones restored after the bulk unlock.
		 */
		if ((offset < ofs) || (offset >= (ofs + len))) {
			sect[sectors].chip = &cfi->chips[chipnum];
			sect[sectors].adr = adr;
			sect[sectors].locked = do_ppb_xxlock(
				map, &cfi->chips[chipnum], adr, 0,
				DO_XXLOCK_ONEBLOCK_GETLOCK);
		}

		adr += size;
		offset += size;
		length -= size;

		/* Advance to the next erase region when this one ends. */
		if (offset == regions[i].offset + size * regions[i].numblocks)
			i++;

		/* Crossed a chip boundary: restart adr on the next chip. */
		if (adr >> cfi->chipshift) {
			if (offset >= (ofs + len))
				break;
			adr = 0;
			chipnum++;

			if (chipnum >= cfi->numchips)
				break;
		}

		sectors++;
		if (sectors >= max_sectors) {
			printk(KERN_ERR "Only %d sectors for PPB locking supported!\n",
			       max_sectors);
			kfree(sect);
			return -EINVAL;
		}
	}

	/* Now unlock the requested range (clears all PPBs on the chip). */
	ret = cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
			       DO_XXLOCK_ONEBLOCK_UNLOCK);
	if (ret) {
		kfree(sect);
		return ret;
	}

	/*
	 * Re-lock every sector that was locked before and is not part of
	 * the range that was just unlocked.
	 */
	for (i = 0; i < sectors; i++) {
		if (sect[i].locked)
			do_ppb_xxlock(map, sect[i].chip, sect[i].adr, 0,
				      DO_XXLOCK_ONEBLOCK_LOCK);
	}

	kfree(sect);
	return ret;
}
2820
2821static int __maybe_unused cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs,
2822 uint64_t len)
2823{
2824 return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
2825 DO_XXLOCK_ONEBLOCK_GETLOCK) ? 1 : 0;
2826}
2827
2828static void cfi_amdstd_sync (struct mtd_info *mtd)
2829{
2830 struct map_info *map = mtd->priv;
2831 struct cfi_private *cfi = map->fldrv_priv;
2832 int i;
2833 struct flchip *chip;
2834 int ret = 0;
2835 DECLARE_WAITQUEUE(wait, current);
2836
2837 for (i=0; !ret && i<cfi->numchips; i++) {
2838 chip = &cfi->chips[i];
2839
2840 retry:
2841 mutex_lock(&chip->mutex);
2842
2843 switch(chip->state) {
2844 case FL_READY:
2845 case FL_STATUS:
2846 case FL_CFI_QUERY:
2847 case FL_JEDEC_QUERY:
2848 chip->oldstate = chip->state;
2849 chip->state = FL_SYNCING;
2850
2851
2852
2853
2854
2855 case FL_SYNCING:
2856 mutex_unlock(&chip->mutex);
2857 break;
2858
2859 default:
2860
2861 set_current_state(TASK_UNINTERRUPTIBLE);
2862 add_wait_queue(&chip->wq, &wait);
2863
2864 mutex_unlock(&chip->mutex);
2865
2866 schedule();
2867
2868 remove_wait_queue(&chip->wq, &wait);
2869
2870 goto retry;
2871 }
2872 }
2873
2874
2875
2876 for (i--; i >=0; i--) {
2877 chip = &cfi->chips[i];
2878
2879 mutex_lock(&chip->mutex);
2880
2881 if (chip->state == FL_SYNCING) {
2882 chip->state = chip->oldstate;
2883 wake_up(&chip->wq);
2884 }
2885 mutex_unlock(&chip->mutex);
2886 }
2887}
2888
2889
/*
 * Prepare all chips for a power-management suspend.  Idle chips are
 * parked in FL_PM_SUSPENDED; if any chip is busy the suspend is refused
 * with -EAGAIN and every chip already parked is restored to its
 * previous state.
 */
static int cfi_amdstd_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			/* Idle: remember the state so resume can restore it. */
			chip->oldstate = chip->state;
			chip->state = FL_PM_SUSPENDED;
			/*
			 * No further action required for devices
			 * supporting CFI.
			 */
			/* fall through */
		case FL_PM_SUSPENDED:
			break;

		default:
			/* Chip busy (erase/write in progress): refuse. */
			ret = -EAGAIN;
			break;
		}
		mutex_unlock(&chip->mutex);
	}

	/* Unlock the chips again if the suspend was refused. */
	if (ret) {
		for (i--; i >=0; i--) {
			chip = &cfi->chips[i];

			mutex_lock(&chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				chip->state = chip->oldstate;
				wake_up(&chip->wq);
			}
			mutex_unlock(&chip->mutex);
		}
	}

	return ret;
}
2942
2943
2944static void cfi_amdstd_resume(struct mtd_info *mtd)
2945{
2946 struct map_info *map = mtd->priv;
2947 struct cfi_private *cfi = map->fldrv_priv;
2948 int i;
2949 struct flchip *chip;
2950
2951 for (i=0; i<cfi->numchips; i++) {
2952
2953 chip = &cfi->chips[i];
2954
2955 mutex_lock(&chip->mutex);
2956
2957 if (chip->state == FL_PM_SUSPENDED) {
2958 chip->state = FL_READY;
2959 map_write(map, CMD(0xF0), chip->start);
2960 wake_up(&chip->wq);
2961 }
2962 else
2963 printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");
2964
2965 mutex_unlock(&chip->mutex);
2966 }
2967}
2968
2969
2970
2971
2972
2973
2974
2975
2976static int cfi_amdstd_reset(struct mtd_info *mtd)
2977{
2978 struct map_info *map = mtd->priv;
2979 struct cfi_private *cfi = map->fldrv_priv;
2980 int i, ret;
2981 struct flchip *chip;
2982
2983 for (i = 0; i < cfi->numchips; i++) {
2984
2985 chip = &cfi->chips[i];
2986
2987 mutex_lock(&chip->mutex);
2988
2989 ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
2990 if (!ret) {
2991 map_write(map, CMD(0xF0), chip->start);
2992 chip->state = FL_SHUTDOWN;
2993 put_chip(map, chip, chip->start);
2994 }
2995
2996 mutex_unlock(&chip->mutex);
2997 }
2998
2999 return 0;
3000}
3001
3002
3003static int cfi_amdstd_reboot(struct notifier_block *nb, unsigned long val,
3004 void *v)
3005{
3006 struct mtd_info *mtd;
3007
3008 mtd = container_of(nb, struct mtd_info, reboot_notifier);
3009 cfi_amdstd_reset(mtd);
3010 return NOTIFY_DONE;
3011}
3012
3013
3014static void cfi_amdstd_destroy(struct mtd_info *mtd)
3015{
3016 struct map_info *map = mtd->priv;
3017 struct cfi_private *cfi = map->fldrv_priv;
3018
3019 cfi_amdstd_reset(mtd);
3020 unregister_reboot_notifier(&mtd->reboot_notifier);
3021 kfree(cfi->cmdset_priv);
3022 kfree(cfi->cfiq);
3023 kfree(cfi);
3024 kfree(mtd->eraseregions);
3025}
3026
3027MODULE_LICENSE("GPL");
3028MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
3029MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");
3030MODULE_ALIAS("cfi_cmdset_0006");
3031MODULE_ALIAS("cfi_cmdset_0701");
3032