#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/slab.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/of_mtd.h>

#include <linux/platform_data/mtd-davinci.h>
#include <linux/platform_data/mtd-davinci-aemif.h>
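
/*
 * Driver for the NAND flash controller found on TI DaVinci and
 * Keystone SoCs, attached through the AEMIF external memory
 * interface.  ALE and CLE are driven via address-line masks applied
 * to the chipselect window, and the controller offers 1-bit and
 * 4-bit hardware ECC (selected through platform data or DT).
 */

/* Per-chipselect driver state. */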
struct davinci_nand_info {
        struct mtd_info mtd;
        struct nand_chip chip;
        struct nand_ecclayout ecclayout;

        struct device *dev;
        struct clk *clk;

        bool is_readmode;

        void __iomem *base;
        void __iomem *vaddr;

        uint32_t ioaddr;
        uint32_t current_cs;

        uint32_t mask_chipsel;
        uint32_t mask_ale;
        uint32_t mask_cle;

        uint32_t core_chipsel;

        struct davinci_aemif_timing *timing;
};

static DEFINE_SPINLOCK(davinci_nand_lock);
static bool ecc4_busy;

#define to_davinci_nand(m) container_of(m, struct davinci_nand_info, mtd)
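
/* Register accessors for the NAND/AEMIF control register block. */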
static inline unsigned int davinci_nand_readl(struct davinci_nand_info *info,
                int offset)
{
        return __raw_readl(info->base + offset);
}

static inline void davinci_nand_writel(struct davinci_nand_info *info,
                int offset, unsigned long value)
{
        __raw_writel(value, info->base + offset);
}
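
/*
 * Access to hardware control lines: ALE and CLE are driven as address
 * bits, so command and address bytes are issued by writing them to
 * the chipselect data window with the matching mask OR'd into the
 * address.
 */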
static void nand_davinci_hwcontrol(struct mtd_info *mtd, int cmd,
                unsigned int ctrl)
{
        struct davinci_nand_info *info = to_davinci_nand(mtd);
        uint32_t addr = info->current_cs;
        struct nand_chip *nand = mtd->priv;

        if (ctrl & NAND_CTRL_CHANGE) {
                if ((ctrl & NAND_CTRL_CLE) == NAND_CTRL_CLE)
                        addr |= info->mask_cle;
                else if ((ctrl & NAND_CTRL_ALE) == NAND_CTRL_ALE)
                        addr |= info->mask_ale;

                nand->IO_ADDR_W = (void __iomem __force *)addr;
        }

        if (cmd != NAND_CMD_NONE)
                iowrite8(cmd, nand->IO_ADDR_W);
}
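
/*
 * The second flash device, if any, shares the same data window;
 * selecting it just toggles the mask_chipsel address bit.
 */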
static void nand_davinci_select_chip(struct mtd_info *mtd, int chip)
{
        struct davinci_nand_info *info = to_davinci_nand(mtd);
        uint32_t addr = info->ioaddr;

        if (chip > 0)
                addr |= info->mask_chipsel;
        info->current_cs = addr;

        info->chip.IO_ADDR_W = (void __iomem __force *)addr;
        info->chip.IO_ADDR_R = info->chip.IO_ADDR_W;
}
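
/*
 * 1-bit hardware ECC: the controller accumulates parity for each
 * 512-byte chunk in a per-chipselect NANDFnECC register.  Reading
 * that register returns (and resets) the accumulated value, and
 * setting the per-chipselect start bit in NANDFCR re-arms the
 * engine before the next transfer.
 */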
static inline uint32_t nand_davinci_readecc_1bit(struct mtd_info *mtd)
{
        struct davinci_nand_info *info = to_davinci_nand(mtd);

        return davinci_nand_readl(info, NANDF1ECC_OFFSET
                        + 4 * info->core_chipsel);
}

static void nand_davinci_hwctl_1bit(struct mtd_info *mtd, int mode)
{
        struct davinci_nand_info *info;
        uint32_t nandcfr;
        unsigned long flags;

        info = to_davinci_nand(mtd);

        nand_davinci_readecc_1bit(mtd);

        spin_lock_irqsave(&davinci_nand_lock, flags);

        nandcfr = davinci_nand_readl(info, NANDFCR_OFFSET);
        nandcfr |= BIT(8 + info->core_chipsel);
        davinci_nand_writel(info, NANDFCR_OFFSET, nandcfr);

        spin_unlock_irqrestore(&davinci_nand_lock, flags);
}
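
/*
 * Pack the accumulated parity into three ECC bytes, inverted so that
 * the ECC of an erased (all-0xff) page checks as correct.
 */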
static int nand_davinci_calculate_1bit(struct mtd_info *mtd,
                const u_char *dat, u_char *ecc_code)
{
        unsigned int ecc_val = nand_davinci_readecc_1bit(mtd);
        unsigned int ecc24 = (ecc_val & 0x0fff) | ((ecc_val & 0x0fff0000) >> 4);

        /* invert so that an erased page's ECC checks as correct */
        ecc24 = ~ecc24;
        ecc_code[0] = (u_char)(ecc24);
        ecc_code[1] = (u_char)(ecc24 >> 8);
        ecc_code[2] = (u_char)(ecc24 >> 16);

        return 0;
}

static int nand_davinci_correct_1bit(struct mtd_info *mtd, u_char *dat,
                u_char *read_ecc, u_char *calc_ecc)
{
        struct nand_chip *chip = mtd->priv;
        uint32_t eccNand = read_ecc[0] | (read_ecc[1] << 8) |
                        (read_ecc[2] << 16);
        uint32_t eccCalc = calc_ecc[0] | (calc_ecc[1] << 8) |
                        (calc_ecc[2] << 16);
        uint32_t diff = eccCalc ^ eccNand;

        if (diff) {
                if ((((diff >> 12) ^ diff) & 0xfff) == 0xfff) {
                        /* single-bit error in the data: correctable */
                        if ((diff >> (12 + 3)) < chip->ecc.size) {
                                dat[diff >> (12 + 3)] ^= BIT((diff >> 12) & 7);
                                return 1;
                        } else {
                                return -1;
                        }
                } else if (!(diff & (diff - 1))) {
                        /* single-bit error in the ECC itself; nothing
                         * in the data needs fixing
                         */
                        return 1;
                } else {
                        /* multiple errors: uncorrectable */
                        return -1;
                }
        }
        return 0;
}
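
/*
 * 4-bit hardware ECC: the controller produces ten ECC bytes (eight
 * raw 10-bit values) per 512-byte chunk, and on reads can locate and
 * report up to four error address/value pairs per chunk.
 */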
static void nand_davinci_hwctl_4bit(struct mtd_info *mtd, int mode)
{
        struct davinci_nand_info *info = to_davinci_nand(mtd);
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&davinci_nand_lock, flags);

        val = davinci_nand_readl(info, NANDFCR_OFFSET);
        val &= ~(0x03 << 4);
        val |= (info->core_chipsel << 4) | BIT(12);
        davinci_nand_writel(info, NANDFCR_OFFSET, val);

        info->is_readmode = (mode == NAND_ECC_READ);

        spin_unlock_irqrestore(&davinci_nand_lock, flags);
}
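
/* Read the current contents of the four 4-bit ECC result registers. */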
static void
nand_davinci_readecc_4bit(struct davinci_nand_info *info, u32 code[4])
{
        const u32 mask = 0x03ff03ff;

        code[0] = davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET) & mask;
        code[1] = davinci_nand_readl(info, NAND_4BIT_ECC2_OFFSET) & mask;
        code[2] = davinci_nand_readl(info, NAND_4BIT_ECC3_OFFSET) & mask;
        code[3] = davinci_nand_readl(info, NAND_4BIT_ECC4_OFFSET) & mask;
}
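
/*
 * Terminate ECC calculation after a page access.  When reading, a
 * dummy register read ends the calculation and no ECC bytes are
 * returned; correction later works from the hardware state plus the
 * ECC stored in the OOB area.  When writing, the eight raw 10-bit
 * values are packed into the ten ECC bytes destined for the OOB.
 */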
static int nand_davinci_calculate_4bit(struct mtd_info *mtd,
                const u_char *dat, u_char *ecc_code)
{
        struct davinci_nand_info *info = to_davinci_nand(mtd);
        u32 raw_ecc[4], *p;
        unsigned i;

        if (info->is_readmode) {
                davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET);
                return 0;
        }

        nand_davinci_readecc_4bit(info, raw_ecc);
        for (i = 0, p = raw_ecc; i < 2; i++, p += 2) {
                *ecc_code++ = p[0] & 0xff;
                *ecc_code++ = ((p[0] >> 8) & 0x03) | ((p[0] >> 14) & 0xfc);
                *ecc_code++ = ((p[0] >> 22) & 0x0f) | ((p[1] << 4) & 0xf0);
                *ecc_code++ = ((p[1] >> 4) & 0x3f) | ((p[1] >> 10) & 0xc0);
                *ecc_code++ = (p[1] >> 18) & 0xff;
        }

        return 0;
}
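
/*
 * Correct up to four errors in one 512-byte chunk: reload the ECC
 * bytes read from the OOB into the hardware, let it compute syndromes
 * against the data just read, then walk the reported error
 * address/value pairs and repair the affected data bytes.  An
 * all-0xff stored ECC (erased page) is treated as clean.  Returns the
 * number of errors corrected, 0 for a clean chunk, or -EIO when the
 * errors exceed what 4-bit ECC can repair.
 */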
static int nand_davinci_correct_4bit(struct mtd_info *mtd,
                u_char *data, u_char *ecc_code, u_char *null)
{
        int i;
        struct davinci_nand_info *info = to_davinci_nand(mtd);
        unsigned short ecc10[8];
        unsigned short *ecc16;
        u32 syndrome[4];
        u32 ecc_state;
        unsigned num_errors, corrected;
        unsigned long timeo;

        /* An all-0xff ECC means an erased page; nothing to check. */
        for (i = 0; i < 10; i++) {
                if (ecc_code[i] != 0xff)
                        goto compare;
        }
        return 0;

compare:
        /* Unpack ten ECC bytes into eight 10-bit values; the halfword
         * punning below needs a 16-bit aligned buffer.
         */
        if (WARN_ON(0x01 & (unsigned) ecc_code))
                return -EINVAL;
        ecc16 = (unsigned short *)ecc_code;

        ecc10[0] = (ecc16[0] >> 0) & 0x3ff;
        ecc10[1] = ((ecc16[0] >> 10) & 0x3f) | ((ecc16[1] << 6) & 0x3c0);
        ecc10[2] = (ecc16[1] >> 4) & 0x3ff;
        ecc10[3] = ((ecc16[1] >> 14) & 0x3) | ((ecc16[2] << 2) & 0x3fc);
        ecc10[4] = (ecc16[2] >> 8) | ((ecc16[3] << 8) & 0x300);
        ecc10[5] = (ecc16[3] >> 2) & 0x3ff;
        ecc10[6] = ((ecc16[3] >> 12) & 0xf) | ((ecc16[4] << 4) & 0x3f0);
        ecc10[7] = (ecc16[4] >> 6) & 0x3ff;

        /* Tell the hardware about the ECC that was read from flash. */
        for (i = 7; i >= 0; i--)
                davinci_nand_writel(info, NAND_4BIT_ECC_LOAD_OFFSET, ecc10[i]);

        /* Allow time for the syndrome calculation, then read it.
         * A syndrome of all zeroes means the data is error-free.
         */
        davinci_nand_readl(info, NANDFSR_OFFSET);
        nand_davinci_readecc_4bit(info, syndrome);
        if (!(syndrome[0] | syndrome[1] | syndrome[2] | syndrome[3]))
                return 0;

        /* Clear any previous address calculation with a dummy read of
         * an error address register.
         */
        davinci_nand_readl(info, NAND_ERR_ADD1_OFFSET);

        /* Start error address calculation. */
        davinci_nand_writel(info, NANDFCR_OFFSET,
                        davinci_nand_readl(info, NANDFCR_OFFSET) | BIT(13));

        /* ECC_STATE may still show a stale "complete" value right
         * after the calculation is started, so first wait (bounded to
         * roughly 100 us) until it reports that correction is actually
         * underway before polling for the result below.
         */
        timeo = jiffies + usecs_to_jiffies(100);
        do {
                ecc_state = (davinci_nand_readl(info,
                                NANDFSR_OFFSET) >> 8) & 0x0f;
                cpu_relax();
        } while ((ecc_state < 4) && time_before(jiffies, timeo));

        for (;;) {
                u32 fsr = davinci_nand_readl(info, NANDFSR_OFFSET);

                switch ((fsr >> 8) & 0x0f) {
                case 0:         /* no error, should not happen */
                        davinci_nand_readl(info, NAND_ERR_ERRVAL1_OFFSET);
                        return 0;
                case 1:         /* five or more errors detected */
                        davinci_nand_readl(info, NAND_ERR_ERRVAL1_OFFSET);
                        return -EIO;
                case 2:         /* error addresses computed */
                case 3:
                        num_errors = 1 + ((fsr >> 16) & 0x03);
                        goto correct;
                default:        /* still working on it */
                        cpu_relax();
                        continue;
                }
        }

correct:
        /* Correct each error reported by the hardware. */
        for (i = 0, corrected = 0; i < num_errors; i++) {
                int error_address, error_value;

                if (i > 1) {
                        error_address = davinci_nand_readl(info,
                                        NAND_ERR_ADD2_OFFSET);
                        error_value = davinci_nand_readl(info,
                                        NAND_ERR_ERRVAL2_OFFSET);
                } else {
                        error_address = davinci_nand_readl(info,
                                        NAND_ERR_ADD1_OFFSET);
                        error_value = davinci_nand_readl(info,
                                        NAND_ERR_ERRVAL1_OFFSET);
                }

                if (i & 1) {
                        error_address >>= 16;
                        error_value >>= 16;
                }
                error_address &= 0x3ff;
                error_address = (512 + 7) - error_address;

                /* Addresses past the data area point into the ECC
                 * bytes themselves; nothing to fix there.
                 */
                if (error_address < 512) {
                        data[error_address] ^= error_value;
                        corrected++;
                }
        }

        return corrected;
}
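
/*
 * Data transfer: use the widest accesses that the buffer alignment
 * and length allow (32-bit, then 16-bit, then single bytes).  Both
 * IO_ADDR_R and IO_ADDR_W name the same chipselect data window, so
 * the write path reuses IO_ADDR_R.
 */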
static void nand_davinci_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
        struct nand_chip *chip = mtd->priv;

        if ((0x03 & ((unsigned)buf)) == 0 && (0x03 & len) == 0)
                ioread32_rep(chip->IO_ADDR_R, buf, len >> 2);
        else if ((0x01 & ((unsigned)buf)) == 0 && (0x01 & len) == 0)
                ioread16_rep(chip->IO_ADDR_R, buf, len >> 1);
        else
                ioread8_rep(chip->IO_ADDR_R, buf, len);
}

static void nand_davinci_write_buf(struct mtd_info *mtd,
                const uint8_t *buf, int len)
{
        struct nand_chip *chip = mtd->priv;

        if ((0x03 & ((unsigned)buf)) == 0 && (0x03 & len) == 0)
                iowrite32_rep(chip->IO_ADDR_R, buf, len >> 2);
        else if ((0x01 & ((unsigned)buf)) == 0 && (0x01 & len) == 0)
                iowrite16_rep(chip->IO_ADDR_R, buf, len >> 1);
        else
                iowrite8_rep(chip->IO_ADDR_R, buf, len);
}
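
/* Ready/busy: bit 0 of NANDFSR reflects the NAND R/B# signal. */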
static int nand_davinci_dev_ready(struct mtd_info *mtd)
{
        struct davinci_nand_info *info = to_davinci_nand(mtd);

        return davinci_nand_readl(info, NANDFSR_OFFSET) & BIT(0);
}
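
/*
 * OOB layout for 4-bit ECC with small-page (512 byte) flash: the ten
 * ECC bytes skip offset 5, which holds the factory bad block marker,
 * while bytes 8-12 and everything from offset 16 onward stay free.
 */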
static struct nand_ecclayout hwecc4_small = {
        .eccbytes = 10,
        .eccpos = { 0, 1, 2, 3, 4, 6, 7, 13, 14, 15, },
        .oobfree = {
                {.offset = 8, .length = 5, },
                {.offset = 16, },
        },
};
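
/*
 * OOB layout for 4-bit ECC with 2 KiB-page flash: the 40 ECC bytes
 * occupy the tail of the 64-byte spare area (the layout the TI boot
 * ROM expects), leaving bytes 2-23 free after the two manufacturer
 * bad block marker bytes.
 */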
static struct nand_ecclayout hwecc4_2048 = {
        .eccbytes = 40,
        .eccpos = {
                24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
                34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
                44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
                54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
        },
        .oobfree = {
                {.offset = 2, .length = 22, },
        },
};

#if defined(CONFIG_OF)
static const struct of_device_id davinci_nand_of_match[] = {
        {.compatible = "ti,davinci-nand", },
        {.compatible = "ti,keystone-nand", },
        {},
};
MODULE_DEVICE_TABLE(of, davinci_nand_of_match);
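
/*
 * Build platform data from the device tree when the board did not
 * supply any: chipselect, ALE/CLE masks, ECC mode and strength, bus
 * width and bad-block-table options all come from the generic NAND
 * properties or their legacy "ti,davinci-*" equivalents.
 */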
static struct davinci_nand_pdata
        *nand_davinci_get_pdata(struct platform_device *pdev)
{
        if (!dev_get_platdata(&pdev->dev) && pdev->dev.of_node) {
                struct davinci_nand_pdata *pdata;
                const char *mode;
                u32 prop;

                pdata = devm_kzalloc(&pdev->dev,
                                sizeof(struct davinci_nand_pdata),
                                GFP_KERNEL);
                pdev->dev.platform_data = pdata;
                if (!pdata)
                        return ERR_PTR(-ENOMEM);
                if (!of_property_read_u32(pdev->dev.of_node,
                                "ti,davinci-chipselect", &prop))
                        pdev->id = prop;
                else
                        return ERR_PTR(-EINVAL);

                if (!of_property_read_u32(pdev->dev.of_node,
                                "ti,davinci-mask-ale", &prop))
                        pdata->mask_ale = prop;
                if (!of_property_read_u32(pdev->dev.of_node,
                                "ti,davinci-mask-cle", &prop))
                        pdata->mask_cle = prop;
                if (!of_property_read_u32(pdev->dev.of_node,
                                "ti,davinci-mask-chipsel", &prop))
                        pdata->mask_chipsel = prop;
                if (!of_property_read_string(pdev->dev.of_node,
                                "nand-ecc-mode", &mode) ||
                    !of_property_read_string(pdev->dev.of_node,
                                "ti,davinci-ecc-mode", &mode)) {
                        if (!strncmp("none", mode, 4))
                                pdata->ecc_mode = NAND_ECC_NONE;
                        if (!strncmp("soft", mode, 4))
                                pdata->ecc_mode = NAND_ECC_SOFT;
                        if (!strncmp("hw", mode, 2))
                                pdata->ecc_mode = NAND_ECC_HW;
                }
                if (!of_property_read_u32(pdev->dev.of_node,
                                "ti,davinci-ecc-bits", &prop))
                        pdata->ecc_bits = prop;

                prop = of_get_nand_bus_width(pdev->dev.of_node);
                if (0 < prop || !of_property_read_u32(pdev->dev.of_node,
                                "ti,davinci-nand-buswidth", &prop))
                        if (prop == 16)
                                pdata->options |= NAND_BUSWIDTH_16;
                if (of_property_read_bool(pdev->dev.of_node,
                                "nand-on-flash-bbt") ||
                    of_property_read_bool(pdev->dev.of_node,
                                "ti,davinci-nand-use-bbt"))
                        pdata->bbt_options = NAND_BBT_USE_FLASH;

                if (of_device_is_compatible(pdev->dev.of_node,
                                "ti,keystone-nand")) {
                        pdata->options |= NAND_NO_SUBPAGE_WRITE;
                }
        }

        return dev_get_platdata(&pdev->dev);
}
#else
static struct davinci_nand_pdata
        *nand_davinci_get_pdata(struct platform_device *pdev)
{
        return dev_get_platdata(&pdev->dev);
}
#endif

static int nand_davinci_probe(struct platform_device *pdev)
{
        struct davinci_nand_pdata *pdata;
        struct davinci_nand_info *info;
        struct resource *res1;
        struct resource *res2;
        void __iomem *vaddr;
        void __iomem *base;
        int ret;
        uint32_t val;
        nand_ecc_modes_t ecc_mode;

        pdata = nand_davinci_get_pdata(pdev);
        if (IS_ERR(pdata))
                return PTR_ERR(pdata);

        if (!pdata)
                return -ENODEV;

        if (pdev->id < 0 || pdev->id > 3)
                return -ENODEV;

        info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
        if (!info)
                return -ENOMEM;

        platform_set_drvdata(pdev, info);

        res1 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        res2 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        if (!res1 || !res2) {
                dev_err(&pdev->dev, "resource missing\n");
                return -EINVAL;
        }

        vaddr = devm_ioremap_resource(&pdev->dev, res1);
        if (IS_ERR(vaddr))
                return PTR_ERR(vaddr);
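
        /*
         * This second resource covers the AEMIF/NAND control registers.
         * Map it without claiming the region: when the TI AEMIF driver
         * is in use it may already have requested the same range.
         */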
        base = devm_ioremap(&pdev->dev, res2->start, resource_size(res2));
        if (!base) {
                dev_err(&pdev->dev, "ioremap failed for resource %pR\n", res2);
                return -EADDRNOTAVAIL;
        }

        info->dev = &pdev->dev;
        info->base = base;
        info->vaddr = vaddr;

        info->mtd.priv = &info->chip;
        info->mtd.name = dev_name(&pdev->dev);
        info->mtd.owner = THIS_MODULE;

        info->mtd.dev.parent = &pdev->dev;

        info->chip.IO_ADDR_R = vaddr;
        info->chip.IO_ADDR_W = vaddr;
        info->chip.chip_delay = 0;
        info->chip.select_chip = nand_davinci_select_chip;

        info->chip.bbt_options = pdata->bbt_options;
        info->chip.options = pdata->options;
        info->chip.bbt_td = pdata->bbt_td;
        info->chip.bbt_md = pdata->bbt_md;
        info->timing = pdata->timing;

        info->ioaddr = (uint32_t __force) vaddr;

        info->current_cs = info->ioaddr;
        info->core_chipsel = pdev->id;
        info->mask_chipsel = pdata->mask_chipsel;

        info->mask_ale = pdata->mask_ale ? : MASK_ALE;
        info->mask_cle = pdata->mask_cle ? : MASK_CLE;

        info->chip.cmd_ctrl = nand_davinci_hwcontrol;
        info->chip.dev_ready = nand_davinci_dev_ready;

        info->chip.read_buf = nand_davinci_read_buf;
        info->chip.write_buf = nand_davinci_write_buf;

        ecc_mode = pdata->ecc_mode;

        ret = -EINVAL;
        switch (ecc_mode) {
        case NAND_ECC_NONE:
        case NAND_ECC_SOFT:
                pdata->ecc_bits = 0;
                break;
        case NAND_ECC_HW:
                if (pdata->ecc_bits == 4) {
                        spin_lock_irq(&davinci_nand_lock);
                        if (ecc4_busy)
                                ret = -EBUSY;
                        else
                                ecc4_busy = true;
                        spin_unlock_irq(&davinci_nand_lock);

                        if (ret == -EBUSY)
                                return ret;

                        info->chip.ecc.calculate = nand_davinci_calculate_4bit;
                        info->chip.ecc.correct = nand_davinci_correct_4bit;
                        info->chip.ecc.hwctl = nand_davinci_hwctl_4bit;
                        info->chip.ecc.bytes = 10;
                } else {
                        info->chip.ecc.calculate = nand_davinci_calculate_1bit;
                        info->chip.ecc.correct = nand_davinci_correct_1bit;
                        info->chip.ecc.hwctl = nand_davinci_hwctl_1bit;
                        info->chip.ecc.bytes = 3;
                }
                info->chip.ecc.size = 512;
                info->chip.ecc.strength = pdata->ecc_bits;
                break;
        default:
                return -EINVAL;
        }
        info->chip.ecc.mode = ecc_mode;

        info->clk = devm_clk_get(&pdev->dev, "aemif");
        if (IS_ERR(info->clk)) {
                ret = PTR_ERR(info->clk);
                dev_dbg(&pdev->dev, "unable to get AEMIF clock, err %d\n", ret);
                return ret;
        }

        ret = clk_prepare_enable(info->clk);
        if (ret < 0) {
                dev_dbg(&pdev->dev, "unable to enable AEMIF clock, err %d\n",
                                ret);
                goto err_clk_enable;
        }

        spin_lock_irq(&davinci_nand_lock);

        val = davinci_nand_readl(info, NANDFCR_OFFSET);
        val |= BIT(info->core_chipsel);
        davinci_nand_writel(info, NANDFCR_OFFSET, val);

        spin_unlock_irq(&davinci_nand_lock);

        ret = nand_scan_ident(&info->mtd, pdata->mask_chipsel ? 2 : 1, NULL);
        if (ret < 0) {
                dev_dbg(&pdev->dev, "no NAND chip(s) found\n");
                goto err;
        }
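
        /*
         * With 4-bit hardware ECC, pick an OOB layout matching the page
         * size found during identification: the layouts above for
         * 512-byte and 2 KiB pages, or bail out for page sizes this
         * driver does not handle yet.
         */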
        if (pdata->ecc_bits == 4) {
                int chunks = info->mtd.writesize / 512;

                if (!chunks || info->mtd.oobsize < 16) {
                        dev_dbg(&pdev->dev, "too small\n");
                        ret = -EINVAL;
                        goto err;
                }

                if (chunks == 1) {
                        info->ecclayout = hwecc4_small;
                        info->ecclayout.oobfree[1].length =
                                        info->mtd.oobsize - 16;
                        goto syndrome_done;
                }
                if (chunks == 4) {
                        info->ecclayout = hwecc4_2048;
                        info->chip.ecc.mode = NAND_ECC_HW_OOB_FIRST;
                        goto syndrome_done;
                }

                dev_warn(&pdev->dev,
                         "no 4-bit ECC support yet for 4KiB-page NAND\n");
                ret = -EIO;
                goto err;

syndrome_done:
                info->chip.ecc.layout = &info->ecclayout;
        }

        ret = nand_scan_tail(&info->mtd);
        if (ret < 0)
                goto err;

        if (pdata->parts)
                ret = mtd_device_parse_register(&info->mtd, NULL, NULL,
                                pdata->parts, pdata->nr_parts);
        else {
                struct mtd_part_parser_data ppdata;

                ppdata.of_node = pdev->dev.of_node;
                ret = mtd_device_parse_register(&info->mtd, NULL, &ppdata,
                                NULL, 0);
        }
        if (ret < 0)
                goto err;

        val = davinci_nand_readl(info, NRCSR_OFFSET);
        dev_info(&pdev->dev, "controller rev. %d.%d\n",
                        (val >> 8) & 0xff, val & 0xff);

        return 0;

err:
        clk_disable_unprepare(info->clk);

err_clk_enable:
        spin_lock_irq(&davinci_nand_lock);
        /* only the 4-bit hardware ECC path above marked the engine busy */
        if (ecc_mode == NAND_ECC_HW && pdata->ecc_bits == 4)
                ecc4_busy = false;
        spin_unlock_irq(&davinci_nand_lock);
        return ret;
}

static int nand_davinci_remove(struct platform_device *pdev)
{
        struct davinci_nand_info *info = platform_get_drvdata(pdev);

        spin_lock_irq(&davinci_nand_lock);
        /* release the 4-bit ECC engine if this instance claimed it */
        if (info->chip.ecc.bytes == 10)
                ecc4_busy = false;
        spin_unlock_irq(&davinci_nand_lock);

        nand_release(&info->mtd);

        clk_disable_unprepare(info->clk);

        return 0;
}

static struct platform_driver nand_davinci_driver = {
        .probe          = nand_davinci_probe,
        .remove         = nand_davinci_remove,
        .driver         = {
                .name   = "davinci_nand",
                .of_match_table = of_match_ptr(davinci_nand_of_match),
        },
};
MODULE_ALIAS("platform:davinci_nand");

module_platform_driver(nand_davinci_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Texas Instruments");
MODULE_DESCRIPTION("Davinci NAND flash driver");