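/*
 * NAND flash controller driver for TI DaVinci family SoCs, using the
 * AEMIF-based NAND interface with 1-bit or 4-bit hardware ECC.
 *
 * Licensed under the GNU General Public License (GPL).
 */
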
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/slab.h>
#include <linux/of_device.h>
#include <linux/of.h>

#include <linux/platform_data/mtd-davinci.h>
#include <linux/platform_data/mtd-davinci-aemif.h>

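/*
 * This driver handles the NAND flash controller found on DaVinci family
 * chips.  It manages one of up to four SoC chipselects (pdev->id 0..3),
 * optionally a secondary chipselect mask for multichip packages, and
 * either the 1-bit or the newer 4-bit hardware ECC engine.
 */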
struct davinci_nand_info {
	struct mtd_info		mtd;
	struct nand_chip	chip;
	struct nand_ecclayout	ecclayout;

	struct device		*dev;
	struct clk		*clk;

	bool			is_readmode;

	void __iomem		*base;
	void __iomem		*vaddr;

	uint32_t		ioaddr;
	uint32_t		current_cs;

	uint32_t		mask_chipsel;
	uint32_t		mask_ale;
	uint32_t		mask_cle;

	uint32_t		core_chipsel;

	struct davinci_aemif_timing	*timing;
};

static DEFINE_SPINLOCK(davinci_nand_lock);
static bool ecc4_busy;

#define to_davinci_nand(m) container_of(m, struct davinci_nand_info, mtd)

static inline unsigned int davinci_nand_readl(struct davinci_nand_info *info,
		int offset)
{
	return __raw_readl(info->base + offset);
}

static inline void davinci_nand_writel(struct davinci_nand_info *info,
		int offset, unsigned long value)
{
	__raw_writel(value, info->base + offset);
}

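/*
 * Access to hardware control lines:  the ALE and CLE signals are driven by
 * address bits, so commands and addresses are issued by writing to the data
 * window with the corresponding address mask bits set.
 */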
static void nand_davinci_hwcontrol(struct mtd_info *mtd, int cmd,
				   unsigned int ctrl)
{
	struct davinci_nand_info *info = to_davinci_nand(mtd);
	uint32_t addr = info->current_cs;
	struct nand_chip *nand = mtd->priv;

	/* Did the control lines change? */
	if (ctrl & NAND_CTRL_CHANGE) {
		if ((ctrl & NAND_CTRL_CLE) == NAND_CTRL_CLE)
			addr |= info->mask_cle;
		else if ((ctrl & NAND_CTRL_ALE) == NAND_CTRL_ALE)
			addr |= info->mask_ale;

		nand->IO_ADDR_W = (void __iomem __force *)addr;
	}

	if (cmd != NAND_CMD_NONE)
		iowrite8(cmd, nand->IO_ADDR_W);
}

static void nand_davinci_select_chip(struct mtd_info *mtd, int chip)
{
	struct davinci_nand_info *info = to_davinci_nand(mtd);
	uint32_t addr = info->ioaddr;

	/* maybe kick in a second chipselect */
	if (chip > 0)
		addr |= info->mask_chipsel;
	info->current_cs = addr;

	info->chip.IO_ADDR_W = (void __iomem __force *)addr;
	info->chip.IO_ADDR_R = info->chip.IO_ADDR_W;
}

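/*
 * 1-bit hardware ECC:  the controller accumulates a parity value for each
 * 512-byte block of data; it is read out, packed into three ECC bytes on
 * write, and compared against the recomputed value on read.
 */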
static inline uint32_t nand_davinci_readecc_1bit(struct mtd_info *mtd)
{
	struct davinci_nand_info *info = to_davinci_nand(mtd);

	return davinci_nand_readl(info, NANDF1ECC_OFFSET
			+ 4 * info->core_chipsel);
}

static void nand_davinci_hwctl_1bit(struct mtd_info *mtd, int mode)
{
	struct davinci_nand_info *info;
	uint32_t nandcfr;
	unsigned long flags;

	info = to_davinci_nand(mtd);

	/* Reset ECC hardware */
	nand_davinci_readecc_1bit(mtd);

	spin_lock_irqsave(&davinci_nand_lock, flags);

	/* Restart ECC hardware */
	nandcfr = davinci_nand_readl(info, NANDFCR_OFFSET);
	nandcfr |= BIT(8 + info->core_chipsel);
	davinci_nand_writel(info, NANDFCR_OFFSET, nandcfr);

	spin_unlock_irqrestore(&davinci_nand_lock, flags);
}

/*
 * Read hardware ECC value and pack into three bytes
 */
static int nand_davinci_calculate_1bit(struct mtd_info *mtd,
				       const u_char *dat, u_char *ecc_code)
{
	unsigned int ecc_val = nand_davinci_readecc_1bit(mtd);
	unsigned int ecc24 = (ecc_val & 0x0fff) | ((ecc_val & 0x0fff0000) >> 4);

	/* invert so that erased block ecc is correct */
	ecc24 = ~ecc24;
	ecc_code[0] = (u_char)(ecc24);
	ecc_code[1] = (u_char)(ecc24 >> 8);
	ecc_code[2] = (u_char)(ecc24 >> 16);

	return 0;
}

static int nand_davinci_correct_1bit(struct mtd_info *mtd, u_char *dat,
				     u_char *read_ecc, u_char *calc_ecc)
{
	struct nand_chip *chip = mtd->priv;
	uint32_t eccNand = read_ecc[0] | (read_ecc[1] << 8) |
					 (read_ecc[2] << 16);
	uint32_t eccCalc = calc_ecc[0] | (calc_ecc[1] << 8) |
					 (calc_ecc[2] << 16);
	uint32_t diff = eccCalc ^ eccNand;

	if (diff) {
		if ((((diff >> 12) ^ diff) & 0xfff) == 0xfff) {
			/* Correctable error */
			if ((diff >> (12 + 3)) < chip->ecc.size) {
				dat[diff >> (12 + 3)] ^= BIT((diff >> 12) & 7);
				return 1;
			} else {
				return -1;
			}
		} else if (!(diff & (diff - 1))) {
			/* Single bit ECC error in the ECC itself,
			 * nothing to fix */
			return 1;
		} else {
			/* Uncorrectable error */
			return -1;
		}
	}
	return 0;
}

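/*
 * 4-bit hardware ECC:  the controller produces ten ECC bytes (eight 10-bit
 * values) per 512 bytes of data and can correct up to four bit errors in
 * each such block.  The 4-bit engine is a single shared resource, so only
 * one chipselect may use it at a time (see ecc4_busy).
 */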
static void nand_davinci_hwctl_4bit(struct mtd_info *mtd, int mode)
{
	struct davinci_nand_info *info = to_davinci_nand(mtd);
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&davinci_nand_lock, flags);

	/* Start 4-bit ECC calculation for read/write */
	val = davinci_nand_readl(info, NANDFCR_OFFSET);
	val &= ~(0x03 << 4);
	val |= (info->core_chipsel << 4) | BIT(12);
	davinci_nand_writel(info, NANDFCR_OFFSET, val);

	info->is_readmode = (mode == NAND_ECC_READ);

	spin_unlock_irqrestore(&davinci_nand_lock, flags);
}

/* Read raw ECC code after writing to NAND. */
static void
nand_davinci_readecc_4bit(struct davinci_nand_info *info, u32 code[4])
{
	const u32 mask = 0x03ff03ff;

	code[0] = davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET) & mask;
	code[1] = davinci_nand_readl(info, NAND_4BIT_ECC2_OFFSET) & mask;
	code[2] = davinci_nand_readl(info, NAND_4BIT_ECC3_OFFSET) & mask;
	code[3] = davinci_nand_readl(info, NAND_4BIT_ECC4_OFFSET) & mask;
}

/* Terminate read ECC; or return ECC (as bytes) of data written to NAND. */
static int nand_davinci_calculate_4bit(struct mtd_info *mtd,
				       const u_char *dat, u_char *ecc_code)
{
	struct davinci_nand_info *info = to_davinci_nand(mtd);
	u32 raw_ecc[4], *p;
	unsigned i;

	/*
	 * After a read, terminate ECC calculation with a dummy read of an
	 * ECC register; the hardware state itself is consumed later, by
	 * the correct() method.
	 */
	if (info->is_readmode) {
		davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET);
		return 0;
	}

	/*
	 * After a write, read the raw ECC (eight 10-bit values held in four
	 * registers) and pack it into the ten ECC bytes stored in the OOB
	 * area:  each pair of registers packs into five bytes.
	 */
	nand_davinci_readecc_4bit(info, raw_ecc);
	for (i = 0, p = raw_ecc; i < 2; i++, p += 2) {
		*ecc_code++ =   p[0]        & 0xff;
		*ecc_code++ = ((p[0] >>  8) & 0x03) | ((p[0] >> 14) & 0xfc);
		*ecc_code++ = ((p[0] >> 22) & 0x0f) | ((p[1] <<  4) & 0xf0);
		*ecc_code++ = ((p[1] >>  4) & 0x3f) | ((p[1] >> 10) & 0xc0);
		*ecc_code++ =  (p[1] >> 18) & 0xff;
	}

	return 0;
}

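/*
 * Correct up to 4 bits in data we just read, using state left in the
 * hardware plus the ecc_code computed when it was first written.
 */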
static int nand_davinci_correct_4bit(struct mtd_info *mtd,
		u_char *data, u_char *ecc_code, u_char *null)
{
	int i;
	struct davinci_nand_info *info = to_davinci_nand(mtd);
	unsigned short ecc10[8];
	unsigned short *ecc16;
	u32 syndrome[4];
	u32 ecc_state;
	unsigned num_errors, corrected;
	unsigned long timeo;

	/* An all-0xff ECC area matches an erased page; nothing to correct. */
	for (i = 0; i < 10; i++) {
		if (ecc_code[i] != 0xff)
			goto compare;
	}
	return 0;

compare:
	/* Unpack ten bytes into eight 10 bit values.  We know we're
	 * little-endian, and use type punning for that.
	 */
	if (WARN_ON(0x01 & (unsigned) ecc_code))
		return -EINVAL;
	ecc16 = (unsigned short *)ecc_code;

	ecc10[0] =  (ecc16[0] >>  0) & 0x3ff;
	ecc10[1] = ((ecc16[0] >> 10) & 0x3f) | ((ecc16[1] << 6) & 0x3c0);
	ecc10[2] =  (ecc16[1] >>  4) & 0x3ff;
	ecc10[3] = ((ecc16[1] >> 14) & 0x3)  | ((ecc16[2] << 2) & 0x3fc);
	ecc10[4] =  (ecc16[2] >>  8)         | ((ecc16[3] << 8) & 0x300);
	ecc10[5] =  (ecc16[3] >>  2) & 0x3ff;
	ecc10[6] = ((ecc16[3] >> 12) & 0xf)  | ((ecc16[4] << 4) & 0x3f0);
	ecc10[7] =  (ecc16[4] >>  6) & 0x3ff;

	/* Tell ECC controller about the expected ECC codes. */
	for (i = 7; i >= 0; i--)
		davinci_nand_writel(info, NAND_4BIT_ECC_LOAD_OFFSET, ecc10[i]);

	/* Allow time for syndrome calculation ... then read it.
	 * A syndrome of all zeroes means no detected errors.
	 */
	davinci_nand_readl(info, NANDFSR_OFFSET);
	nand_davinci_readecc_4bit(info, syndrome);
	if (!(syndrome[0] | syndrome[1] | syndrome[2] | syndrome[3]))
		return 0;

	/*
	 * Clear any previous address calculation by doing a dummy read of an
	 * error address register.
	 */
	davinci_nand_readl(info, NAND_ERR_ADD1_OFFSET);

	/* Start address calculation, and wait for it to complete.
	 * We _could_ start reading more data, until errors are
	 * reported.  For now, we just wait.
	 */
	davinci_nand_writel(info, NANDFCR_OFFSET,
			davinci_nand_readl(info, NANDFCR_OFFSET) | BIT(13));

	/*
	 * ECC_STATE field reads 0x3 (Error correction complete) immediately
	 * after setting the 4BITECC_ADD_CALC_START bit.  So if you poll for
	 * the state right away, you may fall out of the loop below before
	 * any correction has actually taken place.  Delay until ECC_STATE
	 * reads at least 4, which means the hardware has entered the
	 * correction state.
	 */
	timeo = jiffies + usecs_to_jiffies(100);
	do {
		ecc_state = (davinci_nand_readl(info,
				NANDFSR_OFFSET) >> 8) & 0x0f;
		cpu_relax();
	} while ((ecc_state < 4) && time_before(jiffies, timeo));

	for (;;) {
		u32 fsr = davinci_nand_readl(info, NANDFSR_OFFSET);

		switch ((fsr >> 8) & 0x0f) {
		case 0:		/* no error, should not happen */
			davinci_nand_readl(info, NAND_ERR_ERRVAL1_OFFSET);
			return 0;
		case 1:		/* five or more errors detected */
			davinci_nand_readl(info, NAND_ERR_ERRVAL1_OFFSET);
			return -EIO;
		case 2:		/* error addresses computed */
		case 3:
			num_errors = 1 + ((fsr >> 16) & 0x03);
			goto correct;
		default:	/* still working on it */
			cpu_relax();
			continue;
		}
	}

correct:
	/* correct each error */
	for (i = 0, corrected = 0; i < num_errors; i++) {
		int error_address, error_value;

		if (i > 1) {
			error_address = davinci_nand_readl(info,
					NAND_ERR_ADD2_OFFSET);
			error_value = davinci_nand_readl(info,
					NAND_ERR_ERRVAL2_OFFSET);
		} else {
			error_address = davinci_nand_readl(info,
					NAND_ERR_ADD1_OFFSET);
			error_value = davinci_nand_readl(info,
					NAND_ERR_ERRVAL1_OFFSET);
		}

		if (i & 1) {
			error_address >>= 16;
			error_value >>= 16;
		}
		error_address &= 0x3ff;
		error_address = (512 + 7) - error_address;

		if (error_address < 512) {
			data[error_address] ^= error_value;
			corrected++;
		}
	}

	return corrected;
}

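/*
 * Data transfer helpers:  use the widest access (32-bit, then 16-bit, then
 * byte) that the buffer alignment and length allow, to amortize the cost of
 * the I/O accessors.
 */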
static void nand_davinci_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct nand_chip *chip = mtd->priv;

	if ((0x03 & ((unsigned)buf)) == 0 && (0x03 & len) == 0)
		ioread32_rep(chip->IO_ADDR_R, buf, len >> 2);
	else if ((0x01 & ((unsigned)buf)) == 0 && (0x01 & len) == 0)
		ioread16_rep(chip->IO_ADDR_R, buf, len >> 1);
	else
		ioread8_rep(chip->IO_ADDR_R, buf, len);
}

static void nand_davinci_write_buf(struct mtd_info *mtd,
		const uint8_t *buf, int len)
{
	struct nand_chip *chip = mtd->priv;

	if ((0x03 & ((unsigned)buf)) == 0 && (0x03 & len) == 0)
		iowrite32_rep(chip->IO_ADDR_R, buf, len >> 2);
	else if ((0x01 & ((unsigned)buf)) == 0 && (0x01 & len) == 0)
		iowrite16_rep(chip->IO_ADDR_R, buf, len >> 1);
	else
		iowrite8_rep(chip->IO_ADDR_R, buf, len);
}

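/*
 * Check hardware register for wait status.  Returns 1 if the device is
 * ready, 0 if it is still busy.
 */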
static int nand_davinci_dev_ready(struct mtd_info *mtd)
{
	struct davinci_nand_info *info = to_davinci_nand(mtd);

	return davinci_nand_readl(info, NANDFSR_OFFSET) & BIT(0);
}

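/*
 * An ECC layout for using 4-bit ECC with small-page (512-byte) flash,
 * storing ten ECC bytes plus the manufacturer's bad block marker.
 */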
static struct nand_ecclayout hwecc4_small __initconst = {
	.eccbytes = 10,
	.eccpos = { 0, 1, 2, 3, 4,
		/* offset 5 holds the badblock marker */
		6, 7,
		13, 14, 15, },
	.oobfree = {
		{.offset = 8, .length = 5, },
		{.offset = 16, },
	},
};

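/*
 * An ECC layout for using 4-bit ECC with large-page (2048-byte) flash,
 * storing ten ECC bytes per 512-byte chunk at the end of the spare area,
 * after the manufacturer's bad block markers.
 */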
static struct nand_ecclayout hwecc4_2048 __initconst = {
	.eccbytes = 40,
	.eccpos = {
		/* at the end of spare sector */
		24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
		34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
		44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
		54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
	},
	.oobfree = {
		/* bytes 0 and 1 hold the manufacturer's bad block markers */
		{.offset = 2, .length = 22, },
	},
};

#if defined(CONFIG_OF)
static const struct of_device_id davinci_nand_of_match[] = {
	{.compatible = "ti,davinci-nand", },
	{},
};
MODULE_DEVICE_TABLE(of, davinci_nand_of_match);

static struct davinci_nand_pdata
	*nand_davinci_get_pdata(struct platform_device *pdev)
{
	if (!dev_get_platdata(&pdev->dev) && pdev->dev.of_node) {
		struct davinci_nand_pdata *pdata;
		const char *mode;
		u32 prop;
		int len;

		pdata = devm_kzalloc(&pdev->dev,
				sizeof(struct davinci_nand_pdata),
				GFP_KERNEL);
		pdev->dev.platform_data = pdata;
		if (!pdata)
			return NULL;
		if (!of_property_read_u32(pdev->dev.of_node,
			"ti,davinci-chipselect", &prop))
			pdev->id = prop;
		if (!of_property_read_u32(pdev->dev.of_node,
			"ti,davinci-mask-ale", &prop))
			pdata->mask_ale = prop;
		if (!of_property_read_u32(pdev->dev.of_node,
			"ti,davinci-mask-cle", &prop))
			pdata->mask_cle = prop;
		if (!of_property_read_u32(pdev->dev.of_node,
			"ti,davinci-mask-chipsel", &prop))
			pdata->mask_chipsel = prop;
		if (!of_property_read_string(pdev->dev.of_node,
			"ti,davinci-ecc-mode", &mode)) {
			if (!strncmp("none", mode, 4))
				pdata->ecc_mode = NAND_ECC_NONE;
			if (!strncmp("soft", mode, 4))
				pdata->ecc_mode = NAND_ECC_SOFT;
			if (!strncmp("hw", mode, 2))
				pdata->ecc_mode = NAND_ECC_HW;
		}
		if (!of_property_read_u32(pdev->dev.of_node,
			"ti,davinci-ecc-bits", &prop))
			pdata->ecc_bits = prop;
		if (!of_property_read_u32(pdev->dev.of_node,
			"ti,davinci-nand-buswidth", &prop))
			if (prop == 16)
				pdata->options |= NAND_BUSWIDTH_16;
		if (of_find_property(pdev->dev.of_node,
			"ti,davinci-nand-use-bbt", &len))
			pdata->bbt_options = NAND_BBT_USE_FLASH;
	}

	return dev_get_platdata(&pdev->dev);
}
#else
static struct davinci_nand_pdata
	*nand_davinci_get_pdata(struct platform_device *pdev)
{
	return dev_get_platdata(&pdev->dev);
}
#endif

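/*
 * Probe:  map the controller and data windows, configure ECC from the
 * platform data (or device tree), set up AEMIF timing, then scan for NAND
 * chips and register the resulting MTD device.
 */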
static int __init nand_davinci_probe(struct platform_device *pdev)
{
	struct davinci_nand_pdata	*pdata;
	struct davinci_nand_info	*info;
	struct resource			*res1;
	struct resource			*res2;
	void __iomem			*vaddr;
	void __iomem			*base;
	int				ret;
	uint32_t			val;
	nand_ecc_modes_t		ecc_mode;

	pdata = nand_davinci_get_pdata(pdev);
	if (!pdata)
		return -ENODEV;

	/* which external chipselect will we be managing? */
	if (pdev->id < 0 || pdev->id > 3)
		return -ENODEV;

	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
	if (!info) {
		dev_err(&pdev->dev, "unable to allocate memory\n");
		ret = -ENOMEM;
		goto err_nomem;
	}

	platform_set_drvdata(pdev, info);

	res1 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	res2 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res1 || !res2) {
		dev_err(&pdev->dev, "resource missing\n");
		ret = -EINVAL;
		goto err_nomem;
	}

	vaddr = devm_ioremap_resource(&pdev->dev, res1);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto err_ioremap;
	}
	base = devm_ioremap_resource(&pdev->dev, res2);
	if (IS_ERR(base)) {
		ret = PTR_ERR(base);
		goto err_ioremap;
	}

	info->dev		= &pdev->dev;
	info->base		= base;
	info->vaddr		= vaddr;

	info->mtd.priv		= &info->chip;
	info->mtd.name		= dev_name(&pdev->dev);
	info->mtd.owner		= THIS_MODULE;

	info->mtd.dev.parent	= &pdev->dev;

	info->chip.IO_ADDR_R	= vaddr;
	info->chip.IO_ADDR_W	= vaddr;
	info->chip.chip_delay	= 0;
	info->chip.select_chip	= nand_davinci_select_chip;

	/* options such as NAND_BBT_USE_FLASH */
	info->chip.bbt_options	= pdata->bbt_options;
	/* options such as 16-bit widths */
	info->chip.options	= pdata->options;
	info->chip.bbt_td	= pdata->bbt_td;
	info->chip.bbt_md	= pdata->bbt_md;
	info->timing		= pdata->timing;

	info->ioaddr		= (uint32_t __force) vaddr;

	info->current_cs	= info->ioaddr;
	info->core_chipsel	= pdev->id;
	info->mask_chipsel	= pdata->mask_chipsel;

	/* use nandboot-capable ALE/CLE masks by default */
	info->mask_ale		= pdata->mask_ale ? : MASK_ALE;
	info->mask_cle		= pdata->mask_cle ? : MASK_CLE;

	/* Set address of hardware control function */
	info->chip.cmd_ctrl	= nand_davinci_hwcontrol;
	info->chip.dev_ready	= nand_davinci_dev_ready;

	/* Speed up buffer I/O */
	info->chip.read_buf	= nand_davinci_read_buf;
	info->chip.write_buf	= nand_davinci_write_buf;

	/* Use board-specific ECC config */
	ecc_mode		= pdata->ecc_mode;

	ret = -EINVAL;
	switch (ecc_mode) {
	case NAND_ECC_NONE:
	case NAND_ECC_SOFT:
		pdata->ecc_bits = 0;
		break;
	case NAND_ECC_HW:
		if (pdata->ecc_bits == 4) {
			/*
			 * The 4-bit ECC hardware is a single shared
			 * instance; only one chipselect may claim it.
			 */
			spin_lock_irq(&davinci_nand_lock);
			if (ecc4_busy)
				ret = -EBUSY;
			else
				ecc4_busy = true;
			spin_unlock_irq(&davinci_nand_lock);

			if (ret == -EBUSY)
				goto err_ecc;

			info->chip.ecc.calculate = nand_davinci_calculate_4bit;
			info->chip.ecc.correct = nand_davinci_correct_4bit;
			info->chip.ecc.hwctl = nand_davinci_hwctl_4bit;
			info->chip.ecc.bytes = 10;
		} else {
			info->chip.ecc.calculate = nand_davinci_calculate_1bit;
			info->chip.ecc.correct = nand_davinci_correct_1bit;
			info->chip.ecc.hwctl = nand_davinci_hwctl_1bit;
			info->chip.ecc.bytes = 3;
		}
		info->chip.ecc.size = 512;
		info->chip.ecc.strength = pdata->ecc_bits;
		break;
	default:
		ret = -EINVAL;
		goto err_ecc;
	}
	info->chip.ecc.mode = ecc_mode;

	info->clk = devm_clk_get(&pdev->dev, "aemif");
	if (IS_ERR(info->clk)) {
		ret = PTR_ERR(info->clk);
		dev_dbg(&pdev->dev, "unable to get AEMIF clock, err %d\n", ret);
		goto err_clk;
	}

	ret = clk_prepare_enable(info->clk);
	if (ret < 0) {
		dev_dbg(&pdev->dev, "unable to enable AEMIF clock, err %d\n",
			ret);
		goto err_clk_enable;
	}

	/*
	 * Set up the Async configuration register in case we did not boot
	 * from NAND and the bootloader did not bother to set it up.
	 */
	val = davinci_nand_readl(info, A1CR_OFFSET + info->core_chipsel * 4);

	/* Extended Wait is not valid and Select Strobe mode is not used */
	val &= ~(ACR_ASIZE_MASK | ACR_EW_MASK | ACR_SS_MASK);
	if (info->chip.options & NAND_BUSWIDTH_16)
		val |= 0x1;

	davinci_nand_writel(info, A1CR_OFFSET + info->core_chipsel * 4, val);

	ret = 0;
	if (info->timing)
		ret = davinci_aemif_setup_timing(info->timing, info->base,
						 info->core_chipsel);
	if (ret < 0) {
		dev_dbg(&pdev->dev, "NAND timing values setup fail\n");
		goto err_timing;
	}

	spin_lock_irq(&davinci_nand_lock);

	/* put CSxNAND into NAND mode */
	val = davinci_nand_readl(info, NANDFCR_OFFSET);
	val |= BIT(info->core_chipsel);
	davinci_nand_writel(info, NANDFCR_OFFSET, val);

	spin_unlock_irq(&davinci_nand_lock);

	/* Scan to find existence of the device(s) */
	ret = nand_scan_ident(&info->mtd, pdata->mask_chipsel ? 2 : 1, NULL);
	if (ret < 0) {
		dev_dbg(&pdev->dev, "no NAND chip(s) found\n");
		goto err_scan;
	}

	/*
	 * Update the ECC layout if needed:  the default layouts cannot
	 * describe the 4-bit hardware ECC, which needs ten ECC bytes for
	 * each 512 bytes of data.
	 */
	if (pdata->ecc_bits == 4) {
		int chunks = info->mtd.writesize / 512;

		if (!chunks || info->mtd.oobsize < 16) {
			dev_dbg(&pdev->dev, "too small\n");
			ret = -EINVAL;
			goto err_scan;
		}

		/*
		 * Small page:  the ten ECC bytes share the first 16 OOB
		 * bytes with the bad block marker; anything past those 16
		 * bytes stays free.
		 */
		if (chunks == 1) {
			info->ecclayout = hwecc4_small;
			info->ecclayout.oobfree[1].length =
				info->mtd.oobsize - 16;
			goto syndrome_done;
		}
		if (chunks == 4) {
			info->ecclayout = hwecc4_2048;
			info->chip.ecc.mode = NAND_ECC_HW_OOB_FIRST;
			goto syndrome_done;
		}

		/*
		 * 4KiB pages would need 80 ECC bytes (ten per 512-byte
		 * chunk), which these layouts cannot describe yet.
		 */
		dev_warn(&pdev->dev, "no 4-bit ECC support yet "
				"for 4KiB-page NAND\n");
		ret = -EIO;
		goto err_scan;

syndrome_done:
		info->chip.ecc.layout = &info->ecclayout;
	}

	ret = nand_scan_tail(&info->mtd);
	if (ret < 0)
		goto err_scan;

	if (pdata->parts)
		ret = mtd_device_parse_register(&info->mtd, NULL, NULL,
					pdata->parts, pdata->nr_parts);
	else {
		struct mtd_part_parser_data ppdata;

		ppdata.of_node = pdev->dev.of_node;
		ret = mtd_device_parse_register(&info->mtd, NULL, &ppdata,
						NULL, 0);
	}
	if (ret < 0)
		goto err_scan;

	val = davinci_nand_readl(info, NRCSR_OFFSET);
	dev_info(&pdev->dev, "controller rev. %d.%d\n",
		 (val >> 8) & 0xff, val & 0xff);

	return 0;

err_scan:
err_timing:
	clk_disable_unprepare(info->clk);

err_clk_enable:
err_clk:
	/* release the shared 4-bit ECC hardware if this probe claimed it */
	spin_lock_irq(&davinci_nand_lock);
	if (ecc_mode == NAND_ECC_HW && pdata->ecc_bits == 4)
		ecc4_busy = false;
	spin_unlock_irq(&davinci_nand_lock);

err_ecc:
err_ioremap:
err_nomem:
	return ret;
}

static int __exit nand_davinci_remove(struct platform_device *pdev)
{
	struct davinci_nand_info *info = platform_get_drvdata(pdev);

	/* release the shared 4-bit ECC hardware; only that mode uses
	 * ten ECC bytes per 512-byte chunk */
	spin_lock_irq(&davinci_nand_lock);
	if (info->chip.ecc.bytes == 10)
		ecc4_busy = false;
	spin_unlock_irq(&davinci_nand_lock);

	nand_release(&info->mtd);

	clk_disable_unprepare(info->clk);

	return 0;
}

static struct platform_driver nand_davinci_driver = {
	.remove		= __exit_p(nand_davinci_remove),
	.driver		= {
		.name	= "davinci_nand",
		.owner	= THIS_MODULE,
		.of_match_table = of_match_ptr(davinci_nand_of_match),
	},
};
MODULE_ALIAS("platform:davinci_nand");

module_platform_driver_probe(nand_davinci_driver, nand_davinci_probe);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Texas Instruments");
MODULE_DESCRIPTION("Davinci NAND flash driver");