1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26#include <linux/kernel.h>
27#include <linux/module.h>
28#include <linux/platform_device.h>
29#include <linux/err.h>
30#include <linux/clk.h>
31#include <linux/io.h>
32#include <linux/mtd/nand.h>
33#include <linux/mtd/partitions.h>
34#include <linux/slab.h>
35#include <linux/of_device.h>
36#include <linux/of.h>
37
38#include <linux/platform_data/mtd-davinci.h>
39#include <linux/platform_data/mtd-davinci-aemif.h>
40
41
42
43
44
45
46
47
48
49
50
51
52
53
/*
 * Per-device state for one NAND chipselect behind the DaVinci AEMIF
 * controller.  One instance is allocated per platform device in probe.
 */
struct davinci_nand_info {
	struct nand_chip	chip;		/* must be first: to_davinci_nand() uses container_of */

	struct device		*dev;		/* &pdev->dev, for logging */
	struct clk		*clk;		/* "aemif" functional clock */

	bool			is_readmode;	/* last 4-bit hwctl was NAND_ECC_READ */

	void __iomem		*base;		/* AEMIF control registers */
	void __iomem		*vaddr;		/* NAND data window */

	uint32_t		ioaddr;		/* vaddr as integer, base for ALE/CLE ORs */
	uint32_t		current_cs;	/* ioaddr | chipsel mask for selected chip */

	uint32_t		mask_chipsel;	/* address bit selecting second chip, or 0 */
	uint32_t		mask_ale;	/* address bit driving ALE */
	uint32_t		mask_cle;	/* address bit driving CLE */

	uint32_t		core_chipsel;	/* AEMIF chipselect number (pdev->id, 0..3) */

	struct davinci_aemif_timing *timing;	/* optional AEMIF timing config from pdata */
};
76
/* Serializes NANDFCR read-modify-write cycles and ecc4_busy updates. */
static DEFINE_SPINLOCK(davinci_nand_lock);
/* The 4-bit ECC engine is shared hardware: true while one chip owns it. */
static bool ecc4_busy;
79
80static inline struct davinci_nand_info *to_davinci_nand(struct mtd_info *mtd)
81{
82 return container_of(mtd_to_nand(mtd), struct davinci_nand_info, chip);
83}
84
85static inline unsigned int davinci_nand_readl(struct davinci_nand_info *info,
86 int offset)
87{
88 return __raw_readl(info->base + offset);
89}
90
91static inline void davinci_nand_writel(struct davinci_nand_info *info,
92 int offset, unsigned long value)
93{
94 __raw_writel(value, info->base + offset);
95}
96
97
98
99
100
101
102
103static void nand_davinci_hwcontrol(struct mtd_info *mtd, int cmd,
104 unsigned int ctrl)
105{
106 struct davinci_nand_info *info = to_davinci_nand(mtd);
107 uint32_t addr = info->current_cs;
108 struct nand_chip *nand = mtd_to_nand(mtd);
109
110
111 if (ctrl & NAND_CTRL_CHANGE) {
112 if ((ctrl & NAND_CTRL_CLE) == NAND_CTRL_CLE)
113 addr |= info->mask_cle;
114 else if ((ctrl & NAND_CTRL_ALE) == NAND_CTRL_ALE)
115 addr |= info->mask_ale;
116
117 nand->IO_ADDR_W = (void __iomem __force *)addr;
118 }
119
120 if (cmd != NAND_CMD_NONE)
121 iowrite8(cmd, nand->IO_ADDR_W);
122}
123
124static void nand_davinci_select_chip(struct mtd_info *mtd, int chip)
125{
126 struct davinci_nand_info *info = to_davinci_nand(mtd);
127 uint32_t addr = info->ioaddr;
128
129
130 if (chip > 0)
131 addr |= info->mask_chipsel;
132 info->current_cs = addr;
133
134 info->chip.IO_ADDR_W = (void __iomem __force *)addr;
135 info->chip.IO_ADDR_R = info->chip.IO_ADDR_W;
136}
137
138
139
140
141
142
143
144static inline uint32_t nand_davinci_readecc_1bit(struct mtd_info *mtd)
145{
146 struct davinci_nand_info *info = to_davinci_nand(mtd);
147
148 return davinci_nand_readl(info, NANDF1ECC_OFFSET
149 + 4 * info->core_chipsel);
150}
151
/*
 * nand_davinci_hwctl_1bit - arm the hardware 1-bit ECC engine
 *
 * A dummy read of the ECC result register clears the accumulated value;
 * setting the per-chipselect enable bit in NANDFCR then restarts ECC
 * generation for the following data transfer.  The NANDFCR update is a
 * read-modify-write, hence the lock.
 */
static void nand_davinci_hwctl_1bit(struct mtd_info *mtd, int mode)
{
	struct davinci_nand_info *info;
	uint32_t nandcfr;
	unsigned long flags;

	info = to_davinci_nand(mtd);

	/* Reset ECC hardware (read clears the accumulator) */
	nand_davinci_readecc_1bit(mtd);

	spin_lock_irqsave(&davinci_nand_lock, flags);

	/* Restart ECC hardware: bits 8..11 enable 1-bit ECC per chipselect */
	nandcfr = davinci_nand_readl(info, NANDFCR_OFFSET);
	nandcfr |= BIT(8 + info->core_chipsel);
	davinci_nand_writel(info, NANDFCR_OFFSET, nandcfr);

	spin_unlock_irqrestore(&davinci_nand_lock, flags);
}
172
173
174
175
176static int nand_davinci_calculate_1bit(struct mtd_info *mtd,
177 const u_char *dat, u_char *ecc_code)
178{
179 unsigned int ecc_val = nand_davinci_readecc_1bit(mtd);
180 unsigned int ecc24 = (ecc_val & 0x0fff) | ((ecc_val & 0x0fff0000) >> 4);
181
182
183 ecc24 = ~ecc24;
184 ecc_code[0] = (u_char)(ecc24);
185 ecc_code[1] = (u_char)(ecc24 >> 8);
186 ecc_code[2] = (u_char)(ecc24 >> 16);
187
188 return 0;
189}
190
/*
 * nand_davinci_correct_1bit - detect/correct errors using the 1-bit ECC
 * @mtd: MTD device
 * @dat: data block (chip->ecc.size bytes), corrected in place
 * @read_ecc: 3-byte ECC read back from OOB
 * @calc_ecc: 3-byte ECC computed over the data just read
 *
 * Returns 0 when the codes match, 1 after fixing a single-bit error
 * (or when the flipped bit was in the ECC bytes themselves), and
 * -EBADMSG for an uncorrectable error.
 */
static int nand_davinci_correct_1bit(struct mtd_info *mtd, u_char *dat,
				     u_char *read_ecc, u_char *calc_ecc)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	uint32_t eccNand = read_ecc[0] | (read_ecc[1] << 8) |
					  (read_ecc[2] << 16);
	uint32_t eccCalc = calc_ecc[0] | (calc_ecc[1] << 8) |
					  (calc_ecc[2] << 16);
	uint32_t diff = eccCalc ^ eccNand;

	if (diff) {
		if ((((diff >> 12) ^ diff) & 0xfff) == 0xfff) {
			/* Correctable single-bit data error: the upper 12
			 * bits of diff are the complement of the lower 12,
			 * which encode byte offset (bits 15..) and bit
			 * position within the byte (bits 12..14). */
			if ((diff >> (12 + 3)) < chip->ecc.size) {
				dat[diff >> (12 + 3)] ^= BIT((diff >> 12) & 7);
				return 1;
			} else {
				/* position lies outside the data block */
				return -EBADMSG;
			}
		} else if (!(diff & (diff - 1))) {
			/* diff is a power of two: a single bit flipped in
			 * the stored ECC itself; the data is fine. */
			return 1;
		} else {
			/* Multiple mismatched bits: uncorrectable. */
			return -EBADMSG;
		}

	}
	return 0;
}
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237static void nand_davinci_hwctl_4bit(struct mtd_info *mtd, int mode)
238{
239 struct davinci_nand_info *info = to_davinci_nand(mtd);
240 unsigned long flags;
241 u32 val;
242
243 spin_lock_irqsave(&davinci_nand_lock, flags);
244
245
246 val = davinci_nand_readl(info, NANDFCR_OFFSET);
247 val &= ~(0x03 << 4);
248 val |= (info->core_chipsel << 4) | BIT(12);
249 davinci_nand_writel(info, NANDFCR_OFFSET, val);
250
251 info->is_readmode = (mode == NAND_ECC_READ);
252
253 spin_unlock_irqrestore(&davinci_nand_lock, flags);
254}
255
256
257static void
258nand_davinci_readecc_4bit(struct davinci_nand_info *info, u32 code[4])
259{
260 const u32 mask = 0x03ff03ff;
261
262 code[0] = davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET) & mask;
263 code[1] = davinci_nand_readl(info, NAND_4BIT_ECC2_OFFSET) & mask;
264 code[2] = davinci_nand_readl(info, NAND_4BIT_ECC3_OFFSET) & mask;
265 code[3] = davinci_nand_readl(info, NAND_4BIT_ECC4_OFFSET) & mask;
266}
267
268
/*
 * nand_davinci_calculate_4bit - terminate or collect the 4-bit ECC
 *
 * On reads the stored ECC is corrected against later, so only a dummy
 * register read is needed to stop the engine.  On writes the eight raw
 * 10-bit values are packed into the ten @ecc_code bytes.
 */
static int nand_davinci_calculate_4bit(struct mtd_info *mtd,
		const u_char *dat, u_char *ecc_code)
{
	struct davinci_nand_info *info = to_davinci_nand(mtd);
	u32 raw_ecc[4], *p;
	unsigned i;

	/* After a read, a dummy read of an ECC register terminates the
	 * calculation so the state machine is ready for the next block;
	 * correction happens in nand_davinci_correct_4bit(). */
	if (info->is_readmode) {
		davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET);
		return 0;
	}

	/* Pack eight raw 10-bit ECC values into ten bytes, in two passes
	 * that each fold four values (upper and lower halves of two
	 * 32-bit words) into five bytes. */
	nand_davinci_readecc_4bit(info, raw_ecc);
	for (i = 0, p = raw_ecc; i < 2; i++, p += 2) {
		*ecc_code++ = p[0] & 0xff;
		*ecc_code++ = ((p[0] >> 8) & 0x03) | ((p[0] >> 14) & 0xfc);
		*ecc_code++ = ((p[0] >> 22) & 0x0f) | ((p[1] << 4) & 0xf0);
		*ecc_code++ = ((p[1] >> 4) & 0x3f) | ((p[1] >> 10) & 0xc0);
		*ecc_code++ = (p[1] >> 18) & 0xff;
	}

	return 0;
}
302
303
304
305
/*
 * nand_davinci_correct_4bit - detect and correct up to four bit errors
 * @mtd: MTD device
 * @data: 512-byte data block, corrected in place
 * @ecc_code: the ten ECC bytes read from OOB for this block
 * @null: unused (framework passes calc_ecc; the hardware supplies it)
 *
 * Loads the stored ECC back into the syndrome engine, then drives the
 * controller's address-calculation state machine to locate and flip
 * the failing bits.  Returns the number of corrected bits, 0 when the
 * syndrome is clean, or -EBADMSG when the block is uncorrectable.
 */
static int nand_davinci_correct_4bit(struct mtd_info *mtd,
		u_char *data, u_char *ecc_code, u_char *null)
{
	int i;
	struct davinci_nand_info *info = to_davinci_nand(mtd);
	unsigned short ecc10[8];
	unsigned short *ecc16;
	u32 syndrome[4];
	u32 ecc_state;
	unsigned num_errors, corrected;
	unsigned long timeo;

	/* Unpack ten bytes into eight 10-bit values via 16-bit loads,
	 * so the buffer must be 2-byte aligned.
	 * NOTE(review): the (unsigned) pointer cast truncates on 64-bit
	 * builds; harmless for the alignment test on the 32-bit SoCs this
	 * driver targets, but uintptr_t would be cleaner — confirm. */
	if (WARN_ON(0x01 & (unsigned) ecc_code))
		return -EINVAL;
	ecc16 = (unsigned short *)ecc_code;

	ecc10[0] = (ecc16[0] >> 0) & 0x3ff;
	ecc10[1] = ((ecc16[0] >> 10) & 0x3f) | ((ecc16[1] << 6) & 0x3c0);
	ecc10[2] = (ecc16[1] >> 4) & 0x3ff;
	ecc10[3] = ((ecc16[1] >> 14) & 0x3) | ((ecc16[2] << 2) & 0x3fc);
	ecc10[4] = (ecc16[2] >> 8) | ((ecc16[3] << 8) & 0x300);
	ecc10[5] = (ecc16[3] >> 2) & 0x3ff;
	ecc10[6] = ((ecc16[3] >> 12) & 0xf) | ((ecc16[4] << 4) & 0x3f0);
	ecc10[7] = (ecc16[4] >> 6) & 0x3ff;

	/* Feed the stored ECC values into the syndrome engine,
	 * highest word first. */
	for (i = 7; i >= 0; i--)
		davinci_nand_writel(info, NAND_4BIT_ECC_LOAD_OFFSET, ecc10[i]);

	/* Allow the syndrome to latch (status read), then fetch it;
	 * an all-zero syndrome means no errors at all. */
	davinci_nand_readl(info, NANDFSR_OFFSET);
	nand_davinci_readecc_4bit(info, syndrome);
	if (!(syndrome[0] | syndrome[1] | syndrome[2] | syndrome[3]))
		return 0;

	/* Dummy read to clear any stale error-address state before
	 * starting a new calculation. */
	davinci_nand_readl(info, NAND_ERR_ADD1_OFFSET);

	/* Start the address-calculation engine (NANDFCR bit 13) to turn
	 * the syndrome into error addresses/values. */
	davinci_nand_writel(info, NANDFCR_OFFSET,
			davinci_nand_readl(info, NANDFCR_OFFSET) | BIT(13));

	/* Wait for the state machine (NANDFSR bits 11:8) to leave the
	 * "busy" states; bounded at ~100us so a wedged engine cannot
	 * hang us here. */
	timeo = jiffies + usecs_to_jiffies(100);
	do {
		ecc_state = (davinci_nand_readl(info,
				NANDFSR_OFFSET) >> 8) & 0x0f;
		cpu_relax();
	} while ((ecc_state < 4) && time_before(jiffies, timeo));

	for (;;) {
		u32 fsr = davinci_nand_readl(info, NANDFSR_OFFSET);

		switch ((fsr >> 8) & 0x0f) {
		case 0:		/* no error: dummy read clears state */
			davinci_nand_readl(info, NAND_ERR_ERRVAL1_OFFSET);
			return 0;
		case 1:		/* five or more errors: uncorrectable */
			davinci_nand_readl(info, NAND_ERR_ERRVAL1_OFFSET);
			return -EBADMSG;
		case 2:		/* error addresses computed */
		case 3:
			num_errors = 1 + ((fsr >> 16) & 0x03);
			goto correct;
		default:	/* still working on it */
			cpu_relax();
			continue;
		}
	}

correct:
	/* Correct each reported error; two errors are packed per
	 * address/value register pair (low halfword first). */
	for (i = 0, corrected = 0; i < num_errors; i++) {
		int error_address, error_value;

		if (i > 1) {
			error_address = davinci_nand_readl(info,
						NAND_ERR_ADD2_OFFSET);
			error_value = davinci_nand_readl(info,
						NAND_ERR_ERRVAL2_OFFSET);
		} else {
			error_address = davinci_nand_readl(info,
						NAND_ERR_ADD1_OFFSET);
			error_value = davinci_nand_readl(info,
						NAND_ERR_ERRVAL1_OFFSET);
		}

		if (i & 1) {
			error_address >>= 16;
			error_value >>= 16;
		}
		error_address &= 0x3ff;
		/* Hardware reports offsets from the end of data+ECC. */
		error_address = (512 + 7) - error_address;

		/* Errors landing in the ECC bytes (>= 512) need no fix. */
		if (error_address < 512) {
			data[error_address] ^= error_value;
			corrected++;
		}
	}

	return corrected;
}
427
428
429
430
431
432
433
434
435
436
437
438
439static void nand_davinci_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
440{
441 struct nand_chip *chip = mtd_to_nand(mtd);
442
443 if ((0x03 & ((unsigned)buf)) == 0 && (0x03 & len) == 0)
444 ioread32_rep(chip->IO_ADDR_R, buf, len >> 2);
445 else if ((0x01 & ((unsigned)buf)) == 0 && (0x01 & len) == 0)
446 ioread16_rep(chip->IO_ADDR_R, buf, len >> 1);
447 else
448 ioread8_rep(chip->IO_ADDR_R, buf, len);
449}
450
451static void nand_davinci_write_buf(struct mtd_info *mtd,
452 const uint8_t *buf, int len)
453{
454 struct nand_chip *chip = mtd_to_nand(mtd);
455
456 if ((0x03 & ((unsigned)buf)) == 0 && (0x03 & len) == 0)
457 iowrite32_rep(chip->IO_ADDR_R, buf, len >> 2);
458 else if ((0x01 & ((unsigned)buf)) == 0 && (0x01 & len) == 0)
459 iowrite16_rep(chip->IO_ADDR_R, buf, len >> 1);
460 else
461 iowrite8_rep(chip->IO_ADDR_R, buf, len);
462}
463
464
465
466
467
468static int nand_davinci_dev_ready(struct mtd_info *mtd)
469{
470 struct davinci_nand_info *info = to_davinci_nand(mtd);
471
472 return davinci_nand_readl(info, NANDFSR_OFFSET) & BIT(0);
473}
474
475
476
477
478
479
480
481static int hwecc4_ooblayout_small_ecc(struct mtd_info *mtd, int section,
482 struct mtd_oob_region *oobregion)
483{
484 if (section > 2)
485 return -ERANGE;
486
487 if (!section) {
488 oobregion->offset = 0;
489 oobregion->length = 5;
490 } else if (section == 1) {
491 oobregion->offset = 6;
492 oobregion->length = 2;
493 } else {
494 oobregion->offset = 13;
495 oobregion->length = 3;
496 }
497
498 return 0;
499}
500
501static int hwecc4_ooblayout_small_free(struct mtd_info *mtd, int section,
502 struct mtd_oob_region *oobregion)
503{
504 if (section > 1)
505 return -ERANGE;
506
507 if (!section) {
508 oobregion->offset = 8;
509 oobregion->length = 5;
510 } else {
511 oobregion->offset = 16;
512 oobregion->length = mtd->oobsize - 16;
513 }
514
515 return 0;
516}
517
/* OOB layout callbacks for 4-bit HW ECC on small-page chips (see above). */
static const struct mtd_ooblayout_ops hwecc4_small_ooblayout_ops = {
	.ecc = hwecc4_ooblayout_small_ecc,
	.free = hwecc4_ooblayout_small_free,
};
522
523#if defined(CONFIG_OF)
/* Device-tree bindings handled by this driver. */
static const struct of_device_id davinci_nand_of_match[] = {
	{.compatible = "ti,davinci-nand", },
	{.compatible = "ti,keystone-nand", },
	{},
};
MODULE_DEVICE_TABLE(of, davinci_nand_of_match);
530
/*
 * nand_davinci_get_pdata - obtain platform data, synthesizing it from
 * device-tree properties when no board file supplied any.
 *
 * Returns the (possibly newly allocated) pdata, NULL when neither a
 * board file nor a DT node provides configuration, or an ERR_PTR on
 * allocation failure / missing required "ti,davinci-chipselect".
 * Note: also writes the chipselect number into pdev->id as a side
 * effect, since probe derives core_chipsel from it.
 */
static struct davinci_nand_pdata
	*nand_davinci_get_pdata(struct platform_device *pdev)
{
	if (!dev_get_platdata(&pdev->dev) && pdev->dev.of_node) {
		struct davinci_nand_pdata *pdata;
		const char *mode;
		u32 prop;

		pdata = devm_kzalloc(&pdev->dev,
				sizeof(struct davinci_nand_pdata),
				GFP_KERNEL);
		pdev->dev.platform_data = pdata;
		if (!pdata)
			return ERR_PTR(-ENOMEM);
		/* required: which AEMIF chipselect the NAND sits on */
		if (!of_property_read_u32(pdev->dev.of_node,
			"ti,davinci-chipselect", &prop))
			pdev->id = prop;
		else
			return ERR_PTR(-EINVAL);

		/* optional ALE/CLE address-line masks (defaults in probe) */
		if (!of_property_read_u32(pdev->dev.of_node,
			"ti,davinci-mask-ale", &prop))
			pdata->mask_ale = prop;
		if (!of_property_read_u32(pdev->dev.of_node,
			"ti,davinci-mask-cle", &prop))
			pdata->mask_cle = prop;
		if (!of_property_read_u32(pdev->dev.of_node,
			"ti,davinci-mask-chipsel", &prop))
			pdata->mask_chipsel = prop;
		/* "none" / "soft" / "hw"; later matches win on overlap */
		if (!of_property_read_string(pdev->dev.of_node,
			"ti,davinci-ecc-mode", &mode)) {
			if (!strncmp("none", mode, 4))
				pdata->ecc_mode = NAND_ECC_NONE;
			if (!strncmp("soft", mode, 4))
				pdata->ecc_mode = NAND_ECC_SOFT;
			if (!strncmp("hw", mode, 2))
				pdata->ecc_mode = NAND_ECC_HW;
		}
		if (!of_property_read_u32(pdev->dev.of_node,
			"ti,davinci-ecc-bits", &prop))
			pdata->ecc_bits = prop;

		if (!of_property_read_u32(pdev->dev.of_node,
			"ti,davinci-nand-buswidth", &prop) && prop == 16)
			pdata->options |= NAND_BUSWIDTH_16;

		if (of_property_read_bool(pdev->dev.of_node,
			"ti,davinci-nand-use-bbt"))
			pdata->bbt_options = NAND_BBT_USE_FLASH;

		/* Keystone parts cannot do subpage writes */
		if (of_device_is_compatible(pdev->dev.of_node,
					    "ti,keystone-nand")) {
			pdata->options |= NAND_NO_SUBPAGE_WRITE;
		}
	}

	return dev_get_platdata(&pdev->dev);
}
589#else
static struct davinci_nand_pdata
	*nand_davinci_get_pdata(struct platform_device *pdev)
{
	/* Without CONFIG_OF, only a board file can supply platform data. */
	return dev_get_platdata(&pdev->dev);
}
595#endif
596
597static int nand_davinci_probe(struct platform_device *pdev)
598{
599 struct davinci_nand_pdata *pdata;
600 struct davinci_nand_info *info;
601 struct resource *res1;
602 struct resource *res2;
603 void __iomem *vaddr;
604 void __iomem *base;
605 int ret;
606 uint32_t val;
607 struct mtd_info *mtd;
608
609 pdata = nand_davinci_get_pdata(pdev);
610 if (IS_ERR(pdata))
611 return PTR_ERR(pdata);
612
613
614 if (!pdata)
615 return -ENODEV;
616
617
618 if (pdev->id < 0 || pdev->id > 3)
619 return -ENODEV;
620
621 info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
622 if (!info)
623 return -ENOMEM;
624
625 platform_set_drvdata(pdev, info);
626
627 res1 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
628 res2 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
629 if (!res1 || !res2) {
630 dev_err(&pdev->dev, "resource missing\n");
631 return -EINVAL;
632 }
633
634 vaddr = devm_ioremap_resource(&pdev->dev, res1);
635 if (IS_ERR(vaddr))
636 return PTR_ERR(vaddr);
637
638
639
640
641
642
643
644 base = devm_ioremap(&pdev->dev, res2->start, resource_size(res2));
645 if (!base) {
646 dev_err(&pdev->dev, "ioremap failed for resource %pR\n", res2);
647 return -EADDRNOTAVAIL;
648 }
649
650 info->dev = &pdev->dev;
651 info->base = base;
652 info->vaddr = vaddr;
653
654 mtd = nand_to_mtd(&info->chip);
655 mtd->dev.parent = &pdev->dev;
656 nand_set_flash_node(&info->chip, pdev->dev.of_node);
657
658 info->chip.IO_ADDR_R = vaddr;
659 info->chip.IO_ADDR_W = vaddr;
660 info->chip.chip_delay = 0;
661 info->chip.select_chip = nand_davinci_select_chip;
662
663
664 info->chip.bbt_options = pdata->bbt_options;
665
666 info->chip.options = pdata->options;
667 info->chip.bbt_td = pdata->bbt_td;
668 info->chip.bbt_md = pdata->bbt_md;
669 info->timing = pdata->timing;
670
671 info->ioaddr = (uint32_t __force) vaddr;
672
673 info->current_cs = info->ioaddr;
674 info->core_chipsel = pdev->id;
675 info->mask_chipsel = pdata->mask_chipsel;
676
677
678 info->mask_ale = pdata->mask_ale ? : MASK_ALE;
679 info->mask_cle = pdata->mask_cle ? : MASK_CLE;
680
681
682 info->chip.cmd_ctrl = nand_davinci_hwcontrol;
683 info->chip.dev_ready = nand_davinci_dev_ready;
684
685
686 info->chip.read_buf = nand_davinci_read_buf;
687 info->chip.write_buf = nand_davinci_write_buf;
688
689
690 info->chip.ecc.mode = pdata->ecc_mode;
691
692 ret = -EINVAL;
693
694 info->clk = devm_clk_get(&pdev->dev, "aemif");
695 if (IS_ERR(info->clk)) {
696 ret = PTR_ERR(info->clk);
697 dev_dbg(&pdev->dev, "unable to get AEMIF clock, err %d\n", ret);
698 return ret;
699 }
700
701 ret = clk_prepare_enable(info->clk);
702 if (ret < 0) {
703 dev_dbg(&pdev->dev, "unable to enable AEMIF clock, err %d\n",
704 ret);
705 goto err_clk_enable;
706 }
707
708 spin_lock_irq(&davinci_nand_lock);
709
710
711 val = davinci_nand_readl(info, NANDFCR_OFFSET);
712 val |= BIT(info->core_chipsel);
713 davinci_nand_writel(info, NANDFCR_OFFSET, val);
714
715 spin_unlock_irq(&davinci_nand_lock);
716
717
718 ret = nand_scan_ident(mtd, pdata->mask_chipsel ? 2 : 1, NULL);
719 if (ret < 0) {
720 dev_dbg(&pdev->dev, "no NAND chip(s) found\n");
721 goto err;
722 }
723
724 switch (info->chip.ecc.mode) {
725 case NAND_ECC_NONE:
726 pdata->ecc_bits = 0;
727 break;
728 case NAND_ECC_SOFT:
729 pdata->ecc_bits = 0;
730
731
732
733
734
735
736 info->chip.ecc.algo = NAND_ECC_HAMMING;
737 break;
738 case NAND_ECC_HW:
739 if (pdata->ecc_bits == 4) {
740
741
742
743
744
745 spin_lock_irq(&davinci_nand_lock);
746 if (ecc4_busy)
747 ret = -EBUSY;
748 else
749 ecc4_busy = true;
750 spin_unlock_irq(&davinci_nand_lock);
751
752 if (ret == -EBUSY)
753 return ret;
754
755 info->chip.ecc.calculate = nand_davinci_calculate_4bit;
756 info->chip.ecc.correct = nand_davinci_correct_4bit;
757 info->chip.ecc.hwctl = nand_davinci_hwctl_4bit;
758 info->chip.ecc.bytes = 10;
759 info->chip.ecc.options = NAND_ECC_GENERIC_ERASED_CHECK;
760 } else {
761 info->chip.ecc.calculate = nand_davinci_calculate_1bit;
762 info->chip.ecc.correct = nand_davinci_correct_1bit;
763 info->chip.ecc.hwctl = nand_davinci_hwctl_1bit;
764 info->chip.ecc.bytes = 3;
765 }
766 info->chip.ecc.size = 512;
767 info->chip.ecc.strength = pdata->ecc_bits;
768 break;
769 default:
770 return -EINVAL;
771 }
772
773
774
775
776
777
778 if (pdata->ecc_bits == 4) {
779 int chunks = mtd->writesize / 512;
780
781 if (!chunks || mtd->oobsize < 16) {
782 dev_dbg(&pdev->dev, "too small\n");
783 ret = -EINVAL;
784 goto err;
785 }
786
787
788
789
790
791 if (chunks == 1) {
792 mtd_set_ooblayout(mtd, &hwecc4_small_ooblayout_ops);
793 } else if (chunks == 4 || chunks == 8) {
794 mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
795 info->chip.ecc.mode = NAND_ECC_HW_OOB_FIRST;
796 } else {
797 ret = -EIO;
798 goto err;
799 }
800 }
801
802 ret = nand_scan_tail(mtd);
803 if (ret < 0)
804 goto err;
805
806 if (pdata->parts)
807 ret = mtd_device_parse_register(mtd, NULL, NULL,
808 pdata->parts, pdata->nr_parts);
809 else
810 ret = mtd_device_register(mtd, NULL, 0);
811 if (ret < 0)
812 goto err;
813
814 val = davinci_nand_readl(info, NRCSR_OFFSET);
815 dev_info(&pdev->dev, "controller rev. %d.%d\n",
816 (val >> 8) & 0xff, val & 0xff);
817
818 return 0;
819
820err:
821 clk_disable_unprepare(info->clk);
822
823err_clk_enable:
824 spin_lock_irq(&davinci_nand_lock);
825 if (info->chip.ecc.mode == NAND_ECC_HW_SYNDROME)
826 ecc4_busy = false;
827 spin_unlock_irq(&davinci_nand_lock);
828 return ret;
829}
830
831static int nand_davinci_remove(struct platform_device *pdev)
832{
833 struct davinci_nand_info *info = platform_get_drvdata(pdev);
834
835 spin_lock_irq(&davinci_nand_lock);
836 if (info->chip.ecc.mode == NAND_ECC_HW_SYNDROME)
837 ecc4_busy = false;
838 spin_unlock_irq(&davinci_nand_lock);
839
840 nand_release(nand_to_mtd(&info->chip));
841
842 clk_disable_unprepare(info->clk);
843
844 return 0;
845}
846
/* Platform driver glue; matched by name or by the OF table above. */
static struct platform_driver nand_davinci_driver = {
	.probe		= nand_davinci_probe,
	.remove		= nand_davinci_remove,
	.driver		= {
		.name	= "davinci_nand",
		.of_match_table = of_match_ptr(davinci_nand_of_match),
	},
};
MODULE_ALIAS("platform:davinci_nand");

module_platform_driver(nand_davinci_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Texas Instruments");
MODULE_DESCRIPTION("Davinci NAND flash driver");
862
863