1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19#include <linux/clk.h>
20#include <linux/completion.h>
21#include <linux/dmaengine.h>
22#include <linux/dma-direction.h>
23#include <linux/dma-mapping.h>
24#include <linux/err.h>
25#include <linux/init.h>
26#include <linux/module.h>
27#include <linux/resource.h>
28#include <linux/sched.h>
29#include <linux/types.h>
30#include <linux/mtd/mtd.h>
31#include <linux/mtd/nand.h>
32#include <linux/mtd/nand_ecc.h>
33#include <linux/platform_device.h>
34#include <linux/of.h>
35#include <linux/mtd/partitions.h>
36#include <linux/io.h>
37#include <linux/slab.h>
38#include <linux/mtd/fsmc.h>
39#include <linux/amba/bus.h>
40#include <mtd/mtd-abi.h>
41
42static int fsmc_ecc1_ooblayout_ecc(struct mtd_info *mtd, int section,
43 struct mtd_oob_region *oobregion)
44{
45 struct nand_chip *chip = mtd_to_nand(mtd);
46
47 if (section >= chip->ecc.steps)
48 return -ERANGE;
49
50 oobregion->offset = (section * 16) + 2;
51 oobregion->length = 3;
52
53 return 0;
54}
55
56static int fsmc_ecc1_ooblayout_free(struct mtd_info *mtd, int section,
57 struct mtd_oob_region *oobregion)
58{
59 struct nand_chip *chip = mtd_to_nand(mtd);
60
61 if (section >= chip->ecc.steps)
62 return -ERANGE;
63
64 oobregion->offset = (section * 16) + 8;
65
66 if (section < chip->ecc.steps - 1)
67 oobregion->length = 8;
68 else
69 oobregion->length = mtd->oobsize - oobregion->offset;
70
71 return 0;
72}
73
/* OOB layout callbacks for the 1-bit HW ECC scheme (3 ECC bytes / step). */
static const struct mtd_ooblayout_ops fsmc_ecc1_ooblayout_ops = {
	.ecc = fsmc_ecc1_ooblayout_ecc,
	.free = fsmc_ecc1_ooblayout_free,
};
78
79
80
81
82
83
84
85static int fsmc_ecc4_ooblayout_ecc(struct mtd_info *mtd, int section,
86 struct mtd_oob_region *oobregion)
87{
88 struct nand_chip *chip = mtd_to_nand(mtd);
89
90 if (section >= chip->ecc.steps)
91 return -ERANGE;
92
93 oobregion->length = chip->ecc.bytes;
94
95 if (!section && mtd->writesize <= 512)
96 oobregion->offset = 0;
97 else
98 oobregion->offset = (section * 16) + 2;
99
100 return 0;
101}
102
103static int fsmc_ecc4_ooblayout_free(struct mtd_info *mtd, int section,
104 struct mtd_oob_region *oobregion)
105{
106 struct nand_chip *chip = mtd_to_nand(mtd);
107
108 if (section >= chip->ecc.steps)
109 return -ERANGE;
110
111 oobregion->offset = (section * 16) + 15;
112
113 if (section < chip->ecc.steps - 1)
114 oobregion->length = 3;
115 else
116 oobregion->length = mtd->oobsize - oobregion->offset;
117
118 return 0;
119}
120
/*
 * OOB layout callbacks for the 8-bit BCH ECC scheme (13 ECC bytes per
 * 512-byte step, see fsmc_nand_probe). Used on FSMC revisions >= 8.
 */
static const struct mtd_ooblayout_ops fsmc_ecc4_ooblayout_ops = {
	.ecc = fsmc_ecc4_ooblayout_ecc,
	.free = fsmc_ecc4_ooblayout_free,
};
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
/**
 * struct fsmc_nand_data - per-device driver state for one FSMC NAND bank.
 * @pid:		AMBA peripheral ID read back from the register window
 * @nand:		NAND chip structure embedded in this host
 * @partitions:		partition table handed over from platform data
 * @nr_partitions:	number of entries in @partitions
 * @bank:		FSMC bank the chip is wired to (0..3)
 * @dev:		back pointer to the platform device's struct device
 * @mode:		access mode (word access or DMA)
 * @clk:		block clock of the FSMC controller
 * @read_dma_chan:	dmaengine memcpy channel for reads (DMA mode only)
 * @write_dma_chan:	dmaengine memcpy channel for writes (DMA mode only)
 * @dma_access_complete: completion signalled from the DMA callback
 * @dev_timings:	optional controller timings; NULL selects defaults
 * @data_pa:		physical address of the data window (DMA target)
 * @data_va:		ioremapped data window
 * @cmd_va:		ioremapped command window
 * @addr_va:		ioremapped address window
 * @regs_va:		ioremapped controller register window
 * @select_chip:	optional platform hook for bank/chip selection
 */
struct fsmc_nand_data {
	u32 pid;
	struct nand_chip nand;
	struct mtd_partition *partitions;
	unsigned int nr_partitions;

	unsigned int bank;
	struct device *dev;
	enum access_mode mode;
	struct clk *clk;

	/* DMA related objects */
	struct dma_chan *read_dma_chan;
	struct dma_chan *write_dma_chan;
	struct completion dma_access_complete;

	struct fsmc_nand_timings *dev_timings;

	dma_addr_t data_pa;
	void __iomem *data_va;
	void __iomem *cmd_va;
	void __iomem *addr_va;
	void __iomem *regs_va;

	void (*select_chip)(uint32_t bank, uint32_t busw);
};
174
175static inline struct fsmc_nand_data *mtd_to_fsmc(struct mtd_info *mtd)
176{
177 return container_of(mtd_to_nand(mtd), struct fsmc_nand_data, nand);
178}
179
180
181static void fsmc_select_chip(struct mtd_info *mtd, int chipnr)
182{
183 struct nand_chip *chip = mtd_to_nand(mtd);
184 struct fsmc_nand_data *host;
185
186 host = mtd_to_fsmc(mtd);
187
188 switch (chipnr) {
189 case -1:
190 chip->cmd_ctrl(mtd, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE);
191 break;
192 case 0:
193 case 1:
194 case 2:
195 case 3:
196 if (host->select_chip)
197 host->select_chip(chipnr,
198 chip->options & NAND_BUSWIDTH_16);
199 break;
200
201 default:
202 dev_err(host->dev, "unsupported chip-select %d\n", chipnr);
203 }
204}
205
206
207
208
209
/*
 * fsmc_cmd_ctrl - hardware specific access to the control lines.
 *
 * The FSMC exposes separate memory windows for command, address and data
 * cycles, so asserting CLE or ALE means steering IO_ADDR_R/W to the
 * matching window. The NCE line is driven through the FSMC_ENABLE bit of
 * the bank's PC register.
 */
static void fsmc_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
{
	struct nand_chip *this = mtd_to_nand(mtd);
	struct fsmc_nand_data *host = mtd_to_fsmc(mtd);
	void __iomem *regs = host->regs_va;
	unsigned int bank = host->bank;

	if (ctrl & NAND_CTRL_CHANGE) {
		u32 pc;

		if (ctrl & NAND_CLE) {
			/* Command latch: use the command window */
			this->IO_ADDR_R = host->cmd_va;
			this->IO_ADDR_W = host->cmd_va;
		} else if (ctrl & NAND_ALE) {
			/* Address latch: use the address window */
			this->IO_ADDR_R = host->addr_va;
			this->IO_ADDR_W = host->addr_va;
		} else {
			/* Plain data cycles go through the data window */
			this->IO_ADDR_R = host->data_va;
			this->IO_ADDR_W = host->data_va;
		}

		pc = readl(FSMC_NAND_REG(regs, bank, PC));
		if (ctrl & NAND_NCE)
			pc |= FSMC_ENABLE;
		else
			pc &= ~FSMC_ENABLE;
		writel_relaxed(pc, FSMC_NAND_REG(regs, bank, PC));
	}

	/* Make sure the PC update is visible before the command byte goes out. */
	mb();

	if (cmd != NAND_CMD_NONE)
		writeb_relaxed(cmd, this->IO_ADDR_W);
}
244
245
246
247
248
249
250
/*
 * fsmc_nand_setup - program bus width and timings for one FSMC bank.
 * @regs:	ioremapped controller register window
 * @bank:	FSMC bank to configure
 * @busw:	non-zero selects a 16-bit device bus, zero selects 8-bit
 * @timings:	optional timing set; NULL selects conservative defaults
 *
 * The PC register carries the device type, width and the tclr/tar
 * fields; the identical thiz/thold/twait/tset value is written to both
 * the COMM and ATTRIB timing registers.
 */
static void fsmc_nand_setup(void __iomem *regs, uint32_t bank,
			    uint32_t busw, struct fsmc_nand_timings *timings)
{
	uint32_t value = FSMC_DEVTYPE_NAND | FSMC_ENABLE | FSMC_WAITON;
	uint32_t tclr, tar, thiz, thold, twait, tset;
	struct fsmc_nand_timings *tims;
	struct fsmc_nand_timings default_timings = {
		.tclr = FSMC_TCLR_1,
		.tar = FSMC_TAR_1,
		.thiz = FSMC_THIZ_1,
		.thold = FSMC_THOLD_4,
		.twait = FSMC_TWAIT_6,
		.tset = FSMC_TSET_0,
	};

	if (timings)
		tims = timings;
	else
		tims = &default_timings;

	/* Mask and shift each timing field into its register position. */
	tclr = (tims->tclr & FSMC_TCLR_MASK) << FSMC_TCLR_SHIFT;
	tar = (tims->tar & FSMC_TAR_MASK) << FSMC_TAR_SHIFT;
	thiz = (tims->thiz & FSMC_THIZ_MASK) << FSMC_THIZ_SHIFT;
	thold = (tims->thold & FSMC_THOLD_MASK) << FSMC_THOLD_SHIFT;
	twait = (tims->twait & FSMC_TWAIT_MASK) << FSMC_TWAIT_SHIFT;
	tset = (tims->tset & FSMC_TSET_MASK) << FSMC_TSET_SHIFT;

	if (busw)
		writel_relaxed(value | FSMC_DEVWID_16,
			       FSMC_NAND_REG(regs, bank, PC));
	else
		writel_relaxed(value | FSMC_DEVWID_8,
			       FSMC_NAND_REG(regs, bank, PC));

	writel_relaxed(readl(FSMC_NAND_REG(regs, bank, PC)) | tclr | tar,
		       FSMC_NAND_REG(regs, bank, PC));
	writel_relaxed(thiz | thold | twait | tset,
		       FSMC_NAND_REG(regs, bank, COMM));
	writel_relaxed(thiz | thold | twait | tset,
		       FSMC_NAND_REG(regs, bank, ATTRIB));
}
292
293
294
295
/*
 * fsmc_enable_hwecc - (re)start the controller's hardware ECC engine.
 * @mtd:	MTD device structure
 * @mode:	ECC mode (unused; the same setup serves read and write)
 *
 * Clears the 256-byte ECC page-length bit (selecting the 512-byte page
 * length), then clears and re-sets FSMC_ECCEN. NOTE(review): the
 * clear-then-set sequence presumably restarts the ECC computation for a
 * fresh chunk — confirm against the FSMC reference manual.
 */
static void fsmc_enable_hwecc(struct mtd_info *mtd, int mode)
{
	struct fsmc_nand_data *host = mtd_to_fsmc(mtd);
	void __iomem *regs = host->regs_va;
	uint32_t bank = host->bank;

	writel_relaxed(readl(FSMC_NAND_REG(regs, bank, PC)) & ~FSMC_ECCPLEN_256,
		       FSMC_NAND_REG(regs, bank, PC));
	writel_relaxed(readl(FSMC_NAND_REG(regs, bank, PC)) & ~FSMC_ECCEN,
		       FSMC_NAND_REG(regs, bank, PC));
	writel_relaxed(readl(FSMC_NAND_REG(regs, bank, PC)) | FSMC_ECCEN,
		       FSMC_NAND_REG(regs, bank, PC));
}
309
310
311
312
313
314
/*
 * fsmc_read_hwecc_ecc4 - read the 13-byte BCH8 ECC code from hardware.
 * @mtd:	MTD device structure
 * @data:	page data (unused; the controller computed the code already)
 * @ecc:	output buffer, must hold at least 13 bytes
 *
 * Polls the bank status register until FSMC_CODE_RDY signals the code is
 * available (bounded by FSMC_BUSY_WAIT_TIMEOUT), then assembles the 13
 * ECC bytes from the ECC1..ECC3 registers plus bits 16..23 of STS.
 *
 * Returns 0 on success or -ETIMEDOUT if the code never became ready.
 */
static int fsmc_read_hwecc_ecc4(struct mtd_info *mtd, const uint8_t *data,
				uint8_t *ecc)
{
	struct fsmc_nand_data *host = mtd_to_fsmc(mtd);
	void __iomem *regs = host->regs_va;
	uint32_t bank = host->bank;
	uint32_t ecc_tmp;
	unsigned long deadline = jiffies + FSMC_BUSY_WAIT_TIMEOUT;

	/* Busy-wait (yielding) for the ECC engine to finish. */
	do {
		if (readl_relaxed(FSMC_NAND_REG(regs, bank, STS)) & FSMC_CODE_RDY)
			break;
		else
			cond_resched();
	} while (!time_after_eq(jiffies, deadline));

	if (time_after_eq(jiffies, deadline)) {
		dev_err(host->dev, "calculate ecc timed out\n");
		return -ETIMEDOUT;
	}

	/* Unpack ECC bytes 0..11 little-endian from the three ECC registers. */
	ecc_tmp = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC1));
	ecc[0] = (uint8_t) (ecc_tmp >> 0);
	ecc[1] = (uint8_t) (ecc_tmp >> 8);
	ecc[2] = (uint8_t) (ecc_tmp >> 16);
	ecc[3] = (uint8_t) (ecc_tmp >> 24);

	ecc_tmp = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC2));
	ecc[4] = (uint8_t) (ecc_tmp >> 0);
	ecc[5] = (uint8_t) (ecc_tmp >> 8);
	ecc[6] = (uint8_t) (ecc_tmp >> 16);
	ecc[7] = (uint8_t) (ecc_tmp >> 24);

	ecc_tmp = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC3));
	ecc[8] = (uint8_t) (ecc_tmp >> 0);
	ecc[9] = (uint8_t) (ecc_tmp >> 8);
	ecc[10] = (uint8_t) (ecc_tmp >> 16);
	ecc[11] = (uint8_t) (ecc_tmp >> 24);

	/* The 13th byte lives in bits 16..23 of the status register. */
	ecc_tmp = readl_relaxed(FSMC_NAND_REG(regs, bank, STS));
	ecc[12] = (uint8_t) (ecc_tmp >> 16);

	return 0;
}
359
360
361
362
363
364
365static int fsmc_read_hwecc_ecc1(struct mtd_info *mtd, const uint8_t *data,
366 uint8_t *ecc)
367{
368 struct fsmc_nand_data *host = mtd_to_fsmc(mtd);
369 void __iomem *regs = host->regs_va;
370 uint32_t bank = host->bank;
371 uint32_t ecc_tmp;
372
373 ecc_tmp = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC1));
374 ecc[0] = (uint8_t) (ecc_tmp >> 0);
375 ecc[1] = (uint8_t) (ecc_tmp >> 8);
376 ecc[2] = (uint8_t) (ecc_tmp >> 16);
377
378 return 0;
379}
380
381
382static int count_written_bits(uint8_t *buff, int size, int max_bits)
383{
384 int k, written_bits = 0;
385
386 for (k = 0; k < size; k++) {
387 written_bits += hweight8(~buff[k]);
388 if (written_bits > max_bits)
389 break;
390 }
391
392 return written_bits;
393}
394
395static void dma_complete(void *param)
396{
397 struct fsmc_nand_data *host = param;
398
399 complete(&host->dma_access_complete);
400}
401
402static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len,
403 enum dma_data_direction direction)
404{
405 struct dma_chan *chan;
406 struct dma_device *dma_dev;
407 struct dma_async_tx_descriptor *tx;
408 dma_addr_t dma_dst, dma_src, dma_addr;
409 dma_cookie_t cookie;
410 unsigned long flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
411 int ret;
412 unsigned long time_left;
413
414 if (direction == DMA_TO_DEVICE)
415 chan = host->write_dma_chan;
416 else if (direction == DMA_FROM_DEVICE)
417 chan = host->read_dma_chan;
418 else
419 return -EINVAL;
420
421 dma_dev = chan->device;
422 dma_addr = dma_map_single(dma_dev->dev, buffer, len, direction);
423
424 if (direction == DMA_TO_DEVICE) {
425 dma_src = dma_addr;
426 dma_dst = host->data_pa;
427 } else {
428 dma_src = host->data_pa;
429 dma_dst = dma_addr;
430 }
431
432 tx = dma_dev->device_prep_dma_memcpy(chan, dma_dst, dma_src,
433 len, flags);
434 if (!tx) {
435 dev_err(host->dev, "device_prep_dma_memcpy error\n");
436 ret = -EIO;
437 goto unmap_dma;
438 }
439
440 tx->callback = dma_complete;
441 tx->callback_param = host;
442 cookie = tx->tx_submit(tx);
443
444 ret = dma_submit_error(cookie);
445 if (ret) {
446 dev_err(host->dev, "dma_submit_error %d\n", cookie);
447 goto unmap_dma;
448 }
449
450 dma_async_issue_pending(chan);
451
452 time_left =
453 wait_for_completion_timeout(&host->dma_access_complete,
454 msecs_to_jiffies(3000));
455 if (time_left == 0) {
456 dmaengine_terminate_all(chan);
457 dev_err(host->dev, "wait_for_completion_timeout\n");
458 ret = -ETIMEDOUT;
459 goto unmap_dma;
460 }
461
462 ret = 0;
463
464unmap_dma:
465 dma_unmap_single(dma_dev->dev, dma_addr, len, direction);
466
467 return ret;
468}
469
470
471
472
473
474
475
476static void fsmc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
477{
478 int i;
479 struct nand_chip *chip = mtd_to_nand(mtd);
480
481 if (IS_ALIGNED((uint32_t)buf, sizeof(uint32_t)) &&
482 IS_ALIGNED(len, sizeof(uint32_t))) {
483 uint32_t *p = (uint32_t *)buf;
484 len = len >> 2;
485 for (i = 0; i < len; i++)
486 writel_relaxed(p[i], chip->IO_ADDR_W);
487 } else {
488 for (i = 0; i < len; i++)
489 writeb_relaxed(buf[i], chip->IO_ADDR_W);
490 }
491}
492
493
494
495
496
497
498
499static void fsmc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
500{
501 int i;
502 struct nand_chip *chip = mtd_to_nand(mtd);
503
504 if (IS_ALIGNED((uint32_t)buf, sizeof(uint32_t)) &&
505 IS_ALIGNED(len, sizeof(uint32_t))) {
506 uint32_t *p = (uint32_t *)buf;
507 len = len >> 2;
508 for (i = 0; i < len; i++)
509 p[i] = readl_relaxed(chip->IO_ADDR_R);
510 } else {
511 for (i = 0; i < len; i++)
512 buf[i] = readb_relaxed(chip->IO_ADDR_R);
513 }
514}
515
516
517
518
519
520
521
/*
 * fsmc_read_buf_dma - read chip data into a buffer via DMA.
 * @mtd:	MTD device structure
 * @buf:	buffer to store the read data
 * @len:	number of bytes to read
 *
 * NOTE(review): the dma_xfer() error code is discarded — the read_buf
 * hook returns void, so a failed/timed-out transfer leaves @buf
 * partially filled with no indication to the caller.
 */
static void fsmc_read_buf_dma(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct fsmc_nand_data *host = mtd_to_fsmc(mtd);

	dma_xfer(host, buf, len, DMA_FROM_DEVICE);
}
528
529
530
531
532
533
534
/*
 * fsmc_write_buf_dma - write a buffer to the NAND data port via DMA.
 * @mtd:	MTD device structure
 * @buf:	data buffer
 * @len:	number of bytes to write
 *
 * NOTE(review): the dma_xfer() error code is discarded — the write_buf
 * hook returns void, so a failed transfer goes unreported here.
 */
static void fsmc_write_buf_dma(struct mtd_info *mtd, const uint8_t *buf,
		int len)
{
	struct fsmc_nand_data *host = mtd_to_fsmc(mtd);

	dma_xfer(host, (void *)buf, len, DMA_TO_DEVICE);
}
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
/*
 * fsmc_read_page_hwecc - read one page with hardware ECC, one 512-byte
 * step at a time.
 * @mtd:	mtd info structure
 * @chip:	nand chip info structure
 * @buf:	buffer to store the read data
 * @oob_required:	unused here — the ECC bytes are always fetched
 *			from the OOB area regardless
 * @page:	page number to read
 *
 * For each ECC step the data is read with hardware ECC enabled, then the
 * step's ECC bytes are gathered from the OOB regions described by the
 * layout before correction is applied. The OOB is read piecewise because
 * one step's ECC bytes may span several layout regions.
 *
 * Returns the maximum number of bitflips seen in any single ECC step.
 */
static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
				uint8_t *buf, int oob_required, int page)
{
	int i, j, s, stat, eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *p = buf;
	uint8_t *ecc_calc = chip->buffers->ecccalc;
	uint8_t *ecc_code = chip->buffers->ecccode;
	int off, len, group = 0;
	/*
	 * ecc_oob is 14 bytes (7 u16): with a 16-bit bus each region length
	 * is rounded up to 2, so up to 14 bytes may be read for the 13 ECC
	 * bytes of one step.
	 */
	uint16_t ecc_oob[7];
	uint8_t *oob = (uint8_t *)&ecc_oob[0];
	unsigned int max_bitflips = 0;

	for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, p += eccsize) {
		chip->cmdfunc(mtd, NAND_CMD_READ0, s * eccsize, page);
		chip->ecc.hwctl(mtd, NAND_ECC_READ);
		chip->read_buf(mtd, p, eccsize);

		/* Gather this step's ECC bytes from the OOB layout regions. */
		for (j = 0; j < eccbytes;) {
			struct mtd_oob_region oobregion;
			int ret;

			ret = mtd_ooblayout_ecc(mtd, group++, &oobregion);
			if (ret)
				return ret;

			off = oobregion.offset;
			len = oobregion.length;

			/* 16-bit devices can only transfer even byte counts. */
			if (chip->options & NAND_BUSWIDTH_16)
				len = roundup(len, 2);

			chip->cmdfunc(mtd, NAND_CMD_READOOB, off, page);
			chip->read_buf(mtd, oob + j, len);
			j += len;
		}

		memcpy(&ecc_code[i], oob, chip->ecc.bytes);
		chip->ecc.calculate(mtd, p, &ecc_calc[i]);

		stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}

	return max_bitflips;
}
619
620
621
622
623
624
625
626
627
628
629
/*
 * fsmc_bch8_correct_data - correct up to 8 bit errors using the
 * controller's BCH8 engine.
 * @mtd:	mtd info structure
 * @dat:	data chunk (chip->ecc.size bytes) to be corrected in place
 * @read_ecc:	ECC bytes read from flash
 * @calc_ecc:	unused; the syndrome lives in the controller registers
 *
 * Returns the number of corrected bit errors, or -EBADMSG when more
 * than 8 errors were detected and the chunk is not an erased page.
 */
static int fsmc_bch8_correct_data(struct mtd_info *mtd, uint8_t *dat,
				  uint8_t *read_ecc, uint8_t *calc_ecc)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct fsmc_nand_data *host = mtd_to_fsmc(mtd);
	void __iomem *regs = host->regs_va;
	unsigned int bank = host->bank;
	uint32_t err_idx[8];
	uint32_t num_err, i;
	uint32_t ecc1, ecc2, ecc3, ecc4;

	/* Error count is reported in bits 10..13 of the status register. */
	num_err = (readl_relaxed(FSMC_NAND_REG(regs, bank, STS)) >> 10) & 0xF;

	/* No bit flipped at all */
	if (likely(num_err == 0))
		return 0;

	/* Too many errors for the BCH8 engine to correct */
	if (unlikely(num_err > 8)) {
		/*
		 * Erased-page heuristic: an erased page is all 0xff, but
		 * its stored ECC is not the ECC of all-0xff data, so the
		 * engine reports an uncorrectable error. If the total
		 * number of programmed (zero) bits across data + ECC is
		 * within the 8-bit correction budget, treat the chunk as
		 * erased: force the data back to 0xff and report the zero
		 * bits in the data as corrected bitflips.
		 */
		int bits_ecc = count_written_bits(read_ecc, chip->ecc.bytes, 8);
		int bits_data = count_written_bits(dat, chip->ecc.size, 8);

		if ((bits_ecc + bits_data) <= 8) {
			if (bits_data)
				memset(dat, 0xff, chip->ecc.size);
			return bits_data;
		}

		return -EBADMSG;
	}

	/*
	 * Up to eight 13-bit error positions are packed back-to-back
	 * across the ECC1..ECC3 registers, with the top bits of the last
	 * index in bits 16..23 of the status register.
	 */
	ecc1 = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC1));
	ecc2 = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC2));
	ecc3 = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC3));
	ecc4 = readl_relaxed(FSMC_NAND_REG(regs, bank, STS));

	err_idx[0] = (ecc1 >> 0) & 0x1FFF;
	err_idx[1] = (ecc1 >> 13) & 0x1FFF;
	err_idx[2] = (((ecc2 >> 0) & 0x7F) << 6) | ((ecc1 >> 26) & 0x3F);
	err_idx[3] = (ecc2 >> 7) & 0x1FFF;
	err_idx[4] = (((ecc3 >> 0) & 0x1) << 12) | ((ecc2 >> 20) & 0xFFF);
	err_idx[5] = (ecc3 >> 1) & 0x1FFF;
	err_idx[6] = (ecc3 >> 14) & 0x1FFF;
	err_idx[7] = (((ecc4 >> 16) & 0xFF) << 5) | ((ecc3 >> 27) & 0x1F);

	i = 0;
	while (num_err--) {
		/*
		 * Flip the two low bits of the index (XOR 3).
		 * NOTE(review): presumably this converts the controller's
		 * bit numbering within a nibble/byte to the buffer's bit
		 * order — confirm against the FSMC BCH documentation.
		 */
		change_bit(0, (unsigned long *)&err_idx[i]);
		change_bit(1, (unsigned long *)&err_idx[i]);

		/* Only apply indices that fall inside the data area. */
		if (err_idx[i] < chip->ecc.size * 8) {
			change_bit(err_idx[i], (unsigned long *)dat);
			i++;
		}
	}
	return i;
}
710
/*
 * dmaengine channel filter: stash the platform-provided private data on
 * the candidate channel and accept it unconditionally.
 */
static bool filter(struct dma_chan *chan, void *slave)
{
	chan->private = slave;
	return true;
}
716
717#ifdef CONFIG_OF
/*
 * fsmc_nand_probe_config_dt - fill platform data from the device tree.
 * @pdev:	platform device being probed
 * @np:		device tree node of the controller
 *
 * Parses "bank-width" (1 = 8-bit, 2 = 16-bit), "nand-skip-bbtscan",
 * the raw "timings" byte array and the "bank" number (0..3) into the
 * already-allocated platform data.
 *
 * Returns 0 on success, -EINVAL on invalid properties, -ENOMEM on
 * allocation failure.
 */
static int fsmc_nand_probe_config_dt(struct platform_device *pdev,
				     struct device_node *np)
{
	struct fsmc_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
	u32 val;
	int ret;

	/* Set default NAND width to 8 bits */
	pdata->width = 8;
	if (!of_property_read_u32(np, "bank-width", &val)) {
		if (val == 2) {
			pdata->width = 16;
		} else if (val != 1) {
			dev_err(&pdev->dev, "invalid bank-width %u\n", val);
			return -EINVAL;
		}
	}
	if (of_get_property(np, "nand-skip-bbtscan", NULL))
		pdata->options = NAND_SKIP_BBTSCAN;

	pdata->nand_timings = devm_kzalloc(&pdev->dev,
				sizeof(*pdata->nand_timings), GFP_KERNEL);
	if (!pdata->nand_timings)
		return -ENOMEM;
	ret = of_property_read_u8_array(np, "timings", (u8 *)pdata->nand_timings,
						sizeof(*pdata->nand_timings));
	if (ret) {
		/* Missing timings are not fatal; hardware defaults are used. */
		dev_info(&pdev->dev, "No timings in dts specified, using default timings!\n");
		pdata->nand_timings = NULL;
	}

	/* Set default NAND bank to 0 */
	pdata->bank = 0;
	if (!of_property_read_u32(np, "bank", &val)) {
		if (val > 3) {
			dev_err(&pdev->dev, "invalid bank %u\n", val);
			return -EINVAL;
		}
		pdata->bank = val;
	}
	return 0;
}
760#else
/* Stub used when device tree support (CONFIG_OF) is compiled out. */
static int fsmc_nand_probe_config_dt(struct platform_device *pdev,
				     struct device_node *np)
{
	return -ENOSYS;
}
766#endif
767
768
769
770
771
772static int __init fsmc_nand_probe(struct platform_device *pdev)
773{
774 struct fsmc_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
775 struct device_node __maybe_unused *np = pdev->dev.of_node;
776 struct fsmc_nand_data *host;
777 struct mtd_info *mtd;
778 struct nand_chip *nand;
779 struct resource *res;
780 dma_cap_mask_t mask;
781 int ret = 0;
782 u32 pid;
783 int i;
784
785 if (np) {
786 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
787 pdev->dev.platform_data = pdata;
788 ret = fsmc_nand_probe_config_dt(pdev, np);
789 if (ret) {
790 dev_err(&pdev->dev, "no platform data\n");
791 return -ENODEV;
792 }
793 }
794
795 if (!pdata) {
796 dev_err(&pdev->dev, "platform data is NULL\n");
797 return -EINVAL;
798 }
799
800
801 host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
802 if (!host)
803 return -ENOMEM;
804
805 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_data");
806 host->data_va = devm_ioremap_resource(&pdev->dev, res);
807 if (IS_ERR(host->data_va))
808 return PTR_ERR(host->data_va);
809
810 host->data_pa = (dma_addr_t)res->start;
811
812 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_addr");
813 host->addr_va = devm_ioremap_resource(&pdev->dev, res);
814 if (IS_ERR(host->addr_va))
815 return PTR_ERR(host->addr_va);
816
817 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_cmd");
818 host->cmd_va = devm_ioremap_resource(&pdev->dev, res);
819 if (IS_ERR(host->cmd_va))
820 return PTR_ERR(host->cmd_va);
821
822 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fsmc_regs");
823 host->regs_va = devm_ioremap_resource(&pdev->dev, res);
824 if (IS_ERR(host->regs_va))
825 return PTR_ERR(host->regs_va);
826
827 host->clk = clk_get(&pdev->dev, NULL);
828 if (IS_ERR(host->clk)) {
829 dev_err(&pdev->dev, "failed to fetch block clock\n");
830 return PTR_ERR(host->clk);
831 }
832
833 ret = clk_prepare_enable(host->clk);
834 if (ret)
835 goto err_clk_prepare_enable;
836
837
838
839
840
841 for (pid = 0, i = 0; i < 4; i++)
842 pid |= (readl(host->regs_va + resource_size(res) - 0x20 + 4 * i) & 255) << (i * 8);
843 host->pid = pid;
844 dev_info(&pdev->dev, "FSMC device partno %03x, manufacturer %02x, "
845 "revision %02x, config %02x\n",
846 AMBA_PART_BITS(pid), AMBA_MANF_BITS(pid),
847 AMBA_REV_BITS(pid), AMBA_CONFIG_BITS(pid));
848
849 host->bank = pdata->bank;
850 host->select_chip = pdata->select_bank;
851 host->partitions = pdata->partitions;
852 host->nr_partitions = pdata->nr_partitions;
853 host->dev = &pdev->dev;
854 host->dev_timings = pdata->nand_timings;
855 host->mode = pdata->mode;
856
857 if (host->mode == USE_DMA_ACCESS)
858 init_completion(&host->dma_access_complete);
859
860
861 mtd = nand_to_mtd(&host->nand);
862 nand = &host->nand;
863 nand_set_controller_data(nand, host);
864 nand_set_flash_node(nand, np);
865
866 mtd->dev.parent = &pdev->dev;
867 nand->IO_ADDR_R = host->data_va;
868 nand->IO_ADDR_W = host->data_va;
869 nand->cmd_ctrl = fsmc_cmd_ctrl;
870 nand->chip_delay = 30;
871
872
873
874
875
876 nand->ecc.mode = NAND_ECC_HW;
877 nand->ecc.hwctl = fsmc_enable_hwecc;
878 nand->ecc.size = 512;
879 nand->options = pdata->options;
880 nand->select_chip = fsmc_select_chip;
881 nand->badblockbits = 7;
882 nand_set_flash_node(nand, np);
883
884 if (pdata->width == FSMC_NAND_BW16)
885 nand->options |= NAND_BUSWIDTH_16;
886
887 switch (host->mode) {
888 case USE_DMA_ACCESS:
889 dma_cap_zero(mask);
890 dma_cap_set(DMA_MEMCPY, mask);
891 host->read_dma_chan = dma_request_channel(mask, filter,
892 pdata->read_dma_priv);
893 if (!host->read_dma_chan) {
894 dev_err(&pdev->dev, "Unable to get read dma channel\n");
895 goto err_req_read_chnl;
896 }
897 host->write_dma_chan = dma_request_channel(mask, filter,
898 pdata->write_dma_priv);
899 if (!host->write_dma_chan) {
900 dev_err(&pdev->dev, "Unable to get write dma channel\n");
901 goto err_req_write_chnl;
902 }
903 nand->read_buf = fsmc_read_buf_dma;
904 nand->write_buf = fsmc_write_buf_dma;
905 break;
906
907 default:
908 case USE_WORD_ACCESS:
909 nand->read_buf = fsmc_read_buf;
910 nand->write_buf = fsmc_write_buf;
911 break;
912 }
913
914 fsmc_nand_setup(host->regs_va, host->bank,
915 nand->options & NAND_BUSWIDTH_16,
916 host->dev_timings);
917
918 if (AMBA_REV_BITS(host->pid) >= 8) {
919 nand->ecc.read_page = fsmc_read_page_hwecc;
920 nand->ecc.calculate = fsmc_read_hwecc_ecc4;
921 nand->ecc.correct = fsmc_bch8_correct_data;
922 nand->ecc.bytes = 13;
923 nand->ecc.strength = 8;
924 }
925
926
927
928
929 if (nand_scan_ident(mtd, 1, NULL)) {
930 ret = -ENXIO;
931 dev_err(&pdev->dev, "No NAND Device found!\n");
932 goto err_scan_ident;
933 }
934
935 if (AMBA_REV_BITS(host->pid) >= 8) {
936 switch (mtd->oobsize) {
937 case 16:
938 case 64:
939 case 128:
940 case 224:
941 case 256:
942 break;
943 default:
944 dev_warn(&pdev->dev, "No oob scheme defined for oobsize %d\n",
945 mtd->oobsize);
946 ret = -EINVAL;
947 goto err_probe;
948 }
949
950 mtd_set_ooblayout(mtd, &fsmc_ecc4_ooblayout_ops);
951 } else {
952 switch (nand->ecc.mode) {
953 case NAND_ECC_HW:
954 dev_info(&pdev->dev, "Using 1-bit HW ECC scheme\n");
955 nand->ecc.calculate = fsmc_read_hwecc_ecc1;
956 nand->ecc.correct = nand_correct_data;
957 nand->ecc.bytes = 3;
958 nand->ecc.strength = 1;
959 break;
960
961 case NAND_ECC_SOFT:
962 if (nand->ecc.algo == NAND_ECC_BCH) {
963 dev_info(&pdev->dev, "Using 4-bit SW BCH ECC scheme\n");
964 break;
965 }
966
967 default:
968 dev_err(&pdev->dev, "Unsupported ECC mode!\n");
969 goto err_probe;
970 }
971
972
973
974
975
976 if (nand->ecc.mode == NAND_ECC_HW) {
977 switch (mtd->oobsize) {
978 case 16:
979 case 64:
980 case 128:
981 mtd_set_ooblayout(mtd,
982 &fsmc_ecc1_ooblayout_ops);
983 break;
984 default:
985 dev_warn(&pdev->dev,
986 "No oob scheme defined for oobsize %d\n",
987 mtd->oobsize);
988 ret = -EINVAL;
989 goto err_probe;
990 }
991 }
992 }
993
994
995 if (nand_scan_tail(mtd)) {
996 ret = -ENXIO;
997 goto err_probe;
998 }
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010 mtd->name = "nand";
1011 ret = mtd_device_register(mtd, host->partitions, host->nr_partitions);
1012 if (ret)
1013 goto err_probe;
1014
1015 platform_set_drvdata(pdev, host);
1016 dev_info(&pdev->dev, "FSMC NAND driver registration successful\n");
1017 return 0;
1018
1019err_probe:
1020err_scan_ident:
1021 if (host->mode == USE_DMA_ACCESS)
1022 dma_release_channel(host->write_dma_chan);
1023err_req_write_chnl:
1024 if (host->mode == USE_DMA_ACCESS)
1025 dma_release_channel(host->read_dma_chan);
1026err_req_read_chnl:
1027 clk_disable_unprepare(host->clk);
1028err_clk_prepare_enable:
1029 clk_put(host->clk);
1030 return ret;
1031}
1032
1033
1034
1035
1036static int fsmc_nand_remove(struct platform_device *pdev)
1037{
1038 struct fsmc_nand_data *host = platform_get_drvdata(pdev);
1039
1040 if (host) {
1041 nand_release(nand_to_mtd(&host->nand));
1042
1043 if (host->mode == USE_DMA_ACCESS) {
1044 dma_release_channel(host->write_dma_chan);
1045 dma_release_channel(host->read_dma_chan);
1046 }
1047 clk_disable_unprepare(host->clk);
1048 clk_put(host->clk);
1049 }
1050
1051 return 0;
1052}
1053
1054#ifdef CONFIG_PM_SLEEP
1055static int fsmc_nand_suspend(struct device *dev)
1056{
1057 struct fsmc_nand_data *host = dev_get_drvdata(dev);
1058 if (host)
1059 clk_disable_unprepare(host->clk);
1060 return 0;
1061}
1062
1063static int fsmc_nand_resume(struct device *dev)
1064{
1065 struct fsmc_nand_data *host = dev_get_drvdata(dev);
1066 if (host) {
1067 clk_prepare_enable(host->clk);
1068 fsmc_nand_setup(host->regs_va, host->bank,
1069 host->nand.options & NAND_BUSWIDTH_16,
1070 host->dev_timings);
1071 }
1072 return 0;
1073}
1074#endif
1075
/* PM ops wiring the suspend/resume callbacks defined above (CONFIG_PM_SLEEP). */
static SIMPLE_DEV_PM_OPS(fsmc_nand_pm_ops, fsmc_nand_suspend, fsmc_nand_resume);
1077
1078#ifdef CONFIG_OF
/* OF match table: SPEAr600 and ST-Ericsson flavours of the FSMC NAND. */
static const struct of_device_id fsmc_nand_id_table[] = {
	{ .compatible = "st,spear600-fsmc-nand" },
	{ .compatible = "stericsson,fsmc-nand" },
	{}
};
MODULE_DEVICE_TABLE(of, fsmc_nand_id_table);
1085#endif
1086
/*
 * Platform driver. The probe callback is supplied separately via
 * module_platform_driver_probe() below, which is why it is not listed
 * here (it is __init and discarded after boot).
 */
static struct platform_driver fsmc_nand_driver = {
	.remove = fsmc_nand_remove,
	.driver = {
		.name = "fsmc-nand",
		.of_match_table = of_match_ptr(fsmc_nand_id_table),
		.pm = &fsmc_nand_pm_ops,
	},
};
1095
1096module_platform_driver_probe(fsmc_nand_driver, fsmc_nand_probe);
1097
1098MODULE_LICENSE("GPL");
1099MODULE_AUTHOR("Vipin Kumar <vipin.kumar@st.com>, Ashish Priyadarshi");
1100MODULE_DESCRIPTION("NAND driver for SPEAr Platforms");
1101