/*
 * NAND flash driver for the FSMC (Flexible Static Memory Controller)
 * found on ST SPEAr platforms.
 *
 * Authors: Vipin Kumar <vipin.kumar@st.com>, Ashish Priyadarshi
 *
 * Licensed under the GPL (see MODULE_LICENSE below).
 */
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/dmaengine.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/resource.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/nand_ecc.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/mtd/partitions.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/amba/bus.h>
#include <mtd/mtd-abi.h>

/* FSMC controller registers for NOR flash */
#define CTRL			0x0
	/* ctrl register definitions */
	#define BANK_ENABLE		(1 << 0)
	#define MUXED			(1 << 1)
	#define NOR_DEV			(2 << 2)
	#define WIDTH_8			(0 << 4)
	#define WIDTH_16		(1 << 4)
	#define RSTPWRDWN		(1 << 6)
	#define WPROT			(1 << 7)
	#define WRT_ENABLE		(1 << 12)
	#define WAIT_ENB		(1 << 13)

#define CTRL_TIM		0x4
	/* ctrl_tim register definitions */

#define FSMC_NOR_BANK_SZ	0x8
#define FSMC_NOR_REG_SIZE	0x40

#define FSMC_NOR_REG(base, bank, reg)	(base + \
					 FSMC_NOR_BANK_SZ * (bank) + \
					 reg)
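
/*
 * Each NOR bank has FSMC_NOR_BANK_SZ bytes of register space (CTRL at
 * offset 0x0, CTRL_TIM at 0x4); FSMC_NOR_REG() resolves the address of
 * a register for a given bank within the FSMC_NOR_REG_SIZE window.
 */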

/* FSMC controller registers for NAND flash */
#define PC			0x00
	/* pc register definitions */
	#define FSMC_RESET		(1 << 0)
	#define FSMC_WAITON		(1 << 1)
	#define FSMC_ENABLE		(1 << 2)
	#define FSMC_DEVTYPE_NAND	(1 << 3)
	#define FSMC_DEVWID_8		(0 << 4)
	#define FSMC_DEVWID_16		(1 << 4)
	#define FSMC_ECCEN		(1 << 6)
	#define FSMC_ECCPLEN_512	(0 << 7)
	#define FSMC_ECCPLEN_256	(1 << 7)
	#define FSMC_TCLR_1		(1)
	#define FSMC_TCLR_SHIFT		(9)
	#define FSMC_TCLR_MASK		(0xF)
	#define FSMC_TAR_1		(1)
	#define FSMC_TAR_SHIFT		(13)
	#define FSMC_TAR_MASK		(0xF)
#define STS			0x04
	/* sts register definitions */
	#define FSMC_CODE_RDY		(1 << 15)
#define COMM			0x08
	/* comm register definitions */
	#define FSMC_TSET_0		0
	#define FSMC_TSET_SHIFT		0
	#define FSMC_TSET_MASK		0xFF
	#define FSMC_TWAIT_6		6
	#define FSMC_TWAIT_SHIFT	8
	#define FSMC_TWAIT_MASK		0xFF
	#define FSMC_THOLD_4		4
	#define FSMC_THOLD_SHIFT	16
	#define FSMC_THOLD_MASK		0xFF
	#define FSMC_THIZ_1		1
	#define FSMC_THIZ_SHIFT		24
	#define FSMC_THIZ_MASK		0xFF
#define ATTRIB			0x0C
#define IOATA			0x10
#define ECC1			0x14
#define ECC2			0x18
#define ECC3			0x1C
#define FSMC_NAND_BANK_SZ	0x20
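
/*
 * Per-bank NAND register map relative to regs_va, as implied by the
 * offsets above: PC 0x00, STS 0x04, COMM 0x08, ATTRIB 0x0C, IOATA 0x10,
 * ECC1-ECC3 0x14-0x1C. Each NAND bank occupies FSMC_NAND_BANK_SZ (0x20)
 * bytes, so bank N starts N * 0x20 bytes past the FSMC_NOR_REG_SIZE NOR
 * register block (see the regs_va computation in fsmc_nand_probe()).
 */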

/* Max time to wait for the FSMC to report the ECC code as ready */
#define FSMC_BUSY_WAIT_TIMEOUT	(1 * HZ)

struct fsmc_nand_timings {
	uint8_t tclr;
	uint8_t tar;
	uint8_t thiz;
	uint8_t thold;
	uint8_t twait;
	uint8_t tset;
};

enum access_mode {
	USE_DMA_ACCESS = 1,
	USE_WORD_ACCESS,
};
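
/*
 * The controller data port can be driven either word-by-word from the
 * CPU (USE_WORD_ACCESS) or through a memcpy-capable dmaengine channel
 * (USE_DMA_ACCESS); see dma_xfer() and the fsmc_*_buf* helpers below.
 */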

/**
 * struct fsmc_nand_data - structure for FSMC NAND device state
 *
 * @pid:		Part ID in the AMBA PrimeCell format.
 * @nand:		Chip-related info for the NAND flash.
 *
 * @bank:		Bank number of the probed device.
 * @dev:		Parent device.
 * @mode:		Access mode (DMA or word access).
 * @clk:		Clock structure for FSMC.
 *
 * @read_dma_chan:	DMA channel for read access.
 * @write_dma_chan:	DMA channel for write access.
 * @dma_access_complete: Completion structure for DMA transfers.
 *
 * @dev_timings:	NAND timings from the device tree, if provided.
 *
 * @data_pa:		Physical address of the NAND data port (for DMA).
 * @data_va:		NAND port for data.
 * @cmd_va:		NAND port for commands.
 * @addr_va:		NAND port for addresses.
 * @regs_va:		Register base address for the selected bank.
 */
struct fsmc_nand_data {
	u32			pid;
	struct nand_chip	nand;

	unsigned int		bank;
	struct device		*dev;
	enum access_mode	mode;
	struct clk		*clk;

	/* DMA related objects */
	struct dma_chan		*read_dma_chan;
	struct dma_chan		*write_dma_chan;
	struct completion	dma_access_complete;

	struct fsmc_nand_timings *dev_timings;

	dma_addr_t		data_pa;
	void __iomem		*data_va;
	void __iomem		*cmd_va;
	void __iomem		*addr_va;
	void __iomem		*regs_va;
};

static int fsmc_ecc1_ooblayout_ecc(struct mtd_info *mtd, int section,
				   struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section >= chip->ecc.steps)
		return -ERANGE;

	oobregion->offset = (section * 16) + 2;
	oobregion->length = 3;

	return 0;
}

static int fsmc_ecc1_ooblayout_free(struct mtd_info *mtd, int section,
				    struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section >= chip->ecc.steps)
		return -ERANGE;

	oobregion->offset = (section * 16) + 8;

	if (section < chip->ecc.steps - 1)
		oobregion->length = 8;
	else
		oobregion->length = mtd->oobsize - oobregion->offset;

	return 0;
}

static const struct mtd_ooblayout_ops fsmc_ecc1_ooblayout_ops = {
	.ecc = fsmc_ecc1_ooblayout_ecc,
	.free = fsmc_ecc1_ooblayout_free,
};
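
/*
 * Resulting 1-bit ECC layout, as implied by the callbacks above: within
 * each 16-byte OOB chunk, bytes 2-4 hold the 3 ECC bytes for one
 * 512-byte step and bytes 8 onward are free (the last section keeps
 * whatever remains of the OOB area).
 */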

/*
 * ECC placement for the 8-bit BCH scheme: there are 13 bytes of ECC for
 * every 512 bytes of data, and the hardware requires the ECC to be read
 * back immediately after the 512-byte block it protects (see
 * fsmc_read_page_hwecc() below).
 */
static int fsmc_ecc4_ooblayout_ecc(struct mtd_info *mtd, int section,
				   struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section >= chip->ecc.steps)
		return -ERANGE;

	oobregion->length = chip->ecc.bytes;

	if (!section && mtd->writesize <= 512)
		oobregion->offset = 0;
	else
		oobregion->offset = (section * 16) + 2;

	return 0;
}

static int fsmc_ecc4_ooblayout_free(struct mtd_info *mtd, int section,
				    struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section >= chip->ecc.steps)
		return -ERANGE;

	oobregion->offset = (section * 16) + 15;

	if (section < chip->ecc.steps - 1)
		oobregion->length = 3;
	else
		oobregion->length = mtd->oobsize - oobregion->offset;

	return 0;
}

static const struct mtd_ooblayout_ops fsmc_ecc4_ooblayout_ops = {
	.ecc = fsmc_ecc4_ooblayout_ecc,
	.free = fsmc_ecc4_ooblayout_free,
};
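
/*
 * With 13 ECC bytes per step, each section's ECC sits at bytes 2-14 of
 * its 16-byte OOB chunk (offset 0 for the first section on small-page
 * devices), and the 3 bytes starting at offset 15 remain free.
 */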

static inline struct fsmc_nand_data *mtd_to_fsmc(struct mtd_info *mtd)
{
	return container_of(mtd_to_nand(mtd), struct fsmc_nand_data, nand);
}

/*
 * fsmc_nand_setup - FSMC (Flexible Static Memory Controller) init routine
 *
 * This routine programs the timing parameters for the NAND flash
 * interface into the FSMC registers of the selected bank.
 */
static void fsmc_nand_setup(struct fsmc_nand_data *host,
			    struct fsmc_nand_timings *tims)
{
	uint32_t value = FSMC_DEVTYPE_NAND | FSMC_ENABLE | FSMC_WAITON;
	uint32_t tclr, tar, thiz, thold, twait, tset;

	tclr = (tims->tclr & FSMC_TCLR_MASK) << FSMC_TCLR_SHIFT;
	tar = (tims->tar & FSMC_TAR_MASK) << FSMC_TAR_SHIFT;
	thiz = (tims->thiz & FSMC_THIZ_MASK) << FSMC_THIZ_SHIFT;
	thold = (tims->thold & FSMC_THOLD_MASK) << FSMC_THOLD_SHIFT;
	twait = (tims->twait & FSMC_TWAIT_MASK) << FSMC_TWAIT_SHIFT;
	tset = (tims->tset & FSMC_TSET_MASK) << FSMC_TSET_SHIFT;

	if (host->nand.options & NAND_BUSWIDTH_16)
		writel_relaxed(value | FSMC_DEVWID_16, host->regs_va + PC);
	else
		writel_relaxed(value | FSMC_DEVWID_8, host->regs_va + PC);

	writel_relaxed(readl(host->regs_va + PC) | tclr | tar,
		       host->regs_va + PC);
	writel_relaxed(thiz | thold | twait | tset, host->regs_va + COMM);
	writel_relaxed(thiz | thold | twait | tset, host->regs_va + ATTRIB);
}

static int fsmc_calc_timings(struct fsmc_nand_data *host,
			     const struct nand_sdr_timings *sdrt,
			     struct fsmc_nand_timings *tims)
{
	unsigned long hclk = clk_get_rate(host->clk);
	unsigned long hclkn = NSEC_PER_SEC / hclk;
	uint32_t thiz, thold, twait, tset;

	if (sdrt->tRC_min < 30000)
		return -EOPNOTSUPP;

	tims->tar = DIV_ROUND_UP(sdrt->tAR_min / 1000, hclkn) - 1;
	if (tims->tar > FSMC_TAR_MASK)
		tims->tar = FSMC_TAR_MASK;
	tims->tclr = DIV_ROUND_UP(sdrt->tCLR_min / 1000, hclkn) - 1;
	if (tims->tclr > FSMC_TCLR_MASK)
		tims->tclr = FSMC_TCLR_MASK;

	thiz = sdrt->tCS_min - sdrt->tWP_min;
	tims->thiz = DIV_ROUND_UP(thiz / 1000, hclkn);

	thold = sdrt->tDH_min;
	if (thold < sdrt->tCH_min)
		thold = sdrt->tCH_min;
	if (thold < sdrt->tCLH_min)
		thold = sdrt->tCLH_min;
	if (thold < sdrt->tWH_min)
		thold = sdrt->tWH_min;
	if (thold < sdrt->tALH_min)
		thold = sdrt->tALH_min;
	if (thold < sdrt->tREH_min)
		thold = sdrt->tREH_min;
	tims->thold = DIV_ROUND_UP(thold / 1000, hclkn);
	if (tims->thold == 0)
		tims->thold = 1;
	else if (tims->thold > FSMC_THOLD_MASK)
		tims->thold = FSMC_THOLD_MASK;

	twait = max(sdrt->tRP_min, sdrt->tWP_min);
	tims->twait = DIV_ROUND_UP(twait / 1000, hclkn) - 1;
	if (tims->twait == 0)
		tims->twait = 1;
	else if (tims->twait > FSMC_TWAIT_MASK)
		tims->twait = FSMC_TWAIT_MASK;

	tset = max(sdrt->tCS_min - sdrt->tWP_min,
		   sdrt->tCEA_max - sdrt->tREA_max);
	tims->tset = DIV_ROUND_UP(tset / 1000, hclkn) - 1;
	if (tims->tset == 0)
		tims->tset = 1;
	else if (tims->tset > FSMC_TSET_MASK)
		tims->tset = FSMC_TSET_MASK;

	return 0;
}
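
/*
 * Worked example of the conversion above (illustrative numbers, not
 * from any datasheet): with hclk = 166 MHz, hclkn = 1000000000 /
 * 166000000 = 6 ns. A twait requirement of max(tRP_min, tWP_min) =
 * 30000 ps then becomes DIV_ROUND_UP(30000 / 1000, 6) - 1 = 5 - 1 = 4
 * HCLK cycles.
 */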

static int fsmc_setup_data_interface(struct mtd_info *mtd, int csline,
				     const struct nand_data_interface *conf)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct fsmc_nand_data *host = nand_get_controller_data(nand);
	struct fsmc_nand_timings tims;
	const struct nand_sdr_timings *sdrt;
	int ret;

	sdrt = nand_get_sdr_timings(conf);
	if (IS_ERR(sdrt))
		return PTR_ERR(sdrt);

	ret = fsmc_calc_timings(host, sdrt, &tims);
	if (ret)
		return ret;

	if (csline == NAND_DATA_IFACE_CHECK_ONLY)
		return 0;

	fsmc_nand_setup(host, &tims);

	return 0;
}

/*
 * fsmc_enable_hwecc - Enables Hardware ECC through FSMC registers
 */
static void fsmc_enable_hwecc(struct mtd_info *mtd, int mode)
{
	struct fsmc_nand_data *host = mtd_to_fsmc(mtd);

	writel_relaxed(readl(host->regs_va + PC) & ~FSMC_ECCPLEN_256,
		       host->regs_va + PC);
	writel_relaxed(readl(host->regs_va + PC) & ~FSMC_ECCEN,
		       host->regs_va + PC);
	writel_relaxed(readl(host->regs_va + PC) | FSMC_ECCEN,
		       host->regs_va + PC);
}

/*
 * fsmc_read_hwecc_ecc4 - Hardware ECC calculator for the ecc4 option
 * supported by FSMC. ECC is 13 bytes for 512 bytes of data (supports
 * error correction up to a max of 8 bits).
 */
static int fsmc_read_hwecc_ecc4(struct mtd_info *mtd, const uint8_t *data,
				uint8_t *ecc)
{
	struct fsmc_nand_data *host = mtd_to_fsmc(mtd);
	uint32_t ecc_tmp;
	unsigned long deadline = jiffies + FSMC_BUSY_WAIT_TIMEOUT;

	/* Wait until the controller signals that the ECC code is ready */
	do {
		if (readl_relaxed(host->regs_va + STS) & FSMC_CODE_RDY)
			break;
		else
			cond_resched();
	} while (!time_after_eq(jiffies, deadline));

	if (time_after_eq(jiffies, deadline)) {
		dev_err(host->dev, "calculate ecc timed out\n");
		return -ETIMEDOUT;
	}

	ecc_tmp = readl_relaxed(host->regs_va + ECC1);
	ecc[0] = (uint8_t) (ecc_tmp >> 0);
	ecc[1] = (uint8_t) (ecc_tmp >> 8);
	ecc[2] = (uint8_t) (ecc_tmp >> 16);
	ecc[3] = (uint8_t) (ecc_tmp >> 24);

	ecc_tmp = readl_relaxed(host->regs_va + ECC2);
	ecc[4] = (uint8_t) (ecc_tmp >> 0);
	ecc[5] = (uint8_t) (ecc_tmp >> 8);
	ecc[6] = (uint8_t) (ecc_tmp >> 16);
	ecc[7] = (uint8_t) (ecc_tmp >> 24);

	ecc_tmp = readl_relaxed(host->regs_va + ECC3);
	ecc[8] = (uint8_t) (ecc_tmp >> 0);
	ecc[9] = (uint8_t) (ecc_tmp >> 8);
	ecc[10] = (uint8_t) (ecc_tmp >> 16);
	ecc[11] = (uint8_t) (ecc_tmp >> 24);

	ecc_tmp = readl_relaxed(host->regs_va + STS);
	ecc[12] = (uint8_t) (ecc_tmp >> 16);

	return 0;
}

/*
 * fsmc_read_hwecc_ecc1 - Hardware ECC calculator for the ecc1 option
 * supported by FSMC. ECC is 3 bytes for 512 bytes of data (supports
 * error correction up to a max of 1 bit).
 */
static int fsmc_read_hwecc_ecc1(struct mtd_info *mtd, const uint8_t *data,
				uint8_t *ecc)
{
	struct fsmc_nand_data *host = mtd_to_fsmc(mtd);
	uint32_t ecc_tmp;

	ecc_tmp = readl_relaxed(host->regs_va + ECC1);
	ecc[0] = (uint8_t) (ecc_tmp >> 0);
	ecc[1] = (uint8_t) (ecc_tmp >> 8);
	ecc[2] = (uint8_t) (ecc_tmp >> 16);

	return 0;
}

/* Count the number of 0 bits in buff, up to a max of max_bits */
static int count_written_bits(uint8_t *buff, int size, int max_bits)
{
	int k, written_bits = 0;

	for (k = 0; k < size; k++) {
		written_bits += hweight8(~buff[k]);
		if (written_bits > max_bits)
			break;
	}

	return written_bits;
}
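
/*
 * Note: fsmc_bch8_correct_data() below uses count_written_bits() to
 * distinguish a freshly erased page (almost all bits still 1) from a
 * genuinely uncorrectable one when the controller reports more than 8
 * errors.
 */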

static void dma_complete(void *param)
{
	struct fsmc_nand_data *host = param;

	complete(&host->dma_access_complete);
}

static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len,
		    enum dma_data_direction direction)
{
	struct dma_chan *chan;
	struct dma_device *dma_dev;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dst, dma_src, dma_addr;
	dma_cookie_t cookie;
	unsigned long flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
	int ret;
	unsigned long time_left;

	if (direction == DMA_TO_DEVICE)
		chan = host->write_dma_chan;
	else if (direction == DMA_FROM_DEVICE)
		chan = host->read_dma_chan;
	else
		return -EINVAL;

	dma_dev = chan->device;
	dma_addr = dma_map_single(dma_dev->dev, buffer, len, direction);

	if (direction == DMA_TO_DEVICE) {
		dma_src = dma_addr;
		dma_dst = host->data_pa;
	} else {
		dma_src = host->data_pa;
		dma_dst = dma_addr;
	}

	tx = dma_dev->device_prep_dma_memcpy(chan, dma_dst, dma_src,
					     len, flags);
	if (!tx) {
		dev_err(host->dev, "device_prep_dma_memcpy error\n");
		ret = -EIO;
		goto unmap_dma;
	}

	tx->callback = dma_complete;
	tx->callback_param = host;
	cookie = tx->tx_submit(tx);

	ret = dma_submit_error(cookie);
	if (ret) {
		dev_err(host->dev, "dma_submit_error %d\n", cookie);
		goto unmap_dma;
	}

	dma_async_issue_pending(chan);

	time_left = wait_for_completion_timeout(&host->dma_access_complete,
						msecs_to_jiffies(3000));
	if (time_left == 0) {
		dmaengine_terminate_all(chan);
		dev_err(host->dev, "wait_for_completion_timeout\n");
		ret = -ETIMEDOUT;
		goto unmap_dma;
	}

	ret = 0;

unmap_dma:
	dma_unmap_single(dma_dev->dev, dma_addr, len, direction);

	return ret;
}
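
/*
 * dma_xfer() drives the FSMC data port through the generic dmaengine
 * memcpy API: the port's fixed physical address (host->data_pa) is one
 * end of the copy and the kernel buffer, mapped with dma_map_single(),
 * is the other. Completion is signalled from the dma_complete()
 * callback, with a 3 s timeout as a safety net.
 */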

/*
 * fsmc_write_buf - write buffer to chip
 * @mtd:	MTD device structure
 * @buf:	data buffer
 * @len:	number of bytes to write
 */
static void fsmc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
{
	struct fsmc_nand_data *host = mtd_to_fsmc(mtd);
	int i;

	if (IS_ALIGNED((uintptr_t)buf, sizeof(uint32_t)) &&
	    IS_ALIGNED(len, sizeof(uint32_t))) {
		uint32_t *p = (uint32_t *)buf;

		len = len >> 2;
		for (i = 0; i < len; i++)
			writel_relaxed(p[i], host->data_va);
	} else {
		for (i = 0; i < len; i++)
			writeb_relaxed(buf[i], host->data_va);
	}
}
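
/*
 * Both PIO helpers access the same data port repeatedly rather than
 * incrementing an address: the FSMC latches one transfer per bus cycle
 * on the port, so word access is purely a bus-width optimization for
 * word-aligned buffers and lengths.
 */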

/*
 * fsmc_read_buf - read chip data into buffer
 * @mtd:	MTD device structure
 * @buf:	buffer to store data
 * @len:	number of bytes to read
 */
static void fsmc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct fsmc_nand_data *host = mtd_to_fsmc(mtd);
	int i;

	if (IS_ALIGNED((uintptr_t)buf, sizeof(uint32_t)) &&
	    IS_ALIGNED(len, sizeof(uint32_t))) {
		uint32_t *p = (uint32_t *)buf;

		len = len >> 2;
		for (i = 0; i < len; i++)
			p[i] = readl_relaxed(host->data_va);
	} else {
		for (i = 0; i < len; i++)
			buf[i] = readb_relaxed(host->data_va);
	}
}

/*
 * fsmc_read_buf_dma - read chip data into buffer using DMA
 * @mtd:	MTD device structure
 * @buf:	buffer to store data
 * @len:	number of bytes to read
 */
static void fsmc_read_buf_dma(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct fsmc_nand_data *host = mtd_to_fsmc(mtd);

	dma_xfer(host, buf, len, DMA_FROM_DEVICE);
}

/*
 * fsmc_write_buf_dma - write buffer to chip using DMA
 * @mtd:	MTD device structure
 * @buf:	data buffer
 * @len:	number of bytes to write
 */
static void fsmc_write_buf_dma(struct mtd_info *mtd, const uint8_t *buf,
			       int len)
{
	struct fsmc_nand_data *host = mtd_to_fsmc(mtd);

	dma_xfer(host, (void *)buf, len, DMA_TO_DEVICE);
}

/* fsmc_select_chip - assert or deassert nCE */
static void fsmc_select_chip(struct mtd_info *mtd, int chipnr)
{
	struct fsmc_nand_data *host = mtd_to_fsmc(mtd);
	u32 pc;

	/* Support only one CS */
	if (chipnr > 0)
		return;

	pc = readl(host->regs_va + PC);
	if (chipnr < 0)
		writel_relaxed(pc & ~FSMC_ENABLE, host->regs_va + PC);
	else
		writel_relaxed(pc | FSMC_ENABLE, host->regs_va + PC);

	/* nCE line must be asserted before starting any operation */
	mb();
}

/*
 * fsmc_exec_op - hook called by the core to execute NAND operations
 *
 * This controller is simple enough and thus does not need to use the parser
 * provided by the core; instead, handle every situation here.
 */
static int fsmc_exec_op(struct nand_chip *chip, const struct nand_operation *op,
			bool check_only)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct fsmc_nand_data *host = mtd_to_fsmc(mtd);
	const struct nand_op_instr *instr = NULL;
	int ret = 0;
	unsigned int op_id;
	int i;

	pr_debug("Executing operation [%d instructions]:\n", op->ninstrs);
	for (op_id = 0; op_id < op->ninstrs; op_id++) {
		instr = &op->instrs[op_id];

		switch (instr->type) {
		case NAND_OP_CMD_INSTR:
			pr_debug(" ->CMD [0x%02x]\n",
				 instr->ctx.cmd.opcode);

			writeb_relaxed(instr->ctx.cmd.opcode, host->cmd_va);
			break;

		case NAND_OP_ADDR_INSTR:
			pr_debug(" ->ADDR [%d cyc]\n",
				 instr->ctx.addr.naddrs);

			for (i = 0; i < instr->ctx.addr.naddrs; i++)
				writeb_relaxed(instr->ctx.addr.addrs[i],
					       host->addr_va);
			break;

		case NAND_OP_DATA_IN_INSTR:
			pr_debug(" ->DATA_IN [%d B%s]\n", instr->ctx.data.len,
				 instr->ctx.data.force_8bit ?
				 ", force 8-bit" : "");

			if (host->mode == USE_DMA_ACCESS)
				fsmc_read_buf_dma(mtd, instr->ctx.data.buf.in,
						  instr->ctx.data.len);
			else
				fsmc_read_buf(mtd, instr->ctx.data.buf.in,
					      instr->ctx.data.len);
			break;

		case NAND_OP_DATA_OUT_INSTR:
			pr_debug(" ->DATA_OUT [%d B%s]\n", instr->ctx.data.len,
				 instr->ctx.data.force_8bit ?
				 ", force 8-bit" : "");

			if (host->mode == USE_DMA_ACCESS)
				fsmc_write_buf_dma(mtd, instr->ctx.data.buf.out,
						   instr->ctx.data.len);
			else
				fsmc_write_buf(mtd, instr->ctx.data.buf.out,
					       instr->ctx.data.len);
			break;

		case NAND_OP_WAITRDY_INSTR:
			pr_debug(" ->WAITRDY [max %d ms]\n",
				 instr->ctx.waitrdy.timeout_ms);

			ret = nand_soft_waitrdy(chip,
						instr->ctx.waitrdy.timeout_ms);
			break;
		}
	}

	return ret;
}

/*
 * fsmc_read_page_hwecc
 * @mtd:	mtd info structure
 * @chip:	nand chip info structure
 * @buf:	buffer to store read data
 * @oob_required:	caller expects OOB data read to chip->oob_poi
 * @page:	page number to read
 *
 * This routine is needed for fsmc revision 8 as reading from the NAND chip
 * has to be performed in a strict sequence: data (512 bytes) followed by the
 * ECC (13 bytes). After this read, the fsmc hardware generates and reports
 * the error bit offsets (up to a max of 8 bits).
 */
static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
				uint8_t *buf, int oob_required, int page)
{
	int i, j, s, stat, eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *p = buf;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	uint8_t *ecc_code = chip->ecc.code_buf;
	int off, len, group = 0;
	/*
	 * ecc_oob is intentionally taken as uint16_t. On 16-bit devices we
	 * end up reading 14 bytes (7 words) from the OOB area; the local
	 * array maintains word alignment.
	 */
	uint16_t ecc_oob[7];
	uint8_t *oob = (uint8_t *)&ecc_oob[0];
	unsigned int max_bitflips = 0;

	for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, p += eccsize) {
		nand_read_page_op(chip, page, s * eccsize, NULL, 0);
		chip->ecc.hwctl(mtd, NAND_ECC_READ);
		chip->read_buf(mtd, p, eccsize);

		for (j = 0; j < eccbytes;) {
			struct mtd_oob_region oobregion;
			int ret;

			ret = mtd_ooblayout_ecc(mtd, group++, &oobregion);
			if (ret)
				return ret;

			off = oobregion.offset;
			len = oobregion.length;

			/*
			 * The length is intentionally rounded up to a
			 * multiple of 2 so that at least 13 bytes are read
			 * even on 16-bit NAND devices.
			 */
			if (chip->options & NAND_BUSWIDTH_16)
				len = roundup(len, 2);

			nand_read_oob_op(chip, page, off, oob + j, len);
			j += len;
		}

		memcpy(&ecc_code[i], oob, chip->ecc.bytes);
		chip->ecc.calculate(mtd, p, &ecc_calc[i]);

		stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}

	return max_bitflips;
}

/*
 * fsmc_bch8_correct_data
 * @mtd:	mtd info structure
 * @dat:	buffer of read data
 * @read_ecc:	ecc read from device spare area
 * @calc_ecc:	ecc calculated by device controller hardware
 *
 * The calculated ECC is 104 bits of information containing a maximum of
 * 8 error offsets of 13 bits each for the 512 bytes of read data.
 */
static int fsmc_bch8_correct_data(struct mtd_info *mtd, uint8_t *dat,
				  uint8_t *read_ecc, uint8_t *calc_ecc)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct fsmc_nand_data *host = mtd_to_fsmc(mtd);
	uint32_t err_idx[8];
	uint32_t num_err, i;
	uint32_t ecc1, ecc2, ecc3, ecc4;

	num_err = (readl_relaxed(host->regs_va + STS) >> 10) & 0xF;

	/* no bit flipping */
	if (likely(num_err == 0))
		return 0;

	/* too many errors */
	if (unlikely(num_err > 8)) {
		/*
		 * Erase check: a freshly erased page read reports an ECC
		 * error because the OOB data is also erased to 0xFF and the
		 * calculated ECC for all-0xFF data is not 0xFF..FF. Skip
		 * correction when the data is (almost) all 0xFF.
		 *
		 * Logic: count the bits written as 0 until the count exceeds
		 * 8, the maximum correction capability of the FSMC for each
		 * 512 + 13 bytes.
		 */
		int bits_ecc = count_written_bits(read_ecc, chip->ecc.bytes, 8);
		int bits_data = count_written_bits(dat, chip->ecc.size, 8);

		if ((bits_ecc + bits_data) <= 8) {
			if (bits_data)
				memset(dat, 0xff, chip->ecc.size);
			return bits_data;
		}

		return -EBADMSG;
	}

	/*
	 * ------------------- calc_ecc[] bit wise -----------|--13 bits--|
	 * |---idx[7]--|--.....-----|---idx[2]--||---idx[1]--||---idx[0]--|
	 *
	 * The eight 13-bit error offsets are unpacked from the ECC1-ECC3
	 * registers and the upper bits of the STS register.
	 */
	ecc1 = readl_relaxed(host->regs_va + ECC1);
	ecc2 = readl_relaxed(host->regs_va + ECC2);
	ecc3 = readl_relaxed(host->regs_va + ECC3);
	ecc4 = readl_relaxed(host->regs_va + STS);

	err_idx[0] = (ecc1 >> 0) & 0x1FFF;
	err_idx[1] = (ecc1 >> 13) & 0x1FFF;
	err_idx[2] = (((ecc2 >> 0) & 0x7F) << 6) | ((ecc1 >> 26) & 0x3F);
	err_idx[3] = (ecc2 >> 7) & 0x1FFF;
	err_idx[4] = (((ecc3 >> 0) & 0x1) << 12) | ((ecc2 >> 20) & 0xFFF);
	err_idx[5] = (ecc3 >> 1) & 0x1FFF;
	err_idx[6] = (ecc3 >> 14) & 0x1FFF;
	err_idx[7] = (((ecc4 >> 16) & 0xFF) << 5) | ((ecc3 >> 27) & 0x1F);

	i = 0;
	while (num_err--) {
		/* toggle the two LSBs of the reported index before use */
		change_bit(0, (unsigned long *)&err_idx[i]);
		change_bit(1, (unsigned long *)&err_idx[i]);

		if (err_idx[i] < chip->ecc.size * 8) {
			change_bit(err_idx[i], (unsigned long *)dat);
			i++;
		}
	}
	return i;
}

static bool filter(struct dma_chan *chan, void *slave)
{
	chan->private = slave;
	return true;
}

static int fsmc_nand_probe_config_dt(struct platform_device *pdev,
				     struct fsmc_nand_data *host,
				     struct nand_chip *nand)
{
	struct device_node *np = pdev->dev.of_node;
	u32 val;
	int ret;

	nand->options = 0;

	if (!of_property_read_u32(np, "bank-width", &val)) {
		if (val == 2) {
			nand->options |= NAND_BUSWIDTH_16;
		} else if (val != 1) {
			dev_err(&pdev->dev, "invalid bank-width %u\n", val);
			return -EINVAL;
		}
	}

	if (of_get_property(np, "nand-skip-bbtscan", NULL))
		nand->options |= NAND_SKIP_BBTSCAN;

	host->dev_timings = devm_kzalloc(&pdev->dev,
					 sizeof(*host->dev_timings),
					 GFP_KERNEL);
	if (!host->dev_timings)
		return -ENOMEM;

	ret = of_property_read_u8_array(np, "timings", (u8 *)host->dev_timings,
					sizeof(*host->dev_timings));
	if (ret)
		host->dev_timings = NULL;

	/* Set default NAND bank to 0 */
	host->bank = 0;
	if (!of_property_read_u32(np, "bank", &val)) {
		if (val > 3) {
			dev_err(&pdev->dev, "invalid bank %u\n", val);
			return -EINVAL;
		}
		host->bank = val;
	}
	return 0;
}
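
/*
 * Illustrative device-tree fragment for the properties parsed above
 * (node name, unit address, and timing values are made up; the six
 * timing bytes map onto struct fsmc_nand_timings in declaration order):
 *
 *	nand@d1800000 {
 *		compatible = "st,spear600-fsmc-nand";
 *		bank-width = <1>;
 *		bank = <0>;
 *		nand-skip-bbtscan;
 *		timings = [ 04 04 01 04 06 00 ];
 *	};
 */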

/*
 * fsmc_nand_probe - Probe function
 * @pdev:	platform device structure
 */
static int __init fsmc_nand_probe(struct platform_device *pdev)
{
	struct fsmc_nand_data *host;
	struct mtd_info *mtd;
	struct nand_chip *nand;
	struct resource *res;
	void __iomem *base;
	dma_cap_mask_t mask;
	int ret = 0;
	u32 pid;
	int i;

	/* Allocate memory for the device structure (and zero it) */
	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	nand = &host->nand;

	ret = fsmc_nand_probe_config_dt(pdev, host, nand);
	if (ret)
		return ret;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_data");
	host->data_va = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(host->data_va))
		return PTR_ERR(host->data_va);

	host->data_pa = (dma_addr_t)res->start;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_addr");
	host->addr_va = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(host->addr_va))
		return PTR_ERR(host->addr_va);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_cmd");
	host->cmd_va = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(host->cmd_va))
		return PTR_ERR(host->cmd_va);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fsmc_regs");
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	host->regs_va = base + FSMC_NOR_REG_SIZE +
		(host->bank * FSMC_NAND_BANK_SZ);

	host->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(host->clk)) {
		dev_err(&pdev->dev, "failed to fetch block clock\n");
		return PTR_ERR(host->clk);
	}

	ret = clk_prepare_enable(host->clk);
	if (ret)
		return ret;

	/*
	 * This device ID is actually a common AMBA ID as used on the
	 * AMBA PrimeCell bus. However it is not a PrimeCell.
	 */
	for (pid = 0, i = 0; i < 4; i++)
		pid |= (readl(base + resource_size(res) - 0x20 + 4 * i) &
			255) << (i * 8);
	host->pid = pid;

	dev_info(&pdev->dev,
		 "FSMC device partno %03x, manufacturer %02x, revision %02x, config %02x\n",
		 AMBA_PART_BITS(pid), AMBA_MANF_BITS(pid),
		 AMBA_REV_BITS(pid), AMBA_CONFIG_BITS(pid));

	host->dev = &pdev->dev;

	if (host->mode == USE_DMA_ACCESS)
		init_completion(&host->dma_access_complete);

	/* Link all private pointers */
	mtd = nand_to_mtd(&host->nand);
	nand_set_controller_data(nand, host);
	nand_set_flash_node(nand, pdev->dev.of_node);

	mtd->dev.parent = &pdev->dev;
	nand->exec_op = fsmc_exec_op;
	nand->select_chip = fsmc_select_chip;
	nand->chip_delay = 30;

	/*
	 * Setup default ECC mode. nand_dt_init() called from nand_scan_ident()
	 * can overwrite this value if the DT provides a different value.
	 */
	nand->ecc.mode = NAND_ECC_HW;
	nand->ecc.hwctl = fsmc_enable_hwecc;
	nand->ecc.size = 512;
	nand->badblockbits = 7;

	if (host->mode == USE_DMA_ACCESS) {
		dma_cap_zero(mask);
		dma_cap_set(DMA_MEMCPY, mask);
		host->read_dma_chan = dma_request_channel(mask, filter, NULL);
		if (!host->read_dma_chan) {
			dev_err(&pdev->dev, "Unable to get read dma channel\n");
			ret = -ENODEV;
			goto disable_clk;
		}
		host->write_dma_chan = dma_request_channel(mask, filter, NULL);
		if (!host->write_dma_chan) {
			dev_err(&pdev->dev, "Unable to get write dma channel\n");
			ret = -ENODEV;
			goto release_dma_read_chan;
		}
	}

	if (host->dev_timings)
		fsmc_nand_setup(host, host->dev_timings);
	else
		nand->setup_data_interface = fsmc_setup_data_interface;

	if (AMBA_REV_BITS(host->pid) >= 8) {
		nand->ecc.read_page = fsmc_read_page_hwecc;
		nand->ecc.calculate = fsmc_read_hwecc_ecc4;
		nand->ecc.correct = fsmc_bch8_correct_data;
		nand->ecc.bytes = 13;
		nand->ecc.strength = 8;
	}

	/* Scan to find existence of the device */
	ret = nand_scan_ident(mtd, 1, NULL);
	if (ret) {
		dev_err(&pdev->dev, "No NAND Device found!\n");
		goto release_dma_write_chan;
	}

	if (AMBA_REV_BITS(host->pid) >= 8) {
		switch (mtd->oobsize) {
		case 16:
		case 64:
		case 128:
		case 224:
		case 256:
			break;
		default:
			dev_warn(&pdev->dev,
				 "No oob scheme defined for oobsize %d\n",
				 mtd->oobsize);
			ret = -EINVAL;
			goto release_dma_write_chan;
		}

		mtd_set_ooblayout(mtd, &fsmc_ecc4_ooblayout_ops);
	} else {
		switch (nand->ecc.mode) {
		case NAND_ECC_HW:
			dev_info(&pdev->dev, "Using 1-bit HW ECC scheme\n");
			nand->ecc.calculate = fsmc_read_hwecc_ecc1;
			nand->ecc.correct = nand_correct_data;
			nand->ecc.bytes = 3;
			nand->ecc.strength = 1;
			break;

		case NAND_ECC_SOFT:
			if (nand->ecc.algo == NAND_ECC_BCH) {
				dev_info(&pdev->dev,
					 "Using 4-bit SW BCH ECC scheme\n");
				break;
			}
			/* fall through */

		case NAND_ECC_ON_DIE:
			break;

		default:
			dev_err(&pdev->dev, "Unsupported ECC mode!\n");
			ret = -EINVAL;
			goto release_dma_write_chan;
		}

		/*
		 * Don't set a layout for BCH4 SW ECC; it will be generated
		 * later during BCH initialization in nand_scan_tail().
		 */
		if (nand->ecc.mode == NAND_ECC_HW) {
			switch (mtd->oobsize) {
			case 16:
			case 64:
			case 128:
				mtd_set_ooblayout(mtd,
						  &fsmc_ecc1_ooblayout_ops);
				break;
			default:
				dev_warn(&pdev->dev,
					 "No oob scheme defined for oobsize %d\n",
					 mtd->oobsize);
				ret = -EINVAL;
				goto release_dma_write_chan;
			}
		}
	}

	/* Second stage of scan to fill MTD data-structures */
	ret = nand_scan_tail(mtd);
	if (ret)
		goto release_dma_write_chan;

	mtd->name = "nand";
	ret = mtd_device_register(mtd, NULL, 0);
	if (ret)
		goto cleanup_nand;

	platform_set_drvdata(pdev, host);
	dev_info(&pdev->dev, "FSMC NAND driver registration successful\n");

	return 0;

cleanup_nand:
	nand_cleanup(nand);
release_dma_write_chan:
	if (host->mode == USE_DMA_ACCESS)
		dma_release_channel(host->write_dma_chan);
release_dma_read_chan:
	if (host->mode == USE_DMA_ACCESS)
		dma_release_channel(host->read_dma_chan);
disable_clk:
	clk_disable_unprepare(host->clk);

	return ret;
}

/*
 * Clean up routine
 */
static int fsmc_nand_remove(struct platform_device *pdev)
{
	struct fsmc_nand_data *host = platform_get_drvdata(pdev);

	if (host) {
		nand_release(nand_to_mtd(&host->nand));

		if (host->mode == USE_DMA_ACCESS) {
			dma_release_channel(host->write_dma_chan);
			dma_release_channel(host->read_dma_chan);
		}
		clk_disable_unprepare(host->clk);
	}

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int fsmc_nand_suspend(struct device *dev)
{
	struct fsmc_nand_data *host = dev_get_drvdata(dev);

	if (host)
		clk_disable_unprepare(host->clk);

	return 0;
}

static int fsmc_nand_resume(struct device *dev)
{
	struct fsmc_nand_data *host = dev_get_drvdata(dev);

	if (host) {
		clk_prepare_enable(host->clk);
		if (host->dev_timings)
			fsmc_nand_setup(host, host->dev_timings);
	}

	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(fsmc_nand_pm_ops, fsmc_nand_suspend, fsmc_nand_resume);

static const struct of_device_id fsmc_nand_id_table[] = {
	{ .compatible = "st,spear600-fsmc-nand" },
	{ .compatible = "stericsson,fsmc-nand" },
	{}
};
MODULE_DEVICE_TABLE(of, fsmc_nand_id_table);

static struct platform_driver fsmc_nand_driver = {
	.remove = fsmc_nand_remove,
	.driver = {
		.name = "fsmc-nand",
		.of_match_table = fsmc_nand_id_table,
		.pm = &fsmc_nand_pm_ops,
	},
};

module_platform_driver_probe(fsmc_nand_driver, fsmc_nand_probe);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vipin Kumar <vipin.kumar@st.com>, Ashish Priyadarshi");
MODULE_DESCRIPTION("NAND driver for SPEAr Platforms");