// SPDX-License-Identifier: GPL-2.0
/*
 * SuperH FLCTL nand controller
 *
 * Copyright (c) 2008 Renesas Solutions Corp.
 * Copyright (c) 2008 Atom Create Engineering Co., Ltd.
 *
 * Based on fmr by Freescale Semiconductor, Inc.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>
#include <linux/slab.h>
#include <linux/string.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/sh_flctl.h>
static int flctl_4secc_ooblayout_sp_ecc(struct mtd_info *mtd, int section,
					struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section)
		return -ERANGE;

	oobregion->offset = 0;
	oobregion->length = chip->ecc.bytes;

	return 0;
}

static int flctl_4secc_ooblayout_sp_free(struct mtd_info *mtd, int section,
					 struct mtd_oob_region *oobregion)
{
	if (section)
		return -ERANGE;

	oobregion->offset = 12;
	oobregion->length = 4;

	return 0;
}

static const struct mtd_ooblayout_ops flctl_4secc_oob_smallpage_ops = {
	.ecc = flctl_4secc_ooblayout_sp_ecc,
	.free = flctl_4secc_ooblayout_sp_free,
};

static int flctl_4secc_ooblayout_lp_ecc(struct mtd_info *mtd, int section,
					struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section >= chip->ecc.steps)
		return -ERANGE;

	oobregion->offset = (section * 16) + 6;
	oobregion->length = chip->ecc.bytes;

	return 0;
}

static int flctl_4secc_ooblayout_lp_free(struct mtd_info *mtd, int section,
					 struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section >= chip->ecc.steps)
		return -ERANGE;

	oobregion->offset = section * 16;
	oobregion->length = 6;

	if (!section) {
		oobregion->offset += 2;
		oobregion->length -= 2;
	}

	return 0;
}

static const struct mtd_ooblayout_ops flctl_4secc_oob_largepage_ops = {
	.ecc = flctl_4secc_ooblayout_lp_ecc,
	.free = flctl_4secc_ooblayout_lp_free,
};

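/*
 * Bad-block scan descriptors: the marker byte(s) in the OOB area of the
 * first two pages of each block must read back as 0xff, otherwise the
 * block is treated as bad.
 */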
static uint8_t scan_ff_pattern[] = { 0xff, 0xff };

static struct nand_bbt_descr flctl_4secc_smallpage = {
	.options = NAND_BBT_SCAN2NDPAGE,
	.offs = 11,
	.len = 1,
	.pattern = scan_ff_pattern,
};

static struct nand_bbt_descr flctl_4secc_largepage = {
	.options = NAND_BBT_SCAN2NDPAGE,
	.offs = 0,
	.len = 2,
	.pattern = scan_ff_pattern,
};

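/* Flush the controller FIFOs by pulsing the AC0/AC1 clear bits. */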
static void empty_fifo(struct sh_flctl *flctl)
{
	writel(flctl->flintdmacr_base | AC1CLR | AC0CLR, FLINTDMACR(flctl));
	writel(flctl->flintdmacr_base, FLINTDMACR(flctl));
}

static void start_translation(struct sh_flctl *flctl)
{
	writeb(TRSTRT, FLTRCR(flctl));
}

static void timeout_error(struct sh_flctl *flctl, const char *str)
{
	dev_err(&flctl->pdev->dev, "Timeout occurred in %s\n", str);
}

static void wait_completion(struct sh_flctl *flctl)
{
	uint32_t timeout = LOOP_TIMEOUT_MAX;

	while (timeout--) {
		if (readb(FLTRCR(flctl)) & TREND) {
			writeb(0x0, FLTRCR(flctl));
			return;
		}
		udelay(1);
	}

	timeout_error(flctl, __func__);
	writeb(0x0, FLTRCR(flctl));
}

static void flctl_dma_complete(void *param)
{
	struct sh_flctl *flctl = param;

	complete(&flctl->dma_complete);
}

static void flctl_release_dma(struct sh_flctl *flctl)
{
	if (flctl->chan_fifo0_rx) {
		dma_release_channel(flctl->chan_fifo0_rx);
		flctl->chan_fifo0_rx = NULL;
	}
	if (flctl->chan_fifo0_tx) {
		dma_release_channel(flctl->chan_fifo0_tx);
		flctl->chan_fifo0_tx = NULL;
	}
}

static void flctl_setup_dma(struct sh_flctl *flctl)
{
	dma_cap_mask_t mask;
	struct dma_slave_config cfg;
	struct platform_device *pdev = flctl->pdev;
	struct sh_flctl_platform_data *pdata = dev_get_platdata(&pdev->dev);
	int ret;

	if (!pdata)
		return;

	if (pdata->slave_id_fifo0_tx <= 0 || pdata->slave_id_fifo0_rx <= 0)
		return;

	/* We can only either use DMA for both Tx and Rx or not use it at all */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	flctl->chan_fifo0_tx = dma_request_channel(mask, shdma_chan_filter,
				(void *)(uintptr_t)pdata->slave_id_fifo0_tx);
	dev_dbg(&pdev->dev, "%s: TX: got channel %p\n", __func__,
		flctl->chan_fifo0_tx);

	if (!flctl->chan_fifo0_tx)
		return;

	memset(&cfg, 0, sizeof(cfg));
	cfg.direction = DMA_MEM_TO_DEV;
	cfg.dst_addr = flctl->fifo;
	cfg.src_addr = 0;
	ret = dmaengine_slave_config(flctl->chan_fifo0_tx, &cfg);
	if (ret < 0)
		goto err;

	flctl->chan_fifo0_rx = dma_request_channel(mask, shdma_chan_filter,
				(void *)(uintptr_t)pdata->slave_id_fifo0_rx);
	dev_dbg(&pdev->dev, "%s: RX: got channel %p\n", __func__,
		flctl->chan_fifo0_rx);

	if (!flctl->chan_fifo0_rx)
		goto err;

	cfg.direction = DMA_DEV_TO_MEM;
	cfg.dst_addr = 0;
	cfg.src_addr = flctl->fifo;
	ret = dmaengine_slave_config(flctl->chan_fifo0_rx, &cfg);
	if (ret < 0)
		goto err;

	init_completion(&flctl->dma_complete);

	return;

err:
	flctl_release_dma(flctl);
}

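/*
 * Program FLADR with the column/page address in the layout the controller
 * expects; devices using ADRCNT2_E also get the top page-address byte in
 * FLADR2.
 */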
static void set_addr(struct mtd_info *mtd, int column, int page_addr)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	uint32_t addr = 0;

	if (column == -1) {
		addr = page_addr;	/* ERASE1 */
	} else if (page_addr != -1) {
		/* SEQIN, READ0, etc.. */
		if (flctl->chip.options & NAND_BUSWIDTH_16)
			column >>= 1;
		if (flctl->page_size) {
			addr = column & 0x0FFF;
			addr |= (page_addr & 0xff) << 16;
			addr |= ((page_addr >> 8) & 0xff) << 24;
			/* bigger than 128MB */
			if (flctl->rw_ADRCNT == ADRCNT2_E) {
				uint32_t addr2;
				addr2 = (page_addr >> 16) & 0xff;
				writel(addr2, FLADR2(flctl));
			}
		} else {
			addr = column;
			addr |= (page_addr & 0xff) << 8;
			addr |= ((page_addr >> 8) & 0xff) << 16;
			addr |= ((page_addr >> 16) & 0xff) << 24;
		}
	}
	writel(addr, FLADR(flctl));
}

static void wait_rfifo_ready(struct sh_flctl *flctl)
{
	uint32_t timeout = LOOP_TIMEOUT_MAX;

	while (timeout--) {
		uint32_t val;
		/* check FIFO */
		val = readl(FLDTCNTR(flctl)) >> 16;
		if (val & 0xFF)
			return;
		udelay(1);
	}
	timeout_error(flctl, __func__);
}

static void wait_wfifo_ready(struct sh_flctl *flctl)
{
	uint32_t len, timeout = LOOP_TIMEOUT_MAX;

	while (timeout--) {
		/* check FIFO */
		len = (readl(FLDTCNTR(flctl)) >> 16) & 0xFF;
		if (len >= 4)
			return;
		udelay(1);
	}
	timeout_error(flctl, __func__);
}

static enum flctl_ecc_res_t wait_recfifo_ready
		(struct sh_flctl *flctl, int sector_number)
{
	uint32_t timeout = LOOP_TIMEOUT_MAX;
	void __iomem *ecc_reg[4];
	int i;
	int state = FL_SUCCESS;
	uint32_t data, size;

	/*
	 * First this loop checks in FLDTCNTR if we are ready to read out the
	 * oob data. This is the case if either all went fine without errors
	 * or if the bottom part of the loop corrected the errors or marked
	 * them as uncorrectable and the controller is given time to push the
	 * data into the FIFO.
	 */
	while (timeout--) {
		/* check if all is ok and we can read out the OOB */
		size = readl(FLDTCNTR(flctl)) >> 24;
		if ((size & 0xFF) == 4)
			return state;

		/* check if a correction code has been calculated */
		if (!(readl(FL4ECCCR(flctl)) & _4ECCEND)) {
			/*
			 * either we wait for the fifo to be filled or a
			 * correction pattern is being generated
			 */
			udelay(1);
			continue;
		}

		/* check for an uncorrectable error */
		if (readl(FL4ECCCR(flctl)) & _4ECCFA) {
			/* check if we face a non-empty page */
			for (i = 0; i < 512; i++) {
				if (flctl->done_buff[i] != 0xff) {
					state = FL_ERROR; /* can't correct */
					break;
				}
			}

			if (state == FL_SUCCESS)
				dev_dbg(&flctl->pdev->dev,
					"reading empty sector %d, ecc error ignored\n",
					sector_number);

			writel(0, FL4ECCCR(flctl));
			continue;
		}

		/* start error correction */
		ecc_reg[0] = FL4ECCRESULT0(flctl);
		ecc_reg[1] = FL4ECCRESULT1(flctl);
		ecc_reg[2] = FL4ECCRESULT2(flctl);
		ecc_reg[3] = FL4ECCRESULT3(flctl);

		for (i = 0; i < 3; i++) {
			uint8_t org;
			unsigned int index;

			data = readl(ecc_reg[i]);

			if (flctl->page_size)
				index = (512 * sector_number) +
					(data >> 16);
			else
				index = data >> 16;

			org = flctl->done_buff[index];
			flctl->done_buff[index] = org ^ (data & 0xFF);
		}
		state = FL_REPAIRABLE;
		writel(0, FL4ECCCR(flctl));
	}

	timeout_error(flctl, __func__);
	return FL_TIMEOUT;	/* timeout */
}

static void wait_wecfifo_ready(struct sh_flctl *flctl)
{
	uint32_t timeout = LOOP_TIMEOUT_MAX;
	uint32_t len;

	while (timeout--) {
		/* check FIFO */
		len = (readl(FLDTCNTR(flctl)) >> 24) & 0xFF;
		if (len >= 4)
			return;
		udelay(1);
	}
	timeout_error(flctl, __func__);
}

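/*
 * Run one FIFO0 transfer through the dmaengine channel set up in
 * flctl_setup_dma(). Returns > 0 on success; callers fall back to PIO on
 * error or when no channel is available.
 */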
static int flctl_dma_fifo0_transfer(struct sh_flctl *flctl, unsigned long *buf,
					int len, enum dma_data_direction dir)
{
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan;
	enum dma_transfer_direction tr_dir;
	dma_addr_t dma_addr;
	dma_cookie_t cookie;
	uint32_t reg;
	int ret;

	if (dir == DMA_FROM_DEVICE) {
		chan = flctl->chan_fifo0_rx;
		tr_dir = DMA_DEV_TO_MEM;
	} else {
		chan = flctl->chan_fifo0_tx;
		tr_dir = DMA_MEM_TO_DEV;
	}

	dma_addr = dma_map_single(chan->device->dev, buf, len, dir);

	if (!dma_mapping_error(chan->device->dev, dma_addr))
		desc = dmaengine_prep_slave_single(chan, dma_addr, len,
			tr_dir, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	if (desc) {
		reg = readl(FLINTDMACR(flctl));
		reg |= DREQ0EN;
		writel(reg, FLINTDMACR(flctl));

		desc->callback = flctl_dma_complete;
		desc->callback_param = flctl;
		cookie = dmaengine_submit(desc);
		if (dma_submit_error(cookie)) {
			ret = dma_submit_error(cookie);
			dev_warn(&flctl->pdev->dev,
				 "DMA submit failed, falling back to PIO\n");
			goto out;
		}

		dma_async_issue_pending(chan);
	} else {
		/* DMA failed, fall back to PIO */
		flctl_release_dma(flctl);
		dev_warn(&flctl->pdev->dev,
			 "DMA failed, falling back to PIO\n");
		ret = -EIO;
		goto out;
	}

	ret = wait_for_completion_timeout(&flctl->dma_complete,
					  msecs_to_jiffies(3000));

	if (ret <= 0) {
		dmaengine_terminate_all(chan);
		dev_err(&flctl->pdev->dev, "wait_for_completion_timeout\n");
	}

out:
	reg = readl(FLINTDMACR(flctl));
	reg &= ~DREQ0EN;
	writel(reg, FLINTDMACR(flctl));

	dma_unmap_single(chan->device->dev, dma_addr, len, dir);

	/* ret > 0 is success */
	return ret;
}

static void read_datareg(struct sh_flctl *flctl, int offset)
{
	unsigned long data;
	unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];

	wait_completion(flctl);

	data = readl(FLDATAR(flctl));
	*buf = le32_to_cpu(data);
}

static void read_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
{
	int i, len_4align;
	unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];

	len_4align = (rlen + 3) / 4;

	/* initiate DMA transfer */
	if (flctl->chan_fifo0_rx && rlen >= 32 &&
	    flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_FROM_DEVICE) > 0)
		goto convert;	/* DMA success */

	/* do polling transfer */
	for (i = 0; i < len_4align; i++) {
		wait_rfifo_ready(flctl);
		buf[i] = readl(FLDTFIFO(flctl));
	}

convert:
	for (i = 0; i < len_4align; i++)
		buf[i] = be32_to_cpu(buf[i]);
}

static enum flctl_ecc_res_t read_ecfiforeg
		(struct sh_flctl *flctl, uint8_t *buff, int sector)
{
	int i;
	enum flctl_ecc_res_t res;
	unsigned long *ecc_buf = (unsigned long *)buff;

	res = wait_recfifo_ready(flctl, sector);

	if (res != FL_ERROR) {
		for (i = 0; i < 4; i++) {
			ecc_buf[i] = readl(FLECFIFO(flctl));
			ecc_buf[i] = be32_to_cpu(ecc_buf[i]);
		}
	}

	return res;
}

static void write_fiforeg(struct sh_flctl *flctl, int rlen,
			  unsigned int offset)
{
	int i, len_4align;
	unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];

	len_4align = (rlen + 3) / 4;
	for (i = 0; i < len_4align; i++) {
		wait_wfifo_ready(flctl);
		writel(cpu_to_be32(buf[i]), FLDTFIFO(flctl));
	}
}

static void write_ec_fiforeg(struct sh_flctl *flctl, int rlen,
			     unsigned int offset)
{
	int i, len_4align;
	unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];

	len_4align = (rlen + 3) / 4;

	for (i = 0; i < len_4align; i++)
		buf[i] = cpu_to_be32(buf[i]);

	/* initiate DMA transfer */
	if (flctl->chan_fifo0_tx && rlen >= 32 &&
	    flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_TO_DEVICE) > 0)
		return;	/* DMA success */

	/* do polling transfer */
	for (i = 0; i < len_4align; i++) {
		wait_wecfifo_ready(flctl);
		writel(buf[i], FLECFIFO(flctl));
	}
}

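/* Derive FLCMNCR/FLCMDCR/FLCMCDR settings for the given NAND command. */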
static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_val)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	uint32_t flcmncr_val = flctl->flcmncr_base & ~SEL_16BIT;
	uint32_t flcmdcr_val, addr_len_bytes = 0;

	/* Set SNAND bit if page size is 2048byte */
	if (flctl->page_size)
		flcmncr_val |= SNAND_E;
	else
		flcmncr_val &= ~SNAND_E;

	/* default FLCMDCR val */
	flcmdcr_val = DOCMD1_E | DOADR_E;

	/* Set for FLCMDCR */
	switch (cmd) {
	case NAND_CMD_ERASE1:
		addr_len_bytes = flctl->erase_ADRCNT;
		flcmdcr_val |= DOCMD2_E;
		break;
	case NAND_CMD_READ0:
	case NAND_CMD_READOOB:
	case NAND_CMD_RNDOUT:
		addr_len_bytes = flctl->rw_ADRCNT;
		flcmdcr_val |= CDSRC_E;
		if (flctl->chip.options & NAND_BUSWIDTH_16)
			flcmncr_val |= SEL_16BIT;
		break;
	case NAND_CMD_SEQIN:
		/* This case is that cmd is READ0 or READ1 or READ00 */
		flcmdcr_val &= ~DOADR_E;	/* ONLY execute 1st cmd */
		break;
	case NAND_CMD_PAGEPROG:
		addr_len_bytes = flctl->rw_ADRCNT;
		flcmdcr_val |= DOCMD2_E | CDSRC_E | SELRW;
		if (flctl->chip.options & NAND_BUSWIDTH_16)
			flcmncr_val |= SEL_16BIT;
		break;
	case NAND_CMD_READID:
		flcmncr_val &= ~SNAND_E;
		flcmdcr_val |= CDSRC_E;
		addr_len_bytes = ADRCNT_1;
		break;
	case NAND_CMD_STATUS:
	case NAND_CMD_RESET:
		flcmncr_val &= ~SNAND_E;
		flcmdcr_val &= ~(DOADR_E | DOSR_E);
		break;
	default:
		break;
	}

	/* Set address bytes parameter */
	flcmdcr_val |= addr_len_bytes;

	/* Now actually write registers */
	writel(flcmncr_val, FLCMNCR(flctl));
	writel(flcmdcr_val, FLCMDCR(flctl));
	writel(flcmcdr_val, FLCMCDR(flctl));
}

static int flctl_read_page_hwecc(struct nand_chip *chip, uint8_t *buf,
				 int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	nand_read_page_op(chip, page, 0, buf, mtd->writesize);
	if (oob_required)
		chip->legacy.read_buf(chip, chip->oob_poi, mtd->oobsize);
	return 0;
}

static int flctl_write_page_hwecc(struct nand_chip *chip, const uint8_t *buf,
				  int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
	chip->legacy.write_buf(chip, chip->oob_poi, mtd->oobsize);
	return nand_prog_page_end_op(chip);
}

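/*
 * Hardware-ECC page read: each 512-byte sector is drained from the data
 * FIFO, its 16-byte OOB chunk from the ECC FIFO, and correction results
 * are folded into mtd->ecc_stats.
 */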
static void execmd_read_page_sector(struct mtd_info *mtd, int page_addr)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	int sector, page_sectors;
	enum flctl_ecc_res_t ecc_result;

	page_sectors = flctl->page_size ? 4 : 1;

	set_cmd_regs(mtd, NAND_CMD_READ0,
		(NAND_CMD_READSTART << 8) | NAND_CMD_READ0);

	writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE | _4ECCCORRECT,
		FLCMNCR(flctl));
	writel(readl(FLCMDCR(flctl)) | page_sectors, FLCMDCR(flctl));
	writel(page_addr << 2, FLADR(flctl));

	empty_fifo(flctl);
	start_translation(flctl);

	for (sector = 0; sector < page_sectors; sector++) {
		read_fiforeg(flctl, 512, 512 * sector);

		ecc_result = read_ecfiforeg(flctl,
			&flctl->done_buff[mtd->writesize + 16 * sector],
			sector);

		switch (ecc_result) {
		case FL_REPAIRABLE:
			dev_info(&flctl->pdev->dev,
				 "applied ecc on page 0x%x\n", page_addr);
			mtd->ecc_stats.corrected++;
			break;
		case FL_ERROR:
			dev_warn(&flctl->pdev->dev,
				 "page 0x%x contains corrupted data\n",
				 page_addr);
			mtd->ecc_stats.failed++;
			break;
		default:
			;
		}
	}

	wait_completion(flctl);

	writel(readl(FLCMNCR(flctl)) & ~(ACM_SACCES_MODE | _4ECCCORRECT),
		FLCMNCR(flctl));
}

static void execmd_read_oob(struct mtd_info *mtd, int page_addr)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	int page_sectors = flctl->page_size ? 4 : 1;
	int i;

	set_cmd_regs(mtd, NAND_CMD_READ0,
		(NAND_CMD_READSTART << 8) | NAND_CMD_READ0);

	empty_fifo(flctl);

	for (i = 0; i < page_sectors; i++) {
		set_addr(mtd, (512 + 16) * i + 512, page_addr);
		writel(16, FLDTCNTR(flctl));	/* set read size */

		start_translation(flctl);
		read_fiforeg(flctl, 16, 16 * i);
		wait_completion(flctl);
	}
}

static void execmd_write_page_sector(struct mtd_info *mtd)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	int page_addr = flctl->seqin_page_addr;
	int sector, page_sectors;

	page_sectors = flctl->page_size ? 4 : 1;

	set_cmd_regs(mtd, NAND_CMD_PAGEPROG,
			(NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN);

	empty_fifo(flctl);
	writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE, FLCMNCR(flctl));
	writel(readl(FLCMDCR(flctl)) | page_sectors, FLCMDCR(flctl));
	writel(page_addr << 2, FLADR(flctl));
	start_translation(flctl);

	for (sector = 0; sector < page_sectors; sector++) {
		write_fiforeg(flctl, 512, 512 * sector);
		write_ec_fiforeg(flctl, 16, mtd->writesize + 16 * sector);
	}

	wait_completion(flctl);
	writel(readl(FLCMNCR(flctl)) & ~ACM_SACCES_MODE, FLCMNCR(flctl));
}

static void execmd_write_oob(struct mtd_info *mtd)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	int page_addr = flctl->seqin_page_addr;
	int sector, page_sectors;

	page_sectors = flctl->page_size ? 4 : 1;

	set_cmd_regs(mtd, NAND_CMD_PAGEPROG,
			(NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN);

	for (sector = 0; sector < page_sectors; sector++) {
		empty_fifo(flctl);
		set_addr(mtd, sector * 528 + 512, page_addr);
		writel(16, FLDTCNTR(flctl));	/* set write size */

		start_translation(flctl);
		write_fiforeg(flctl, 16, 16 * sector);
		wait_completion(flctl);
	}
}

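/* Legacy cmdfunc hook: translate NAND commands into FLCTL register sequences. */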
static void flctl_cmdfunc(struct nand_chip *chip, unsigned int command,
			int column, int page_addr)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	uint32_t read_cmd = 0;

	pm_runtime_get_sync(&flctl->pdev->dev);

	flctl->read_bytes = 0;
	if (command != NAND_CMD_PAGEPROG)
		flctl->index = 0;

	switch (command) {
	case NAND_CMD_READ1:
	case NAND_CMD_READ0:
		if (flctl->hwecc) {
			/* read page with hwecc */
			execmd_read_page_sector(mtd, page_addr);
			break;
		}
		if (flctl->page_size)
			set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8)
				| command);
		else
			set_cmd_regs(mtd, command, command);

		set_addr(mtd, 0, page_addr);

		flctl->read_bytes = mtd->writesize + mtd->oobsize;
		if (flctl->chip.options & NAND_BUSWIDTH_16)
			column >>= 1;
		flctl->index += column;
		goto read_normal_exit;

	case NAND_CMD_READOOB:
		if (flctl->hwecc) {
			/* read oob with hwecc */
			execmd_read_oob(mtd, page_addr);
			break;
		}

		if (flctl->page_size) {
			set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8)
				| NAND_CMD_READ0);
			set_addr(mtd, mtd->writesize, page_addr);
		} else {
			set_cmd_regs(mtd, command, command);
			set_addr(mtd, 0, page_addr);
		}
		flctl->read_bytes = mtd->oobsize;
		goto read_normal_exit;

	case NAND_CMD_RNDOUT:
		if (flctl->hwecc)
			break;

		if (flctl->page_size)
			set_cmd_regs(mtd, command, (NAND_CMD_RNDOUTSTART << 8)
				| command);
		else
			set_cmd_regs(mtd, command, command);

		set_addr(mtd, column, 0);

		flctl->read_bytes = mtd->writesize + mtd->oobsize - column;
		goto read_normal_exit;

	case NAND_CMD_READID:
		set_cmd_regs(mtd, command, command);

		/* READID is always performed using an 8-bit bus */
		if (flctl->chip.options & NAND_BUSWIDTH_16)
			column <<= 1;
		set_addr(mtd, column, 0);

		flctl->read_bytes = 8;
		writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
		empty_fifo(flctl);
		start_translation(flctl);
		read_fiforeg(flctl, flctl->read_bytes, 0);
		wait_completion(flctl);
		break;

	case NAND_CMD_ERASE1:
		flctl->erase1_page_addr = page_addr;
		break;

	case NAND_CMD_ERASE2:
		set_cmd_regs(mtd, NAND_CMD_ERASE1,
			(command << 8) | NAND_CMD_ERASE1);
		set_addr(mtd, -1, flctl->erase1_page_addr);
		start_translation(flctl);
		wait_completion(flctl);
		break;

	case NAND_CMD_SEQIN:
		if (!flctl->page_size) {
			/* output read command */
			if (column >= mtd->writesize) {
				column -= mtd->writesize;
				read_cmd = NAND_CMD_READOOB;
			} else if (column < 256) {
				read_cmd = NAND_CMD_READ0;
			} else {
				column -= 256;
				read_cmd = NAND_CMD_READ1;
			}
		}
		flctl->seqin_column = column;
		flctl->seqin_page_addr = page_addr;
		flctl->seqin_read_cmd = read_cmd;
		break;

	case NAND_CMD_PAGEPROG:
		empty_fifo(flctl);
		if (!flctl->page_size) {
			set_cmd_regs(mtd, NAND_CMD_SEQIN,
					flctl->seqin_read_cmd);
			set_addr(mtd, -1, -1);
			writel(0, FLDTCNTR(flctl));	/* set 0 size */
			start_translation(flctl);
			wait_completion(flctl);
		}
		if (flctl->hwecc) {
			/* write page with hwecc */
			if (flctl->seqin_column == mtd->writesize)
				execmd_write_oob(mtd);
			else if (!flctl->seqin_column)
				execmd_write_page_sector(mtd);
			else
				pr_err("Invalid address !?\n");
			break;
		}
		set_cmd_regs(mtd, command, (command << 8) | NAND_CMD_SEQIN);
		set_addr(mtd, flctl->seqin_column, flctl->seqin_page_addr);
		writel(flctl->index, FLDTCNTR(flctl));	/* set write size */
		start_translation(flctl);
		write_fiforeg(flctl, flctl->index, 0);
		wait_completion(flctl);
		break;

	case NAND_CMD_STATUS:
		set_cmd_regs(mtd, command, command);
		set_addr(mtd, -1, -1);

		flctl->read_bytes = 1;
		writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
		start_translation(flctl);
		read_datareg(flctl, 0); /* read and end */
		break;

	case NAND_CMD_RESET:
		set_cmd_regs(mtd, command, command);
		set_addr(mtd, -1, -1);

		writel(0, FLDTCNTR(flctl));	/* set 0 size */
		start_translation(flctl);
		wait_completion(flctl);
		break;

	default:
		break;
	}
	goto runtime_exit;

read_normal_exit:
	writel(flctl->read_bytes, FLDTCNTR(flctl));	/* set read size */
	empty_fifo(flctl);
	start_translation(flctl);
	read_fiforeg(flctl, flctl->read_bytes, 0);
	wait_completion(flctl);
runtime_exit:
	pm_runtime_put_sync(&flctl->pdev->dev);
	return;
}

static void flctl_select_chip(struct nand_chip *chip, int chipnr)
{
	struct sh_flctl *flctl = mtd_to_flctl(nand_to_mtd(chip));
	int ret;

	switch (chipnr) {
	case -1:
		flctl->flcmncr_base &= ~CE0_ENABLE;

		pm_runtime_get_sync(&flctl->pdev->dev);
		writel(flctl->flcmncr_base, FLCMNCR(flctl));

		if (flctl->qos_request) {
			dev_pm_qos_remove_request(&flctl->pm_qos);
			flctl->qos_request = 0;
		}

		pm_runtime_put_sync(&flctl->pdev->dev);
		break;
	case 0:
		flctl->flcmncr_base |= CE0_ENABLE;

		if (!flctl->qos_request) {
			ret = dev_pm_qos_add_request(&flctl->pdev->dev,
							&flctl->pm_qos,
							DEV_PM_QOS_RESUME_LATENCY,
							100);
			if (ret < 0)
				dev_err(&flctl->pdev->dev,
					"PM QoS request failed: %d\n", ret);
			flctl->qos_request = 1;
		}

		if (flctl->holden) {
			pm_runtime_get_sync(&flctl->pdev->dev);
			writel(HOLDEN, FLHOLDCR(flctl));
			pm_runtime_put_sync(&flctl->pdev->dev);
		}
		break;
	default:
		BUG();
	}
}

static void flctl_write_buf(struct nand_chip *chip, const uint8_t *buf, int len)
{
	struct sh_flctl *flctl = mtd_to_flctl(nand_to_mtd(chip));

	memcpy(&flctl->done_buff[flctl->index], buf, len);
	flctl->index += len;
}

static uint8_t flctl_read_byte(struct nand_chip *chip)
{
	struct sh_flctl *flctl = mtd_to_flctl(nand_to_mtd(chip));
	uint8_t data;

	data = flctl->done_buff[flctl->index];
	flctl->index++;
	return data;
}

static void flctl_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
{
	struct sh_flctl *flctl = mtd_to_flctl(nand_to_mtd(chip));

	memcpy(buf, &flctl->done_buff[flctl->index], len);
	flctl->index += len;
}

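/*
 * Once the chip is identified, derive the address cycle counts from the
 * chip size and select either the 4-symbol hardware ECC layout or software
 * Hamming ECC.
 */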
static int flctl_chip_attach_chip(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct sh_flctl *flctl = mtd_to_flctl(mtd);

	/*
	 * NAND_BUSWIDTH_16 may have been set by nand_scan_ident().
	 * Add the SEL_16BIT flag in flctl->flcmncr_base.
	 */
	if (chip->options & NAND_BUSWIDTH_16)
		flctl->flcmncr_base |= SEL_16BIT;

	if (mtd->writesize == 512) {
		flctl->page_size = 0;
		if (chip->chipsize > (32 << 20)) {
			/* bigger than 32MB */
			flctl->rw_ADRCNT = ADRCNT_4;
			flctl->erase_ADRCNT = ADRCNT_3;
		} else if (chip->chipsize > (2 << 16)) {
			/* bigger than 128KB */
			flctl->rw_ADRCNT = ADRCNT_3;
			flctl->erase_ADRCNT = ADRCNT_2;
		} else {
			flctl->rw_ADRCNT = ADRCNT_2;
			flctl->erase_ADRCNT = ADRCNT_1;
		}
	} else {
		flctl->page_size = 1;
		if (chip->chipsize > (128 << 20)) {
			/* bigger than 128MB */
			flctl->rw_ADRCNT = ADRCNT2_E;
			flctl->erase_ADRCNT = ADRCNT_3;
		} else if (chip->chipsize > (8 << 16)) {
			/* bigger than 512KB */
			flctl->rw_ADRCNT = ADRCNT_4;
			flctl->erase_ADRCNT = ADRCNT_2;
		} else {
			flctl->rw_ADRCNT = ADRCNT_3;
			flctl->erase_ADRCNT = ADRCNT_1;
		}
	}

	if (flctl->hwecc) {
		if (mtd->writesize == 512) {
			mtd_set_ooblayout(mtd, &flctl_4secc_oob_smallpage_ops);
			chip->badblock_pattern = &flctl_4secc_smallpage;
		} else {
			mtd_set_ooblayout(mtd, &flctl_4secc_oob_largepage_ops);
			chip->badblock_pattern = &flctl_4secc_largepage;
		}

		chip->ecc.size = 512;
		chip->ecc.bytes = 10;
		chip->ecc.strength = 4;
		chip->ecc.read_page = flctl_read_page_hwecc;
		chip->ecc.write_page = flctl_write_page_hwecc;
		chip->ecc.mode = NAND_ECC_HW;

		/* 4 symbols ECC enabled */
		flctl->flcmncr_base |= _4ECCEN;
	} else {
		chip->ecc.mode = NAND_ECC_SOFT;
		chip->ecc.algo = NAND_ECC_HAMMING;
	}

	return 0;
}

static const struct nand_controller_ops flctl_nand_controller_ops = {
	.attach_chip = flctl_chip_attach_chip,
};

static irqreturn_t flctl_handle_flste(int irq, void *dev_id)
{
	struct sh_flctl *flctl = dev_id;

	dev_err(&flctl->pdev->dev, "flste irq: %x\n", readl(FLINTDMACR(flctl)));
	writel(flctl->flintdmacr_base, FLINTDMACR(flctl));

	return IRQ_HANDLED;
}

struct flctl_soc_config {
	unsigned long flcmncr_val;
	unsigned has_hwecc:1;
	unsigned use_holden:1;
};

static struct flctl_soc_config flctl_sh7372_config = {
	.flcmncr_val = CLK_16B_12L_4H | TYPESEL_SET | SHBUSSEL,
	.has_hwecc = 1,
	.use_holden = 1,
};

static const struct of_device_id of_flctl_match[] = {
	{ .compatible = "renesas,shmobile-flctl-sh7372",
	  .data = &flctl_sh7372_config },
	{},
};
MODULE_DEVICE_TABLE(of, of_flctl_match);

static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev)
{
	const struct flctl_soc_config *config;
	struct sh_flctl_platform_data *pdata;

	config = of_device_get_match_data(dev);
	if (!config) {
		dev_err(dev, "%s: no OF configuration attached\n", __func__);
		return NULL;
	}

	pdata = devm_kzalloc(dev, sizeof(struct sh_flctl_platform_data),
			     GFP_KERNEL);
	if (!pdata)
		return NULL;

	/* set SoC specific options */
	pdata->flcmncr_val = config->flcmncr_val;
	pdata->has_hwecc = config->has_hwecc;
	pdata->use_holden = config->use_holden;

	return pdata;
}

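/* Probe: map registers, request the error IRQ and hook up the legacy API. */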
static int flctl_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct sh_flctl *flctl;
	struct mtd_info *flctl_mtd;
	struct nand_chip *nand;
	struct sh_flctl_platform_data *pdata;
	int ret;
	int irq;

	flctl = devm_kzalloc(&pdev->dev, sizeof(struct sh_flctl), GFP_KERNEL);
	if (!flctl)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	flctl->reg = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(flctl->reg))
		return PTR_ERR(flctl->reg);
	flctl->fifo = res->start + 0x24; /* FLDTFIFO */

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "failed to get flste irq data: %d\n", irq);
		return irq;
	}

	ret = devm_request_irq(&pdev->dev, irq, flctl_handle_flste, IRQF_SHARED,
			       "flste", flctl);
	if (ret) {
		dev_err(&pdev->dev, "request interrupt failed.\n");
		return ret;
	}

	if (pdev->dev.of_node)
		pdata = flctl_parse_dt(&pdev->dev);
	else
		pdata = dev_get_platdata(&pdev->dev);

	if (!pdata) {
		dev_err(&pdev->dev, "no setup data defined\n");
		return -EINVAL;
	}

	platform_set_drvdata(pdev, flctl);
	nand = &flctl->chip;
	flctl_mtd = nand_to_mtd(nand);
	nand_set_flash_node(nand, pdev->dev.of_node);
	flctl_mtd->dev.parent = &pdev->dev;
	flctl->pdev = pdev;
	flctl->hwecc = pdata->has_hwecc;
	flctl->holden = pdata->use_holden;
	flctl->flcmncr_base = pdata->flcmncr_val;
	flctl->flintdmacr_base = flctl->hwecc ? (STERINTE | ECERB) : STERINTE;

	/* Set address of hardware control function */
	/* 20 us command delay time */
	nand->legacy.chip_delay = 20;

	nand->legacy.read_byte = flctl_read_byte;
	nand->legacy.write_buf = flctl_write_buf;
	nand->legacy.read_buf = flctl_read_buf;
	nand->legacy.select_chip = flctl_select_chip;
	nand->legacy.cmdfunc = flctl_cmdfunc;
	nand->legacy.set_features = nand_get_set_features_notsupp;
	nand->legacy.get_features = nand_get_set_features_notsupp;

	if (pdata->flcmncr_val & SEL_16BIT)
		nand->options |= NAND_BUSWIDTH_16;

	pm_runtime_enable(&pdev->dev);
	pm_runtime_resume(&pdev->dev);

	flctl_setup_dma(flctl);

	nand->legacy.dummy_controller.ops = &flctl_nand_controller_ops;
	ret = nand_scan(nand, 1);
	if (ret)
		goto err_chip;

	ret = mtd_device_register(flctl_mtd, pdata->parts, pdata->nr_parts);
	if (ret)
		goto cleanup_nand;

	return 0;

cleanup_nand:
	nand_cleanup(nand);
err_chip:
	flctl_release_dma(flctl);
	pm_runtime_disable(&pdev->dev);
	return ret;
}

static int flctl_remove(struct platform_device *pdev)
{
	struct sh_flctl *flctl = platform_get_drvdata(pdev);

	flctl_release_dma(flctl);
	nand_release(&flctl->chip);
	pm_runtime_disable(&pdev->dev);

	return 0;
}

static struct platform_driver flctl_driver = {
	.remove = flctl_remove,
	.driver = {
		.name = "sh_flctl",
		.of_match_table = of_match_ptr(of_flctl_match),
	},
};

module_platform_driver_probe(flctl_driver, flctl_probe);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Yoshihiro Shimoda");
MODULE_DESCRIPTION("SuperH FLCTL driver");
MODULE_ALIAS("platform:sh_flctl");