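/*
 * Freescale/NXP MXS and i.MX6/i.MX7 GPMI NAND flash driver.
 *
 * Page data is moved with the APBH DMA engine; hardware ECC is provided by
 * the BCH block.
 */
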
#include <common.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/types.h>
#include <malloc.h>
#include <linux/errno.h>
#include <asm/io.h>
#include <asm/arch/clock.h>
#include <asm/arch/imx-regs.h>
#include <asm/imx-common/regs-bch.h>
#include <asm/imx-common/regs-gpmi.h>
#include <asm/arch/sys_proto.h>
#include <asm/imx-common/dma.h>

#define MXS_NAND_DMA_DESCRIPTOR_COUNT 4

#define MXS_NAND_CHUNK_DATA_CHUNK_SIZE 512
#if (defined(CONFIG_MX6) || defined(CONFIG_MX7))
#define MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT 2
#else
#define MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT 0
#endif
#define MXS_NAND_METADATA_SIZE 10
#define MXS_NAND_BITS_PER_ECC_LEVEL 13

#if !defined(CONFIG_SYS_CACHELINE_SIZE) || CONFIG_SYS_CACHELINE_SIZE < 32
#define MXS_NAND_COMMAND_BUFFER_SIZE 32
#else
#define MXS_NAND_COMMAND_BUFFER_SIZE CONFIG_SYS_CACHELINE_SIZE
#endif

#define MXS_NAND_BCH_TIMEOUT 10000

struct mxs_nand_info {
	int cur_chip;

	uint32_t cmd_queue_len;
	uint32_t data_buf_size;

	uint8_t *cmd_buf;
	uint8_t *data_buf;
	uint8_t *oob_buf;

	uint8_t marking_block_bad;
	uint8_t raw_oob_mode;

	/* Functions with altered behaviour, hooked at the MTD level */
	int (*hooked_read_oob)(struct mtd_info *mtd,
			loff_t from, struct mtd_oob_ops *ops);
	int (*hooked_write_oob)(struct mtd_info *mtd,
			loff_t to, struct mtd_oob_ops *ops);
	int (*hooked_block_markbad)(struct mtd_info *mtd,
			loff_t ofs);

	/* DMA descriptors */
	struct mxs_dma_desc **desc;
	uint32_t desc_index;
};

struct nand_ecclayout fake_ecc_layout;
static int chunk_data_size = MXS_NAND_CHUNK_DATA_CHUNK_SIZE;
static int galois_field = 13;

/*
 * Cache maintenance helpers: the command and data buffers are accessed by the
 * DMA engine, so they must be flushed before the controller reads them and
 * invalidated before the CPU reads data the controller has written.
 */
#ifndef CONFIG_SYS_DCACHE_OFF
static void mxs_nand_flush_data_buf(struct mxs_nand_info *info)
{
	uint32_t addr = (uint32_t)info->data_buf;

	flush_dcache_range(addr, addr + info->data_buf_size);
}

static void mxs_nand_inval_data_buf(struct mxs_nand_info *info)
{
	uint32_t addr = (uint32_t)info->data_buf;

	invalidate_dcache_range(addr, addr + info->data_buf_size);
}

static void mxs_nand_flush_cmd_buf(struct mxs_nand_info *info)
{
	uint32_t addr = (uint32_t)info->cmd_buf;

	flush_dcache_range(addr, addr + MXS_NAND_COMMAND_BUFFER_SIZE);
}
#else
static inline void mxs_nand_flush_data_buf(struct mxs_nand_info *info) {}
static inline void mxs_nand_inval_data_buf(struct mxs_nand_info *info) {}
static inline void mxs_nand_flush_cmd_buf(struct mxs_nand_info *info) {}
#endif

static struct mxs_dma_desc *mxs_nand_get_dma_desc(struct mxs_nand_info *info)
{
	struct mxs_dma_desc *desc;

	if (info->desc_index >= MXS_NAND_DMA_DESCRIPTOR_COUNT) {
		printf("MXS NAND: Too many DMA descriptors requested\n");
		return NULL;
	}

	desc = info->desc[info->desc_index];
	info->desc_index++;

	return desc;
}

static void mxs_nand_return_dma_descs(struct mxs_nand_info *info)
{
	int i;
	struct mxs_dma_desc *desc;

	for (i = 0; i < info->desc_index; i++) {
		desc = info->desc[i];
		memset(desc, 0, sizeof(struct mxs_dma_desc));
		desc->address = (dma_addr_t)desc;
	}

	info->desc_index = 0;
}

static uint32_t mxs_nand_ecc_chunk_cnt(uint32_t page_data_size)
{
	return page_data_size / chunk_data_size;
}

static uint32_t mxs_nand_ecc_size_in_bits(uint32_t ecc_strength)
{
	return ecc_strength * galois_field;
}

static uint32_t mxs_nand_aux_status_offset(void)
{
	return (MXS_NAND_METADATA_SIZE + 0x3) & ~0x3;
}

static inline uint32_t mxs_nand_get_ecc_strength(uint32_t page_data_size,
						uint32_t page_oob_size)
{
	int ecc_strength;
	int max_ecc_strength_supported;

	/* The i.MX6SX and i.MX7 BCH engine supports up to 62 bits, others 40 */
	if (is_mx6sx() || is_mx7())
		max_ecc_strength_supported = 62;
	else
		max_ecc_strength_supported = 40;

	/*
	 * Determine the ECC layout with the formula:
	 *	ECC bits per chunk = (total page spare data bits) /
	 *		(bits per ECC level) / (chunks per page)
	 * where:
	 *	total page spare data bits =
	 *		(page oob size - meta data size) * (bits per byte)
	 */
	ecc_strength = ((page_oob_size - MXS_NAND_METADATA_SIZE) * 8)
			/ (galois_field *
			   mxs_nand_ecc_chunk_cnt(page_data_size));

	return min(round_down(ecc_strength, 2), max_ecc_strength_supported);
}

static inline uint32_t mxs_nand_get_mark_offset(uint32_t page_data_size,
						uint32_t ecc_strength)
{
	uint32_t chunk_data_size_in_bits;
	uint32_t chunk_ecc_size_in_bits;
	uint32_t chunk_total_size_in_bits;
	uint32_t block_mark_chunk_number;
	uint32_t block_mark_chunk_bit_offset;
	uint32_t block_mark_bit_offset;

	chunk_data_size_in_bits = chunk_data_size * 8;
	chunk_ecc_size_in_bits = mxs_nand_ecc_size_in_bits(ecc_strength);

	chunk_total_size_in_bits =
			chunk_data_size_in_bits + chunk_ecc_size_in_bits;

	/* Compute the bit offset of the block mark within the physical page */
	block_mark_bit_offset = page_data_size * 8;

	/* Subtract the metadata bits */
	block_mark_bit_offset -= MXS_NAND_METADATA_SIZE * 8;

	/*
	 * Compute the chunk number (starting at zero) in which the block mark
	 * appears.
	 */
	block_mark_chunk_number =
			block_mark_bit_offset / chunk_total_size_in_bits;

	/*
	 * Compute the bit offset of the block mark within its chunk, and
	 * validate it.
	 */
	block_mark_chunk_bit_offset = block_mark_bit_offset -
			(block_mark_chunk_number * chunk_total_size_in_bits);

	if (block_mark_chunk_bit_offset > chunk_data_size_in_bits)
		return 1;

	/*
	 * Now that we know the chunk number in which the block mark appears,
	 * we can subtract all the ECC bits that appear before it.
	 */
	block_mark_bit_offset -=
		block_mark_chunk_number * chunk_ecc_size_in_bits;

	return block_mark_bit_offset;
}

static uint32_t mxs_nand_mark_byte_offset(struct mtd_info *mtd)
{
	uint32_t ecc_strength;
	ecc_strength = mxs_nand_get_ecc_strength(mtd->writesize, mtd->oobsize);
	return mxs_nand_get_mark_offset(mtd->writesize, ecc_strength) >> 3;
}

static uint32_t mxs_nand_mark_bit_offset(struct mtd_info *mtd)
{
	uint32_t ecc_strength;
	ecc_strength = mxs_nand_get_ecc_strength(mtd->writesize, mtd->oobsize);
	return mxs_nand_get_mark_offset(mtd->writesize, ecc_strength) & 0x7;
}

/* Wait for the BCH engine to signal completion, then acknowledge the IRQ. */
static int mxs_nand_wait_for_bch_complete(void)
{
	struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
	int timeout = MXS_NAND_BCH_TIMEOUT;
	int ret;

	ret = mxs_wait_mask_set(&bch_regs->hw_bch_ctrl_reg,
		BCH_CTRL_COMPLETE_IRQ, timeout);

	writel(BCH_CTRL_COMPLETE_IRQ, &bch_regs->hw_bch_ctrl_clr);

	return ret;
}
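
/*
 * This is the function installed in the cmd_ctrl function pointer of the
 * owning struct nand_chip. Since this driver installs its own select_chip,
 * in practice this is only reached from the reference implementation's
 * cmdfunc, so we can ignore the chip-enable handling and concentrate on
 * queueing command and address bytes for the NAND flash.
 */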
static void mxs_nand_cmd_ctrl(struct mtd_info *mtd, int data, unsigned int ctrl)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	int ret;

	/*
	 * If this condition is true, something is _VERY_ wrong in the MTD
	 * subsystem!
	 */
	if (nand_info->cmd_queue_len == MXS_NAND_COMMAND_BUFFER_SIZE) {
		printf("MXS NAND: Command queue too long\n");
		return;
	}

	/*
	 * Every operation begins with a command byte and a series of zero or
	 * more address bytes, distinguished by the Address Latch Enable (ALE)
	 * or Command Latch Enable (CLE) signals being asserted. When MTD is
	 * ready to execute the operation, it deasserts both latch enables.
	 *
	 * Rather than run a separate DMA operation for every single byte, we
	 * queue them up and run a single DMA operation for the entire series
	 * of command and address bytes.
	 */
	if (ctrl & (NAND_ALE | NAND_CLE)) {
		if (data != NAND_CMD_NONE)
			nand_info->cmd_buf[nand_info->cmd_queue_len++] = data;
		return;
	}

	/*
	 * If control arrives here, MTD has deasserted both the ALE and CLE,
	 * which means it's ready to run an operation. Check if we have any
	 * bytes to send.
	 */
	if (nand_info->cmd_queue_len == 0)
		return;

	/* Compile the DMA descriptor that sends the queued bytes. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_DMA_READ | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_CHAIN | MXS_DMA_DESC_DEC_SEM |
		MXS_DMA_DESC_WAIT4END | (3 << MXS_DMA_DESC_PIO_WORDS_OFFSET) |
		(nand_info->cmd_queue_len << MXS_DMA_DESC_BYTES_OFFSET);

	d->cmd.address = (dma_addr_t)nand_info->cmd_buf;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WRITE |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_CLE |
		GPMI_CTRL0_ADDRESS_INCREMENT |
		nand_info->cmd_queue_len;

	mxs_dma_desc_append(channel, d);

	/* Flush caches */
	mxs_nand_flush_cmd_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret)
		printf("MXS NAND: Error sending command\n");

	mxs_nand_return_dma_descs(nand_info);

	/* Reset the command queue. */
	nand_info->cmd_queue_len = 0;
}

/* Test if the NAND flash is ready. */
static int mxs_nand_device_ready(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	struct mxs_gpmi_regs *gpmi_regs =
		(struct mxs_gpmi_regs *)MXS_GPMI_BASE;
	uint32_t tmp;

	tmp = readl(&gpmi_regs->hw_gpmi_stat);
	tmp >>= (GPMI_STAT_READY_BUSY_OFFSET + nand_info->cur_chip);

	return tmp & 1;
}

/* Select the NAND chip. */
static void mxs_nand_select_chip(struct mtd_info *mtd, int chip)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);

	nand_info->cur_chip = chip;
}
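
/*
 * Handle block mark swapping.
 *
 * With the BCH layout used by this driver, the byte that physically lands in
 * the position of the factory bad block marker belongs to the ECC-protected
 * data area and does not, in general, sit on a byte boundary. This function
 * swaps that (bit-shifted) data byte with the first metadata byte of the OOB
 * buffer so the bad block marker stays visible at OOB offset zero, for both
 * the read and the write path.
 */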
static void mxs_nand_swap_block_mark(struct mtd_info *mtd,
					uint8_t *data_buf, uint8_t *oob_buf)
{
	uint32_t bit_offset;
	uint32_t buf_offset;

	uint32_t src;
	uint32_t dst;

	bit_offset = mxs_nand_mark_bit_offset(mtd);
	buf_offset = mxs_nand_mark_byte_offset(mtd);

	/*
	 * Get the byte from the data area that overlays the block mark. Since
	 * the ECC engine applies its own view to the bits in the page, the
	 * physical block mark won't (in general) appear on a byte boundary in
	 * the data.
	 */
	src = data_buf[buf_offset] >> bit_offset;
	src |= data_buf[buf_offset + 1] << (8 - bit_offset);

	dst = oob_buf[0];

	oob_buf[0] = src;

	data_buf[buf_offset] &= ~(0xff << bit_offset);
	data_buf[buf_offset + 1] &= 0xff << bit_offset;

	data_buf[buf_offset] |= dst << bit_offset;
	data_buf[buf_offset + 1] |= dst >> (8 - bit_offset);
}

/* Read data from NAND. */
static void mxs_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int length)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	int ret;

	if (length > NAND_MAX_PAGESIZE) {
		printf("MXS NAND: DMA buffer too big\n");
		return;
	}

	if (!buf) {
		printf("MXS NAND: DMA buffer is NULL\n");
		return;
	}

	/* Compile the DMA descriptor - a descriptor that reads data. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_DMA_WRITE | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END |
		(1 << MXS_DMA_DESC_PIO_WORDS_OFFSET) |
		(length << MXS_DMA_DESC_BYTES_OFFSET);

	d->cmd.address = (dma_addr_t)nand_info->data_buf;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_READ |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		length;

	mxs_dma_desc_append(channel, d);

	/*
	 * A DMA descriptor that waits for the command to end and the chip to
	 * become ready before the CPU touches the data buffer.
	 */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_DEC_SEM |
		MXS_DMA_DESC_WAIT4END | (1 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA;

	mxs_dma_desc_append(channel, d);

	/* Invalidate caches */
	mxs_nand_inval_data_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret) {
		printf("MXS NAND: DMA read error\n");
		goto rtn;
	}

	/* Invalidate caches */
	mxs_nand_inval_data_buf(nand_info);

	memcpy(buf, nand_info->data_buf, length);

rtn:
	mxs_nand_return_dma_descs(nand_info);
}

/* Write data to NAND. */
static void mxs_nand_write_buf(struct mtd_info *mtd, const uint8_t *buf,
				int length)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	int ret;

	if (length > NAND_MAX_PAGESIZE) {
		printf("MXS NAND: DMA buffer too big\n");
		return;
	}

	if (!buf) {
		printf("MXS NAND: DMA buffer is NULL\n");
		return;
	}

	memcpy(nand_info->data_buf, buf, length);

	/* Compile the DMA descriptor - a descriptor that writes data. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_DMA_READ | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END |
		(1 << MXS_DMA_DESC_PIO_WORDS_OFFSET) |
		(length << MXS_DMA_DESC_BYTES_OFFSET);

	d->cmd.address = (dma_addr_t)nand_info->data_buf;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WRITE |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		length;

	mxs_dma_desc_append(channel, d);

	/* Flush caches */
	mxs_nand_flush_data_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret)
		printf("MXS NAND: DMA write error\n");

	mxs_nand_return_dma_descs(nand_info);
}

/* Read a single byte from NAND. */
static uint8_t mxs_nand_read_byte(struct mtd_info *mtd)
{
	uint8_t buf;
	mxs_nand_read_buf(mtd, &buf, 1);
	return buf;
}

/* Read a page from NAND. */
static int mxs_nand_ecc_read_page(struct mtd_info *mtd, struct nand_chip *nand,
					uint8_t *buf, int oob_required,
					int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	uint32_t corrected = 0, failed = 0;
	uint8_t *status;
	int i, ret;

	/* Compile the DMA descriptor - wait for ready. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN |
		MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_WAIT4END |
		(1 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA;

	mxs_dma_desc_append(channel, d);

	/* Compile the DMA descriptor - enable the BCH block and read. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN |
		MXS_DMA_DESC_WAIT4END | (6 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_READ |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		(mtd->writesize + mtd->oobsize);
	d->cmd.pio_words[1] = 0;
	d->cmd.pio_words[2] =
		GPMI_ECCCTRL_ENABLE_ECC |
		GPMI_ECCCTRL_ECC_CMD_DECODE |
		GPMI_ECCCTRL_BUFFER_MASK_BCH_PAGE;
	d->cmd.pio_words[3] = mtd->writesize + mtd->oobsize;
	d->cmd.pio_words[4] = (dma_addr_t)nand_info->data_buf;
	d->cmd.pio_words[5] = (dma_addr_t)nand_info->oob_buf;

	mxs_dma_desc_append(channel, d);

	/* Compile the DMA descriptor - disable the BCH block. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN |
		MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_WAIT4END |
		(3 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		(mtd->writesize + mtd->oobsize);
	d->cmd.pio_words[1] = 0;
	d->cmd.pio_words[2] = 0;

	mxs_dma_desc_append(channel, d);

	/* Compile the DMA descriptor - deassert the NAND lock and interrupt. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_DEC_SEM;

	d->cmd.address = 0;

	mxs_dma_desc_append(channel, d);

	/* Invalidate caches */
	mxs_nand_inval_data_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret) {
		printf("MXS NAND: DMA read error\n");
		goto rtn;
	}

	ret = mxs_nand_wait_for_bch_complete();
	if (ret) {
		printf("MXS NAND: BCH read timeout\n");
		goto rtn;
	}

	/* Invalidate caches */
	mxs_nand_inval_data_buf(nand_info);

	/* Read DMA completed, now do the mark swapping. */
	mxs_nand_swap_block_mark(mtd, nand_info->data_buf, nand_info->oob_buf);

	/*
	 * Loop over the per-chunk status bytes, accumulating ECC status:
	 * 0x00 means no errors, 0xff means an erased chunk, 0xfe means an
	 * uncorrectable chunk, any other value is the number of corrected
	 * bitflips.
	 */
	status = nand_info->oob_buf + mxs_nand_aux_status_offset();
	for (i = 0; i < mxs_nand_ecc_chunk_cnt(mtd->writesize); i++) {
		if (status[i] == 0x00)
			continue;

		if (status[i] == 0xff)
			continue;

		if (status[i] == 0xfe) {
			failed++;
			continue;
		}

		corrected += status[i];
	}

	/* Propagate ECC status to the owning MTD. */
	mtd->ecc_stats.failed += failed;
	mtd->ecc_stats.corrected += corrected;

	/*
	 * It's time to deliver the OOB bytes. See mxs_nand_ecc_read_oob() for
	 * details about our policy for delivering the OOB.
	 *
	 * We fill the caller's buffer with set bits, and then copy the block
	 * mark to the caller's buffer. Note that, if block mark swapping was
	 * necessary, it has already been done, so we can rely on the first
	 * byte of the auxiliary buffer to contain the block mark.
	 */
	memset(nand->oob_poi, 0xff, mtd->oobsize);

	nand->oob_poi[0] = nand_info->oob_buf[0];

	memcpy(buf, nand_info->data_buf, mtd->writesize);

rtn:
	mxs_nand_return_dma_descs(nand_info);

	return ret;
}

/* Write a page to NAND. */
static int mxs_nand_ecc_write_page(struct mtd_info *mtd,
				struct nand_chip *nand, const uint8_t *buf,
				int oob_required, int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	int ret;

	memcpy(nand_info->data_buf, buf, mtd->writesize);
	memcpy(nand_info->oob_buf, nand->oob_poi, mtd->oobsize);

	/* Handle block mark swapping. */
	mxs_nand_swap_block_mark(mtd, nand_info->data_buf, nand_info->oob_buf);

	/* Compile the DMA descriptor - write data. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END |
		(6 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WRITE |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA;
	d->cmd.pio_words[1] = 0;
	d->cmd.pio_words[2] =
		GPMI_ECCCTRL_ENABLE_ECC |
		GPMI_ECCCTRL_ECC_CMD_ENCODE |
		GPMI_ECCCTRL_BUFFER_MASK_BCH_PAGE;
	d->cmd.pio_words[3] = (mtd->writesize + mtd->oobsize);
	d->cmd.pio_words[4] = (dma_addr_t)nand_info->data_buf;
	d->cmd.pio_words[5] = (dma_addr_t)nand_info->oob_buf;

	mxs_dma_desc_append(channel, d);

	/* Flush caches */
	mxs_nand_flush_data_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret) {
		printf("MXS NAND: DMA write error\n");
		goto rtn;
	}

	ret = mxs_nand_wait_for_bch_complete();
	if (ret) {
		printf("MXS NAND: BCH write timeout\n");
		goto rtn;
	}

rtn:
	mxs_nand_return_dma_descs(nand_info);
	return 0;
}

/*
 * Read OOB from NAND.
 *
 * This function is a veneer that replaces the function originally installed
 * by the NAND flash MTD code.
 */
static int mxs_nand_hook_read_oob(struct mtd_info *mtd, loff_t from,
					struct mtd_oob_ops *ops)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	int ret;

	if (ops->mode == MTD_OPS_RAW)
		nand_info->raw_oob_mode = 1;
	else
		nand_info->raw_oob_mode = 0;

	ret = nand_info->hooked_read_oob(mtd, from, ops);

	nand_info->raw_oob_mode = 0;

	return ret;
}

/*
 * Write OOB to NAND.
 *
 * This function is a veneer that replaces the function originally installed
 * by the NAND flash MTD code.
 */
static int mxs_nand_hook_write_oob(struct mtd_info *mtd, loff_t to,
					struct mtd_oob_ops *ops)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	int ret;

	if (ops->mode == MTD_OPS_RAW)
		nand_info->raw_oob_mode = 1;
	else
		nand_info->raw_oob_mode = 0;

	ret = nand_info->hooked_write_oob(mtd, to, ops);

	nand_info->raw_oob_mode = 0;

	return ret;
}

/*
 * Mark a block bad in NAND.
 *
 * This function is a veneer that replaces the function originally installed
 * by the NAND flash MTD code.
 */
static int mxs_nand_hook_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	int ret;

	nand_info->marking_block_bad = 1;

	ret = nand_info->hooked_block_markbad(mtd, ofs);

	nand_info->marking_block_bad = 0;

	return ret;
}
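
/*
 * There are several places in this driver where we have to handle the OOB and
 * block marks. This is the function where things are the most complicated, so
 * this is where we try to explain it all. All the other places refer back to
 * here.
 *
 * These are the rules, in order of decreasing importance:
 *
 * 1) Nothing the caller does can be allowed to imperil the block mark, so all
 *    write operations take measures to protect it.
 *
 * 2) In read operations, the first byte of the OOB we return must reflect the
 *    true state of the block mark, no matter where that block mark appears in
 *    the physical medium.
 *
 * 3) ECC-based read operations return an OOB full of set bits, because with
 *    the BCH layout the "spare" area of the physical page is consumed by
 *    metadata and ECC parity rather than by a conventional OOB. If the caller
 *    wants the raw bytes, he has to use a raw read operation.
 *
 * 4) "Raw" operations read the page exactly as it lies in the physical
 *    medium, with the BCH engine out of the way.
 */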
static int mxs_nand_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *nand,
					int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);

	/*
	 * First, fill in the OOB buffer. If we're doing a raw read, read the
	 * conventional OOB from the chip. Otherwise, fill the caller's buffer
	 * with set bits and only deliver the block mark.
	 */
	if (nand_info->raw_oob_mode) {
		/*
		 * If control arrives here, we're doing a "raw" read. Send the
		 * command to read the conventional OOB and read it.
		 */
		nand->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
		nand->read_buf(mtd, nand->oob_poi, mtd->oobsize);
	} else {
		/*
		 * If control arrives here, we're not doing a "raw" read. Fill
		 * the OOB buffer with set bits and correct the block mark.
		 */
		memset(nand->oob_poi, 0xff, mtd->oobsize);

		nand->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
		mxs_nand_read_buf(mtd, nand->oob_poi, 1);
	}

	return 0;
}

/* Write OOB data to NAND. */
static int mxs_nand_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *nand,
					int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	uint8_t block_mark = 0;

	/*
	 * There are fundamental incompatibilities between the i.MX GPMI NFC
	 * and the NAND flash MTD model that make it essentially impossible to
	 * write the out-of-band bytes.
	 *
	 * We permit *ONE* exception: if the *intent* of writing the OOB is to
	 * mark a block bad, we can do that.
	 */
	if (!nand_info->marking_block_bad) {
		printf("MXS NAND: Writing OOB isn't supported\n");
		return -EIO;
	}

	/* Write the block mark. */
	nand->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
	nand->write_buf(mtd, &block_mark, 1);
	nand->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);

	/* Check if it worked. */
	if (nand->waitfunc(mtd, nand) & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}
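
/*
 * Claims all blocks are good.
 *
 * The NAND flash MTD system only calls this function when it is not allowed
 * to keep an in-memory bad block table and has to ask the driver directly.
 * Since this driver lets MTD keep an in-memory BBT, control only arrives here
 * when we deliberately want *all* blocks to look good, so this function
 * always reports success.
 */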
static int mxs_nand_block_bad(struct mtd_info *mtd, loff_t ofs)
{
	return 0;
}
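
/*
 * Nominally, the purpose of this function is to look for or create the bad
 * block table. In fact, since it is called at the very end of the
 * initialization process started by nand_scan(), and there is no more formal
 * mechanism, this driver "hooks" it to finish its own initialization.
 *
 * At this point the physical NAND flash chips have been identified and
 * counted, so the physical geometry is known and the BCH geometry registers
 * can be programmed accordingly.
 *
 * The return value of this function propagates directly back to this driver's
 * call to nand_scan(). Anything other than zero will cause this driver to
 * tear everything down and declare failure.
 */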
static int mxs_nand_scan_bbt(struct mtd_info *mtd)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
	uint32_t tmp;

	if (mtd->oobsize > MXS_NAND_CHUNK_DATA_CHUNK_SIZE) {
		galois_field = 14;
		chunk_data_size = MXS_NAND_CHUNK_DATA_CHUNK_SIZE * 2;
	}

	if (mtd->oobsize > chunk_data_size) {
		printf("NAND chips with an OOB size larger than %d bytes are not supported!\n",
			chunk_data_size);
		return -EINVAL;
	}

	/* Configure BCH and set NFC geometry */
	mxs_reset_block(&bch_regs->hw_bch_ctrl_reg);

	/* Configure layout 0 */
	tmp = (mxs_nand_ecc_chunk_cnt(mtd->writesize) - 1)
		<< BCH_FLASHLAYOUT0_NBLOCKS_OFFSET;
	tmp |= MXS_NAND_METADATA_SIZE << BCH_FLASHLAYOUT0_META_SIZE_OFFSET;
	tmp |= (mxs_nand_get_ecc_strength(mtd->writesize, mtd->oobsize) >> 1)
		<< BCH_FLASHLAYOUT0_ECC0_OFFSET;
	tmp |= chunk_data_size >> MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT;
	tmp |= (14 == galois_field ? 1 : 0) <<
		BCH_FLASHLAYOUT0_GF13_0_GF14_1_OFFSET;
	writel(tmp, &bch_regs->hw_bch_flash0layout0);

	tmp = (mtd->writesize + mtd->oobsize)
		<< BCH_FLASHLAYOUT1_PAGE_SIZE_OFFSET;
	tmp |= (mxs_nand_get_ecc_strength(mtd->writesize, mtd->oobsize) >> 1)
		<< BCH_FLASHLAYOUT1_ECCN_OFFSET;
	tmp |= chunk_data_size >> MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT;
	tmp |= (14 == galois_field ? 1 : 0) <<
		BCH_FLASHLAYOUT1_GF13_0_GF14_1_OFFSET;
	writel(tmp, &bch_regs->hw_bch_flash0layout1);

	/* Set *all* chip selects to use layout 0 */
	writel(0, &bch_regs->hw_bch_layoutselect);

	/* Enable BCH complete interrupt */
	writel(BCH_CTRL_COMPLETE_IRQ_EN, &bch_regs->hw_bch_ctrl_set);

	/* Hook some operations at the MTD level. */
	if (mtd->_read_oob != mxs_nand_hook_read_oob) {
		nand_info->hooked_read_oob = mtd->_read_oob;
		mtd->_read_oob = mxs_nand_hook_read_oob;
	}

	if (mtd->_write_oob != mxs_nand_hook_write_oob) {
		nand_info->hooked_write_oob = mtd->_write_oob;
		mtd->_write_oob = mxs_nand_hook_write_oob;
	}

	if (mtd->_block_markbad != mxs_nand_hook_block_markbad) {
		nand_info->hooked_block_markbad = mtd->_block_markbad;
		mtd->_block_markbad = mxs_nand_hook_block_markbad;
	}

	/* We use the reference implementation for bad block management. */
	return nand_default_bbt(mtd);
}

/* Allocate DMA buffers */
int mxs_nand_alloc_buffers(struct mxs_nand_info *nand_info)
{
	uint8_t *buf;
	const int size = NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE;

	nand_info->data_buf_size = roundup(size, MXS_DMA_ALIGNMENT);

	/* DMA buffer for page data and OOB */
	buf = memalign(MXS_DMA_ALIGNMENT, nand_info->data_buf_size);
	if (!buf) {
		printf("MXS NAND: Error allocating DMA buffers\n");
		return -ENOMEM;
	}

	memset(buf, 0, nand_info->data_buf_size);

	nand_info->data_buf = buf;
	nand_info->oob_buf = buf + NAND_MAX_PAGESIZE;

	/* Command buffer */
	nand_info->cmd_buf = memalign(MXS_DMA_ALIGNMENT,
				MXS_NAND_COMMAND_BUFFER_SIZE);
	if (!nand_info->cmd_buf) {
		free(buf);
		printf("MXS NAND: Error allocating command buffers\n");
		return -ENOMEM;
	}
	memset(nand_info->cmd_buf, 0, MXS_NAND_COMMAND_BUFFER_SIZE);
	nand_info->cmd_queue_len = 0;

	return 0;
}

/* Initializes the NFC hardware. */
int mxs_nand_init(struct mxs_nand_info *info)
{
	struct mxs_gpmi_regs *gpmi_regs =
		(struct mxs_gpmi_regs *)MXS_GPMI_BASE;
	struct mxs_bch_regs *bch_regs =
		(struct mxs_bch_regs *)MXS_BCH_BASE;
	int i = 0, j, ret = 0;

	info->desc = malloc(sizeof(struct mxs_dma_desc *) *
				MXS_NAND_DMA_DESCRIPTOR_COUNT);
	if (!info->desc) {
		ret = -ENOMEM;
		goto err1;
	}

	/* Allocate the DMA descriptors. */
	for (i = 0; i < MXS_NAND_DMA_DESCRIPTOR_COUNT; i++) {
		info->desc[i] = mxs_dma_desc_alloc();
		if (!info->desc[i]) {
			ret = -ENOMEM;
			goto err2;
		}
	}

	/* Init the DMA controller. */
	for (j = MXS_DMA_CHANNEL_AHB_APBH_GPMI0;
		j <= MXS_DMA_CHANNEL_AHB_APBH_GPMI7; j++) {
		ret = mxs_dma_init_channel(j);
		if (ret)
			goto err3;
	}

	/* Reset the GPMI and BCH blocks. */
	mxs_reset_block(&gpmi_regs->hw_gpmi_ctrl0_reg);
	mxs_reset_block(&bch_regs->hw_bch_ctrl_reg);

	/*
	 * Choose NAND mode, set the IRQ polarity, deassert the device reset
	 * line and select BCH ECC.
	 */
	clrsetbits_le32(&gpmi_regs->hw_gpmi_ctrl1,
			GPMI_CTRL1_GPMI_MODE,
			GPMI_CTRL1_ATA_IRQRDY_POLARITY | GPMI_CTRL1_DEV_RESET |
			GPMI_CTRL1_BCH_MODE);

	return 0;

err3:
	for (--j; j >= MXS_DMA_CHANNEL_AHB_APBH_GPMI0; j--)
		mxs_dma_release(j);
err2:
	for (--i; i >= 0; i--)
		mxs_dma_desc_free(info->desc[i]);
	free(info->desc);
err1:
	if (ret == -ENOMEM)
		printf("MXS NAND: Unable to allocate DMA descriptors\n");
	return ret;
}
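
/*
 * Called during the NAND driver binding process: allocate the driver's
 * private data and DMA buffers, initialize the NFC hardware and fill in the
 * function pointers of the owning struct nand_chip.
 */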
int board_nand_init(struct nand_chip *nand)
{
	struct mxs_nand_info *nand_info;
	int err;

	nand_info = malloc(sizeof(struct mxs_nand_info));
	if (!nand_info) {
		printf("MXS NAND: Failed to allocate private data\n");
		return -ENOMEM;
	}
	memset(nand_info, 0, sizeof(struct mxs_nand_info));

	err = mxs_nand_alloc_buffers(nand_info);
	if (err)
		goto err1;

	err = mxs_nand_init(nand_info);
	if (err)
		goto err2;

	memset(&fake_ecc_layout, 0, sizeof(fake_ecc_layout));

	nand_set_controller_data(nand, nand_info);
	nand->options |= NAND_NO_SUBPAGE_WRITE;

	nand->cmd_ctrl = mxs_nand_cmd_ctrl;

	nand->dev_ready = mxs_nand_device_ready;
	nand->select_chip = mxs_nand_select_chip;
	nand->block_bad = mxs_nand_block_bad;
	nand->scan_bbt = mxs_nand_scan_bbt;

	nand->read_byte = mxs_nand_read_byte;

	nand->read_buf = mxs_nand_read_buf;
	nand->write_buf = mxs_nand_write_buf;

	nand->ecc.read_page = mxs_nand_ecc_read_page;
	nand->ecc.write_page = mxs_nand_ecc_write_page;
	nand->ecc.read_oob = mxs_nand_ecc_read_oob;
	nand->ecc.write_oob = mxs_nand_ecc_write_oob;

	nand->ecc.layout = &fake_ecc_layout;
	nand->ecc.mode = NAND_ECC_HW;
	nand->ecc.bytes = 9;
	nand->ecc.size = 512;
	nand->ecc.strength = 8;

	return 0;

err2:
	free(nand_info->data_buf);
	free(nand_info->cmd_buf);
err1:
	free(nand_info);
	return err;
}