1
2
3
4
5
6
7
8
9
10
11
12
13
14
15#include <common.h>
16#include <dm.h>
17#include <linux/mtd/rawnand.h>
18#include <linux/sizes.h>
19#include <linux/types.h>
20#include <malloc.h>
21#include <linux/errno.h>
22#include <asm/io.h>
23#include <asm/arch/clock.h>
24#include <asm/arch/imx-regs.h>
25#include <asm/mach-imx/regs-bch.h>
26#include <asm/mach-imx/regs-gpmi.h>
27#include <asm/arch/sys_proto.h>
28#include "mxs_nand.h"
29
30#define MXS_NAND_DMA_DESCRIPTOR_COUNT 4
31
32#if (defined(CONFIG_MX6) || defined(CONFIG_MX7))
33#define MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT 2
34#else
35#define MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT 0
36#endif
37#define MXS_NAND_METADATA_SIZE 10
38#define MXS_NAND_BITS_PER_ECC_LEVEL 13
39
40#if !defined(CONFIG_SYS_CACHELINE_SIZE) || CONFIG_SYS_CACHELINE_SIZE < 32
41#define MXS_NAND_COMMAND_BUFFER_SIZE 32
42#else
43#define MXS_NAND_COMMAND_BUFFER_SIZE CONFIG_SYS_CACHELINE_SIZE
44#endif
45
46#define MXS_NAND_BCH_TIMEOUT 10000
47
48struct nand_ecclayout fake_ecc_layout;
49
50
51
52
53#ifndef CONFIG_SYS_DCACHE_OFF
54static void mxs_nand_flush_data_buf(struct mxs_nand_info *info)
55{
56 uint32_t addr = (uint32_t)info->data_buf;
57
58 flush_dcache_range(addr, addr + info->data_buf_size);
59}
60
61static void mxs_nand_inval_data_buf(struct mxs_nand_info *info)
62{
63 uint32_t addr = (uint32_t)info->data_buf;
64
65 invalidate_dcache_range(addr, addr + info->data_buf_size);
66}
67
68static void mxs_nand_flush_cmd_buf(struct mxs_nand_info *info)
69{
70 uint32_t addr = (uint32_t)info->cmd_buf;
71
72 flush_dcache_range(addr, addr + MXS_NAND_COMMAND_BUFFER_SIZE);
73}
74#else
/* Cache maintenance is a no-op when the D-cache is disabled. */
static inline void mxs_nand_flush_data_buf(struct mxs_nand_info *info) {}
static inline void mxs_nand_inval_data_buf(struct mxs_nand_info *info) {}
static inline void mxs_nand_flush_cmd_buf(struct mxs_nand_info *info) {}
78#endif
79
80static struct mxs_dma_desc *mxs_nand_get_dma_desc(struct mxs_nand_info *info)
81{
82 struct mxs_dma_desc *desc;
83
84 if (info->desc_index >= MXS_NAND_DMA_DESCRIPTOR_COUNT) {
85 printf("MXS NAND: Too many DMA descriptors requested\n");
86 return NULL;
87 }
88
89 desc = info->desc[info->desc_index];
90 info->desc_index++;
91
92 return desc;
93}
94
95static void mxs_nand_return_dma_descs(struct mxs_nand_info *info)
96{
97 int i;
98 struct mxs_dma_desc *desc;
99
100 for (i = 0; i < info->desc_index; i++) {
101 desc = info->desc[i];
102 memset(desc, 0, sizeof(struct mxs_dma_desc));
103 desc->address = (dma_addr_t)desc;
104 }
105
106 info->desc_index = 0;
107}
108
/*
 * Byte offset of the per-chunk BCH status bytes within the auxiliary
 * buffer: the metadata area size rounded up to a 32-bit boundary.
 */
static uint32_t mxs_nand_aux_status_offset(void)
{
	return (MXS_NAND_METADATA_SIZE + 0x3) & ~0x3;
}
113
/*
 * Work out where the factory bad-block mark (physically the first OOB
 * byte of the page) lands once the BCH controller's view of the page —
 * metadata, then interleaved data chunks and ECC parity — is applied.
 * The resulting byte/bit position within the data area is stored in
 * @geo->block_mark_byte_offset / @geo->block_mark_bit_offset so that
 * mxs_nand_swap_block_mark() can splice the mark in and out.
 *
 * Returns 0 on success, or -EINVAL if the mark would fall inside an ECC
 * parity region (geometry not supported).
 */
static inline int mxs_nand_calc_mark_offset(struct bch_geometry *geo,
					    uint32_t page_data_size)
{
	uint32_t chunk_data_size_in_bits = geo->ecc_chunk_size * 8;
	uint32_t chunk_ecc_size_in_bits = geo->ecc_strength * geo->gf_len;
	uint32_t chunk_total_size_in_bits;
	uint32_t block_mark_chunk_number;
	uint32_t block_mark_chunk_bit_offset;
	uint32_t block_mark_bit_offset;

	chunk_total_size_in_bits =
		chunk_data_size_in_bits + chunk_ecc_size_in_bits;

	/* Bit offset of the block mark within the physical page. */
	block_mark_bit_offset = page_data_size * 8;

	/* Subtract the metadata bits that precede the first chunk. */
	block_mark_bit_offset -= MXS_NAND_METADATA_SIZE * 8;

	/*
	 * Compute the chunk number (starting at zero) in which the block
	 * mark appears.
	 */
	block_mark_chunk_number =
		block_mark_bit_offset / chunk_total_size_in_bits;

	/*
	 * Compute the bit offset of the block mark within its chunk and
	 * validate it: the mark must lie in the data part of the chunk,
	 * not in its ECC parity.
	 */
	block_mark_chunk_bit_offset = block_mark_bit_offset -
			(block_mark_chunk_number * chunk_total_size_in_bits);

	if (block_mark_chunk_bit_offset > chunk_data_size_in_bits)
		return -EINVAL;

	/*
	 * The chunk is good; re-compute the mark's bit offset with the ECC
	 * parity bits of all preceding chunks removed, i.e. its position
	 * within the pure data stream.
	 */
	block_mark_bit_offset -=
		block_mark_chunk_number * chunk_ecc_size_in_bits;

	geo->block_mark_byte_offset = block_mark_bit_offset >> 3;
	geo->block_mark_bit_offset = block_mark_bit_offset & 0x7;

	return 0;
}
162
163static inline int mxs_nand_calc_ecc_layout_by_info(struct bch_geometry *geo,
164 struct mtd_info *mtd,
165 unsigned int ecc_strength,
166 unsigned int ecc_step)
167{
168 struct nand_chip *chip = mtd_to_nand(mtd);
169 struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
170
171 switch (ecc_step) {
172 case SZ_512:
173 geo->gf_len = 13;
174 break;
175 case SZ_1K:
176 geo->gf_len = 14;
177 break;
178 default:
179 return -EINVAL;
180 }
181
182 geo->ecc_chunk_size = ecc_step;
183 geo->ecc_strength = round_up(ecc_strength, 2);
184
185
186 if (geo->ecc_chunk_size < mtd->oobsize)
187 return -EINVAL;
188
189 if (geo->ecc_strength > nand_info->max_ecc_strength_supported)
190 return -EINVAL;
191
192 geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunk_size;
193
194 return 0;
195}
196
/*
 * Derive a default BCH geometry from the chip's page and OOB sizes:
 * choose the chunk size and galois field length, then compute the
 * strongest ECC strength that fits in the spare area.
 *
 * Returns 0 on success, -EINVAL if the OOB is too large to support.
 */
static inline int mxs_nand_calc_ecc_layout(struct bch_geometry *geo,
					   struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);

	/* The default galois field length is 13. */
	geo->gf_len = 13;

	/* The default chunk size is 512 bytes. */
	geo->ecc_chunk_size = 512;

	/* Chips with a large OOB get 1k chunks and a 14-bit galois field. */
	if (geo->ecc_chunk_size < mtd->oobsize) {
		geo->gf_len = 14;
		geo->ecc_chunk_size *= 2;
	}

	if (mtd->oobsize > geo->ecc_chunk_size) {
		printf("Not support the NAND chips whose oob size is larger then %d bytes!\n",
		       geo->ecc_chunk_size);
		return -EINVAL;
	}

	geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunk_size;

	/*
	 * Determine the ECC strength with the formula:
	 *	strength = (spare bits available for parity) /
	 *		   (gf_len * number of chunks)
	 * where the spare bits are the OOB minus the metadata area,
	 * in bits.
	 */
	geo->ecc_strength = ((mtd->oobsize - MXS_NAND_METADATA_SIZE) * 8)
			    / (geo->gf_len * geo->ecc_chunk_count);

	/* Round down to an even strength and clamp to the controller max. */
	geo->ecc_strength = min(round_down(geo->ecc_strength, 2),
				nand_info->max_ecc_strength_supported);

	return 0;
}
238
239
240
241
242static int mxs_nand_wait_for_bch_complete(struct mxs_nand_info *nand_info)
243{
244 int timeout = MXS_NAND_BCH_TIMEOUT;
245 int ret;
246
247 ret = mxs_wait_mask_set(&nand_info->bch_regs->hw_bch_ctrl_reg,
248 BCH_CTRL_COMPLETE_IRQ, timeout);
249
250 writel(BCH_CTRL_COMPLETE_IRQ, &nand_info->bch_regs->hw_bch_ctrl_clr);
251
252 return ret;
253}
254
255
256
257
258
259
260
261
262
263
264
/*
 * MTD cmd_ctrl hook.  Command and address bytes arriving with ALE/CLE
 * asserted are queued in cmd_buf; when MTD deasserts both latch enables
 * the whole queue is sent to the chip in a single GPMI DMA transfer.
 */
static void mxs_nand_cmd_ctrl(struct mtd_info *mtd, int data, unsigned int ctrl)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	int ret;

	/*
	 * If this condition is true, something is _VERY_ wrong in the MTD
	 * subsystem — the queue should never fill up.
	 */
	if (nand_info->cmd_queue_len == MXS_NAND_COMMAND_BUFFER_SIZE) {
		printf("MXS NAND: Command queue too long\n");
		return;
	}

	/*
	 * Every operation begins with a command byte and zero or more
	 * address bytes, distinguished by ALE or CLE being asserted.
	 * Rather than run one DMA operation per byte, queue them up and
	 * issue a single DMA transfer for the whole series once MTD
	 * deasserts both latch enables.
	 */
	if (ctrl & (NAND_ALE | NAND_CLE)) {
		if (data != NAND_CMD_NONE)
			nand_info->cmd_buf[nand_info->cmd_queue_len++] = data;
		return;
	}

	/*
	 * ALE and CLE are both deasserted: MTD wants the queued operation
	 * executed.  Nothing to do if the queue is empty.
	 */
	if (nand_info->cmd_queue_len == 0)
		return;

	/* Compile a DMA descriptor that sends the queued command bytes. */
	/* NOTE(review): a NULL return from mxs_nand_get_dma_desc() is not
	 * checked before use here — confirm the 4-descriptor pool can
	 * never be exhausted on this path. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_DMA_READ | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_CHAIN | MXS_DMA_DESC_DEC_SEM |
		MXS_DMA_DESC_WAIT4END | (3 << MXS_DMA_DESC_PIO_WORDS_OFFSET) |
		(nand_info->cmd_queue_len << MXS_DMA_DESC_BYTES_OFFSET);

	d->cmd.address = (dma_addr_t)nand_info->cmd_buf;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WRITE |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_CLE |
		GPMI_CTRL0_ADDRESS_INCREMENT |
		nand_info->cmd_queue_len;

	mxs_dma_desc_append(channel, d);

	/* Flush caches so the DMA engine sees the queued bytes. */
	mxs_nand_flush_cmd_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret)
		printf("MXS NAND: Error sending command\n");

	mxs_nand_return_dma_descs(nand_info);

	/* Reset the command queue. */
	nand_info->cmd_queue_len = 0;
}
340
341
342
343
344static int mxs_nand_device_ready(struct mtd_info *mtd)
345{
346 struct nand_chip *chip = mtd_to_nand(mtd);
347 struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
348 uint32_t tmp;
349
350 tmp = readl(&nand_info->gpmi_regs->hw_gpmi_stat);
351 tmp >>= (GPMI_STAT_READY_BUSY_OFFSET + nand_info->cur_chip);
352
353 return tmp & 1;
354}
355
356
357
358
359static void mxs_nand_select_chip(struct mtd_info *mtd, int chip)
360{
361 struct nand_chip *nand = mtd_to_nand(mtd);
362 struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
363
364 nand_info->cur_chip = chip;
365}
366
367
368
369
370
371
372
373
/*
 * Swap the block mark — physically the first byte of the OOB area —
 * with the data byte that overlays it in the BCH controller's view of
 * the page.  Used symmetrically on both read and write so the factory
 * bad-block mark survives the controller's interleaved layout.
 */
static void mxs_nand_swap_block_mark(struct bch_geometry *geo,
				     uint8_t *data_buf, uint8_t *oob_buf)
{
	uint32_t bit_offset = geo->block_mark_bit_offset;
	uint32_t buf_offset = geo->block_mark_byte_offset;

	uint32_t src;
	uint32_t dst;

	/*
	 * Extract the byte from the data area that overlays the block
	 * mark.  The ECC engine applies its own view to the bits in the
	 * page, so the physical mark does not, in general, sit on a byte
	 * boundary — assemble it from two adjacent data bytes.
	 */
	src = data_buf[buf_offset] >> bit_offset;
	src |= data_buf[buf_offset + 1] << (8 - bit_offset);

	dst = oob_buf[0];

	/* The overlaid data byte becomes the visible block mark... */
	oob_buf[0] = src;

	/* ...and the old OOB byte is spliced back into the data area. */
	data_buf[buf_offset] &= ~(0xff << bit_offset);
	data_buf[buf_offset + 1] &= 0xff << bit_offset;

	data_buf[buf_offset] |= dst << bit_offset;
	data_buf[buf_offset + 1] |= dst >> (8 - bit_offset);
}
402
403
404
405
/*
 * MTD read_buf hook: read @length raw bytes from the NAND chip into
 * @buf via a two-descriptor DMA chain (data read, then wait-for-ready),
 * bouncing through the DMA-aligned data_buf.
 */
static void mxs_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int length)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	int ret;

	/* The bounce buffer only holds one maximum-size page. */
	if (length > NAND_MAX_PAGESIZE) {
		printf("MXS NAND: DMA buffer too big\n");
		return;
	}

	if (!buf) {
		printf("MXS NAND: DMA buffer is NULL\n");
		return;
	}

	/* Compile the DMA descriptor - a descriptor that reads data. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_DMA_WRITE | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END |
		(1 << MXS_DMA_DESC_PIO_WORDS_OFFSET) |
		(length << MXS_DMA_DESC_BYTES_OFFSET);

	d->cmd.address = (dma_addr_t)nand_info->data_buf;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_READ |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		length;

	mxs_dma_desc_append(channel, d);

	/*
	 * A DMA descriptor that waits for the command to end and the chip
	 * to become ready again before the chain completes.
	 */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_DEC_SEM |
		MXS_DMA_DESC_WAIT4END | (1 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA;

	mxs_dma_desc_append(channel, d);

	/* Invalidate caches before DMA writes into the buffer. */
	mxs_nand_inval_data_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret) {
		printf("MXS NAND: DMA read error\n");
		goto rtn;
	}

	/* Invalidate caches again so the CPU reads the DMA'd bytes. */
	mxs_nand_inval_data_buf(nand_info);

	memcpy(buf, nand_info->data_buf, length);

rtn:
	mxs_nand_return_dma_descs(nand_info);
}
485
486
487
488
/*
 * MTD write_buf hook: send @length raw bytes from @buf to the NAND chip
 * via a single DMA descriptor, bouncing through the DMA-aligned
 * data_buf.
 */
static void mxs_nand_write_buf(struct mtd_info *mtd, const uint8_t *buf,
			       int length)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	int ret;

	/* The bounce buffer only holds one maximum-size page. */
	if (length > NAND_MAX_PAGESIZE) {
		printf("MXS NAND: DMA buffer too big\n");
		return;
	}

	if (!buf) {
		printf("MXS NAND: DMA buffer is NULL\n");
		return;
	}

	memcpy(nand_info->data_buf, buf, length);

	/* Compile the DMA descriptor - a descriptor that writes data. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_DMA_READ | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END |
		(1 << MXS_DMA_DESC_PIO_WORDS_OFFSET) |
		(length << MXS_DMA_DESC_BYTES_OFFSET);

	d->cmd.address = (dma_addr_t)nand_info->data_buf;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WRITE |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		length;

	mxs_dma_desc_append(channel, d);

	/* Flush caches so the DMA engine sees the copied bytes. */
	mxs_nand_flush_data_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret)
		printf("MXS NAND: DMA write error\n");

	mxs_nand_return_dma_descs(nand_info);
}
539
540
541
542
/*
 * MTD read_byte hook: read one byte via the DMA read path.
 *
 * The byte is zero-initialized because mxs_nand_read_buf() returns
 * without touching the buffer on error (NULL buf, oversize length, or
 * DMA failure); previously that path returned an uninitialized value.
 */
static uint8_t mxs_nand_read_byte(struct mtd_info *mtd)
{
	uint8_t buf = 0;

	mxs_nand_read_buf(mtd, &buf, 1);

	return buf;
}
549
550
551
552
/*
 * Read a page with hardware (BCH) ECC.
 *
 * Builds a four-descriptor DMA chain: wait for ready, read+decode the
 * page through the BCH engine into data_buf/oob_buf, wait for the
 * decode to drain, then disable BCH.  Afterwards the per-chunk status
 * bytes in the auxiliary buffer are folded into mtd->ecc_stats, the
 * block mark is swapped back, and the data is copied to the caller.
 *
 * Returns 0 on success or a negative/driver error from the DMA or BCH
 * wait.
 */
static int mxs_nand_ecc_read_page(struct mtd_info *mtd, struct nand_chip *nand,
				  uint8_t *buf, int oob_required,
				  int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct bch_geometry *geo = &nand_info->bch_geometry;
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	uint32_t corrected = 0, failed = 0;
	uint8_t *status;
	int i, ret;

	/* Compile the DMA descriptor - wait for ready. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN |
		MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_WAIT4END |
		(1 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA;

	mxs_dma_desc_append(channel, d);

	/* Compile the DMA descriptor - read the page with BCH decode. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN |
		MXS_DMA_DESC_WAIT4END | (6 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_READ |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		(mtd->writesize + mtd->oobsize);
	d->cmd.pio_words[1] = 0;
	d->cmd.pio_words[2] =
		GPMI_ECCCTRL_ENABLE_ECC |
		GPMI_ECCCTRL_ECC_CMD_DECODE |
		GPMI_ECCCTRL_BUFFER_MASK_BCH_PAGE;
	d->cmd.pio_words[3] = mtd->writesize + mtd->oobsize;
	/* The BCH engine DMAs decoded data and metadata/status itself. */
	d->cmd.pio_words[4] = (dma_addr_t)nand_info->data_buf;
	d->cmd.pio_words[5] = (dma_addr_t)nand_info->oob_buf;

	mxs_dma_desc_append(channel, d);

	/* Compile the DMA descriptor - wait for the read to drain. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN |
		MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_WAIT4END |
		(3 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		(mtd->writesize + mtd->oobsize);
	d->cmd.pio_words[1] = 0;
	d->cmd.pio_words[2] = 0;	/* Writing zero disables the BCH block. */

	mxs_dma_desc_append(channel, d);

	/* Compile the DMA descriptor - deassert the NAND lock and interrupt. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_DEC_SEM;

	d->cmd.address = 0;

	mxs_dma_desc_append(channel, d);

	/* Invalidate caches before DMA writes into the buffers. */
	mxs_nand_inval_data_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret) {
		printf("MXS NAND: DMA read error\n");
		goto rtn;
	}

	ret = mxs_nand_wait_for_bch_complete(nand_info);
	if (ret) {
		printf("MXS NAND: BCH read timeout\n");
		goto rtn;
	}

	/* Invalidate caches again so the CPU reads the DMA'd bytes. */
	mxs_nand_inval_data_buf(nand_info);

	/* Read DMA completed, now do the mark swapping. */
	mxs_nand_swap_block_mark(geo, nand_info->data_buf, nand_info->oob_buf);

	/*
	 * Loop over the per-chunk status bytes and accumulate the ECC
	 * statistics: 0x00 = clean, 0xff = erased, 0xfe = uncorrectable,
	 * anything else = number of corrected bitflips in that chunk.
	 */
	status = nand_info->oob_buf + mxs_nand_aux_status_offset();
	for (i = 0; i < geo->ecc_chunk_count; i++) {
		if (status[i] == 0x00)
			continue;

		if (status[i] == 0xff)
			continue;

		if (status[i] == 0xfe) {
			failed++;
			continue;
		}

		corrected += status[i];
	}

	/* Propagate ECC status to the owning MTD. */
	mtd->ecc_stats.failed += failed;
	mtd->ecc_stats.corrected += corrected;

	/*
	 * Deliver the OOB bytes: fill the caller's buffer with set bits
	 * and copy only the block mark.  Block mark swapping has already
	 * been done above, so the first byte of the auxiliary buffer
	 * holds the (possibly restored) block mark.  See
	 * mxs_nand_ecc_read_oob() for the OOB delivery policy.
	 */
	memset(nand->oob_poi, 0xff, mtd->oobsize);

	nand->oob_poi[0] = nand_info->oob_buf[0];

	memcpy(buf, nand_info->data_buf, mtd->writesize);

rtn:
	mxs_nand_return_dma_descs(nand_info);

	return ret;
}
700
701
702
703
704static int mxs_nand_ecc_write_page(struct mtd_info *mtd,
705 struct nand_chip *nand, const uint8_t *buf,
706 int oob_required, int page)
707{
708 struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
709 struct bch_geometry *geo = &nand_info->bch_geometry;
710 struct mxs_dma_desc *d;
711 uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
712 int ret;
713
714 memcpy(nand_info->data_buf, buf, mtd->writesize);
715 memcpy(nand_info->oob_buf, nand->oob_poi, mtd->oobsize);
716
717
718 mxs_nand_swap_block_mark(geo, nand_info->data_buf, nand_info->oob_buf);
719
720
721 d = mxs_nand_get_dma_desc(nand_info);
722 d->cmd.data =
723 MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ |
724 MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END |
725 (6 << MXS_DMA_DESC_PIO_WORDS_OFFSET);
726
727 d->cmd.address = 0;
728
729 d->cmd.pio_words[0] =
730 GPMI_CTRL0_COMMAND_MODE_WRITE |
731 GPMI_CTRL0_WORD_LENGTH |
732 (nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
733 GPMI_CTRL0_ADDRESS_NAND_DATA;
734 d->cmd.pio_words[1] = 0;
735 d->cmd.pio_words[2] =
736 GPMI_ECCCTRL_ENABLE_ECC |
737 GPMI_ECCCTRL_ECC_CMD_ENCODE |
738 GPMI_ECCCTRL_BUFFER_MASK_BCH_PAGE;
739 d->cmd.pio_words[3] = (mtd->writesize + mtd->oobsize);
740 d->cmd.pio_words[4] = (dma_addr_t)nand_info->data_buf;
741 d->cmd.pio_words[5] = (dma_addr_t)nand_info->oob_buf;
742
743 mxs_dma_desc_append(channel, d);
744
745
746 mxs_nand_flush_data_buf(nand_info);
747
748
749 ret = mxs_dma_go(channel);
750 if (ret) {
751 printf("MXS NAND: DMA write error\n");
752 goto rtn;
753 }
754
755 ret = mxs_nand_wait_for_bch_complete(nand_info);
756 if (ret) {
757 printf("MXS NAND: BCH write timeout\n");
758 goto rtn;
759 }
760
761rtn:
762 mxs_nand_return_dma_descs(nand_info);
763 return 0;
764}
765
766
767
768
769
770
771
772static int mxs_nand_hook_read_oob(struct mtd_info *mtd, loff_t from,
773 struct mtd_oob_ops *ops)
774{
775 struct nand_chip *chip = mtd_to_nand(mtd);
776 struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
777 int ret;
778
779 if (ops->mode == MTD_OPS_RAW)
780 nand_info->raw_oob_mode = 1;
781 else
782 nand_info->raw_oob_mode = 0;
783
784 ret = nand_info->hooked_read_oob(mtd, from, ops);
785
786 nand_info->raw_oob_mode = 0;
787
788 return ret;
789}
790
791
792
793
794
795
796
797static int mxs_nand_hook_write_oob(struct mtd_info *mtd, loff_t to,
798 struct mtd_oob_ops *ops)
799{
800 struct nand_chip *chip = mtd_to_nand(mtd);
801 struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
802 int ret;
803
804 if (ops->mode == MTD_OPS_RAW)
805 nand_info->raw_oob_mode = 1;
806 else
807 nand_info->raw_oob_mode = 0;
808
809 ret = nand_info->hooked_write_oob(mtd, to, ops);
810
811 nand_info->raw_oob_mode = 0;
812
813 return ret;
814}
815
816
817
818
819
820
821
822static int mxs_nand_hook_block_markbad(struct mtd_info *mtd, loff_t ofs)
823{
824 struct nand_chip *chip = mtd_to_nand(mtd);
825 struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
826 int ret;
827
828 nand_info->marking_block_bad = 1;
829
830 ret = nand_info->hooked_block_markbad(mtd, ofs);
831
832 nand_info->marking_block_bad = 0;
833
834 return ret;
835}
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
/*
 * MTD ecc.read_oob hook.  The BCH engine owns the spare area layout,
 * so the OOB cannot be delivered faithfully; the policy implemented
 * here is:
 *   - raw mode: read the physical OOB bytes straight from the chip;
 *   - normal mode: report all-0xff OOB except the first byte, which is
 *     read from the chip so the block mark stays visible.
 */
static int mxs_nand_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *nand,
				 int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);

	/* raw_oob_mode is set by mxs_nand_hook_read_oob() for MTD_OPS_RAW. */
	if (nand_info->raw_oob_mode) {
		/*
		 * If control arrives here, we're doing a "raw" read.  Send
		 * the command to read the conventional OOB and read it.
		 */
		nand->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
		nand->read_buf(mtd, nand->oob_poi, mtd->oobsize);
	} else {
		/*
		 * If control arrives here, we're not doing a "raw" read.
		 * Fill the OOB with set bits and correct the block mark.
		 */
		memset(nand->oob_poi, 0xff, mtd->oobsize);

		nand->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
		mxs_nand_read_buf(mtd, nand->oob_poi, 1);
	}

	return 0;

}
912
913
914
915
916static int mxs_nand_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *nand,
917 int page)
918{
919 struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
920 uint8_t block_mark = 0;
921
922
923
924
925
926
927
928
929
930
931 if (!nand_info->marking_block_bad) {
932 printf("NXS NAND: Writing OOB isn't supported\n");
933 return -EIO;
934 }
935
936
937 nand->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
938 nand->write_buf(mtd, &block_mark, 1);
939 nand->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
940
941
942 if (nand->waitfunc(mtd, nand) & NAND_STATUS_FAIL)
943 return -EIO;
944
945 return 0;
946}
947
948
949
950
951
952
953
954
955
956
957
958
959
960
/*
 * MTD block_bad hook that claims every block is good.  The generic
 * OOB-based check cannot be used because the BCH layout overlays the
 * spare area; the block mark is instead preserved via
 * mxs_nand_swap_block_mark() and exposed as the first OOB byte.
 */
static int mxs_nand_block_bad(struct mtd_info *mtd, loff_t ofs)
{
	return 0;
}
965
966static int mxs_nand_set_geometry(struct mtd_info *mtd, struct bch_geometry *geo)
967{
968 struct nand_chip *chip = mtd_to_nand(mtd);
969 struct nand_chip *nand = mtd_to_nand(mtd);
970 struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
971
972 if (chip->ecc.strength > 0 && chip->ecc.size > 0)
973 return mxs_nand_calc_ecc_layout_by_info(geo, mtd,
974 chip->ecc.strength, chip->ecc.size);
975
976 if (nand_info->use_minimum_ecc ||
977 mxs_nand_calc_ecc_layout(geo, mtd)) {
978 if (!(chip->ecc_strength_ds > 0 && chip->ecc_step_ds > 0))
979 return -EINVAL;
980
981 return mxs_nand_calc_ecc_layout_by_info(geo, mtd,
982 chip->ecc_strength_ds, chip->ecc_step_ds);
983 }
984
985 return 0;
986}
987
988
989
990
991
992
993
994
995
996
997int mxs_nand_setup_ecc(struct mtd_info *mtd)
998{
999 struct nand_chip *nand = mtd_to_nand(mtd);
1000 struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
1001 struct bch_geometry *geo = &nand_info->bch_geometry;
1002 struct mxs_bch_regs *bch_regs = nand_info->bch_regs;
1003 uint32_t tmp;
1004 int ret;
1005
1006 ret = mxs_nand_set_geometry(mtd, geo);
1007 if (ret)
1008 return ret;
1009
1010 mxs_nand_calc_mark_offset(geo, mtd->writesize);
1011
1012
1013 mxs_reset_block(&bch_regs->hw_bch_ctrl_reg);
1014
1015
1016 tmp = (geo->ecc_chunk_count - 1) << BCH_FLASHLAYOUT0_NBLOCKS_OFFSET;
1017 tmp |= MXS_NAND_METADATA_SIZE << BCH_FLASHLAYOUT0_META_SIZE_OFFSET;
1018 tmp |= (geo->ecc_strength >> 1) << BCH_FLASHLAYOUT0_ECC0_OFFSET;
1019 tmp |= geo->ecc_chunk_size >> MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT;
1020 tmp |= (geo->gf_len == 14 ? 1 : 0) <<
1021 BCH_FLASHLAYOUT0_GF13_0_GF14_1_OFFSET;
1022 writel(tmp, &bch_regs->hw_bch_flash0layout0);
1023
1024 tmp = (mtd->writesize + mtd->oobsize)
1025 << BCH_FLASHLAYOUT1_PAGE_SIZE_OFFSET;
1026 tmp |= (geo->ecc_strength >> 1) << BCH_FLASHLAYOUT1_ECCN_OFFSET;
1027 tmp |= geo->ecc_chunk_size >> MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT;
1028 tmp |= (geo->gf_len == 14 ? 1 : 0) <<
1029 BCH_FLASHLAYOUT1_GF13_0_GF14_1_OFFSET;
1030 writel(tmp, &bch_regs->hw_bch_flash0layout1);
1031
1032
1033 writel(0, &bch_regs->hw_bch_layoutselect);
1034
1035
1036 writel(BCH_CTRL_COMPLETE_IRQ_EN, &bch_regs->hw_bch_ctrl_set);
1037
1038
1039 if (mtd->_read_oob != mxs_nand_hook_read_oob) {
1040 nand_info->hooked_read_oob = mtd->_read_oob;
1041 mtd->_read_oob = mxs_nand_hook_read_oob;
1042 }
1043
1044 if (mtd->_write_oob != mxs_nand_hook_write_oob) {
1045 nand_info->hooked_write_oob = mtd->_write_oob;
1046 mtd->_write_oob = mxs_nand_hook_write_oob;
1047 }
1048
1049 if (mtd->_block_markbad != mxs_nand_hook_block_markbad) {
1050 nand_info->hooked_block_markbad = mtd->_block_markbad;
1051 mtd->_block_markbad = mxs_nand_hook_block_markbad;
1052 }
1053
1054 return 0;
1055}
1056
1057
1058
1059
1060int mxs_nand_alloc_buffers(struct mxs_nand_info *nand_info)
1061{
1062 uint8_t *buf;
1063 const int size = NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE;
1064
1065 nand_info->data_buf_size = roundup(size, MXS_DMA_ALIGNMENT);
1066
1067
1068 buf = memalign(MXS_DMA_ALIGNMENT, nand_info->data_buf_size);
1069 if (!buf) {
1070 printf("MXS NAND: Error allocating DMA buffers\n");
1071 return -ENOMEM;
1072 }
1073
1074 memset(buf, 0, nand_info->data_buf_size);
1075
1076 nand_info->data_buf = buf;
1077 nand_info->oob_buf = buf + NAND_MAX_PAGESIZE;
1078
1079 nand_info->cmd_buf = memalign(MXS_DMA_ALIGNMENT,
1080 MXS_NAND_COMMAND_BUFFER_SIZE);
1081 if (!nand_info->cmd_buf) {
1082 free(buf);
1083 printf("MXS NAND: Error allocating command buffers\n");
1084 return -ENOMEM;
1085 }
1086 memset(nand_info->cmd_buf, 0, MXS_NAND_COMMAND_BUFFER_SIZE);
1087 nand_info->cmd_queue_len = 0;
1088
1089 return 0;
1090}
1091
1092
1093
1094
/*
 * Initialize the DMA descriptor pool, the APBH DMA channels used by the
 * GPMI, and reset the GPMI/BCH blocks into BCH mode.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * allocated/initialized so far is torn down via the goto ladder.
 */
static int mxs_nand_init_dma(struct mxs_nand_info *info)
{
	int i = 0, j, ret = 0;

	info->desc = malloc(sizeof(struct mxs_dma_desc *) *
			    MXS_NAND_DMA_DESCRIPTOR_COUNT);
	if (!info->desc) {
		ret = -ENOMEM;
		goto err1;
	}

	/* Allocate the DMA descriptors. */
	for (i = 0; i < MXS_NAND_DMA_DESCRIPTOR_COUNT; i++) {
		info->desc[i] = mxs_dma_desc_alloc();
		if (!info->desc[i]) {
			ret = -ENOMEM;
			goto err2;
		}
	}

	/* Init the DMA controller and the per-chip-select channels. */
	mxs_dma_init();
	for (j = MXS_DMA_CHANNEL_AHB_APBH_GPMI0;
	     j <= MXS_DMA_CHANNEL_AHB_APBH_GPMI7; j++) {
		ret = mxs_dma_init_channel(j);
		if (ret)
			goto err3;
	}

	/* Reset the GPMI block. */
	mxs_reset_block(&info->gpmi_regs->hw_gpmi_ctrl0_reg);
	mxs_reset_block(&info->bch_regs->hw_bch_ctrl_reg);

	/*
	 * Choose NAND mode, set the IRQ polarity, disable write protection
	 * and select BCH ECC.
	 */
	clrsetbits_le32(&info->gpmi_regs->hw_gpmi_ctrl1,
			GPMI_CTRL1_GPMI_MODE,
			GPMI_CTRL1_ATA_IRQRDY_POLARITY | GPMI_CTRL1_DEV_RESET |
			GPMI_CTRL1_BCH_MODE);

	return 0;

err3:
	/* Release the channels initialized before the failure. */
	for (--j; j >= MXS_DMA_CHANNEL_AHB_APBH_GPMI0; j--)
		mxs_dma_release(j);
err2:
	/* Free the descriptors allocated before the failure. */
	for (--i; i >= 0; i--)
		mxs_dma_desc_free(info->desc[i]);
	free(info->desc);
err1:
	if (ret == -ENOMEM)
		printf("MXS NAND: Unable to allocate DMA descriptors\n");
	return ret;
}
1151
1152int mxs_nand_init_spl(struct nand_chip *nand)
1153{
1154 struct mxs_nand_info *nand_info;
1155 int err;
1156
1157 nand_info = malloc(sizeof(struct mxs_nand_info));
1158 if (!nand_info) {
1159 printf("MXS NAND: Failed to allocate private data\n");
1160 return -ENOMEM;
1161 }
1162 memset(nand_info, 0, sizeof(struct mxs_nand_info));
1163
1164 nand_info->gpmi_regs = (struct mxs_gpmi_regs *)MXS_GPMI_BASE;
1165 nand_info->bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
1166
1167 if (is_mx6sx() || is_mx7())
1168 nand_info->max_ecc_strength_supported = 62;
1169 else
1170 nand_info->max_ecc_strength_supported = 40;
1171
1172 err = mxs_nand_alloc_buffers(nand_info);
1173 if (err)
1174 return err;
1175
1176 err = mxs_nand_init_dma(nand_info);
1177 if (err)
1178 return err;
1179
1180 nand_set_controller_data(nand, nand_info);
1181
1182 nand->options |= NAND_NO_SUBPAGE_WRITE;
1183
1184 nand->cmd_ctrl = mxs_nand_cmd_ctrl;
1185 nand->dev_ready = mxs_nand_device_ready;
1186 nand->select_chip = mxs_nand_select_chip;
1187
1188 nand->read_byte = mxs_nand_read_byte;
1189 nand->read_buf = mxs_nand_read_buf;
1190
1191 nand->ecc.read_page = mxs_nand_ecc_read_page;
1192
1193 nand->ecc.mode = NAND_ECC_HW;
1194
1195 return 0;
1196}
1197
1198int mxs_nand_init_ctrl(struct mxs_nand_info *nand_info)
1199{
1200 struct mtd_info *mtd;
1201 struct nand_chip *nand;
1202 int err;
1203
1204 nand = &nand_info->chip;
1205 mtd = nand_to_mtd(nand);
1206 err = mxs_nand_alloc_buffers(nand_info);
1207 if (err)
1208 return err;
1209
1210 err = mxs_nand_init_dma(nand_info);
1211 if (err)
1212 goto err_free_buffers;
1213
1214 memset(&fake_ecc_layout, 0, sizeof(fake_ecc_layout));
1215
1216#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
1217 nand->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
1218#endif
1219
1220 nand_set_controller_data(nand, nand_info);
1221 nand->options |= NAND_NO_SUBPAGE_WRITE;
1222
1223 if (nand_info->dev)
1224 nand->flash_node = dev_of_offset(nand_info->dev);
1225
1226 nand->cmd_ctrl = mxs_nand_cmd_ctrl;
1227
1228 nand->dev_ready = mxs_nand_device_ready;
1229 nand->select_chip = mxs_nand_select_chip;
1230 nand->block_bad = mxs_nand_block_bad;
1231
1232 nand->read_byte = mxs_nand_read_byte;
1233
1234 nand->read_buf = mxs_nand_read_buf;
1235 nand->write_buf = mxs_nand_write_buf;
1236
1237
1238 if (nand_scan_ident(mtd, CONFIG_SYS_MAX_NAND_DEVICE, NULL))
1239 goto err_free_buffers;
1240
1241 if (mxs_nand_setup_ecc(mtd))
1242 goto err_free_buffers;
1243
1244 nand->ecc.read_page = mxs_nand_ecc_read_page;
1245 nand->ecc.write_page = mxs_nand_ecc_write_page;
1246 nand->ecc.read_oob = mxs_nand_ecc_read_oob;
1247 nand->ecc.write_oob = mxs_nand_ecc_write_oob;
1248
1249 nand->ecc.layout = &fake_ecc_layout;
1250 nand->ecc.mode = NAND_ECC_HW;
1251 nand->ecc.size = nand_info->bch_geometry.ecc_chunk_size;
1252 nand->ecc.strength = nand_info->bch_geometry.ecc_strength;
1253
1254
1255 err = nand_scan_tail(mtd);
1256 if (err)
1257 goto err_free_buffers;
1258
1259 err = nand_register(0, mtd);
1260 if (err)
1261 goto err_free_buffers;
1262
1263 return 0;
1264
1265err_free_buffers:
1266 free(nand_info->data_buf);
1267 free(nand_info->cmd_buf);
1268
1269 return err;
1270}
1271
1272#ifndef CONFIG_NAND_MXS_DT
1273void board_nand_init(void)
1274{
1275 struct mxs_nand_info *nand_info;
1276
1277 nand_info = malloc(sizeof(struct mxs_nand_info));
1278 if (!nand_info) {
1279 printf("MXS NAND: Failed to allocate private data\n");
1280 return;
1281 }
1282 memset(nand_info, 0, sizeof(struct mxs_nand_info));
1283
1284 nand_info->gpmi_regs = (struct mxs_gpmi_regs *)MXS_GPMI_BASE;
1285 nand_info->bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
1286
1287
1288 if (is_mx6sx() || is_mx7())
1289 nand_info->max_ecc_strength_supported = 62;
1290 else
1291 nand_info->max_ecc_strength_supported = 40;
1292
1293#ifdef CONFIG_NAND_MXS_USE_MINIMUM_ECC
1294 nand_info->use_minimum_ecc = true;
1295#endif
1296
1297 if (mxs_nand_init_ctrl(nand_info) < 0)
1298 goto err;
1299
1300 return;
1301
1302err:
1303 free(nand_info);
1304}
1305#endif
1306