1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16#include <common.h>
17#include <cpu_func.h>
18#include <dm.h>
19#include <dm/device_compat.h>
20#include <malloc.h>
21#include <mxs_nand.h>
22#include <asm/arch/clock.h>
23#include <asm/arch/imx-regs.h>
24#include <asm/arch/sys_proto.h>
25#include <asm/cache.h>
26#include <asm/io.h>
27#include <asm/mach-imx/regs-bch.h>
28#include <asm/mach-imx/regs-gpmi.h>
29#include <linux/errno.h>
30#include <linux/mtd/rawnand.h>
31#include <linux/sizes.h>
32#include <linux/types.h>
33
34#define MXS_NAND_DMA_DESCRIPTOR_COUNT 4
35
36#if defined(CONFIG_MX6) || defined(CONFIG_MX7) || defined(CONFIG_IMX8) || \
37 defined(CONFIG_IMX8M)
38#define MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT 2
39#else
40#define MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT 0
41#endif
42#define MXS_NAND_METADATA_SIZE 10
43#define MXS_NAND_BITS_PER_ECC_LEVEL 13
44
45#if !defined(CONFIG_SYS_CACHELINE_SIZE) || CONFIG_SYS_CACHELINE_SIZE < 32
46#define MXS_NAND_COMMAND_BUFFER_SIZE 32
47#else
48#define MXS_NAND_COMMAND_BUFFER_SIZE CONFIG_SYS_CACHELINE_SIZE
49#endif
50
51#define MXS_NAND_BCH_TIMEOUT 10000
52
53struct nand_ecclayout fake_ecc_layout;
54
55
56
57
#if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
/*
 * Cache maintenance helpers for the DMA buffers.
 *
 * The GPMI/BCH engines access the data and command buffers by DMA, so the
 * CPU caches must be cleaned before a DMA transfer reads from memory and
 * invalidated before the CPU reads data the DMA engine wrote.
 */

/* Clean the page data/OOB buffer to memory before a DMA-out transfer. */
static void mxs_nand_flush_data_buf(struct mxs_nand_info *info)
{
	uint32_t addr = (uintptr_t)info->data_buf;

	flush_dcache_range(addr, addr + info->data_buf_size);
}

/* Drop cache lines so the CPU sees data the DMA engine wrote to the buffer. */
static void mxs_nand_inval_data_buf(struct mxs_nand_info *info)
{
	uint32_t addr = (uintptr_t)info->data_buf;

	invalidate_dcache_range(addr, addr + info->data_buf_size);
}

/* Clean the command buffer to memory before the GPMI fetches it via DMA. */
static void mxs_nand_flush_cmd_buf(struct mxs_nand_info *info)
{
	uint32_t addr = (uintptr_t)info->cmd_buf;

	flush_dcache_range(addr, addr + MXS_NAND_COMMAND_BUFFER_SIZE);
}
#else
/* With the data cache disabled, no cache maintenance is needed. */
static inline void mxs_nand_flush_data_buf(struct mxs_nand_info *info) {}
static inline void mxs_nand_inval_data_buf(struct mxs_nand_info *info) {}
static inline void mxs_nand_flush_cmd_buf(struct mxs_nand_info *info) {}
#endif
84
85static struct mxs_dma_desc *mxs_nand_get_dma_desc(struct mxs_nand_info *info)
86{
87 struct mxs_dma_desc *desc;
88
89 if (info->desc_index >= MXS_NAND_DMA_DESCRIPTOR_COUNT) {
90 printf("MXS NAND: Too many DMA descriptors requested\n");
91 return NULL;
92 }
93
94 desc = info->desc[info->desc_index];
95 info->desc_index++;
96
97 return desc;
98}
99
100static void mxs_nand_return_dma_descs(struct mxs_nand_info *info)
101{
102 int i;
103 struct mxs_dma_desc *desc;
104
105 for (i = 0; i < info->desc_index; i++) {
106 desc = info->desc[i];
107 memset(desc, 0, sizeof(struct mxs_dma_desc));
108 desc->address = (dma_addr_t)desc;
109 }
110
111 info->desc_index = 0;
112}
113
114static uint32_t mxs_nand_aux_status_offset(void)
115{
116 return (MXS_NAND_METADATA_SIZE + 0x3) & ~0x3;
117}
118
/*
 * Check whether the byte at the bad-block-marker position (bit offset
 * mtd->writesize * 8 in the raw page) lands inside the data part of one of
 * the ECC chunks for the given geometry.
 *
 * @geo:       BCH geometry to evaluate (chunk sizes, ECC strength, gf_len)
 * @mtd:       MTD device, used for the page size
 * @chunk_num: on success, set to the index of the chunk holding the marker
 *
 * Returns true (and fills in @chunk_num) when the marker falls in a data
 * chunk; false when it falls in a parity area or when chunk0 and chunkn
 * sizes differ (a layout this helper does not handle).
 */
static inline bool mxs_nand_bbm_in_data_chunk(struct bch_geometry *geo,
					      struct mtd_info *mtd,
					      unsigned int *chunk_num)
{
	unsigned int i, j;

	if (geo->ecc_chunk0_size != geo->ecc_chunkn_size) {
		dev_err(mtd->dev, "The size of chunk0 must equal to chunkn\n");
		return false;
	}

	/*
	 * i = number of complete (data + parity) chunk units that fit before
	 * the marker bit, after subtracting the metadata area.
	 */
	i = (mtd->writesize * 8 - MXS_NAND_METADATA_SIZE * 8) /
		(geo->gf_len * geo->ecc_strength +
				geo->ecc_chunkn_size * 8);

	/* j = bit offset of the marker within the following chunk unit. */
	j = (mtd->writesize * 8 - MXS_NAND_METADATA_SIZE * 8) -
		(geo->gf_len * geo->ecc_strength +
				geo->ecc_chunkn_size * 8) * i;

	if (j < geo->ecc_chunkn_size * 8) {
		/* The marker sits in the data part of chunk i + 1. */
		*chunk_num = i + 1;
		dev_dbg(mtd->dev, "Set ecc to %d and bbm in chunk %d\n",
			geo->ecc_strength, *chunk_num);
		return true;
	}

	return false;
}
147
/*
 * Build the BCH geometry from the chip's advertised ECC requirement
 * (@ecc_strength bits of correction per @ecc_step bytes of data).
 *
 * Returns 0 on success, -EINVAL when the step size is not 512 B or 1 KiB,
 * when the chunk size is smaller than the OOB area, or when the required
 * strength exceeds what this controller supports.
 */
static inline int mxs_nand_calc_ecc_layout_by_info(struct bch_geometry *geo,
						   struct mtd_info *mtd,
						   unsigned int ecc_strength,
						   unsigned int ecc_step)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	unsigned int block_mark_bit_offset;

	/* The Galois field width follows from the chunk size. */
	switch (ecc_step) {
	case SZ_512:
		geo->gf_len = 13;
		break;
	case SZ_1K:
		geo->gf_len = 14;
		break;
	default:
		return -EINVAL;
	}

	geo->ecc_chunk0_size = ecc_step;
	geo->ecc_chunkn_size = ecc_step;
	/* The BCH engine works in units of two bits of correction. */
	geo->ecc_strength = round_up(ecc_strength, 2);

	/* Keep the OOB area coverable by a single chunk. */
	if (geo->ecc_chunkn_size < mtd->oobsize)
		return -EINVAL;

	if (geo->ecc_strength > nand_info->max_ecc_strength_supported)
		return -EINVAL;

	geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunkn_size;

	/*
	 * Locate the bit position, within the raw page, that corresponds to
	 * the factory bad block marker at offset mtd->writesize: everything
	 * after the metadata and the parity of all but the last chunk.
	 */
	block_mark_bit_offset = mtd->writesize * 8 -
		(geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
				+ MXS_NAND_METADATA_SIZE * 8);

	geo->block_mark_byte_offset = block_mark_bit_offset / 8;
	geo->block_mark_bit_offset = block_mark_bit_offset % 8;

	return 0;
}
191
/*
 * Compute the BCH geometry the way the original driver always did:
 * 512-byte chunks (1 KiB for chips whose OOB is larger than 512 bytes)
 * and the strongest even ECC level that still fits in the OOB area.
 */
static inline int mxs_nand_legacy_calc_ecc_layout(struct bch_geometry *geo,
						  struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	unsigned int block_mark_bit_offset;

	/* Default: 512-byte chunks over GF(2^13). */
	geo->gf_len = 13;

	geo->ecc_chunk0_size = 512;
	geo->ecc_chunkn_size = 512;

	/* Large-OOB chips get 1 KiB chunks and GF(2^14). */
	if (geo->ecc_chunkn_size < mtd->oobsize) {
		geo->gf_len = 14;
		geo->ecc_chunk0_size *= 2;
		geo->ecc_chunkn_size *= 2;
	}

	geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunkn_size;

	/*
	 * Pick the strongest ECC that fits: the OOB bits left after the
	 * metadata, divided by the parity cost (gf_len bits per bit of
	 * correction) of every chunk; then round down to an even value and
	 * clamp to the controller's maximum.
	 */
	geo->ecc_strength = ((mtd->oobsize - MXS_NAND_METADATA_SIZE) * 8)
			    / (geo->gf_len * geo->ecc_chunk_count);

	geo->ecc_strength = min(round_down(geo->ecc_strength, 2),
				nand_info->max_ecc_strength_supported);

	/*
	 * Bit position within the raw page of the byte that, in the BCH
	 * layout, occupies the factory bad-block-marker location.
	 */
	block_mark_bit_offset = mtd->writesize * 8 -
		(geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
				+ MXS_NAND_METADATA_SIZE * 8);

	geo->block_mark_byte_offset = block_mark_bit_offset / 8;
	geo->block_mark_bit_offset = block_mark_bit_offset % 8;

	return 0;
}
237
/*
 * Compute a BCH geometry for chips with a large OOB area, starting from
 * the chip's advertised minimum ECC and raising the strength (in steps of
 * two) until the bad block marker falls inside a data chunk.  If no such
 * strength fits the OOB, fall back to a layout with a dedicated metadata
 * chunk (chunk0 size 0, one extra chunk, ecc_for_meta set).
 *
 * Returns 0 on success, -EINVAL when the chip advertises no ECC
 * requirement or no workable layout exists.
 */
static inline int mxs_nand_calc_ecc_for_large_oob(struct bch_geometry *geo,
						  struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	unsigned int block_mark_bit_offset;
	unsigned int max_ecc;
	unsigned int bbm_chunk;
	unsigned int i;

	/* This path needs the datasheet ECC requirement to work from. */
	if (!(chip->ecc_strength_ds > 0 && chip->ecc_step_ds > 0))
		return -EINVAL;
	geo->ecc_strength = chip->ecc_strength_ds;

	/* Fixed 1 KiB chunks over GF(2^14) for large-OOB devices. */
	geo->gf_len = 14;
	geo->ecc_chunk0_size = 1024;
	geo->ecc_chunkn_size = 1024;
	geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunkn_size;
	/* Strongest even strength the OOB can carry for this chunk count. */
	max_ecc = ((mtd->oobsize - MXS_NAND_METADATA_SIZE) * 8)
		  / (geo->gf_len * geo->ecc_chunk_count);
	max_ecc = min(round_down(max_ecc, 2),
		      nand_info->max_ecc_strength_supported);

	/*
	 * Raise the strength from the datasheet minimum until the bad block
	 * marker lands inside a data chunk, or we run out of OOB.
	 */
	geo->ecc_strength = chip->ecc_strength_ds;
	while (!(geo->ecc_strength > max_ecc)) {
		if (mxs_nand_bbm_in_data_chunk(geo, mtd, &bbm_chunk))
			break;
		geo->ecc_strength += 2;
	}

	/*
	 * No strength placed the marker in a data chunk: switch to a layout
	 * with a separate, ECC-protected metadata chunk.
	 */
	if (geo->ecc_strength > max_ecc) {
		geo->ecc_strength = chip->ecc_strength_ds;

		/* Chunk 0 carries only metadata; one extra chunk overall. */
		geo->ecc_chunk0_size = 0;
		geo->ecc_chunk_count = (mtd->writesize / geo->ecc_chunkn_size) + 1;
		geo->ecc_for_meta = 1;

		/* The OOB still has to hold all parity plus the metadata. */
		if (mtd->oobsize * 8 < MXS_NAND_METADATA_SIZE * 8 +
		    geo->gf_len * geo->ecc_strength
		    * geo->ecc_chunk_count) {
			printf("unsupported NAND chip with new layout\n");
			return -EINVAL;
		}

		/* Chunk index holding the bad block marker in this layout. */
		bbm_chunk = (mtd->writesize * 8 - MXS_NAND_METADATA_SIZE * 8 -
			     geo->gf_len * geo->ecc_strength) /
			     (geo->gf_len * geo->ecc_strength +
			     geo->ecc_chunkn_size * 8) + 1;
	}

	/* Number of chunks from the marker chunk to the end of the page. */
	i = (mtd->writesize / geo->ecc_chunkn_size) - bbm_chunk + 1;

	block_mark_bit_offset = mtd->writesize * 8 -
		(geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - i)
				+ MXS_NAND_METADATA_SIZE * 8);

	geo->block_mark_byte_offset = block_mark_bit_offset / 8;
	geo->block_mark_bit_offset = block_mark_bit_offset % 8;

	return 0;
}
308
309
310
311
312static int mxs_nand_wait_for_bch_complete(struct mxs_nand_info *nand_info)
313{
314 int timeout = MXS_NAND_BCH_TIMEOUT;
315 int ret;
316
317 ret = mxs_wait_mask_set(&nand_info->bch_regs->hw_bch_ctrl_reg,
318 BCH_CTRL_COMPLETE_IRQ, timeout);
319
320 writel(BCH_CTRL_COMPLETE_IRQ, &nand_info->bch_regs->hw_bch_ctrl_clr);
321
322 return ret;
323}
324
325
326
327
328
329
330
331
332
333
334
/*
 * NAND command/address control hook.
 *
 * Command and address bytes arriving with ALE/CLE set are only queued in
 * cmd_buf; the queue is sent to the chip as a single GPMI DMA write when
 * the NAND core signals the end of the command sequence (a call with
 * neither ALE nor CLE set).  Batching the bytes this way issues the whole
 * command in one DMA transaction.
 */
static void mxs_nand_cmd_ctrl(struct mtd_info *mtd, int data, unsigned int ctrl)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	int ret;

	/* Refuse to overflow the fixed-size command buffer. */
	if (nand_info->cmd_queue_len == MXS_NAND_COMMAND_BUFFER_SIZE) {
		printf("MXS NAND: Command queue too long\n");
		return;
	}

	/*
	 * While ALE or CLE is asserted, just accumulate the byte; the
	 * transfer happens when the sequence ends.
	 */
	if (ctrl & (NAND_ALE | NAND_CLE)) {
		if (data != NAND_CMD_NONE)
			nand_info->cmd_buf[nand_info->cmd_queue_len++] = data;
		return;
	}

	/* Sequence end with nothing queued: nothing to send. */
	if (nand_info->cmd_queue_len == 0)
		return;

	/* One descriptor: DMA the queued bytes out in CLE/address mode. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_DMA_READ | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_CHAIN | MXS_DMA_DESC_DEC_SEM |
		MXS_DMA_DESC_WAIT4END | (3 << MXS_DMA_DESC_PIO_WORDS_OFFSET) |
		(nand_info->cmd_queue_len << MXS_DMA_DESC_BYTES_OFFSET);

	d->cmd.address = (dma_addr_t)nand_info->cmd_buf;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WRITE |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_CLE |
		GPMI_CTRL0_ADDRESS_INCREMENT |
		nand_info->cmd_queue_len;

	mxs_dma_desc_append(channel, d);

	/* Make sure the queued bytes are visible to the DMA engine. */
	mxs_nand_flush_cmd_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret)
		printf("MXS NAND: Error sending command\n");

	mxs_nand_return_dma_descs(nand_info);

	/* Reset the command queue for the next sequence. */
	nand_info->cmd_queue_len = 0;
}
410
411
412
413
414static int mxs_nand_device_ready(struct mtd_info *mtd)
415{
416 struct nand_chip *chip = mtd_to_nand(mtd);
417 struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
418 uint32_t tmp;
419
420 tmp = readl(&nand_info->gpmi_regs->hw_gpmi_stat);
421 tmp >>= (GPMI_STAT_READY_BUSY_OFFSET + nand_info->cur_chip);
422
423 return tmp & 1;
424}
425
426
427
428
429static void mxs_nand_select_chip(struct mtd_info *mtd, int chip)
430{
431 struct nand_chip *nand = mtd_to_nand(mtd);
432 struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
433
434 nand_info->cur_chip = chip;
435}
436
437
438
439
440
441
442
443
/*
 * Swap oob_buf[0] with the byte-sized bit field that starts at
 * (block_mark_byte_offset, block_mark_bit_offset) in the data buffer.
 *
 * The BCH layout places ECC-protected data at the physical position of
 * the factory bad block marker; this swap keeps the marker byte where
 * marker scans expect it while preserving the displaced data bits.  The
 * operation is a pure exchange (its own inverse), so the same routine is
 * used on both the read and the write path.
 */
static void mxs_nand_swap_block_mark(struct bch_geometry *geo,
				     uint8_t *data_buf, uint8_t *oob_buf)
{
	uint32_t bit_offset = geo->block_mark_bit_offset;
	uint32_t buf_offset = geo->block_mark_byte_offset;

	uint32_t src;
	uint32_t dst;

	/*
	 * Gather the (bit-unaligned) byte that the BCH layout stores at the
	 * marker position, spanning data_buf[buf_offset] and the next byte.
	 */
	src = data_buf[buf_offset] >> bit_offset;
	src |= data_buf[buf_offset + 1] << (8 - bit_offset);

	dst = oob_buf[0];

	oob_buf[0] = src;

	/* Clear the target bit field, then splice dst into it. */
	data_buf[buf_offset] &= ~(0xff << bit_offset);
	data_buf[buf_offset + 1] &= 0xff << bit_offset;

	data_buf[buf_offset] |= dst << bit_offset;
	data_buf[buf_offset + 1] |= dst >> (8 - bit_offset);
}
472
473
474
475
/*
 * Read @length raw bytes from the NAND into @buf via a two-descriptor DMA
 * chain: a data-in transfer into the bounce buffer, followed by a
 * wait-for-ready descriptor to de-assert the GPMI.  The result is copied
 * from the DMA bounce buffer into the caller's buffer.
 */
static void mxs_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int length)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	int ret;

	if (length > NAND_MAX_PAGESIZE) {
		printf("MXS NAND: DMA buffer too big\n");
		return;
	}

	if (!buf) {
		printf("MXS NAND: DMA buffer is NULL\n");
		return;
	}

	/* Descriptor 1: DMA the data from the chip into the bounce buffer. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_DMA_WRITE | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END |
		(1 << MXS_DMA_DESC_PIO_WORDS_OFFSET) |
		(length << MXS_DMA_DESC_BYTES_OFFSET);

	d->cmd.address = (dma_addr_t)nand_info->data_buf;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_READ |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		length;

	mxs_dma_desc_append(channel, d);

	/*
	 * Descriptor 2: wait for the chip to go ready again so the GPMI is
	 * properly de-asserted before the chain completes.
	 */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_DEC_SEM |
		MXS_DMA_DESC_WAIT4END | (1 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA;

	mxs_dma_desc_append(channel, d);

	/* Invalidate before the transfer so no stale lines are written back. */
	mxs_nand_inval_data_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret) {
		printf("MXS NAND: DMA read error\n");
		goto rtn;
	}

	/* Invalidate again so the CPU reads what the DMA engine wrote. */
	mxs_nand_inval_data_buf(nand_info);

	memcpy(buf, nand_info->data_buf, length);

rtn:
	mxs_nand_return_dma_descs(nand_info);
}
555
556
557
558
/*
 * Write @length raw bytes from @buf to the NAND: copy into the DMA bounce
 * buffer, flush it, then run a single data-out DMA descriptor.
 */
static void mxs_nand_write_buf(struct mtd_info *mtd, const uint8_t *buf,
			       int length)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	int ret;

	if (length > NAND_MAX_PAGESIZE) {
		printf("MXS NAND: DMA buffer too big\n");
		return;
	}

	if (!buf) {
		printf("MXS NAND: DMA buffer is NULL\n");
		return;
	}

	memcpy(nand_info->data_buf, buf, length);

	/* Single descriptor: DMA the bounce buffer out to the chip. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_DMA_READ | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END |
		(1 << MXS_DMA_DESC_PIO_WORDS_OFFSET) |
		(length << MXS_DMA_DESC_BYTES_OFFSET);

	d->cmd.address = (dma_addr_t)nand_info->data_buf;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WRITE |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		length;

	mxs_dma_desc_append(channel, d);

	/* Clean the bounce buffer so the DMA engine sees the new data. */
	mxs_nand_flush_data_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret)
		printf("MXS NAND: DMA write error\n");

	mxs_nand_return_dma_descs(nand_info);
}
609
610
611
612
/* Read one byte from the NAND using the buffered DMA read path. */
static uint8_t mxs_nand_read_byte(struct mtd_info *mtd)
{
	uint8_t byte;

	mxs_nand_read_buf(mtd, &byte, 1);

	return byte;
}
619
/*
 * Decide whether a chunk the BCH engine flagged as uncorrectable is in
 * fact an erased (all-0xff) page with a few bitflips.
 *
 * First counts zero bits in the flagged chunk; if within the threshold,
 * re-reads the whole page raw (no ECC) and counts zero bits again.  When
 * both counts stay under the threshold the page is treated as erased: the
 * flips are added to the corrected statistics and @buf is normalised to
 * all 0xff.  Returns true for an erased page, false for a real failure.
 */
static bool mxs_nand_erased_page(struct mtd_info *mtd, struct nand_chip *nand,
				 u8 *buf, int chunk, int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct bch_geometry *geo = &nand_info->bch_geometry;
	unsigned int flip_bits = 0, flip_bits_noecc = 0;
	unsigned int threshold;
	unsigned int base = geo->ecc_chunkn_size * chunk;
	u32 *dma_buf = (u32 *)buf;
	int i;

	/* Tolerate at most gf_len/2 flips, capped at the ECC strength. */
	threshold = geo->gf_len / 2;
	if (threshold > geo->ecc_strength)
		threshold = geo->ecc_strength;

	/* Count zero bits in the chunk the BCH engine gave up on. */
	for (i = 0; i < geo->ecc_chunkn_size; i++) {
		flip_bits += hweight8(~buf[base + i]);
		if (flip_bits > threshold)
			return false;
	}

	/* Re-read the page without ECC and re-count over the whole page. */
	nand->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
	nand->read_buf(mtd, buf, mtd->writesize);

	for (i = 0; i < mtd->writesize / 4; i++) {
		flip_bits_noecc += hweight32(~dma_buf[i]);
		if (flip_bits_noecc > threshold)
			return false;
	}

	mtd->ecc_stats.corrected += flip_bits;

	/* Present the page to the caller as perfectly erased. */
	memset(buf, 0xff, mtd->writesize);

	printf("The page(%d) is an erased page(%d,%d,%d,%d).\n", page, chunk, threshold, flip_bits, flip_bits_noecc);

	return true;
}
658
659
660
661
/*
 * Read one ECC-protected page through the BCH engine.
 *
 * Builds a four-descriptor DMA chain (wait-for-ready, BCH-decoded read of
 * page + OOB into data_buf/oob_buf, de-assert wait, terminator), runs it,
 * waits for BCH completion, then interprets the per-chunk status bytes in
 * the auxiliary buffer to update the MTD ECC statistics.
 */
static int mxs_nand_ecc_read_page(struct mtd_info *mtd, struct nand_chip *nand,
				  uint8_t *buf, int oob_required,
				  int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct bch_geometry *geo = &nand_info->bch_geometry;
	struct mxs_bch_regs *bch_regs = nand_info->bch_regs;
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	uint32_t corrected = 0, failed = 0;
	uint8_t *status;
	int i, ret;
	int flag = 0;

	/* Descriptor 1: wait for the chip to be ready. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN |
		MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_WAIT4END |
		(1 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA;

	mxs_dma_desc_append(channel, d);

	/*
	 * Descriptor 2: read page + OOB through the BCH decoder.  The BCH
	 * engine itself scatters the decoded data to data_buf and the
	 * auxiliary/metadata bytes to oob_buf (pio_words[4]/[5]).
	 */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN |
		MXS_DMA_DESC_WAIT4END | (6 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_READ |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		(mtd->writesize + mtd->oobsize);
	d->cmd.pio_words[1] = 0;
	d->cmd.pio_words[2] =
		GPMI_ECCCTRL_ENABLE_ECC |
		GPMI_ECCCTRL_ECC_CMD_DECODE |
		GPMI_ECCCTRL_BUFFER_MASK_BCH_PAGE;
	d->cmd.pio_words[3] = mtd->writesize + mtd->oobsize;
	d->cmd.pio_words[4] = (dma_addr_t)nand_info->data_buf;
	d->cmd.pio_words[5] = (dma_addr_t)nand_info->oob_buf;

	if (nand_info->en_randomizer) {
		d->cmd.pio_words[2] |= GPMI_ECCCTRL_RANDOMIZER_ENABLE |
				       GPMI_ECCCTRL_RANDOMIZER_TYPE2;
		/* Randomizer seed is derived from the page number (mod 256). */
		d->cmd.pio_words[3] |= (page % 256) << 16;
	}

	mxs_dma_desc_append(channel, d);

	/* Descriptor 3: wait for ready again to de-assert the GPMI. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN |
		MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_WAIT4END |
		(3 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		(mtd->writesize + mtd->oobsize);
	d->cmd.pio_words[1] = 0;
	d->cmd.pio_words[2] = 0;

	mxs_dma_desc_append(channel, d);

	/* Descriptor 4: terminate the chain and release the semaphore. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_DEC_SEM;

	d->cmd.address = 0;

	mxs_dma_desc_append(channel, d);

	/* Invalidate the bounce buffer before DMA writes into it. */
	mxs_nand_inval_data_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret) {
		printf("MXS NAND: DMA read error\n");
		goto rtn;
	}

	ret = mxs_nand_wait_for_bch_complete(nand_info);
	if (ret) {
		printf("MXS NAND: BCH read timeout\n");
		goto rtn;
	}

	mxs_nand_return_dma_descs(nand_info);

	/* Invalidate again so the CPU sees the freshly decoded data. */
	mxs_nand_inval_data_buf(nand_info);

	/* Undo the marker/data swap done on write. */
	mxs_nand_swap_block_mark(geo, nand_info->data_buf, nand_info->oob_buf);

	/*
	 * Per-chunk BCH status: 0x00 = clean, 0xff = erased chunk,
	 * 0xfe = uncorrectable, otherwise = number of corrected bits.
	 */
	status = nand_info->oob_buf + mxs_nand_aux_status_offset();
	for (i = 0; i < geo->ecc_chunk_count; i++) {
		if (status[i] == 0x00)
			continue;

		if (status[i] == 0xff) {
			/*
			 * On SoCs with the BCH erase-threshold feature a
			 * non-zero DEBUG1 register means an "erased" chunk
			 * still had bitflips; remember that so the whole
			 * page can be forced to 0xff below.
			 */
			if (!nand_info->en_randomizer &&
			    (is_mx6dqp() || is_mx7() || is_mx6ul() ||
			     is_imx8() || is_imx8m()))
				if (readl(&bch_regs->hw_bch_debug1))
					flag = 1;
			continue;
		}

		if (status[i] == 0xfe) {
			/* Possibly an erased page with flips; check it. */
			if (mxs_nand_erased_page(mtd, nand,
						 nand_info->data_buf, i, page))
				break;
			failed++;
			continue;
		}

		corrected += status[i];
	}

	/* Fold the per-chunk results into the MTD statistics. */
	mtd->ecc_stats.failed += failed;
	mtd->ecc_stats.corrected += corrected;

	/*
	 * Only the (swapped-in) bad block marker byte of the OOB is
	 * meaningful to callers here; present the rest as 0xff.
	 */
	memset(nand->oob_poi, 0xff, mtd->oobsize);

	nand->oob_poi[0] = nand_info->oob_buf[0];

	memcpy(buf, nand_info->data_buf, mtd->writesize);

	/* Erased page with flips detected above: return all 0xff. */
	if (flag)
		memset(buf, 0xff, mtd->writesize);
rtn:
	mxs_nand_return_dma_descs(nand_info);

	return ret;
}
830
831
832
833
834static int mxs_nand_ecc_write_page(struct mtd_info *mtd,
835 struct nand_chip *nand, const uint8_t *buf,
836 int oob_required, int page)
837{
838 struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
839 struct bch_geometry *geo = &nand_info->bch_geometry;
840 struct mxs_dma_desc *d;
841 uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
842 int ret;
843
844 memcpy(nand_info->data_buf, buf, mtd->writesize);
845 memcpy(nand_info->oob_buf, nand->oob_poi, mtd->oobsize);
846
847
848 mxs_nand_swap_block_mark(geo, nand_info->data_buf, nand_info->oob_buf);
849
850
851 d = mxs_nand_get_dma_desc(nand_info);
852 d->cmd.data =
853 MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ |
854 MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END |
855 (6 << MXS_DMA_DESC_PIO_WORDS_OFFSET);
856
857 d->cmd.address = 0;
858
859 d->cmd.pio_words[0] =
860 GPMI_CTRL0_COMMAND_MODE_WRITE |
861 GPMI_CTRL0_WORD_LENGTH |
862 (nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
863 GPMI_CTRL0_ADDRESS_NAND_DATA;
864 d->cmd.pio_words[1] = 0;
865 d->cmd.pio_words[2] =
866 GPMI_ECCCTRL_ENABLE_ECC |
867 GPMI_ECCCTRL_ECC_CMD_ENCODE |
868 GPMI_ECCCTRL_BUFFER_MASK_BCH_PAGE;
869 d->cmd.pio_words[3] = (mtd->writesize + mtd->oobsize);
870 d->cmd.pio_words[4] = (dma_addr_t)nand_info->data_buf;
871 d->cmd.pio_words[5] = (dma_addr_t)nand_info->oob_buf;
872
873 if (nand_info->en_randomizer) {
874 d->cmd.pio_words[2] |= GPMI_ECCCTRL_RANDOMIZER_ENABLE |
875 GPMI_ECCCTRL_RANDOMIZER_TYPE2;
876
877
878
879
880
881
882
883 d->cmd.pio_words[3] |= (page % 256) << 16;
884 }
885
886 mxs_dma_desc_append(channel, d);
887
888
889 mxs_nand_flush_data_buf(nand_info);
890
891
892 ret = mxs_dma_go(channel);
893 if (ret) {
894 printf("MXS NAND: DMA write error\n");
895 goto rtn;
896 }
897
898 ret = mxs_nand_wait_for_bch_complete(nand_info);
899 if (ret) {
900 printf("MXS NAND: BCH write timeout\n");
901 goto rtn;
902 }
903
904rtn:
905 mxs_nand_return_dma_descs(nand_info);
906 return 0;
907}
908
909
910
911
912
913
914
915static int mxs_nand_hook_read_oob(struct mtd_info *mtd, loff_t from,
916 struct mtd_oob_ops *ops)
917{
918 struct nand_chip *chip = mtd_to_nand(mtd);
919 struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
920 int ret;
921
922 if (ops->mode == MTD_OPS_RAW)
923 nand_info->raw_oob_mode = 1;
924 else
925 nand_info->raw_oob_mode = 0;
926
927 ret = nand_info->hooked_read_oob(mtd, from, ops);
928
929 nand_info->raw_oob_mode = 0;
930
931 return ret;
932}
933
934
935
936
937
938
939
940static int mxs_nand_hook_write_oob(struct mtd_info *mtd, loff_t to,
941 struct mtd_oob_ops *ops)
942{
943 struct nand_chip *chip = mtd_to_nand(mtd);
944 struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
945 int ret;
946
947 if (ops->mode == MTD_OPS_RAW)
948 nand_info->raw_oob_mode = 1;
949 else
950 nand_info->raw_oob_mode = 0;
951
952 ret = nand_info->hooked_write_oob(mtd, to, ops);
953
954 nand_info->raw_oob_mode = 0;
955
956 return ret;
957}
958
959
960
961
962
963
964
965static int mxs_nand_hook_block_markbad(struct mtd_info *mtd, loff_t ofs)
966{
967 struct nand_chip *chip = mtd_to_nand(mtd);
968 struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
969 int ret;
970
971 nand_info->marking_block_bad = 1;
972
973 ret = nand_info->hooked_block_markbad(mtd, ofs);
974
975 nand_info->marking_block_bad = 0;
976
977 return ret;
978}
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
/*
 * Read the OOB area of @page into nand->oob_poi.
 *
 * In raw mode the physical OOB bytes are read verbatim.  Otherwise only
 * the bad block marker byte is read (the rest of the OOB is consumed by
 * BCH metadata/parity and is reported as 0xff).
 */
static int mxs_nand_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *nand,
				 int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);

	if (nand_info->raw_oob_mode) {
		/* Raw access: read the whole physical OOB area. */
		nand->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
		nand->read_buf(mtd, nand->oob_poi, mtd->oobsize);
	} else {
		/*
		 * "Cooked" access: only the first byte (the bad block
		 * marker) is meaningful; fill the rest with 0xff.
		 */
		memset(nand->oob_poi, 0xff, mtd->oobsize);

		nand->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
		mxs_nand_read_buf(mtd, nand->oob_poi, 1);
	}

	return 0;

}
1055
1056
1057
1058
1059static int mxs_nand_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *nand,
1060 int page)
1061{
1062 struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
1063 uint8_t block_mark = 0;
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074 if (!nand_info->marking_block_bad) {
1075 printf("NXS NAND: Writing OOB isn't supported\n");
1076 return -EIO;
1077 }
1078
1079
1080 nand->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
1081 nand->write_buf(mtd, &block_mark, 1);
1082 nand->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
1083
1084
1085 if (nand->waitfunc(mtd, nand) & NAND_STATUS_FAIL)
1086 return -EIO;
1087
1088 return 0;
1089}
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
/*
 * Claim that no block is bad.
 *
 * NOTE(review): the BCH layout moves the factory marker into
 * ECC-protected data, so a plain OOB scan is not meaningful here;
 * presumably bad-block tracking relies on the swapped marker / BBT
 * machinery elsewhere — confirm against the read path before relying on
 * this.
 */
static int mxs_nand_block_bad(struct mtd_info *mtd, loff_t ofs)
{
	return 0;
}
1108
1109static int mxs_nand_set_geometry(struct mtd_info *mtd, struct bch_geometry *geo)
1110{
1111 struct nand_chip *chip = mtd_to_nand(mtd);
1112 struct nand_chip *nand = mtd_to_nand(mtd);
1113 struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
1114
1115 if (chip->ecc_strength_ds > nand_info->max_ecc_strength_supported) {
1116 printf("unsupported NAND chip, minimum ecc required %d\n"
1117 , chip->ecc_strength_ds);
1118 return -EINVAL;
1119 }
1120
1121 if ((!(chip->ecc_strength_ds > 0 && chip->ecc_step_ds > 0) &&
1122 mtd->oobsize < 1024) || nand_info->legacy_bch_geometry) {
1123 dev_warn(mtd->dev, "use legacy bch geometry\n");
1124 return mxs_nand_legacy_calc_ecc_layout(geo, mtd);
1125 }
1126
1127 if (mtd->oobsize > 1024 || chip->ecc_step_ds < mtd->oobsize)
1128 return mxs_nand_calc_ecc_for_large_oob(geo, mtd);
1129
1130 return mxs_nand_calc_ecc_layout_by_info(geo, mtd,
1131 chip->ecc_strength_ds, chip->ecc_step_ds);
1132
1133 return 0;
1134}
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
/*
 * Program the BCH engine for the chip's geometry and install the MTD
 * operation hooks.
 *
 * Computes (or selects) the BCH geometry, writes FLASH0LAYOUT0/1, the
 * erase threshold (on SoCs that have it), the layout select and IRQ
 * enable registers, and wraps mtd's _read_oob/_write_oob/_block_markbad
 * with this driver's hook functions.  Returns 0 on success or the
 * geometry-calculation error.
 */
int mxs_nand_setup_ecc(struct mtd_info *mtd)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct bch_geometry *geo = &nand_info->bch_geometry;
	struct mxs_bch_regs *bch_regs = nand_info->bch_regs;
	uint32_t tmp;
	int ret;

	nand_info->en_randomizer = 0;
	nand_info->oobsize = mtd->oobsize;
	nand_info->writesize = mtd->writesize;

	ret = mxs_nand_set_geometry(mtd, geo);
	if (ret)
		return ret;

	/* Reset the BCH block before reprogramming it. */
	mxs_reset_block(&bch_regs->hw_bch_ctrl_reg);

	/*
	 * FLASH0LAYOUT0: chunk count, metadata size, chunk-0 ECC level
	 * (stored as strength/2), chunk-0 size and Galois field width.
	 */
	tmp = (geo->ecc_chunk_count - 1) << BCH_FLASHLAYOUT0_NBLOCKS_OFFSET;
	tmp |= MXS_NAND_METADATA_SIZE << BCH_FLASHLAYOUT0_META_SIZE_OFFSET;
	tmp |= (geo->ecc_strength >> 1) << BCH_FLASHLAYOUT0_ECC0_OFFSET;
	tmp |= geo->ecc_chunk0_size >> MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT;
	tmp |= (geo->gf_len == 14 ? 1 : 0) <<
		BCH_FLASHLAYOUT0_GF13_0_GF14_1_OFFSET;
	writel(tmp, &bch_regs->hw_bch_flash0layout0);
	nand_info->bch_flash0layout0 = tmp;

	/*
	 * FLASH0LAYOUT1: total (page + OOB) size, chunk-n ECC level,
	 * chunk-n size and Galois field width.
	 */
	tmp = (mtd->writesize + mtd->oobsize)
		<< BCH_FLASHLAYOUT1_PAGE_SIZE_OFFSET;
	tmp |= (geo->ecc_strength >> 1) << BCH_FLASHLAYOUT1_ECCN_OFFSET;
	tmp |= geo->ecc_chunkn_size >> MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT;
	tmp |= (geo->gf_len == 14 ? 1 : 0) <<
		BCH_FLASHLAYOUT1_GF13_0_GF14_1_OFFSET;
	writel(tmp, &bch_regs->hw_bch_flash0layout1);
	nand_info->bch_flash0layout1 = tmp;

	/* SoCs with the erase-threshold feature: program it. */
	if (is_mx6dqp() || is_mx7() ||
	    is_mx6ul() || is_imx8() || is_imx8m())
		writel(BCH_MODE_ERASE_THRESHOLD(geo->ecc_strength),
		       &bch_regs->hw_bch_mode);

	/* Use layout 0 for all chip selects. */
	writel(0, &bch_regs->hw_bch_layoutselect);

	/* Enable the BCH completion interrupt. */
	writel(BCH_CTRL_COMPLETE_IRQ_EN, &bch_regs->hw_bch_ctrl_set);

	/* Hook the MTD OOB/markbad entry points (idempotent). */
	if (mtd->_read_oob != mxs_nand_hook_read_oob) {
		nand_info->hooked_read_oob = mtd->_read_oob;
		mtd->_read_oob = mxs_nand_hook_read_oob;
	}

	if (mtd->_write_oob != mxs_nand_hook_write_oob) {
		nand_info->hooked_write_oob = mtd->_write_oob;
		mtd->_write_oob = mxs_nand_hook_write_oob;
	}

	if (mtd->_block_markbad != mxs_nand_hook_block_markbad) {
		nand_info->hooked_block_markbad = mtd->_block_markbad;
		mtd->_block_markbad = mxs_nand_hook_block_markbad;
	}

	return 0;
}
1214
1215
1216
1217
1218int mxs_nand_alloc_buffers(struct mxs_nand_info *nand_info)
1219{
1220 uint8_t *buf;
1221 const int size = NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE;
1222
1223 nand_info->data_buf_size = roundup(size, MXS_DMA_ALIGNMENT);
1224
1225
1226 buf = memalign(MXS_DMA_ALIGNMENT, nand_info->data_buf_size);
1227 if (!buf) {
1228 printf("MXS NAND: Error allocating DMA buffers\n");
1229 return -ENOMEM;
1230 }
1231
1232 memset(buf, 0, nand_info->data_buf_size);
1233
1234 nand_info->data_buf = buf;
1235 nand_info->oob_buf = buf + NAND_MAX_PAGESIZE;
1236
1237 nand_info->cmd_buf = memalign(MXS_DMA_ALIGNMENT,
1238 MXS_NAND_COMMAND_BUFFER_SIZE);
1239 if (!nand_info->cmd_buf) {
1240 free(buf);
1241 printf("MXS NAND: Error allocating command buffers\n");
1242 return -ENOMEM;
1243 }
1244 memset(nand_info->cmd_buf, 0, MXS_NAND_COMMAND_BUFFER_SIZE);
1245 nand_info->cmd_queue_len = 0;
1246
1247 return 0;
1248}
1249
1250
1251
1252
/*
 * Allocate the DMA descriptor pool, initialise the APBH DMA channels for
 * all GPMI chip selects, and reset/configure the GPMI and BCH blocks.
 *
 * On failure the goto chain unwinds exactly what was set up: channels
 * initialised so far, descriptors allocated so far, then the descriptor
 * pointer array.  Returns 0 on success or a negative errno.
 */
static int mxs_nand_init_dma(struct mxs_nand_info *info)
{
	int i = 0, j, ret = 0;

	info->desc = malloc(sizeof(struct mxs_dma_desc *) *
				MXS_NAND_DMA_DESCRIPTOR_COUNT);
	if (!info->desc) {
		ret = -ENOMEM;
		goto err1;
	}

	/* Allocate the per-operation DMA descriptors. */
	for (i = 0; i < MXS_NAND_DMA_DESCRIPTOR_COUNT; i++) {
		info->desc[i] = mxs_dma_desc_alloc();
		if (!info->desc[i]) {
			ret = -ENOMEM;
			goto err2;
		}
	}

	/* Init the DMA controller and one channel per GPMI chip select. */
	mxs_dma_init();
	for (j = MXS_DMA_CHANNEL_AHB_APBH_GPMI0;
		j <= MXS_DMA_CHANNEL_AHB_APBH_GPMI7; j++) {
		ret = mxs_dma_init_channel(j);
		if (ret)
			goto err3;
	}

	/* Reset the GPMI and BCH blocks to a known state. */
	mxs_reset_block(&info->gpmi_regs->hw_gpmi_ctrl0_reg);
	mxs_reset_block(&info->bch_regs->hw_bch_ctrl_reg);

	/*
	 * Configure GPMI CTRL1: clear GPMI mode, set IRQRDY polarity,
	 * release the device reset and enable BCH mode.
	 */
	clrsetbits_le32(&info->gpmi_regs->hw_gpmi_ctrl1,
			GPMI_CTRL1_GPMI_MODE,
			GPMI_CTRL1_ATA_IRQRDY_POLARITY | GPMI_CTRL1_DEV_RESET |
			GPMI_CTRL1_BCH_MODE);

	return 0;

err3:
	/* Release only the channels that were successfully initialised. */
	for (--j; j >= MXS_DMA_CHANNEL_AHB_APBH_GPMI0; j--)
		mxs_dma_release(j);
err2:
	/* Free only the descriptors that were successfully allocated. */
	for (--i; i >= 0; i--)
		mxs_dma_desc_free(info->desc[i]);
	free(info->desc);
err1:
	if (ret == -ENOMEM)
		printf("MXS NAND: Unable to allocate DMA descriptors\n");
	return ret;
}
1309
1310int mxs_nand_init_spl(struct nand_chip *nand)
1311{
1312 struct mxs_nand_info *nand_info;
1313 int err;
1314
1315 nand_info = malloc(sizeof(struct mxs_nand_info));
1316 if (!nand_info) {
1317 printf("MXS NAND: Failed to allocate private data\n");
1318 return -ENOMEM;
1319 }
1320 memset(nand_info, 0, sizeof(struct mxs_nand_info));
1321
1322 nand_info->gpmi_regs = (struct mxs_gpmi_regs *)MXS_GPMI_BASE;
1323 nand_info->bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
1324
1325 if (is_mx6sx() || is_mx7() || is_imx8() || is_imx8m())
1326 nand_info->max_ecc_strength_supported = 62;
1327 else
1328 nand_info->max_ecc_strength_supported = 40;
1329
1330 err = mxs_nand_alloc_buffers(nand_info);
1331 if (err)
1332 return err;
1333
1334 err = mxs_nand_init_dma(nand_info);
1335 if (err)
1336 return err;
1337
1338 nand_set_controller_data(nand, nand_info);
1339
1340 nand->options |= NAND_NO_SUBPAGE_WRITE;
1341
1342 nand->cmd_ctrl = mxs_nand_cmd_ctrl;
1343 nand->dev_ready = mxs_nand_device_ready;
1344 nand->select_chip = mxs_nand_select_chip;
1345
1346 nand->read_byte = mxs_nand_read_byte;
1347 nand->read_buf = mxs_nand_read_buf;
1348
1349 nand->ecc.read_page = mxs_nand_ecc_read_page;
1350
1351 nand->ecc.mode = NAND_ECC_HW;
1352
1353 return 0;
1354}
1355
1356int mxs_nand_init_ctrl(struct mxs_nand_info *nand_info)
1357{
1358 struct mtd_info *mtd;
1359 struct nand_chip *nand;
1360 int err;
1361
1362 nand = &nand_info->chip;
1363 mtd = nand_to_mtd(nand);
1364 err = mxs_nand_alloc_buffers(nand_info);
1365 if (err)
1366 return err;
1367
1368 err = mxs_nand_init_dma(nand_info);
1369 if (err)
1370 goto err_free_buffers;
1371
1372 memset(&fake_ecc_layout, 0, sizeof(fake_ecc_layout));
1373
1374#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
1375 nand->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
1376#endif
1377
1378 nand_set_controller_data(nand, nand_info);
1379 nand->options |= NAND_NO_SUBPAGE_WRITE;
1380
1381 if (nand_info->dev)
1382 nand->flash_node = dev_ofnode(nand_info->dev);
1383
1384 nand->cmd_ctrl = mxs_nand_cmd_ctrl;
1385
1386 nand->dev_ready = mxs_nand_device_ready;
1387 nand->select_chip = mxs_nand_select_chip;
1388 nand->block_bad = mxs_nand_block_bad;
1389
1390 nand->read_byte = mxs_nand_read_byte;
1391
1392 nand->read_buf = mxs_nand_read_buf;
1393 nand->write_buf = mxs_nand_write_buf;
1394
1395
1396 if (nand_scan_ident(mtd, CONFIG_SYS_MAX_NAND_DEVICE, NULL))
1397 goto err_free_buffers;
1398
1399 if (mxs_nand_setup_ecc(mtd))
1400 goto err_free_buffers;
1401
1402 nand->ecc.read_page = mxs_nand_ecc_read_page;
1403 nand->ecc.write_page = mxs_nand_ecc_write_page;
1404 nand->ecc.read_oob = mxs_nand_ecc_read_oob;
1405 nand->ecc.write_oob = mxs_nand_ecc_write_oob;
1406
1407 nand->ecc.layout = &fake_ecc_layout;
1408 nand->ecc.mode = NAND_ECC_HW;
1409 nand->ecc.size = nand_info->bch_geometry.ecc_chunkn_size;
1410 nand->ecc.strength = nand_info->bch_geometry.ecc_strength;
1411
1412
1413 err = nand_scan_tail(mtd);
1414 if (err)
1415 goto err_free_buffers;
1416
1417 err = nand_register(0, mtd);
1418 if (err)
1419 goto err_free_buffers;
1420
1421 return 0;
1422
1423err_free_buffers:
1424 free(nand_info->data_buf);
1425 free(nand_info->cmd_buf);
1426
1427 return err;
1428}
1429
1430#ifndef CONFIG_NAND_MXS_DT
1431void board_nand_init(void)
1432{
1433 struct mxs_nand_info *nand_info;
1434
1435 nand_info = malloc(sizeof(struct mxs_nand_info));
1436 if (!nand_info) {
1437 printf("MXS NAND: Failed to allocate private data\n");
1438 return;
1439 }
1440 memset(nand_info, 0, sizeof(struct mxs_nand_info));
1441
1442 nand_info->gpmi_regs = (struct mxs_gpmi_regs *)MXS_GPMI_BASE;
1443 nand_info->bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
1444
1445
1446 if (is_mx6sx() || is_mx7())
1447 nand_info->max_ecc_strength_supported = 62;
1448 else
1449 nand_info->max_ecc_strength_supported = 40;
1450
1451#ifdef CONFIG_NAND_MXS_USE_MINIMUM_ECC
1452 nand_info->use_minimum_ecc = true;
1453#endif
1454
1455 if (mxs_nand_init_ctrl(nand_info) < 0)
1456 goto err;
1457
1458 return;
1459
1460err:
1461 free(nand_info);
1462}
1463#endif
1464
1465
1466
1467
1468void mxs_nand_get_layout(struct mtd_info *mtd, struct mxs_nand_layout *l)
1469{
1470 struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
1471 u32 tmp;
1472
1473 tmp = readl(&bch_regs->hw_bch_flash0layout0);
1474 l->nblocks = (tmp & BCH_FLASHLAYOUT0_NBLOCKS_MASK) >>
1475 BCH_FLASHLAYOUT0_NBLOCKS_OFFSET;
1476 l->meta_size = (tmp & BCH_FLASHLAYOUT0_META_SIZE_MASK) >>
1477 BCH_FLASHLAYOUT0_META_SIZE_OFFSET;
1478
1479 tmp = readl(&bch_regs->hw_bch_flash0layout1);
1480 l->data0_size = 4 * ((tmp & BCH_FLASHLAYOUT0_DATA0_SIZE_MASK) >>
1481 BCH_FLASHLAYOUT0_DATA0_SIZE_OFFSET);
1482 l->ecc0 = (tmp & BCH_FLASHLAYOUT0_ECC0_MASK) >>
1483 BCH_FLASHLAYOUT0_ECC0_OFFSET;
1484 l->datan_size = 4 * ((tmp & BCH_FLASHLAYOUT1_DATAN_SIZE_MASK) >>
1485 BCH_FLASHLAYOUT1_DATAN_SIZE_OFFSET);
1486 l->eccn = (tmp & BCH_FLASHLAYOUT1_ECCN_MASK) >>
1487 BCH_FLASHLAYOUT1_ECCN_OFFSET;
1488 l->gf_len = (tmp & BCH_FLASHLAYOUT1_GF13_0_GF14_1_MASK) >>
1489 BCH_FLASHLAYOUT1_GF13_0_GF14_1_OFFSET;
1490}
1491
1492
1493
1494
/*
 * Switch the BCH engine to the fixed 62-bit-ECC layout used for FCB pages
 * and enable the randomizer. The previous layout can be restored with
 * mxs_nand_mode_normal().
 */
void mxs_nand_mode_fcb_62bit(struct mtd_info *mtd)
{
	u32 tmp;
	struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);

	nand_info->en_randomizer = 1;

	/* 1862 bytes per page total: 1024 data + 838 "OOB" (metadata + ECC). */
	mtd->writesize = 1024;
	mtd->oobsize = 1862 - 1024;

	/* 8 ecc chunks -- NOTE(review): field presumably encodes n - 1, confirm */
	tmp = 7 << BCH_FLASHLAYOUT0_NBLOCKS_OFFSET;
	/* 32 bytes of metadata */
	tmp |= 32 << BCH_FLASHLAYOUT0_META_SIZE_OFFSET;
	/* ECC level for block 0: 0x1F -- presumably ECC62, matching the 40-bit
	 * variant's 0x14/ECC40 encoding (level = 2 * field); confirm in RM */
	tmp |= 0x1F << BCH_FLASHLAYOUT0_ECC0_OFFSET;
	/* 0x20 * 4 bytes for the data0 block */
	tmp |= 0x20 << BCH_FLASHLAYOUT0_DATA0_SIZE_OFFSET;
	tmp |= 0 << BCH_FLASHLAYOUT0_GF13_0_GF14_1_OFFSET;
	writel(tmp, &bch_regs->hw_bch_flash0layout0);

	/* Total page size programmed above: 1024 data + 838 OOB. */
	tmp = 1862 << BCH_FLASHLAYOUT1_PAGE_SIZE_OFFSET;
	/* Same ECC level for the remaining chunks */
	tmp |= 0x1F << BCH_FLASHLAYOUT1_ECCN_OFFSET;
	/* 0x20 * 4 bytes per data-n block */
	tmp |= 0x20 << BCH_FLASHLAYOUT1_DATAN_SIZE_OFFSET;
	tmp |= 0 << BCH_FLASHLAYOUT1_GF13_0_GF14_1_OFFSET;
	writel(tmp, &bch_regs->hw_bch_flash0layout1);
}
1527
1528
1529
1530
/*
 * Switch the BCH engine to the fixed 40-bit-ECC FCB layout (randomizer
 * disabled). The previous layout can be restored with
 * mxs_nand_mode_normal().
 */
void mxs_nand_mode_fcb_40bit(struct mtd_info *mtd)
{
	u32 tmp;
	struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);

	/* Unlike the 62-bit FCB mode, this layout runs without randomizer. */
	nand_info->en_randomizer = 0;

	/* 1576 bytes per page total: 1024 data + 552 "OOB" (metadata + ECC). */
	mtd->writesize = 1024;
	mtd->oobsize = 1576 - 1024;

	/* 8 ecc chunks -- NOTE(review): field presumably encodes n - 1, confirm */
	tmp = 7 << BCH_FLASHLAYOUT0_NBLOCKS_OFFSET;
	/* 32 bytes of metadata */
	tmp |= 32 << BCH_FLASHLAYOUT0_META_SIZE_OFFSET;
	/* ECC level for block 0: 0x14 -- presumably ECC40 (level = 2 * field) */
	tmp |= 0x14 << BCH_FLASHLAYOUT0_ECC0_OFFSET;
	/* 0x20 * 4 bytes for the data0 block */
	tmp |= 0x20 << BCH_FLASHLAYOUT0_DATA0_SIZE_OFFSET;
	tmp |= 0 << BCH_FLASHLAYOUT0_GF13_0_GF14_1_OFFSET;
	writel(tmp, &bch_regs->hw_bch_flash0layout0);

	/* Total page size programmed above: 1024 data + 552 OOB. */
	tmp = 1576 << BCH_FLASHLAYOUT1_PAGE_SIZE_OFFSET;
	/* Same ECC level for the remaining chunks */
	tmp |= 0x14 << BCH_FLASHLAYOUT1_ECCN_OFFSET;
	/* 0x20 * 4 bytes per data-n block */
	tmp |= 0x20 << BCH_FLASHLAYOUT1_DATAN_SIZE_OFFSET;
	tmp |= 0 << BCH_FLASHLAYOUT1_GF13_0_GF14_1_OFFSET;
	writel(tmp, &bch_regs->hw_bch_flash0layout1);
}
1564
1565
1566
1567
1568void mxs_nand_mode_normal(struct mtd_info *mtd)
1569{
1570 struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
1571 struct nand_chip *nand = mtd_to_nand(mtd);
1572 struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
1573
1574 nand_info->en_randomizer = 0;
1575
1576 mtd->writesize = nand_info->writesize;
1577 mtd->oobsize = nand_info->oobsize;
1578
1579 writel(nand_info->bch_flash0layout0, &bch_regs->hw_bch_flash0layout0);
1580 writel(nand_info->bch_flash0layout1, &bch_regs->hw_bch_flash0layout1);
1581}
1582
1583uint32_t mxs_nand_mark_byte_offset(struct mtd_info *mtd)
1584{
1585 struct nand_chip *chip = mtd_to_nand(mtd);
1586 struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
1587 struct bch_geometry *geo = &nand_info->bch_geometry;
1588
1589 return geo->block_mark_byte_offset;
1590}
1591
1592uint32_t mxs_nand_mark_bit_offset(struct mtd_info *mtd)
1593{
1594 struct nand_chip *chip = mtd_to_nand(mtd);
1595 struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
1596 struct bch_geometry *geo = &nand_info->bch_geometry;
1597
1598 return geo->block_mark_bit_offset;
1599}
1600