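/*
 * NAND flash driver for TI OMAP SoCs using the GPMC NAND interface.
 * Supports polled, prefetch-polled, prefetch-DMA and prefetch-IRQ
 * transfers, and 1-bit Hamming as well as BCH4/BCH8/BCH16 ECC schemes
 * (with or without the ELM error-location module).
 */
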
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand-ecc-sw-bch.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/omap-dma.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include <linux/platform_data/elm.h>

#include <linux/omap-gpmc.h>
#include <linux/platform_data/mtd-nand-omap2.h>

#define DRIVER_NAME "omap2-nand"
#define OMAP_NAND_TIMEOUT_MS 5000

#define NAND_Ecc_P1e (1 << 0)
#define NAND_Ecc_P2e (1 << 1)
#define NAND_Ecc_P4e (1 << 2)
#define NAND_Ecc_P8e (1 << 3)
#define NAND_Ecc_P16e (1 << 4)
#define NAND_Ecc_P32e (1 << 5)
#define NAND_Ecc_P64e (1 << 6)
#define NAND_Ecc_P128e (1 << 7)
#define NAND_Ecc_P256e (1 << 8)
#define NAND_Ecc_P512e (1 << 9)
#define NAND_Ecc_P1024e (1 << 10)
#define NAND_Ecc_P2048e (1 << 11)

#define NAND_Ecc_P1o (1 << 16)
#define NAND_Ecc_P2o (1 << 17)
#define NAND_Ecc_P4o (1 << 18)
#define NAND_Ecc_P8o (1 << 19)
#define NAND_Ecc_P16o (1 << 20)
#define NAND_Ecc_P32o (1 << 21)
#define NAND_Ecc_P64o (1 << 22)
#define NAND_Ecc_P128o (1 << 23)
#define NAND_Ecc_P256o (1 << 24)
#define NAND_Ecc_P512o (1 << 25)
#define NAND_Ecc_P1024o (1 << 26)
#define NAND_Ecc_P2048o (1 << 27)

#define TF(value) (value ? 1 : 0)

#define P2048e(a) (TF(a & NAND_Ecc_P2048e) << 0)
#define P2048o(a) (TF(a & NAND_Ecc_P2048o) << 1)
#define P1e(a) (TF(a & NAND_Ecc_P1e) << 2)
#define P1o(a) (TF(a & NAND_Ecc_P1o) << 3)
#define P2e(a) (TF(a & NAND_Ecc_P2e) << 4)
#define P2o(a) (TF(a & NAND_Ecc_P2o) << 5)
#define P4e(a) (TF(a & NAND_Ecc_P4e) << 6)
#define P4o(a) (TF(a & NAND_Ecc_P4o) << 7)

#define P8e(a) (TF(a & NAND_Ecc_P8e) << 0)
#define P8o(a) (TF(a & NAND_Ecc_P8o) << 1)
#define P16e(a) (TF(a & NAND_Ecc_P16e) << 2)
#define P16o(a) (TF(a & NAND_Ecc_P16o) << 3)
#define P32e(a) (TF(a & NAND_Ecc_P32e) << 4)
#define P32o(a) (TF(a & NAND_Ecc_P32o) << 5)
#define P64e(a) (TF(a & NAND_Ecc_P64e) << 6)
#define P64o(a) (TF(a & NAND_Ecc_P64o) << 7)

#define P128e(a) (TF(a & NAND_Ecc_P128e) << 0)
#define P128o(a) (TF(a & NAND_Ecc_P128o) << 1)
#define P256e(a) (TF(a & NAND_Ecc_P256e) << 2)
#define P256o(a) (TF(a & NAND_Ecc_P256o) << 3)
#define P512e(a) (TF(a & NAND_Ecc_P512e) << 4)
#define P512o(a) (TF(a & NAND_Ecc_P512o) << 5)
#define P1024e(a) (TF(a & NAND_Ecc_P1024e) << 6)
#define P1024o(a) (TF(a & NAND_Ecc_P1024o) << 7)

#define P8e_s(a) (TF(a & NAND_Ecc_P8e) << 0)
#define P8o_s(a) (TF(a & NAND_Ecc_P8o) << 1)
#define P16e_s(a) (TF(a & NAND_Ecc_P16e) << 2)
#define P16o_s(a) (TF(a & NAND_Ecc_P16o) << 3)
#define P1e_s(a) (TF(a & NAND_Ecc_P1e) << 4)
#define P1o_s(a) (TF(a & NAND_Ecc_P1o) << 5)
#define P2e_s(a) (TF(a & NAND_Ecc_P2e) << 6)
#define P2o_s(a) (TF(a & NAND_Ecc_P2o) << 7)

#define P4e_s(a) (TF(a & NAND_Ecc_P4e) << 0)
#define P4o_s(a) (TF(a & NAND_Ecc_P4o) << 1)

#define PREFETCH_CONFIG1_CS_SHIFT 24
#define ECC_CONFIG_CS_SHIFT 1
#define CS_MASK 0x7
#define ENABLE_PREFETCH (0x1 << 7)
#define DMA_MPU_MODE_SHIFT 2
#define ECCSIZE0_SHIFT 12
#define ECCSIZE1_SHIFT 22
#define ECC1RESULTSIZE 0x1
#define ECCCLEAR 0x100
#define ECC1 0x1
#define PREFETCH_FIFOTHRESHOLD_MAX 0x40
#define PREFETCH_FIFOTHRESHOLD(val) ((val) << 8)
#define PREFETCH_STATUS_COUNT(val) (val & 0x00003fff)
#define PREFETCH_STATUS_FIFO_CNT(val) ((val >> 24) & 0x7F)
#define STATUS_BUFF_EMPTY 0x00000001

#define SECTOR_BYTES 512

#define BCH4_BIT_PAD 4

#define BCH_WRAPMODE_1 1
#define BCH8R_ECC_SIZE0 0x1a
#define BCH8R_ECC_SIZE1 0x2
#define BCH4R_ECC_SIZE0 0xd
#define BCH4R_ECC_SIZE1 0x3

#define BCH_WRAPMODE_6 6
#define BCH_ECC_SIZE0 0x0
#define BCH_ECC_SIZE1 0x20

#define BADBLOCK_MARKER_LENGTH 2

static u_char bch16_vector[] = {0xf5, 0x24, 0x1c, 0xd0, 0x61, 0xb3, 0xf1, 0x55,
				0x2e, 0x2c, 0x86, 0xa3, 0xed, 0x36, 0x1b, 0x78,
				0x48, 0x76, 0xa9, 0x3b, 0x97, 0xd1, 0x7a, 0x93,
				0x07, 0x0e};
static u_char bch8_vector[] = {0xf3, 0xdb, 0x14, 0x16, 0x8b, 0xd2, 0xbe, 0xcc,
			       0xac, 0x6b, 0xff, 0x99, 0x7b};
static u_char bch4_vector[] = {0x00, 0x6b, 0x31, 0xdd, 0x41, 0xbc, 0x10};

struct omap_nand_info {
	struct nand_chip nand;
	struct platform_device *pdev;

	int gpmc_cs;
	bool dev_ready;
	enum nand_io xfer_type;
	int devsize;
	enum omap_ecc ecc_opt;
	struct device_node *elm_of_node;

	unsigned long phys_base;
	struct completion comp;
	struct dma_chan *dma;
	int gpmc_irq_fifo;
	int gpmc_irq_count;
	enum {
		OMAP_NAND_IO_READ = 0,
		OMAP_NAND_IO_WRITE,
	} iomode;
	u_char *buf;
	int buf_len;

	struct gpmc_nand_regs reg;
	struct gpmc_nand_ops *ops;
	bool flash_bbt;

	struct device *elm_dev;

	struct gpio_desc *ready_gpiod;
};

static inline struct omap_nand_info *mtd_to_omap(struct mtd_info *mtd)
{
	return container_of(mtd_to_nand(mtd), struct omap_nand_info, nand);
}

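/*
 * omap_prefetch_enable - configure and start the GPMC prefetch/write-posting
 * engine for @u32_count bytes on chip select @cs, with the given FIFO
 * threshold, DMA mode and transfer direction (@is_write).
 */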
static int omap_prefetch_enable(int cs, int fifo_th, int dma_mode,
	unsigned int u32_count, int is_write, struct omap_nand_info *info)
{
	u32 val;

	if (fifo_th > PREFETCH_FIFOTHRESHOLD_MAX)
		return -1;

	if (readl(info->reg.gpmc_prefetch_control))
		return -EBUSY;

	writel(u32_count, info->reg.gpmc_prefetch_config2);

	val = ((cs << PREFETCH_CONFIG1_CS_SHIFT) |
		PREFETCH_FIFOTHRESHOLD(fifo_th) | ENABLE_PREFETCH |
		(dma_mode << DMA_MPU_MODE_SHIFT) | (is_write & 0x1));
	writel(val, info->reg.gpmc_prefetch_config1);

	writel(0x1, info->reg.gpmc_prefetch_control);

	return 0;
}

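/*
 * omap_prefetch_reset - stop and disable the prefetch engine, after checking
 * that it is actually configured for chip select @cs.
 */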
static int omap_prefetch_reset(int cs, struct omap_nand_info *info)
{
	u32 config1;

	config1 = readl(info->reg.gpmc_prefetch_config1);
	if (((config1 >> PREFETCH_CONFIG1_CS_SHIFT) & CS_MASK) != cs)
		return -EINVAL;

	writel(0x0, info->reg.gpmc_prefetch_control);

	writel(0x0, info->reg.gpmc_prefetch_config1);

	return 0;
}

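/*
 * omap_hwcontrol - hardware specific access to the control lines: writes the
 * byte to the GPMC NAND command, address or data register depending on
 * whether CLE or ALE is asserted.
 */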
static void omap_hwcontrol(struct nand_chip *chip, int cmd, unsigned int ctrl)
{
	struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));

	if (cmd != NAND_CMD_NONE) {
		if (ctrl & NAND_CLE)
			writeb(cmd, info->reg.gpmc_nand_command);
		else if (ctrl & NAND_ALE)
			writeb(cmd, info->reg.gpmc_nand_address);
		else
			writeb(cmd, info->reg.gpmc_nand_data);
	}
}

static void omap_read_buf8(struct mtd_info *mtd, u_char *buf, int len)
{
	struct nand_chip *nand = mtd_to_nand(mtd);

	ioread8_rep(nand->legacy.IO_ADDR_R, buf, len);
}

static void omap_write_buf8(struct mtd_info *mtd, const u_char *buf, int len)
{
	struct omap_nand_info *info = mtd_to_omap(mtd);
	u_char *p = (u_char *)buf;
	bool status;

	while (len--) {
		iowrite8(*p++, info->nand.legacy.IO_ADDR_W);

		do {
			status = info->ops->nand_writebuffer_empty();
		} while (!status);
	}
}

static void omap_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
{
	struct nand_chip *nand = mtd_to_nand(mtd);

	ioread16_rep(nand->legacy.IO_ADDR_R, buf, len / 2);
}

static void omap_write_buf16(struct mtd_info *mtd, const u_char *buf, int len)
{
	struct omap_nand_info *info = mtd_to_omap(mtd);
	u16 *p = (u16 *) buf;
	bool status;

	len >>= 1;

	while (len--) {
		iowrite16(*p++, info->nand.legacy.IO_ADDR_W);

		do {
			status = info->ops->nand_writebuffer_empty();
		} while (!status);
	}
}

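/*
 * omap_read_buf_pref - read data from the NAND controller into @buf using the
 * prefetch engine; falls back to plain 8/16-bit PIO if the engine is busy.
 */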
static void omap_read_buf_pref(struct nand_chip *chip, u_char *buf, int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct omap_nand_info *info = mtd_to_omap(mtd);
	uint32_t r_count = 0;
	int ret = 0;
	u32 *p = (u32 *)buf;

	if (len % 4) {
		if (info->nand.options & NAND_BUSWIDTH_16)
			omap_read_buf16(mtd, buf, len % 4);
		else
			omap_read_buf8(mtd, buf, len % 4);
		p = (u32 *) (buf + len % 4);
		len -= len % 4;
	}

	ret = omap_prefetch_enable(info->gpmc_cs,
			PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x0, info);
	if (ret) {
		if (info->nand.options & NAND_BUSWIDTH_16)
			omap_read_buf16(mtd, (u_char *)p, len);
		else
			omap_read_buf8(mtd, (u_char *)p, len);
	} else {
		do {
			r_count = readl(info->reg.gpmc_prefetch_status);
			r_count = PREFETCH_STATUS_FIFO_CNT(r_count);
			r_count = r_count >> 2;
			ioread32_rep(info->nand.legacy.IO_ADDR_R, p, r_count);
			p += r_count;
			len -= r_count << 2;
		} while (len);

		omap_prefetch_reset(info->gpmc_cs, info);
	}
}

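/*
 * omap_write_buf_pref - write data from @buf to the NAND controller using the
 * write-posting engine; falls back to plain 8/16-bit PIO if the engine is busy.
 */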
static void omap_write_buf_pref(struct nand_chip *chip, const u_char *buf,
				int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct omap_nand_info *info = mtd_to_omap(mtd);
	uint32_t w_count = 0;
	int i = 0, ret = 0;
	u16 *p = (u16 *)buf;
	unsigned long tim, limit;
	u32 val;

	if (len % 2 != 0) {
		writeb(*buf, info->nand.legacy.IO_ADDR_W);
		p = (u16 *)(buf + 1);
		len--;
	}

	ret = omap_prefetch_enable(info->gpmc_cs,
			PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x1, info);
	if (ret) {
		if (info->nand.options & NAND_BUSWIDTH_16)
			omap_write_buf16(mtd, (u_char *)p, len);
		else
			omap_write_buf8(mtd, (u_char *)p, len);
	} else {
		while (len) {
			w_count = readl(info->reg.gpmc_prefetch_status);
			w_count = PREFETCH_STATUS_FIFO_CNT(w_count);
			w_count = w_count >> 1;
			for (i = 0; (i < w_count) && len; i++, len -= 2)
				iowrite16(*p++, info->nand.legacy.IO_ADDR_W);
		}

		tim = 0;
		limit = (loops_per_jiffy *
				msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
		do {
			cpu_relax();
			val = readl(info->reg.gpmc_prefetch_status);
			val = PREFETCH_STATUS_COUNT(val);
		} while (val && (tim++ < limit));

		omap_prefetch_reset(info->gpmc_cs, info);
	}
}

static void omap_nand_dma_callback(void *data)
{
	complete((struct completion *) data);
}

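/*
 * omap_nand_dma_transfer - map @addr, set up a slave DMA transfer driven by
 * the prefetch engine and wait for it to finish; falls back to PIO if the
 * buffer cannot be DMA-mapped or the engine cannot be set up.
 */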
static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
					 unsigned int len, int is_write)
{
	struct omap_nand_info *info = mtd_to_omap(mtd);
	struct dma_async_tx_descriptor *tx;
	enum dma_data_direction dir = is_write ? DMA_TO_DEVICE :
						 DMA_FROM_DEVICE;
	struct scatterlist sg;
	unsigned long tim, limit;
	unsigned n;
	int ret;
	u32 val;

	if (!virt_addr_valid(addr))
		goto out_copy;

	sg_init_one(&sg, addr, len);
	n = dma_map_sg(info->dma->device->dev, &sg, 1, dir);
	if (n == 0) {
		dev_err(&info->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n", len);
		goto out_copy;
	}

	tx = dmaengine_prep_slave_sg(info->dma, &sg, n,
		is_write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
		DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx)
		goto out_copy_unmap;

	tx->callback = omap_nand_dma_callback;
	tx->callback_param = &info->comp;
	dmaengine_submit(tx);

	init_completion(&info->comp);

	dma_async_issue_pending(info->dma);

	ret = omap_prefetch_enable(info->gpmc_cs,
		PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write, info);
	if (ret)
		goto out_copy_unmap;

	wait_for_completion(&info->comp);
	tim = 0;
	limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));

	do {
		cpu_relax();
		val = readl(info->reg.gpmc_prefetch_status);
		val = PREFETCH_STATUS_COUNT(val);
	} while (val && (tim++ < limit));

	omap_prefetch_reset(info->gpmc_cs, info);

	dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
	return 0;

out_copy_unmap:
	dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
out_copy:
	if (info->nand.options & NAND_BUSWIDTH_16)
		is_write == 0 ? omap_read_buf16(mtd, (u_char *) addr, len)
			: omap_write_buf16(mtd, (u_char *) addr, len);
	else
		is_write == 0 ? omap_read_buf8(mtd, (u_char *) addr, len)
			: omap_write_buf8(mtd, (u_char *) addr, len);
	return 0;
}

static void omap_read_buf_dma_pref(struct nand_chip *chip, u_char *buf,
				   int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (len <= mtd->oobsize)
		omap_read_buf_pref(chip, buf, len);
	else
		omap_nand_dma_transfer(mtd, buf, len, 0x0);
}

static void omap_write_buf_dma_pref(struct nand_chip *chip, const u_char *buf,
				    int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (len <= mtd->oobsize)
		omap_write_buf_pref(chip, buf, len);
	else
		omap_nand_dma_transfer(mtd, (u_char *)buf, len, 0x1);
}

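/*
 * omap_nand_irq - handler for the prefetch FIFO and count interrupts used by
 * the prefetch-irq transfer mode: moves data between the FIFO and info->buf
 * and completes the transfer when the count interrupt fires.
 */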
static irqreturn_t omap_nand_irq(int this_irq, void *dev)
{
	struct omap_nand_info *info = (struct omap_nand_info *) dev;
	u32 bytes;

	bytes = readl(info->reg.gpmc_prefetch_status);
	bytes = PREFETCH_STATUS_FIFO_CNT(bytes);
	bytes = bytes & 0xFFFC;
	if (info->iomode == OMAP_NAND_IO_WRITE) {
		if (this_irq == info->gpmc_irq_count)
			goto done;

		if (info->buf_len && (info->buf_len < bytes))
			bytes = info->buf_len;
		else if (!info->buf_len)
			bytes = 0;
		iowrite32_rep(info->nand.legacy.IO_ADDR_W, (u32 *)info->buf,
			      bytes >> 2);
		info->buf = info->buf + bytes;
		info->buf_len -= bytes;

	} else {
		ioread32_rep(info->nand.legacy.IO_ADDR_R, (u32 *)info->buf,
			     bytes >> 2);
		info->buf = info->buf + bytes;

		if (this_irq == info->gpmc_irq_count)
			goto done;
	}

	return IRQ_HANDLED;

done:
	complete(&info->comp);

	disable_irq_nosync(info->gpmc_irq_fifo);
	disable_irq_nosync(info->gpmc_irq_count);

	return IRQ_HANDLED;
}

static void omap_read_buf_irq_pref(struct nand_chip *chip, u_char *buf,
				   int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct omap_nand_info *info = mtd_to_omap(mtd);
	int ret = 0;

	if (len <= mtd->oobsize) {
		omap_read_buf_pref(chip, buf, len);
		return;
	}

	info->iomode = OMAP_NAND_IO_READ;
	info->buf = buf;
	init_completion(&info->comp);

	ret = omap_prefetch_enable(info->gpmc_cs,
			PREFETCH_FIFOTHRESHOLD_MAX/2, 0x0, len, 0x0, info);
	if (ret)
		goto out_copy;

	info->buf_len = len;

	enable_irq(info->gpmc_irq_count);
	enable_irq(info->gpmc_irq_fifo);

	wait_for_completion(&info->comp);

	omap_prefetch_reset(info->gpmc_cs, info);
	return;

out_copy:
	if (info->nand.options & NAND_BUSWIDTH_16)
		omap_read_buf16(mtd, buf, len);
	else
		omap_read_buf8(mtd, buf, len);
}

static void omap_write_buf_irq_pref(struct nand_chip *chip, const u_char *buf,
				    int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct omap_nand_info *info = mtd_to_omap(mtd);
	int ret = 0;
	unsigned long tim, limit;
	u32 val;

	if (len <= mtd->oobsize) {
		omap_write_buf_pref(chip, buf, len);
		return;
	}

	info->iomode = OMAP_NAND_IO_WRITE;
	info->buf = (u_char *) buf;
	init_completion(&info->comp);

	ret = omap_prefetch_enable(info->gpmc_cs,
			(PREFETCH_FIFOTHRESHOLD_MAX * 3) / 8, 0x0, len, 0x1, info);
	if (ret)
		goto out_copy;

	info->buf_len = len;

	enable_irq(info->gpmc_irq_count);
	enable_irq(info->gpmc_irq_fifo);

	wait_for_completion(&info->comp);

	tim = 0;
	limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
	do {
		val = readl(info->reg.gpmc_prefetch_status);
		val = PREFETCH_STATUS_COUNT(val);
		cpu_relax();
	} while (val && (tim++ < limit));

	omap_prefetch_reset(info->gpmc_cs, info);
	return;

out_copy:
	if (info->nand.options & NAND_BUSWIDTH_16)
		omap_write_buf16(mtd, buf, len);
	else
		omap_write_buf8(mtd, buf, len);
}

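/*
 * gen_true_ecc - rearrange the 3 ECC bytes read from the GPMC ECC1 result
 * register into the parity-bit layout expected by omap_compare_ecc().
 */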
static void gen_true_ecc(u8 *ecc_buf)
{
	u32 tmp = ecc_buf[0] | (ecc_buf[1] << 16) |
		((ecc_buf[2] & 0xF0) << 20) | ((ecc_buf[2] & 0x0F) << 8);

	ecc_buf[0] = ~(P64o(tmp) | P64e(tmp) | P32o(tmp) | P32e(tmp) |
			P16o(tmp) | P16e(tmp) | P8o(tmp) | P8e(tmp));
	ecc_buf[1] = ~(P1024o(tmp) | P1024e(tmp) | P512o(tmp) | P512e(tmp) |
			P256o(tmp) | P256e(tmp) | P128o(tmp) | P128e(tmp));
	ecc_buf[2] = ~(P4o(tmp) | P4e(tmp) | P2o(tmp) | P2e(tmp) | P1o(tmp) |
			P1e(tmp) | P2048o(tmp) | P2048e(tmp));
}

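/*
 * omap_compare_ecc - compare the stored and freshly computed Hamming ECC for
 * one 512-byte block and, when possible, correct a single-bit error in
 * @page_data. Returns 0 for no error, 1 when a bit was corrected and
 * -EBADMSG for an uncorrectable error.
 */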
static int omap_compare_ecc(u8 *ecc_data1,
			    u8 *ecc_data2,
			    u8 *page_data)
{
	uint i;
	u8 tmp0_bit[8], tmp1_bit[8], tmp2_bit[8];
	u8 comp0_bit[8], comp1_bit[8], comp2_bit[8];
	u8 ecc_bit[24];
	u8 ecc_sum = 0;
	u8 find_bit = 0;
	uint find_byte = 0;
	int isEccFF;

	isEccFF = ((*(u32 *)ecc_data1 & 0xFFFFFF) == 0xFFFFFF);

	gen_true_ecc(ecc_data1);
	gen_true_ecc(ecc_data2);

	for (i = 0; i <= 2; i++) {
		*(ecc_data1 + i) = ~(*(ecc_data1 + i));
		*(ecc_data2 + i) = ~(*(ecc_data2 + i));
	}

	for (i = 0; i < 8; i++) {
		tmp0_bit[i] = *ecc_data1 % 2;
		*ecc_data1 = *ecc_data1 / 2;
	}

	for (i = 0; i < 8; i++) {
		tmp1_bit[i] = *(ecc_data1 + 1) % 2;
		*(ecc_data1 + 1) = *(ecc_data1 + 1) / 2;
	}

	for (i = 0; i < 8; i++) {
		tmp2_bit[i] = *(ecc_data1 + 2) % 2;
		*(ecc_data1 + 2) = *(ecc_data1 + 2) / 2;
	}

	for (i = 0; i < 8; i++) {
		comp0_bit[i] = *ecc_data2 % 2;
		*ecc_data2 = *ecc_data2 / 2;
	}

	for (i = 0; i < 8; i++) {
		comp1_bit[i] = *(ecc_data2 + 1) % 2;
		*(ecc_data2 + 1) = *(ecc_data2 + 1) / 2;
	}

	for (i = 0; i < 8; i++) {
		comp2_bit[i] = *(ecc_data2 + 2) % 2;
		*(ecc_data2 + 2) = *(ecc_data2 + 2) / 2;
	}

	for (i = 0; i < 6; i++)
		ecc_bit[i] = tmp2_bit[i + 2] ^ comp2_bit[i + 2];

	for (i = 0; i < 8; i++)
		ecc_bit[i + 6] = tmp0_bit[i] ^ comp0_bit[i];

	for (i = 0; i < 8; i++)
		ecc_bit[i + 14] = tmp1_bit[i] ^ comp1_bit[i];

	ecc_bit[22] = tmp2_bit[0] ^ comp2_bit[0];
	ecc_bit[23] = tmp2_bit[1] ^ comp2_bit[1];

	for (i = 0; i < 24; i++)
		ecc_sum += ecc_bit[i];

	switch (ecc_sum) {
	case 0:
		return 0;

	case 1:
		pr_debug("ECC UNCORRECTED_ERROR 1\n");
		return -EBADMSG;

	case 11:
		pr_debug("ECC UNCORRECTED_ERROR B\n");
		return -EBADMSG;

	case 12:
		find_byte = (ecc_bit[23] << 8) +
			    (ecc_bit[21] << 7) +
			    (ecc_bit[19] << 6) +
			    (ecc_bit[17] << 5) +
			    (ecc_bit[15] << 4) +
			    (ecc_bit[13] << 3) +
			    (ecc_bit[11] << 2) +
			    (ecc_bit[9] << 1) +
			    ecc_bit[7];

		find_bit = (ecc_bit[5] << 2) + (ecc_bit[3] << 1) + ecc_bit[1];

		pr_debug("Correcting single bit ECC error at offset: "
			 "%d, bit: %d\n", find_byte, find_bit);

		page_data[find_byte] ^= (1 << find_bit);

		return 1;
	default:
		if (isEccFF) {
			if (ecc_data2[0] == 0 &&
			    ecc_data2[1] == 0 &&
			    ecc_data2[2] == 0)
				return 0;
		}
		pr_debug("UNCORRECTED_ERROR default\n");
		return -EBADMSG;
	}
}

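/*
 * omap_correct_data - correct page data using the Hamming ECC computed by the
 * GPMC; returns the number of corrected bitflips or a negative error code.
 */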
static int omap_correct_data(struct nand_chip *chip, u_char *dat,
			     u_char *read_ecc, u_char *calc_ecc)
{
	struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
	int blockCnt = 0, i = 0, ret = 0;
	int stat = 0;

	if (info->nand.ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST &&
	    info->nand.ecc.size == 2048)
		blockCnt = 4;
	else
		blockCnt = 1;

	for (i = 0; i < blockCnt; i++) {
		if (memcmp(read_ecc, calc_ecc, 3) != 0) {
			ret = omap_compare_ecc(read_ecc, calc_ecc, dat);
			if (ret < 0)
				return ret;

			stat += ret;
		}
		read_ecc += 3;
		calc_ecc += 3;
		dat += 512;
	}
	return stat;
}

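/*
 * omap_calculate_ecc - read the Hamming ECC generated by the GPMC for the
 * current chip select and store the 3 ECC bytes in @ecc_code.
 */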
static int omap_calculate_ecc(struct nand_chip *chip, const u_char *dat,
			      u_char *ecc_code)
{
	struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
	u32 val;

	val = readl(info->reg.gpmc_ecc_config);
	if (((val >> ECC_CONFIG_CS_SHIFT) & CS_MASK) != info->gpmc_cs)
		return -EINVAL;

	val = readl(info->reg.gpmc_ecc1_result);
	*ecc_code++ = val;
	*ecc_code++ = val >> 16;
	*ecc_code++ = ((val >> 8) & 0x0f) | ((val >> 20) & 0xf0);

	return 0;
}

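/*
 * omap_enable_hwecc - configure and enable the GPMC 1-bit Hamming ECC engine
 * for the current chip select and bus width.
 */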
static void omap_enable_hwecc(struct nand_chip *chip, int mode)
{
	struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
	unsigned int dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
	u32 val;

	val = ECCCLEAR | ECC1;
	writel(val, info->reg.gpmc_ecc_control);

	val = ((((info->nand.ecc.size >> 1) - 1) << ECCSIZE1_SHIFT) |
	       ECC1RESULTSIZE);
	writel(val, info->reg.gpmc_ecc_size_config);

	switch (mode) {
	case NAND_ECC_READ:
	case NAND_ECC_WRITE:
		writel(ECCCLEAR | ECC1, info->reg.gpmc_ecc_control);
		break;
	case NAND_ECC_READSYN:
		writel(ECCCLEAR, info->reg.gpmc_ecc_control);
		break;
	default:
		dev_info(&info->pdev->dev,
			 "error: unrecognized Mode[%d]!\n", mode);
		break;
	}

	val = (dev_width << 7) | (info->gpmc_cs << 1) | (0x1);
	writel(val, info->reg.gpmc_ecc_config);
}

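/*
 * omap_wait - poll the NAND status register until the device reports ready
 * (or a ~400ms timeout expires) and return the last status byte read.
 */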
static int omap_wait(struct nand_chip *this)
{
	struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(this));
	unsigned long timeo = jiffies;
	int status;

	timeo += msecs_to_jiffies(400);

	writeb(NAND_CMD_STATUS & 0xFF, info->reg.gpmc_nand_command);
	while (time_before(jiffies, timeo)) {
		status = readb(info->reg.gpmc_nand_data);
		if (status & NAND_STATUS_READY)
			break;
		cond_resched();
	}

	status = readb(info->reg.gpmc_nand_data);
	return status;
}

static int omap_dev_ready(struct nand_chip *chip)
{
	struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));

	return gpiod_get_value(info->ready_gpiod);
}

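/*
 * omap_enable_hwecc_bch - configure the GPMC BCH engine (BCH4/BCH8/BCH16,
 * with or without ELM assistance) for the current read or write operation,
 * selecting the appropriate wrap mode and ECC size fields.
 */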
static void __maybe_unused omap_enable_hwecc_bch(struct nand_chip *chip,
						 int mode)
{
	unsigned int bch_type;
	unsigned int dev_width, nsectors;
	struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
	enum omap_ecc ecc_opt = info->ecc_opt;
	u32 val, wr_mode;
	unsigned int ecc_size1, ecc_size0;

	switch (ecc_opt) {
	case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
		bch_type = 0;
		nsectors = 1;
		wr_mode = BCH_WRAPMODE_6;
		ecc_size0 = BCH_ECC_SIZE0;
		ecc_size1 = BCH_ECC_SIZE1;
		break;
	case OMAP_ECC_BCH4_CODE_HW:
		bch_type = 0;
		nsectors = chip->ecc.steps;
		if (mode == NAND_ECC_READ) {
			wr_mode = BCH_WRAPMODE_1;
			ecc_size0 = BCH4R_ECC_SIZE0;
			ecc_size1 = BCH4R_ECC_SIZE1;
		} else {
			wr_mode = BCH_WRAPMODE_6;
			ecc_size0 = BCH_ECC_SIZE0;
			ecc_size1 = BCH_ECC_SIZE1;
		}
		break;
	case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
		bch_type = 1;
		nsectors = 1;
		wr_mode = BCH_WRAPMODE_6;
		ecc_size0 = BCH_ECC_SIZE0;
		ecc_size1 = BCH_ECC_SIZE1;
		break;
	case OMAP_ECC_BCH8_CODE_HW:
		bch_type = 1;
		nsectors = chip->ecc.steps;
		if (mode == NAND_ECC_READ) {
			wr_mode = BCH_WRAPMODE_1;
			ecc_size0 = BCH8R_ECC_SIZE0;
			ecc_size1 = BCH8R_ECC_SIZE1;
		} else {
			wr_mode = BCH_WRAPMODE_6;
			ecc_size0 = BCH_ECC_SIZE0;
			ecc_size1 = BCH_ECC_SIZE1;
		}
		break;
	case OMAP_ECC_BCH16_CODE_HW:
		bch_type = 0x2;
		nsectors = chip->ecc.steps;
		if (mode == NAND_ECC_READ) {
			wr_mode = 0x01;
			ecc_size0 = 52;
			ecc_size1 = 0;
		} else {
			wr_mode = 0x01;
			ecc_size0 = 0;
			ecc_size1 = 52;
		}
		break;
	default:
		return;
	}

	writel(ECC1, info->reg.gpmc_ecc_control);

	val = (ecc_size1 << ECCSIZE1_SHIFT) | (ecc_size0 << ECCSIZE0_SHIFT);
	writel(val, info->reg.gpmc_ecc_size_config);

	dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;

	val = ((1 << 16) |
	       (bch_type << 12) |
	       (wr_mode << 8) |
	       (dev_width << 7) |
	       (((nsectors-1) & 0x7) << 4) |
	       (info->gpmc_cs << 1) |
	       (0x1));

	writel(val, info->reg.gpmc_ecc_config);

	writel(ECCCLEAR | ECC1, info->reg.gpmc_ecc_control);
}

static u8 bch4_polynomial[] = {0x28, 0x13, 0xcc, 0x39, 0x96, 0xac, 0x7f};
static u8 bch8_polynomial[] = {0xef, 0x51, 0x2e, 0x09, 0xed, 0x93, 0x9a, 0xc2,
			       0x97, 0x79, 0xe5, 0x24, 0xb5};

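/*
 * _omap_calculate_ecc_bch - read the BCH syndrome for 512-byte sector @i from
 * the GPMC result registers, pack it into @ecc_calc, and apply the
 * scheme-specific post-processing (constant-polynomial XOR for the SW
 * detection variants, or last-byte padding for the ELM variants).
 */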
static int _omap_calculate_ecc_bch(struct mtd_info *mtd,
				   const u_char *dat, u_char *ecc_calc, int i)
{
	struct omap_nand_info *info = mtd_to_omap(mtd);
	int eccbytes = info->nand.ecc.bytes;
	struct gpmc_nand_regs *gpmc_regs = &info->reg;
	u8 *ecc_code;
	unsigned long bch_val1, bch_val2, bch_val3, bch_val4;
	u32 val;
	int j;

	ecc_code = ecc_calc;
	switch (info->ecc_opt) {
	case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
	case OMAP_ECC_BCH8_CODE_HW:
		bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
		bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
		bch_val3 = readl(gpmc_regs->gpmc_bch_result2[i]);
		bch_val4 = readl(gpmc_regs->gpmc_bch_result3[i]);
		*ecc_code++ = (bch_val4 & 0xFF);
		*ecc_code++ = ((bch_val3 >> 24) & 0xFF);
		*ecc_code++ = ((bch_val3 >> 16) & 0xFF);
		*ecc_code++ = ((bch_val3 >> 8) & 0xFF);
		*ecc_code++ = (bch_val3 & 0xFF);
		*ecc_code++ = ((bch_val2 >> 24) & 0xFF);
		*ecc_code++ = ((bch_val2 >> 16) & 0xFF);
		*ecc_code++ = ((bch_val2 >> 8) & 0xFF);
		*ecc_code++ = (bch_val2 & 0xFF);
		*ecc_code++ = ((bch_val1 >> 24) & 0xFF);
		*ecc_code++ = ((bch_val1 >> 16) & 0xFF);
		*ecc_code++ = ((bch_val1 >> 8) & 0xFF);
		*ecc_code++ = (bch_val1 & 0xFF);
		break;
	case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
	case OMAP_ECC_BCH4_CODE_HW:
		bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
		bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
		*ecc_code++ = ((bch_val2 >> 12) & 0xFF);
		*ecc_code++ = ((bch_val2 >> 4) & 0xFF);
		*ecc_code++ = ((bch_val2 & 0xF) << 4) |
			((bch_val1 >> 28) & 0xF);
		*ecc_code++ = ((bch_val1 >> 20) & 0xFF);
		*ecc_code++ = ((bch_val1 >> 12) & 0xFF);
		*ecc_code++ = ((bch_val1 >> 4) & 0xFF);
		*ecc_code++ = ((bch_val1 & 0xF) << 4);
		break;
	case OMAP_ECC_BCH16_CODE_HW:
		val = readl(gpmc_regs->gpmc_bch_result6[i]);
		ecc_code[0] = ((val >> 8) & 0xFF);
		ecc_code[1] = ((val >> 0) & 0xFF);
		val = readl(gpmc_regs->gpmc_bch_result5[i]);
		ecc_code[2] = ((val >> 24) & 0xFF);
		ecc_code[3] = ((val >> 16) & 0xFF);
		ecc_code[4] = ((val >> 8) & 0xFF);
		ecc_code[5] = ((val >> 0) & 0xFF);
		val = readl(gpmc_regs->gpmc_bch_result4[i]);
		ecc_code[6] = ((val >> 24) & 0xFF);
		ecc_code[7] = ((val >> 16) & 0xFF);
		ecc_code[8] = ((val >> 8) & 0xFF);
		ecc_code[9] = ((val >> 0) & 0xFF);
		val = readl(gpmc_regs->gpmc_bch_result3[i]);
		ecc_code[10] = ((val >> 24) & 0xFF);
		ecc_code[11] = ((val >> 16) & 0xFF);
		ecc_code[12] = ((val >> 8) & 0xFF);
		ecc_code[13] = ((val >> 0) & 0xFF);
		val = readl(gpmc_regs->gpmc_bch_result2[i]);
		ecc_code[14] = ((val >> 24) & 0xFF);
		ecc_code[15] = ((val >> 16) & 0xFF);
		ecc_code[16] = ((val >> 8) & 0xFF);
		ecc_code[17] = ((val >> 0) & 0xFF);
		val = readl(gpmc_regs->gpmc_bch_result1[i]);
		ecc_code[18] = ((val >> 24) & 0xFF);
		ecc_code[19] = ((val >> 16) & 0xFF);
		ecc_code[20] = ((val >> 8) & 0xFF);
		ecc_code[21] = ((val >> 0) & 0xFF);
		val = readl(gpmc_regs->gpmc_bch_result0[i]);
		ecc_code[22] = ((val >> 24) & 0xFF);
		ecc_code[23] = ((val >> 16) & 0xFF);
		ecc_code[24] = ((val >> 8) & 0xFF);
		ecc_code[25] = ((val >> 0) & 0xFF);
		break;
	default:
		return -EINVAL;
	}

	switch (info->ecc_opt) {
	case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
		for (j = 0; j < eccbytes; j++)
			ecc_calc[j] ^= bch4_polynomial[j];
		break;
	case OMAP_ECC_BCH4_CODE_HW:
		ecc_calc[eccbytes - 1] = 0x0;
		break;
	case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
		for (j = 0; j < eccbytes; j++)
			ecc_calc[j] ^= bch8_polynomial[j];
		break;
	case OMAP_ECC_BCH8_CODE_HW:
		ecc_calc[eccbytes - 1] = 0x0;
		break;
	case OMAP_ECC_BCH16_CODE_HW:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int omap_calculate_ecc_bch_sw(struct nand_chip *chip,
				     const u_char *dat, u_char *ecc_calc)
{
	return _omap_calculate_ecc_bch(nand_to_mtd(chip), dat, ecc_calc, 0);
}

static int omap_calculate_ecc_bch_multi(struct mtd_info *mtd,
					const u_char *dat, u_char *ecc_calc)
{
	struct omap_nand_info *info = mtd_to_omap(mtd);
	int eccbytes = info->nand.ecc.bytes;
	unsigned long nsectors;
	int i, ret;

	nsectors = ((readl(info->reg.gpmc_ecc_config) >> 4) & 0x7) + 1;
	for (i = 0; i < nsectors; i++) {
		ret = _omap_calculate_ecc_bch(mtd, dat, ecc_calc, i);
		if (ret)
			return ret;

		ecc_calc += eccbytes;
	}

	return 0;
}

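/*
 * erased_sector_bitflips - count 0->1 bitflips in an (expected) erased sector;
 * if the count stays within ecc.strength the data and spare bytes are
 * rewritten to 0xFF and the bitflip count is returned, otherwise 0.
 */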
static int erased_sector_bitflips(u_char *data, u_char *oob,
				  struct omap_nand_info *info)
{
	int flip_bits = 0, i;

	for (i = 0; i < info->nand.ecc.size; i++) {
		flip_bits += hweight8(~data[i]);
		if (flip_bits > info->nand.ecc.strength)
			return 0;
	}

	for (i = 0; i < info->nand.ecc.bytes - 1; i++) {
		flip_bits += hweight8(~oob[i]);
		if (flip_bits > info->nand.ecc.strength)
			return 0;
	}

	if (flip_bits) {
		memset(data, 0xFF, info->nand.ecc.size);
		memset(oob, 0xFF, info->nand.ecc.bytes);
	}

	return flip_bits;
}

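/*
 * omap_elm_correct_data - BCH error correction using the ELM module: handles
 * erased pages, asks the ELM to locate errors and flips the reported bits in
 * the data/spare buffers. Returns the maximum bitflip count seen in any ECC
 * step, or -EBADMSG on uncorrectable errors.
 */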
static int omap_elm_correct_data(struct nand_chip *chip, u_char *data,
				 u_char *read_ecc, u_char *calc_ecc)
{
	struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
	struct nand_ecc_ctrl *ecc = &info->nand.ecc;
	int eccsteps = info->nand.ecc.steps;
	int i, j, stat = 0;
	int eccflag, actual_eccbytes;
	struct elm_errorvec err_vec[ERROR_VECTOR_MAX];
	u_char *ecc_vec = calc_ecc;
	u_char *spare_ecc = read_ecc;
	u_char *erased_ecc_vec;
	u_char *buf;
	int bitflip_count;
	bool is_error_reported = false;
	u32 bit_pos, byte_pos, error_max, pos;
	int err;

	switch (info->ecc_opt) {
	case OMAP_ECC_BCH4_CODE_HW:
		actual_eccbytes = ecc->bytes - 1;
		erased_ecc_vec = bch4_vector;
		break;
	case OMAP_ECC_BCH8_CODE_HW:
		actual_eccbytes = ecc->bytes - 1;
		erased_ecc_vec = bch8_vector;
		break;
	case OMAP_ECC_BCH16_CODE_HW:
		actual_eccbytes = ecc->bytes;
		erased_ecc_vec = bch16_vector;
		break;
	default:
		dev_err(&info->pdev->dev, "invalid driver configuration\n");
		return -EINVAL;
	}

	memset(err_vec, 0, sizeof(err_vec));

	for (i = 0; i < eccsteps; i++) {
		eccflag = 0;

		for (j = 0; j < actual_eccbytes; j++) {
			if (calc_ecc[j] != 0) {
				eccflag = 1;
				break;
			}
		}

		if (eccflag == 1) {
			if (memcmp(calc_ecc, erased_ecc_vec,
				   actual_eccbytes) == 0) {
				/*
				 * Calculated ECC matches the erased-page
				 * signature: the sector is blank, nothing
				 * to correct.
				 */
			} else {
				buf = &data[info->nand.ecc.size * i];
				bitflip_count = erased_sector_bitflips(
						buf, read_ecc, info);
				if (bitflip_count) {
					stat += bitflip_count;
				} else {
					err_vec[i].error_reported = true;
					is_error_reported = true;
				}
			}
		}

		calc_ecc += ecc->bytes;
		read_ecc += ecc->bytes;
	}

	if (!is_error_reported)
		return stat;

	elm_decode_bch_error_page(info->elm_dev, ecc_vec, err_vec);

	err = 0;
	for (i = 0; i < eccsteps; i++) {
		if (err_vec[i].error_uncorrectable) {
			dev_err(&info->pdev->dev,
				"uncorrectable bit-flips found\n");
			err = -EBADMSG;
		} else if (err_vec[i].error_reported) {
			for (j = 0; j < err_vec[i].error_count; j++) {
				switch (info->ecc_opt) {
				case OMAP_ECC_BCH4_CODE_HW:
					pos = err_vec[i].error_loc[j] +
						BCH4_BIT_PAD;
					break;
				case OMAP_ECC_BCH8_CODE_HW:
				case OMAP_ECC_BCH16_CODE_HW:
					pos = err_vec[i].error_loc[j];
					break;
				default:
					return -EINVAL;
				}
				error_max = (ecc->size + actual_eccbytes) * 8;

				bit_pos = pos % 8;

				byte_pos = (error_max - pos - 1) / 8;

				if (pos < error_max) {
					if (byte_pos < 512) {
						pr_debug("bitflip@dat[%d]=%x\n",
							 byte_pos,
							 data[byte_pos]);
						data[byte_pos] ^= 1 << bit_pos;
					} else {
						pr_debug("bitflip@oob[%d]=%x\n",
							 (byte_pos - 512),
							 spare_ecc[byte_pos - 512]);
						spare_ecc[byte_pos - 512] ^=
							1 << bit_pos;
					}
				} else {
					dev_err(&info->pdev->dev,
						"invalid bit-flip @ %d:%d\n",
						byte_pos, bit_pos);
					err = -EBADMSG;
				}
			}
		}

		stat = max_t(unsigned int, stat, err_vec[i].error_count);

		data += ecc->size;
		spare_ecc += ecc->bytes;
	}

	return (err) ? err : stat;
}

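/*
 * omap_write_page_bch - program a full page, generating the BCH ECC in
 * hardware and placing it in OOB according to the active ECC layout.
 */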
static int omap_write_page_bch(struct nand_chip *chip, const uint8_t *buf,
			       int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;
	uint8_t *ecc_calc = chip->ecc.calc_buf;

	nand_prog_page_begin_op(chip, page, 0, NULL, 0);

	chip->ecc.hwctl(chip, NAND_ECC_WRITE);

	chip->legacy.write_buf(chip, buf, mtd->writesize);

	omap_calculate_ecc_bch_multi(mtd, buf, &ecc_calc[0]);

	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	chip->legacy.write_buf(chip, chip->oob_poi, mtd->oobsize);

	return nand_prog_page_end_op(chip);
}

static int omap_write_subpage_bch(struct nand_chip *chip, u32 offset,
				  u32 data_len, const u8 *buf,
				  int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	u8 *ecc_calc = chip->ecc.calc_buf;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	int ecc_steps = chip->ecc.steps;
	u32 start_step = offset / ecc_size;
	u32 end_step = (offset + data_len - 1) / ecc_size;
	int step, ret = 0;

	nand_prog_page_begin_op(chip, page, 0, NULL, 0);

	chip->ecc.hwctl(chip, NAND_ECC_WRITE);

	chip->legacy.write_buf(chip, buf, mtd->writesize);

	for (step = 0; step < ecc_steps; step++) {
		if (step < start_step || step > end_step)
			memset(ecc_calc, 0xff, ecc_bytes);
		else
			ret = _omap_calculate_ecc_bch(mtd, buf, ecc_calc, step);

		if (ret)
			return ret;

		buf += ecc_size;
		ecc_calc += ecc_bytes;
	}

	ecc_calc = chip->ecc.calc_buf;
	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	chip->legacy.write_buf(chip, chip->oob_poi, mtd->oobsize);

	return nand_prog_page_end_op(chip);
}

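/*
 * omap_read_page_bch - read a full page plus its ECC bytes, compute the BCH
 * syndromes and run the correction step; returns the maximum bitflip count.
 */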
static int omap_read_page_bch(struct nand_chip *chip, uint8_t *buf,
			      int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	uint8_t *ecc_code = chip->ecc.code_buf;
	int stat, ret;
	unsigned int max_bitflips = 0;

	nand_read_page_op(chip, page, 0, NULL, 0);

	chip->ecc.hwctl(chip, NAND_ECC_READ);

	chip->legacy.read_buf(chip, buf, mtd->writesize);

	nand_change_read_column_op(chip,
				   mtd->writesize + BADBLOCK_MARKER_LENGTH,
				   chip->oob_poi + BADBLOCK_MARKER_LENGTH,
				   chip->ecc.total, false);

	omap_calculate_ecc_bch_multi(mtd, buf, ecc_calc);

	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	stat = chip->ecc.correct(chip, buf, ecc_code, ecc_calc);

	if (stat < 0) {
		mtd->ecc_stats.failed++;
	} else {
		mtd->ecc_stats.corrected += stat;
		max_bitflips = max_t(unsigned int, max_bitflips, stat);
	}

	return max_bitflips;
}

static bool is_elm_present(struct omap_nand_info *info,
			   struct device_node *elm_node)
{
	struct platform_device *pdev;

	if (!elm_node) {
		dev_err(&info->pdev->dev, "ELM devicetree node not found\n");
		return false;
	}
	pdev = of_find_device_by_node(elm_node);

	if (!pdev) {
		dev_err(&info->pdev->dev, "ELM device not found\n");
		return false;
	}

	info->elm_dev = &pdev->dev;
	return true;
}

static bool omap2_nand_ecc_check(struct omap_nand_info *info)
{
	bool ecc_needs_bch, ecc_needs_omap_bch, ecc_needs_elm;

	switch (info->ecc_opt) {
	case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
	case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
		ecc_needs_omap_bch = false;
		ecc_needs_bch = true;
		ecc_needs_elm = false;
		break;
	case OMAP_ECC_BCH4_CODE_HW:
	case OMAP_ECC_BCH8_CODE_HW:
	case OMAP_ECC_BCH16_CODE_HW:
		ecc_needs_omap_bch = true;
		ecc_needs_bch = false;
		ecc_needs_elm = true;
		break;
	default:
		ecc_needs_omap_bch = false;
		ecc_needs_bch = false;
		ecc_needs_elm = false;
		break;
	}

	if (ecc_needs_bch && !IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_BCH)) {
		dev_err(&info->pdev->dev,
			"CONFIG_MTD_NAND_ECC_SW_BCH not enabled\n");
		return false;
	}
	if (ecc_needs_omap_bch && !IS_ENABLED(CONFIG_MTD_NAND_OMAP_BCH)) {
		dev_err(&info->pdev->dev,
			"CONFIG_MTD_NAND_OMAP_BCH not enabled\n");
		return false;
	}
	if (ecc_needs_elm && !is_elm_present(info, info->elm_of_node)) {
		dev_err(&info->pdev->dev, "ELM not available\n");
		return false;
	}

	return true;
}

static const char * const nand_xfer_types[] = {
	[NAND_OMAP_PREFETCH_POLLED] = "prefetch-polled",
	[NAND_OMAP_POLLED] = "polled",
	[NAND_OMAP_PREFETCH_DMA] = "prefetch-dma",
	[NAND_OMAP_PREFETCH_IRQ] = "prefetch-irq",
};

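/*
 * omap_get_dt_info - parse the NAND child node of the GPMC. An illustrative
 * sketch of the properties read below (values are placeholders; see the
 * "ti,omap2-nand" / gpmc-nand device-tree binding for the authoritative
 * list):
 *
 *	nand@0,0 {
 *		compatible = "ti,omap2-nand";
 *		reg = <0 0 4>;
 *		ti,nand-ecc-opt = "bch8";
 *		ti,nand-xfer-type = "prefetch-dma";
 *		ti,elm-id = <&elm>;
 *	};
 */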
static int omap_get_dt_info(struct device *dev, struct omap_nand_info *info)
{
	struct device_node *child = dev->of_node;
	int i;
	const char *s;
	u32 cs;

	if (of_property_read_u32(child, "reg", &cs) < 0) {
		dev_err(dev, "reg not found in DT\n");
		return -EINVAL;
	}

	info->gpmc_cs = cs;

	info->elm_of_node = of_parse_phandle(child, "ti,elm-id", 0);
	if (!info->elm_of_node) {
		info->elm_of_node = of_parse_phandle(child, "elm_id", 0);
		if (!info->elm_of_node)
			dev_dbg(dev, "ti,elm-id not in DT\n");
	}

	if (of_property_read_string(child, "ti,nand-ecc-opt", &s)) {
		dev_err(dev, "ti,nand-ecc-opt not found\n");
		return -EINVAL;
	}

	if (!strcmp(s, "sw")) {
		info->ecc_opt = OMAP_ECC_HAM1_CODE_SW;
	} else if (!strcmp(s, "ham1") ||
		   !strcmp(s, "hw") || !strcmp(s, "hw-romcode")) {
		info->ecc_opt = OMAP_ECC_HAM1_CODE_HW;
	} else if (!strcmp(s, "bch4")) {
		if (info->elm_of_node)
			info->ecc_opt = OMAP_ECC_BCH4_CODE_HW;
		else
			info->ecc_opt = OMAP_ECC_BCH4_CODE_HW_DETECTION_SW;
	} else if (!strcmp(s, "bch8")) {
		if (info->elm_of_node)
			info->ecc_opt = OMAP_ECC_BCH8_CODE_HW;
		else
			info->ecc_opt = OMAP_ECC_BCH8_CODE_HW_DETECTION_SW;
	} else if (!strcmp(s, "bch16")) {
		info->ecc_opt = OMAP_ECC_BCH16_CODE_HW;
	} else {
		dev_err(dev, "unrecognized value for ti,nand-ecc-opt\n");
		return -EINVAL;
	}

	if (!of_property_read_string(child, "ti,nand-xfer-type", &s)) {
		for (i = 0; i < ARRAY_SIZE(nand_xfer_types); i++) {
			if (!strcasecmp(s, nand_xfer_types[i])) {
				info->xfer_type = i;
				return 0;
			}
		}

		dev_err(dev, "unrecognized value for ti,nand-xfer-type\n");
		return -EINVAL;
	}

	return 0;
}

static int omap_ooblayout_ecc(struct mtd_info *mtd, int section,
			      struct mtd_oob_region *oobregion)
{
	struct omap_nand_info *info = mtd_to_omap(mtd);
	struct nand_chip *chip = &info->nand;
	int off = BADBLOCK_MARKER_LENGTH;

	if (info->ecc_opt == OMAP_ECC_HAM1_CODE_HW &&
	    !(chip->options & NAND_BUSWIDTH_16))
		off = 1;

	if (section)
		return -ERANGE;

	oobregion->offset = off;
	oobregion->length = chip->ecc.total;

	return 0;
}

static int omap_ooblayout_free(struct mtd_info *mtd, int section,
			       struct mtd_oob_region *oobregion)
{
	struct omap_nand_info *info = mtd_to_omap(mtd);
	struct nand_chip *chip = &info->nand;
	int off = BADBLOCK_MARKER_LENGTH;

	if (info->ecc_opt == OMAP_ECC_HAM1_CODE_HW &&
	    !(chip->options & NAND_BUSWIDTH_16))
		off = 1;

	if (section)
		return -ERANGE;

	off += chip->ecc.total;
	if (off >= mtd->oobsize)
		return -ERANGE;

	oobregion->offset = off;
	oobregion->length = mtd->oobsize - off;

	return 0;
}

static const struct mtd_ooblayout_ops omap_ooblayout_ops = {
	.ecc = omap_ooblayout_ecc,
	.free = omap_ooblayout_free,
};

static int omap_sw_ooblayout_ecc(struct mtd_info *mtd, int section,
				 struct mtd_oob_region *oobregion)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	const struct nand_ecc_sw_bch_conf *engine_conf = nand->ecc.ctx.priv;
	int off = BADBLOCK_MARKER_LENGTH;

	if (section >= engine_conf->nsteps)
		return -ERANGE;

	oobregion->offset = off + (section * (engine_conf->code_size + 1));
	oobregion->length = engine_conf->code_size;

	return 0;
}

static int omap_sw_ooblayout_free(struct mtd_info *mtd, int section,
				  struct mtd_oob_region *oobregion)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	const struct nand_ecc_sw_bch_conf *engine_conf = nand->ecc.ctx.priv;
	int off = BADBLOCK_MARKER_LENGTH;

	if (section)
		return -ERANGE;

	off += ((engine_conf->code_size + 1) * engine_conf->nsteps);
	if (off >= mtd->oobsize)
		return -ERANGE;

	oobregion->offset = off;
	oobregion->length = mtd->oobsize - off;

	return 0;
}

static const struct mtd_ooblayout_ops omap_sw_ooblayout_ops = {
	.ecc = omap_sw_ooblayout_ecc,
	.free = omap_sw_ooblayout_free,
};

static int omap_nand_attach_chip(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct omap_nand_info *info = mtd_to_omap(mtd);
	struct device *dev = &info->pdev->dev;
	int min_oobbytes = BADBLOCK_MARKER_LENGTH;
	int oobbytes_per_step;
	dma_cap_mask_t mask;
	int err;

	if (chip->bbt_options & NAND_BBT_USE_FLASH)
		chip->bbt_options |= NAND_BBT_NO_OOB;
	else
		chip->options |= NAND_SKIP_BBTSCAN;

	switch (info->xfer_type) {
	case NAND_OMAP_PREFETCH_POLLED:
		chip->legacy.read_buf = omap_read_buf_pref;
		chip->legacy.write_buf = omap_write_buf_pref;
		break;

	case NAND_OMAP_POLLED:
		break;

	case NAND_OMAP_PREFETCH_DMA:
		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);
		info->dma = dma_request_chan(dev->parent, "rxtx");

		if (IS_ERR(info->dma)) {
			dev_err(dev, "DMA engine request failed\n");
			return PTR_ERR(info->dma);
		} else {
			struct dma_slave_config cfg;

			memset(&cfg, 0, sizeof(cfg));
			cfg.src_addr = info->phys_base;
			cfg.dst_addr = info->phys_base;
			cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
			cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
			cfg.src_maxburst = 16;
			cfg.dst_maxburst = 16;
			err = dmaengine_slave_config(info->dma, &cfg);
			if (err) {
				dev_err(dev,
					"DMA engine slave config failed: %d\n",
					err);
				return err;
			}
			chip->legacy.read_buf = omap_read_buf_dma_pref;
			chip->legacy.write_buf = omap_write_buf_dma_pref;
		}
		break;

	case NAND_OMAP_PREFETCH_IRQ:
		info->gpmc_irq_fifo = platform_get_irq(info->pdev, 0);
		if (info->gpmc_irq_fifo <= 0)
			return -ENODEV;
		err = devm_request_irq(dev, info->gpmc_irq_fifo,
				       omap_nand_irq, IRQF_SHARED,
				       "gpmc-nand-fifo", info);
		if (err) {
			dev_err(dev, "Requesting IRQ %d, error %d\n",
				info->gpmc_irq_fifo, err);
			info->gpmc_irq_fifo = 0;
			return err;
		}

		info->gpmc_irq_count = platform_get_irq(info->pdev, 1);
		if (info->gpmc_irq_count <= 0)
			return -ENODEV;
		err = devm_request_irq(dev, info->gpmc_irq_count,
				       omap_nand_irq, IRQF_SHARED,
				       "gpmc-nand-count", info);
		if (err) {
			dev_err(dev, "Requesting IRQ %d, error %d\n",
				info->gpmc_irq_count, err);
			info->gpmc_irq_count = 0;
			return err;
		}

		chip->legacy.read_buf = omap_read_buf_irq_pref;
		chip->legacy.write_buf = omap_write_buf_irq_pref;

		break;

	default:
		dev_err(dev, "xfer_type %d not supported!\n", info->xfer_type);
		return -EINVAL;
	}

	if (!omap2_nand_ecc_check(info))
		return -EINVAL;

	if (info->ecc_opt == OMAP_ECC_HAM1_CODE_SW) {
		chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
		chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
		return 0;
	}

	switch (info->ecc_opt) {
	case OMAP_ECC_HAM1_CODE_HW:
		dev_info(dev, "nand: using OMAP_ECC_HAM1_CODE_HW\n");
		chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
		chip->ecc.bytes = 3;
		chip->ecc.size = 512;
		chip->ecc.strength = 1;
		chip->ecc.calculate = omap_calculate_ecc;
		chip->ecc.hwctl = omap_enable_hwecc;
		chip->ecc.correct = omap_correct_data;
		mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
		oobbytes_per_step = chip->ecc.bytes;

		if (!(chip->options & NAND_BUSWIDTH_16))
			min_oobbytes = 1;

		break;

	case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
		pr_info("nand: using OMAP_ECC_BCH4_CODE_HW_DETECTION_SW\n");
		chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
		chip->ecc.size = 512;
		chip->ecc.bytes = 7;
		chip->ecc.strength = 4;
		chip->ecc.hwctl = omap_enable_hwecc_bch;
		chip->ecc.correct = rawnand_sw_bch_correct;
		chip->ecc.calculate = omap_calculate_ecc_bch_sw;
		mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops);
		oobbytes_per_step = chip->ecc.bytes + 1;

		err = rawnand_sw_bch_init(chip);
		if (err) {
			dev_err(dev, "Unable to use BCH library\n");
			return err;
		}
		break;

	case OMAP_ECC_BCH4_CODE_HW:
		pr_info("nand: using OMAP_ECC_BCH4_CODE_HW ECC scheme\n");
		chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
		chip->ecc.size = 512;
		chip->ecc.bytes = 7 + 1;
		chip->ecc.strength = 4;
		chip->ecc.hwctl = omap_enable_hwecc_bch;
		chip->ecc.correct = omap_elm_correct_data;
		chip->ecc.read_page = omap_read_page_bch;
		chip->ecc.write_page = omap_write_page_bch;
		chip->ecc.write_subpage = omap_write_subpage_bch;
		mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
		oobbytes_per_step = chip->ecc.bytes;

		err = elm_config(info->elm_dev, BCH4_ECC,
				 mtd->writesize / chip->ecc.size,
				 chip->ecc.size, chip->ecc.bytes);
		if (err < 0)
			return err;
		break;

	case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
		pr_info("nand: using OMAP_ECC_BCH8_CODE_HW_DETECTION_SW\n");
		chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
		chip->ecc.size = 512;
		chip->ecc.bytes = 13;
		chip->ecc.strength = 8;
		chip->ecc.hwctl = omap_enable_hwecc_bch;
		chip->ecc.correct = rawnand_sw_bch_correct;
		chip->ecc.calculate = omap_calculate_ecc_bch_sw;
		mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops);
		oobbytes_per_step = chip->ecc.bytes + 1;

		err = rawnand_sw_bch_init(chip);
		if (err) {
			dev_err(dev, "unable to use BCH library\n");
			return err;
		}
		break;

	case OMAP_ECC_BCH8_CODE_HW:
		pr_info("nand: using OMAP_ECC_BCH8_CODE_HW ECC scheme\n");
		chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
		chip->ecc.size = 512;
		chip->ecc.bytes = 13 + 1;
		chip->ecc.strength = 8;
		chip->ecc.hwctl = omap_enable_hwecc_bch;
		chip->ecc.correct = omap_elm_correct_data;
		chip->ecc.read_page = omap_read_page_bch;
		chip->ecc.write_page = omap_write_page_bch;
		chip->ecc.write_subpage = omap_write_subpage_bch;
		mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
		oobbytes_per_step = chip->ecc.bytes;

		err = elm_config(info->elm_dev, BCH8_ECC,
				 mtd->writesize / chip->ecc.size,
				 chip->ecc.size, chip->ecc.bytes);
		if (err < 0)
			return err;

		break;

	case OMAP_ECC_BCH16_CODE_HW:
		pr_info("Using OMAP_ECC_BCH16_CODE_HW ECC scheme\n");
		chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
		chip->ecc.size = 512;
		chip->ecc.bytes = 26;
		chip->ecc.strength = 16;
		chip->ecc.hwctl = omap_enable_hwecc_bch;
		chip->ecc.correct = omap_elm_correct_data;
		chip->ecc.read_page = omap_read_page_bch;
		chip->ecc.write_page = omap_write_page_bch;
		chip->ecc.write_subpage = omap_write_subpage_bch;
		mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
		oobbytes_per_step = chip->ecc.bytes;

		err = elm_config(info->elm_dev, BCH16_ECC,
				 mtd->writesize / chip->ecc.size,
				 chip->ecc.size, chip->ecc.bytes);
		if (err < 0)
			return err;

		break;
	default:
		dev_err(dev, "Invalid or unsupported ECC scheme\n");
		return -EINVAL;
	}

	min_oobbytes += (oobbytes_per_step *
			 (mtd->writesize / chip->ecc.size));
	if (mtd->oobsize < min_oobbytes) {
		dev_err(dev,
			"Not enough OOB bytes: required = %d, available=%d\n",
			min_oobbytes, mtd->oobsize);
		return -EINVAL;
	}

	return 0;
}

static const struct nand_controller_ops omap_nand_controller_ops = {
	.attach_chip = omap_nand_attach_chip,
};

static struct nand_controller omap_gpmc_controller;
static bool omap_gpmc_controller_initialized;

static int omap_nand_probe(struct platform_device *pdev)
{
	struct omap_nand_info *info;
	struct mtd_info *mtd;
	struct nand_chip *nand_chip;
	int err;
	struct resource *res;
	struct device *dev = &pdev->dev;

	info = devm_kzalloc(&pdev->dev, sizeof(struct omap_nand_info),
			    GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->pdev = pdev;

	err = omap_get_dt_info(dev, info);
	if (err)
		return err;

	info->ops = gpmc_omap_get_nand_ops(&info->reg, info->gpmc_cs);
	if (!info->ops) {
		dev_err(&pdev->dev, "Failed to get GPMC->NAND interface\n");
		return -ENODEV;
	}

	nand_chip = &info->nand;
	mtd = nand_to_mtd(nand_chip);
	mtd->dev.parent = &pdev->dev;
	nand_set_flash_node(nand_chip, dev->of_node);

	if (!mtd->name) {
		mtd->name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
					   "omap2-nand.%d", info->gpmc_cs);
		if (!mtd->name) {
			dev_err(&pdev->dev, "Failed to set MTD name\n");
			return -ENOMEM;
		}
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	nand_chip->legacy.IO_ADDR_R = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(nand_chip->legacy.IO_ADDR_R))
		return PTR_ERR(nand_chip->legacy.IO_ADDR_R);

	info->phys_base = res->start;

	if (!omap_gpmc_controller_initialized) {
		omap_gpmc_controller.ops = &omap_nand_controller_ops;
		nand_controller_init(&omap_gpmc_controller);
		omap_gpmc_controller_initialized = true;
	}

	nand_chip->controller = &omap_gpmc_controller;

	nand_chip->legacy.IO_ADDR_W = nand_chip->legacy.IO_ADDR_R;
	nand_chip->legacy.cmd_ctrl = omap_hwcontrol;

	info->ready_gpiod = devm_gpiod_get_optional(&pdev->dev, "rb",
						    GPIOD_IN);
	if (IS_ERR(info->ready_gpiod)) {
		dev_err(dev, "failed to get ready gpio\n");
		return PTR_ERR(info->ready_gpiod);
	}

	if (info->ready_gpiod) {
		nand_chip->legacy.dev_ready = omap_dev_ready;
		nand_chip->legacy.chip_delay = 0;
	} else {
		nand_chip->legacy.waitfunc = omap_wait;
		nand_chip->legacy.chip_delay = 50;
	}

	if (info->flash_bbt)
		nand_chip->bbt_options |= NAND_BBT_USE_FLASH;

	nand_chip->options |= info->devsize & NAND_BUSWIDTH_16;

	err = nand_scan(nand_chip, 1);
	if (err)
		goto return_error;

	err = mtd_device_register(mtd, NULL, 0);
	if (err)
		goto cleanup_nand;

	platform_set_drvdata(pdev, mtd);

	return 0;

cleanup_nand:
	nand_cleanup(nand_chip);

return_error:
	if (!IS_ERR_OR_NULL(info->dma))
		dma_release_channel(info->dma);

	rawnand_sw_bch_cleanup(nand_chip);

	return err;
}

static int omap_nand_remove(struct platform_device *pdev)
{
	struct mtd_info *mtd = platform_get_drvdata(pdev);
	struct nand_chip *nand_chip = mtd_to_nand(mtd);
	struct omap_nand_info *info = mtd_to_omap(mtd);
	int ret;

	rawnand_sw_bch_cleanup(nand_chip);

	if (info->dma)
		dma_release_channel(info->dma);
	ret = mtd_device_unregister(mtd);
	WARN_ON(ret);
	nand_cleanup(nand_chip);
	return ret;
}

static const struct of_device_id omap_nand_ids[] = {
	{ .compatible = "ti,omap2-nand", },
	{},
};
MODULE_DEVICE_TABLE(of, omap_nand_ids);

static struct platform_driver omap_nand_driver = {
	.probe = omap_nand_probe,
	.remove = omap_nand_remove,
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = of_match_ptr(omap_nand_ids),
	},
};

module_platform_driver(omap_nand_driver);

MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Glue layer for NAND flash on TI OMAP boards");