1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31
32#include <linux/module.h>
33#include <linux/delay.h>
34#include <linux/errno.h>
35#include <linux/err.h>
36#include <linux/sched.h>
37#include <linux/slab.h>
38#include <linux/mm.h>
39#include <linux/nmi.h>
40#include <linux/types.h>
41#include <linux/mtd/mtd.h>
42#include <linux/mtd/nand.h>
43#include <linux/mtd/nand_ecc.h>
44#include <linux/mtd/nand_bch.h>
45#include <linux/interrupt.h>
46#include <linux/bitops.h>
47#include <linux/io.h>
48#include <linux/mtd/partitions.h>
49#include <linux/of.h>
50
/*
 * Forward declarations: these helpers are defined later in this file but
 * are needed by the bad-block marking and OOB write paths above.
 */
static int nand_get_device(struct mtd_info *mtd, int new_state);

static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
			     struct mtd_oob_ops *ops);
55
56
57static int nand_ooblayout_ecc_sp(struct mtd_info *mtd, int section,
58 struct mtd_oob_region *oobregion)
59{
60 struct nand_chip *chip = mtd_to_nand(mtd);
61 struct nand_ecc_ctrl *ecc = &chip->ecc;
62
63 if (section > 1)
64 return -ERANGE;
65
66 if (!section) {
67 oobregion->offset = 0;
68 oobregion->length = 4;
69 } else {
70 oobregion->offset = 6;
71 oobregion->length = ecc->total - 4;
72 }
73
74 return 0;
75}
76
77static int nand_ooblayout_free_sp(struct mtd_info *mtd, int section,
78 struct mtd_oob_region *oobregion)
79{
80 if (section > 1)
81 return -ERANGE;
82
83 if (mtd->oobsize == 16) {
84 if (section)
85 return -ERANGE;
86
87 oobregion->length = 8;
88 oobregion->offset = 8;
89 } else {
90 oobregion->length = 2;
91 if (!section)
92 oobregion->offset = 3;
93 else
94 oobregion->offset = 6;
95 }
96
97 return 0;
98}
99
/* Default OOB layout callbacks for small-page (512-byte) chips. */
const struct mtd_ooblayout_ops nand_ooblayout_sp_ops = {
	.ecc = nand_ooblayout_ecc_sp,
	.free = nand_ooblayout_free_sp,
};
EXPORT_SYMBOL_GPL(nand_ooblayout_sp_ops);
105
106static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section,
107 struct mtd_oob_region *oobregion)
108{
109 struct nand_chip *chip = mtd_to_nand(mtd);
110 struct nand_ecc_ctrl *ecc = &chip->ecc;
111
112 if (section)
113 return -ERANGE;
114
115 oobregion->length = ecc->total;
116 oobregion->offset = mtd->oobsize - oobregion->length;
117
118 return 0;
119}
120
121static int nand_ooblayout_free_lp(struct mtd_info *mtd, int section,
122 struct mtd_oob_region *oobregion)
123{
124 struct nand_chip *chip = mtd_to_nand(mtd);
125 struct nand_ecc_ctrl *ecc = &chip->ecc;
126
127 if (section)
128 return -ERANGE;
129
130 oobregion->length = mtd->oobsize - ecc->total - 2;
131 oobregion->offset = 2;
132
133 return 0;
134}
135
/* Default OOB layout callbacks for large-page chips. */
const struct mtd_ooblayout_ops nand_ooblayout_lp_ops = {
	.ecc = nand_ooblayout_ecc_lp,
	.free = nand_ooblayout_free_lp,
};
EXPORT_SYMBOL_GPL(nand_ooblayout_lp_ops);
141
142
143
144
145
146static int nand_ooblayout_ecc_lp_hamming(struct mtd_info *mtd, int section,
147 struct mtd_oob_region *oobregion)
148{
149 struct nand_chip *chip = mtd_to_nand(mtd);
150 struct nand_ecc_ctrl *ecc = &chip->ecc;
151
152 if (section)
153 return -ERANGE;
154
155 switch (mtd->oobsize) {
156 case 64:
157 oobregion->offset = 40;
158 break;
159 case 128:
160 oobregion->offset = 80;
161 break;
162 default:
163 return -EINVAL;
164 }
165
166 oobregion->length = ecc->total;
167 if (oobregion->offset + oobregion->length > mtd->oobsize)
168 return -ERANGE;
169
170 return 0;
171}
172
173static int nand_ooblayout_free_lp_hamming(struct mtd_info *mtd, int section,
174 struct mtd_oob_region *oobregion)
175{
176 struct nand_chip *chip = mtd_to_nand(mtd);
177 struct nand_ecc_ctrl *ecc = &chip->ecc;
178 int ecc_offset = 0;
179
180 if (section < 0 || section > 1)
181 return -ERANGE;
182
183 switch (mtd->oobsize) {
184 case 64:
185 ecc_offset = 40;
186 break;
187 case 128:
188 ecc_offset = 80;
189 break;
190 default:
191 return -EINVAL;
192 }
193
194 if (section == 0) {
195 oobregion->offset = 2;
196 oobregion->length = ecc_offset - 2;
197 } else {
198 oobregion->offset = ecc_offset + ecc->total;
199 oobregion->length = mtd->oobsize - oobregion->offset;
200 }
201
202 return 0;
203}
204
/* OOB layout callbacks for the legacy large-page 1-bit Hamming scheme. */
static const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = {
	.ecc = nand_ooblayout_ecc_lp_hamming,
	.free = nand_ooblayout_free_lp_hamming,
};
209
210static int check_offs_len(struct mtd_info *mtd,
211 loff_t ofs, uint64_t len)
212{
213 struct nand_chip *chip = mtd_to_nand(mtd);
214 int ret = 0;
215
216
217 if (ofs & ((1ULL << chip->phys_erase_shift) - 1)) {
218 pr_debug("%s: unaligned address\n", __func__);
219 ret = -EINVAL;
220 }
221
222
223 if (len & ((1ULL << chip->phys_erase_shift) - 1)) {
224 pr_debug("%s: length not block aligned\n", __func__);
225 ret = -EINVAL;
226 }
227
228 return ret;
229}
230
231
232
233
234
235
236
/**
 * nand_release_device - [GENERIC] release chip
 * @mtd: MTD device structure
 *
 * Release the controller and wake up anyone sleeping in nand_get_device().
 */
static void nand_release_device(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	/* Release the controller and the chip */
	spin_lock(&chip->controller->lock);
	chip->controller->active = NULL;
	chip->state = FL_READY;
	wake_up(&chip->controller->wq);
	spin_unlock(&chip->controller->lock);
}
248
249
250
251
252
253
254
255static uint8_t nand_read_byte(struct mtd_info *mtd)
256{
257 struct nand_chip *chip = mtd_to_nand(mtd);
258 return readb(chip->IO_ADDR_R);
259}
260
261
262
263
264
265
266
267
268static uint8_t nand_read_byte16(struct mtd_info *mtd)
269{
270 struct nand_chip *chip = mtd_to_nand(mtd);
271 return (uint8_t) cpu_to_le16(readw(chip->IO_ADDR_R));
272}
273
274
275
276
277
278
279
280static u16 nand_read_word(struct mtd_info *mtd)
281{
282 struct nand_chip *chip = mtd_to_nand(mtd);
283 return readw(chip->IO_ADDR_R);
284}
285
286
287
288
289
290
291
292
/**
 * nand_select_chip - [DEFAULT] control CE line
 * @mtd: MTD device structure
 * @chipnr: chipnumber to select, -1 for deselect
 *
 * Default select function for one-die chips: only deselect (-1) and die 0
 * are meaningful; any other die number indicates a driver bug.
 */
static void nand_select_chip(struct mtd_info *mtd, int chipnr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	switch (chipnr) {
	case -1:
		/* Deselect: deassert nCE via the control callback */
		chip->cmd_ctrl(mtd, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE);
		break;
	case 0:
		break;

	default:
		BUG();
	}
}
308
309
310
311
312
313
314
315
316static void nand_write_byte(struct mtd_info *mtd, uint8_t byte)
317{
318 struct nand_chip *chip = mtd_to_nand(mtd);
319
320 chip->write_buf(mtd, &byte, 1);
321}
322
323
324
325
326
327
328
329
330static void nand_write_byte16(struct mtd_info *mtd, uint8_t byte)
331{
332 struct nand_chip *chip = mtd_to_nand(mtd);
333 uint16_t word = byte;
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351 chip->write_buf(mtd, (uint8_t *)&word, 2);
352}
353
354
355
356
357
358
359
360
361
362static void nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
363{
364 struct nand_chip *chip = mtd_to_nand(mtd);
365
366 iowrite8_rep(chip->IO_ADDR_W, buf, len);
367}
368
369
370
371
372
373
374
375
376
377static void nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
378{
379 struct nand_chip *chip = mtd_to_nand(mtd);
380
381 ioread8_rep(chip->IO_ADDR_R, buf, len);
382}
383
384
385
386
387
388
389
390
391
392static void nand_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
393{
394 struct nand_chip *chip = mtd_to_nand(mtd);
395 u16 *p = (u16 *) buf;
396
397 iowrite16_rep(chip->IO_ADDR_W, p, len >> 1);
398}
399
400
401
402
403
404
405
406
407
408static void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
409{
410 struct nand_chip *chip = mtd_to_nand(mtd);
411 u16 *p = (u16 *) buf;
412
413 ioread16_rep(chip->IO_ADDR_R, p, len >> 1);
414}
415
416
417
418
419
420
421
422
/**
 * nand_block_bad - [DEFAULT] read bad block marker from the chip
 * @mtd: MTD device structure
 * @ofs: offset from device start
 *
 * Check if the block is bad by reading the bad-block marker byte in the OOB
 * area of the first page (or last/second page, depending on bbt_options).
 */
static int nand_block_bad(struct mtd_info *mtd, loff_t ofs)
{
	int page, page_end, res;
	struct nand_chip *chip = mtd_to_nand(mtd);
	u8 bad;

	if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
		ofs += mtd->erasesize - mtd->writesize;

	page = (int)(ofs >> chip->page_shift) & chip->pagemask;
	page_end = page + (chip->bbt_options & NAND_BBT_SCAN2NDPAGE ? 2 : 1);

	for (; page < page_end; page++) {
		res = chip->ecc.read_oob(mtd, chip, page);
		if (res)
			return res;

		bad = chip->oob_poi[chip->badblockpos];

		/*
		 * 8-bit markers: anything but 0xff means bad. Otherwise
		 * count the set bits against the per-chip threshold.
		 */
		if (likely(chip->badblockbits == 8))
			res = bad != 0xFF;
		else
			res = hweight8(bad) < chip->badblockbits;
		if (res)
			return res;
	}

	return 0;
}
452
453
454
455
456
457
458
459
460
461
/**
 * nand_default_block_markbad - [DEFAULT] mark a block bad via bad-block marker
 * @mtd: MTD device structure
 * @ofs: offset from device start
 *
 * This is the default implementation, which can be overridden by a hardware
 * specific driver. It writes a bad block marker to the OOB area of the
 * block's first page (or second/last pages per bbt_options) and returns the
 * first error encountered, after attempting all writes.
 */
static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mtd_oob_ops ops;
	uint8_t buf[2] = { 0, 0 };
	int ret = 0, res, i = 0;

	memset(&ops, 0, sizeof(ops));
	ops.oobbuf = buf;
	ops.ooboffs = chip->badblockpos;
	if (chip->options & NAND_BUSWIDTH_16) {
		/* 16-bit bus: marker access must be word aligned/sized */
		ops.ooboffs &= ~0x01;
		ops.len = ops.ooblen = 2;
	} else {
		ops.len = ops.ooblen = 1;
	}
	ops.mode = MTD_OPS_PLACE_OOB;

	/* Write to first/last page(s) if necessary */
	if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
		ofs += mtd->erasesize - mtd->writesize;
	do {
		res = nand_do_write_oob(mtd, ofs, &ops);
		/* Keep the first error; still try the second page */
		if (!ret)
			ret = res;

		i++;
		ofs += mtd->writesize;
	} while ((chip->bbt_options & NAND_BBT_SCAN2NDPAGE) && i < 2);

	return ret;
}
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
/**
 * nand_block_markbad_lowlevel - mark a block bad
 * @mtd: MTD device structure
 * @ofs: offset from device start
 *
 * Generic bad block marking steps:
 *  (1) erase the affected block, so the OOB marker can be written cleanly
 *  (2) write the bad block marker to the OOB area of the block (unless
 *      NAND_BBT_NO_OOB_BBM is set)
 *  (3) update the BBT
 *
 * The first error encountered in (2) or (3) is retained and returned at the
 * end, after all steps have been attempted.
 */
static int nand_block_markbad_lowlevel(struct mtd_info *mtd, loff_t ofs)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int res, ret = 0;

	if (!(chip->bbt_options & NAND_BBT_NO_OOB_BBM)) {
		struct erase_info einfo;

		/* Attempt erase before marking OOB */
		memset(&einfo, 0, sizeof(einfo));
		einfo.mtd = mtd;
		einfo.addr = ofs;
		einfo.len = 1ULL << chip->phys_erase_shift;
		nand_erase_nand(mtd, &einfo, 0);

		/* Write bad block marker to OOB */
		nand_get_device(mtd, FL_WRITING);
		ret = chip->block_markbad(mtd, ofs);
		nand_release_device(mtd);
	}

	/* Mark block bad in BBT */
	if (chip->bbt) {
		res = nand_markbad_bbt(mtd, ofs);
		if (!ret)
			ret = res;
	}

	if (!ret)
		mtd->ecc_stats.badblocks++;

	return ret;
}
545
546
547
548
549
550
551
552
553static int nand_check_wp(struct mtd_info *mtd)
554{
555 struct nand_chip *chip = mtd_to_nand(mtd);
556
557
558 if (chip->options & NAND_BROKEN_XD)
559 return 0;
560
561
562 chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
563 return (chip->read_byte(mtd) & NAND_STATUS_WP) ? 0 : 1;
564}
565
566
567
568
569
570
571
572
573static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs)
574{
575 struct nand_chip *chip = mtd_to_nand(mtd);
576
577 if (!chip->bbt)
578 return 0;
579
580 return nand_isreserved_bbt(mtd, ofs);
581}
582
583
584
585
586
587
588
589
590
591
592static int nand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int allowbbt)
593{
594 struct nand_chip *chip = mtd_to_nand(mtd);
595
596 if (!chip->bbt)
597 return chip->block_bad(mtd, ofs);
598
599
600 return nand_isbad_bbt(mtd, ofs, allowbbt);
601}
602
603
604
605
606
607
608
609
610
/**
 * panic_nand_wait_ready - [GENERIC] Wait for the ready pin after commands
 * @mtd: MTD device structure
 * @timeo: Timeout in ms
 *
 * Helper for nand_wait_ready() used when sleeping is not possible
 * (interrupt context or oops in progress): busy-polls the ready pin.
 */
static void panic_nand_wait_ready(struct mtd_info *mtd, unsigned long timeo)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int i;

	/* Busy-poll: we must not sleep here */
	for (i = 0; i < timeo; i++) {
		if (chip->dev_ready(mtd))
			break;
		touch_softlockup_watchdog();
		mdelay(1);
	}
}
624
625
626
627
628
629
630
/**
 * nand_wait_ready - [GENERIC] Wait for the ready pin after commands
 * @mtd: MTD device structure
 *
 * Wait for the ready pin after a command, warning (rate-limited) if the
 * 400ms timeout expires.
 */
void nand_wait_ready(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	unsigned long timeo = 400;

	if (in_interrupt() || oops_in_progress)
		return panic_nand_wait_ready(mtd, timeo);

	/* Wait until command is processed or timeout occurs */
	timeo = jiffies + msecs_to_jiffies(timeo);
	do {
		if (chip->dev_ready(mtd))
			return;
		cond_resched();
	} while (time_before(jiffies, timeo));

	/* Re-check once: we may have been preempted past the deadline */
	if (!chip->dev_ready(mtd))
		pr_warn_ratelimited("timeout while waiting for chip to become ready\n");
}
EXPORT_SYMBOL_GPL(nand_wait_ready);
651
652
653
654
655
656
657
658
659static void nand_wait_status_ready(struct mtd_info *mtd, unsigned long timeo)
660{
661 register struct nand_chip *chip = mtd_to_nand(mtd);
662
663 timeo = jiffies + msecs_to_jiffies(timeo);
664 do {
665 if ((chip->read_byte(mtd) & NAND_STATUS_READY))
666 break;
667 touch_softlockup_watchdog();
668 } while (time_before(jiffies, timeo));
669};
670
671
672
673
674
675
676
677
678
679
680
/**
 * nand_command - [DEFAULT] Send command to NAND device
 * @mtd: MTD device structure
 * @command: the command to be sent
 * @column: the column address for this command, -1 if none
 * @page_addr: the page address for this command, -1 if none
 *
 * Send command to NAND device. This function is used for small page devices
 * (512 Bytes per page).
 */
static void nand_command(struct mtd_info *mtd, unsigned int command,
			 int column, int page_addr)
{
	register struct nand_chip *chip = mtd_to_nand(mtd);
	int ctrl = NAND_CTRL_CLE | NAND_CTRL_CHANGE;

	/* Write out the command to the device */
	if (command == NAND_CMD_SEQIN) {
		int readcmd;

		if (column >= mtd->writesize) {
			/* OOB area */
			column -= mtd->writesize;
			readcmd = NAND_CMD_READOOB;
		} else if (column < 256) {
			/* First 256 bytes --> READ0 */
			readcmd = NAND_CMD_READ0;
		} else {
			column -= 256;
			readcmd = NAND_CMD_READ1;
		}
		chip->cmd_ctrl(mtd, readcmd, ctrl);
		ctrl &= ~NAND_CTRL_CHANGE;
	}
	chip->cmd_ctrl(mtd, command, ctrl);

	/* Address cycle, when necessary */
	ctrl = NAND_CTRL_ALE | NAND_CTRL_CHANGE;
	/* Serially input address */
	if (column != -1) {
		/* Adjust columns for 16 bit buswidth */
		if (chip->options & NAND_BUSWIDTH_16 &&
				!nand_opcode_8bits(command))
			column >>= 1;
		chip->cmd_ctrl(mtd, column, ctrl);
		ctrl &= ~NAND_CTRL_CHANGE;
	}
	if (page_addr != -1) {
		chip->cmd_ctrl(mtd, page_addr, ctrl);
		ctrl &= ~NAND_CTRL_CHANGE;
		chip->cmd_ctrl(mtd, page_addr >> 8, ctrl);
		/* One more address cycle for devices > 32MiB */
		if (chip->chipsize > (32 << 20))
			chip->cmd_ctrl(mtd, page_addr >> 16, ctrl);
	}
	chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);

	/*
	 * Program and erase have their own busy handlers; status and
	 * sequential in need no delay.
	 */
	switch (command) {

	case NAND_CMD_PAGEPROG:
	case NAND_CMD_ERASE1:
	case NAND_CMD_ERASE2:
	case NAND_CMD_SEQIN:
	case NAND_CMD_STATUS:
	case NAND_CMD_READID:
	case NAND_CMD_SET_FEATURES:
		return;

	case NAND_CMD_RESET:
		if (chip->dev_ready)
			break;
		udelay(chip->chip_delay);
		chip->cmd_ctrl(mtd, NAND_CMD_STATUS,
			       NAND_CTRL_CLE | NAND_CTRL_CHANGE);
		chip->cmd_ctrl(mtd,
			       NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
		/* EZ-NAND can take up to 250ms as per ONFI v4.0 */
		nand_wait_status_ready(mtd, 250);
		return;

		/* This applies to read commands */
	default:
		/*
		 * If we don't have access to the busy pin, we apply the given
		 * command delay.
		 */
		if (!chip->dev_ready) {
			udelay(chip->chip_delay);
			return;
		}
	}
	/*
	 * Apply this short delay always to ensure that we do wait tWB in
	 * any case on any machine.
	 */
	ndelay(100);

	nand_wait_ready(mtd);
}
774
775static void nand_ccs_delay(struct nand_chip *chip)
776{
777
778
779
780
781 if (!(chip->options & NAND_WAIT_TCCS))
782 return;
783
784
785
786
787
788 if (chip->data_interface && chip->data_interface->timings.sdr.tCCS_min)
789 ndelay(chip->data_interface->timings.sdr.tCCS_min / 1000);
790 else
791 ndelay(500);
792}
793
794
795
796
797
798
799
800
801
802
803
804
/**
 * nand_command_lp - [DEFAULT] Send command to NAND large page device
 * @mtd: MTD device structure
 * @command: the command to be sent
 * @column: the column address for this command, -1 if none
 * @page_addr: the page address for this command, -1 if none
 *
 * Send command to NAND device. This is the version for the new large page
 * devices. We don't have the separate regions as we have in the small page
 * devices. We must emulate NAND_CMD_READOOB to keep the code compatible.
 */
static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
			    int column, int page_addr)
{
	register struct nand_chip *chip = mtd_to_nand(mtd);

	/* Emulate NAND_CMD_READOOB */
	if (command == NAND_CMD_READOOB) {
		column += mtd->writesize;
		command = NAND_CMD_READ0;
	}

	/* Command latch cycle */
	chip->cmd_ctrl(mtd, command, NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);

	if (column != -1 || page_addr != -1) {
		int ctrl = NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE;

		/* Serially input address */
		if (column != -1) {
			/* Adjust columns for 16 bit buswidth */
			if (chip->options & NAND_BUSWIDTH_16 &&
					!nand_opcode_8bits(command))
				column >>= 1;
			chip->cmd_ctrl(mtd, column, ctrl);
			ctrl &= ~NAND_CTRL_CHANGE;

			/* Only output a single addr cycle for 8bits opcodes */
			if (!nand_opcode_8bits(command))
				chip->cmd_ctrl(mtd, column >> 8, ctrl);
		}
		if (page_addr != -1) {
			chip->cmd_ctrl(mtd, page_addr, ctrl);
			chip->cmd_ctrl(mtd, page_addr >> 8,
				       NAND_NCE | NAND_ALE);
			/* One more address cycle for devices > 128MiB */
			if (chip->chipsize > (128 << 20))
				chip->cmd_ctrl(mtd, page_addr >> 16,
					       NAND_NCE | NAND_ALE);
		}
	}
	chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);

	/*
	 * Program and erase have their own busy handlers; status, sequential
	 * in and status need no delay.
	 */
	switch (command) {

	case NAND_CMD_CACHEDPROG:
	case NAND_CMD_PAGEPROG:
	case NAND_CMD_ERASE1:
	case NAND_CMD_ERASE2:
	case NAND_CMD_SEQIN:
	case NAND_CMD_STATUS:
	case NAND_CMD_READID:
	case NAND_CMD_SET_FEATURES:
		return;

	case NAND_CMD_RNDIN:
		nand_ccs_delay(chip);
		return;

	case NAND_CMD_RESET:
		if (chip->dev_ready)
			break;
		udelay(chip->chip_delay);
		chip->cmd_ctrl(mtd, NAND_CMD_STATUS,
			       NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
		chip->cmd_ctrl(mtd, NAND_CMD_NONE,
			       NAND_NCE | NAND_CTRL_CHANGE);
		/* EZ-NAND can take up to 250ms as per ONFI v4.0 */
		nand_wait_status_ready(mtd, 250);
		return;

	case NAND_CMD_RNDOUT:
		/* No ready / busy check necessary */
		chip->cmd_ctrl(mtd, NAND_CMD_RNDOUTSTART,
			       NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
		chip->cmd_ctrl(mtd, NAND_CMD_NONE,
			       NAND_NCE | NAND_CTRL_CHANGE);

		nand_ccs_delay(chip);
		return;

	case NAND_CMD_READ0:
		chip->cmd_ctrl(mtd, NAND_CMD_READSTART,
			       NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
		chip->cmd_ctrl(mtd, NAND_CMD_NONE,
			       NAND_NCE | NAND_CTRL_CHANGE);

		/* This applies to read commands, intentional fall through */
	default:
		/*
		 * If we don't have access to the busy pin, we apply the given
		 * command delay.
		 */
		if (!chip->dev_ready) {
			udelay(chip->chip_delay);
			return;
		}
	}

	/*
	 * Apply this short delay always to ensure that we do wait tWB in
	 * any case on any machine.
	 */
	ndelay(100);

	nand_wait_ready(mtd);
}
915
916
917
918
919
920
921
922
923
/**
 * panic_nand_get_device - [GENERIC] Get chip for selected access
 * @chip: the nand chip descriptor
 * @mtd: MTD device structure
 * @new_state: the state which is requested
 *
 * Used when in panic mode: takes over the controller without waiting.
 */
static void panic_nand_get_device(struct nand_chip *chip,
		      struct mtd_info *mtd, int new_state)
{
	/* Hardware controller shared among independent devices */
	chip->controller->active = chip;
	chip->state = new_state;
}
931
932
933
934
935
936
937
938
/**
 * nand_get_device - [GENERIC] Get chip for selected access
 * @mtd: MTD device structure
 * @new_state: the state which is requested
 *
 * Get the device and lock it for exclusive access. Sleeps on the
 * controller's wait queue until the chip becomes available, then marks it
 * with @new_state. Always returns 0.
 */
static int
nand_get_device(struct mtd_info *mtd, int new_state)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	spinlock_t *lock = &chip->controller->lock;
	wait_queue_head_t *wq = &chip->controller->wq;
	DECLARE_WAITQUEUE(wait, current);
retry:
	spin_lock(lock);

	/* Hardware controller shared among independent devices */
	if (!chip->controller->active)
		chip->controller->active = chip;

	if (chip->controller->active == chip && chip->state == FL_READY) {
		chip->state = new_state;
		spin_unlock(lock);
		return 0;
	}
	if (new_state == FL_PM_SUSPENDED) {
		/* A suspended controller lets further chips suspend too */
		if (chip->controller->active->state == FL_PM_SUSPENDED) {
			chip->state = FL_PM_SUSPENDED;
			spin_unlock(lock);
			return 0;
		}
	}
	/* Queue ourselves before dropping the lock to avoid a lost wakeup */
	set_current_state(TASK_UNINTERRUPTIBLE);
	add_wait_queue(wq, &wait);
	spin_unlock(lock);
	schedule();
	remove_wait_queue(wq, &wait);
	goto retry;
}
972
973
974
975
976
977
978
979
980
981
982
983static void panic_nand_wait(struct mtd_info *mtd, struct nand_chip *chip,
984 unsigned long timeo)
985{
986 int i;
987 for (i = 0; i < timeo; i++) {
988 if (chip->dev_ready) {
989 if (chip->dev_ready(mtd))
990 break;
991 } else {
992 if (chip->read_byte(mtd) & NAND_STATUS_READY)
993 break;
994 }
995 mdelay(1);
996 }
997}
998
999
1000
1001
1002
1003
1004
1005
/**
 * nand_wait - [DEFAULT] wait until the command is done
 * @mtd: MTD device structure
 * @chip: NAND chip structure
 *
 * Wait for command done. This applies to erase and program only.
 * Returns the chip's status register value.
 */
static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
{

	int status;
	unsigned long timeo = 400;

	/*
	 * Apply this short delay always to ensure that we do wait tWB in any
	 * case on any machine.
	 */
	ndelay(100);

	chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);

	if (in_interrupt() || oops_in_progress)
		panic_nand_wait(mtd, chip, timeo);
	else {
		timeo = jiffies + msecs_to_jiffies(timeo);
		do {
			if (chip->dev_ready) {
				if (chip->dev_ready(mtd))
					break;
			} else {
				if (chip->read_byte(mtd) & NAND_STATUS_READY)
					break;
			}
			cond_resched();
		} while (time_before(jiffies, timeo));
	}

	status = (int)chip->read_byte(mtd);
	/* This can happen in case of a timeout or a buggy dev_ready */
	WARN_ON(!(status & NAND_STATUS_READY));
	return status;
}
1041
1042
1043
1044
1045
1046
1047
1048
1049
/**
 * nand_reset_data_interface - Reset data interface and timings
 * @chip: The NAND chip
 *
 * Reset the data interface and timings to ONFI SDR timing mode 0.
 *
 * Returns 0 for success or negative error code otherwise.
 */
static int nand_reset_data_interface(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	const struct nand_data_interface *conf;
	int ret;

	if (!chip->setup_data_interface)
		return 0;

	/*
	 * The ONFI specification says:
	 * "
	 * To transition from NV-DDR or NV-DDR2 to the SDR data
	 * interface, the host shall use the Reset (FFh) command
	 * using SDR timing mode 0. A device in any timing mode is
	 * required to recognize Reset (FFh) command issued in SDR
	 * timing mode 0.
	 * "
	 *
	 * Configure the data interface in SDR mode and set the
	 * timings to timing mode 0.
	 */
	conf = nand_get_default_data_interface();
	ret = chip->setup_data_interface(mtd, conf, false);
	if (ret)
		pr_err("Failed to configure data interface to SDR timing mode 0\n");

	return ret;
}
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093static int nand_setup_data_interface(struct nand_chip *chip)
1094{
1095 struct mtd_info *mtd = nand_to_mtd(chip);
1096 int ret;
1097
1098 if (!chip->setup_data_interface || !chip->data_interface)
1099 return 0;
1100
1101
1102
1103
1104
1105 if (chip->onfi_version) {
1106 u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = {
1107 chip->onfi_timing_mode_default,
1108 };
1109
1110 ret = chip->onfi_set_features(mtd, chip,
1111 ONFI_FEATURE_ADDR_TIMING_MODE,
1112 tmode_param);
1113 if (ret)
1114 goto err;
1115 }
1116
1117 ret = chip->setup_data_interface(mtd, chip->data_interface, false);
1118err:
1119 return ret;
1120}
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
/**
 * nand_init_data_interface - find the best data interface and timings
 * @chip: The NAND chip
 *
 * Find the best data interface and NAND timings supported by the chip and
 * the driver. Updates chip->onfi_timing_mode_default with the mode that was
 * successfully verified by the controller, and allocates
 * chip->data_interface (freed by nand_release_data_interface()).
 *
 * Returns 0 for success or negative error code otherwise.
 */
static int nand_init_data_interface(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int modes, mode, ret;

	if (!chip->setup_data_interface)
		return 0;

	/*
	 * First try to identify the best timings from ONFI parameters and
	 * if the NAND does not support ONFI, fallback to the default ONFI
	 * timing mode.
	 */
	modes = onfi_get_async_timing_mode(chip);
	if (modes == ONFI_TIMING_MODE_UNKNOWN) {
		if (!chip->onfi_timing_mode_default)
			return 0;

		modes = GENMASK(chip->onfi_timing_mode_default, 0);
	}

	chip->data_interface = kzalloc(sizeof(*chip->data_interface),
				       GFP_KERNEL);
	if (!chip->data_interface)
		return -ENOMEM;

	/* Try the fastest supported mode first, then fall back. */
	for (mode = fls(modes) - 1; mode >= 0; mode--) {
		ret = onfi_init_data_interface(chip, chip->data_interface,
					       NAND_SDR_IFACE, mode);
		if (ret)
			continue;

		/* Check-only pass: the controller validates the timings */
		ret = chip->setup_data_interface(mtd, chip->data_interface,
						 true);
		if (!ret) {
			chip->onfi_timing_mode_default = mode;
			break;
		}
	}

	return 0;
}
1178
/* Free the data interface allocated by nand_init_data_interface(). */
static void nand_release_data_interface(struct nand_chip *chip)
{
	kfree(chip->data_interface);
}
1183
1184
1185
1186
1187
1188
1189
1190
/**
 * nand_reset - Reset and initialize a NAND device
 * @chip: The NAND chip
 * @chipnr: Internal die id
 *
 * Returns 0 for success or negative error code otherwise.
 */
int nand_reset(struct nand_chip *chip, int chipnr)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	ret = nand_reset_data_interface(chip);
	if (ret)
		return ret;

	/*
	 * The CS line has to be released before we can apply the new NAND
	 * interface settings, hence this weird ->select_chip() dance.
	 */
	chip->select_chip(mtd, chipnr);
	chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
	chip->select_chip(mtd, -1);

	chip->select_chip(mtd, chipnr);
	ret = nand_setup_data_interface(chip);
	chip->select_chip(mtd, -1);
	if (ret)
		return ret;

	return 0;
}
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
/**
 * __nand_unlock - [REPLACEABLE] unlocks specified locked blocks
 * @mtd: mtd info
 * @ofs: offset to start unlock from
 * @len: length to unlock
 * @invert: when = 0, unlock the range of blocks within the lower and
 *                    upper boundary address
 *          when = 1, unlock the range of blocks outside the boundaries
 *                    of the lower and upper boundary address
 *
 * Returns unlock status.
 */
static int __nand_unlock(struct mtd_info *mtd, loff_t ofs,
			 uint64_t len, int invert)
{
	int ret = 0;
	int status, page;
	struct nand_chip *chip = mtd_to_nand(mtd);

	/* Submit address of first page to unlock */
	page = ofs >> chip->page_shift;
	chip->cmdfunc(mtd, NAND_CMD_UNLOCK1, -1, page & chip->pagemask);

	/* Submit address of last page to unlock */
	page = (ofs + len) >> chip->page_shift;
	chip->cmdfunc(mtd, NAND_CMD_UNLOCK2, -1,
		      (page | invert) & chip->pagemask);

	/* Call wait ready function */
	status = chip->waitfunc(mtd, chip);
	/* See if device thinks it succeeded */
	if (status & NAND_STATUS_FAIL) {
		pr_debug("%s: error status = 0x%08x\n",
			 __func__, status);
		ret = -EIO;
	}

	return ret;
}
1256
1257
1258
1259
1260
1261
1262
1263
1264
/**
 * nand_unlock - [REPLACEABLE] unlocks specified locked blocks
 * @mtd: mtd info
 * @ofs: offset to start unlock from
 * @len: length to unlock
 *
 * Returns unlock status.
 */
int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	int ret = 0;
	int chipnr;
	struct nand_chip *chip = mtd_to_nand(mtd);

	pr_debug("%s: start = 0x%012llx, len = %llu\n",
		 __func__, (unsigned long long)ofs, len);

	if (check_offs_len(mtd, ofs, len))
		return -EINVAL;

	/* Align to last block address if size addresses end of the device */
	if (ofs + len == mtd->size)
		len -= mtd->erasesize;

	nand_get_device(mtd, FL_UNLOCKING);

	/* Shift to get chip number */
	chipnr = ofs >> chip->chip_shift;

	/*
	 * Reset the chip.
	 * If we want to check the WP through READ STATUS and check the bit 7
	 * we must reset the chip
	 * some operation can also clear the bit 7 of status register
	 * eg. erase/program a locked block
	 */
	nand_reset(chip, chipnr);

	chip->select_chip(mtd, chipnr);

	/* Check, if it is write protected */
	if (nand_check_wp(mtd)) {
		pr_debug("%s: device is write protected!\n",
			 __func__);
		ret = -EIO;
		goto out;
	}

	ret = __nand_unlock(mtd, ofs, len, 0);

out:
	chip->select_chip(mtd, -1);
	nand_release_device(mtd);

	return ret;
}
EXPORT_SYMBOL(nand_unlock);
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1329{
1330 int ret = 0;
1331 int chipnr, status, page;
1332 struct nand_chip *chip = mtd_to_nand(mtd);
1333
1334 pr_debug("%s: start = 0x%012llx, len = %llu\n",
1335 __func__, (unsigned long long)ofs, len);
1336
1337 if (check_offs_len(mtd, ofs, len))
1338 return -EINVAL;
1339
1340 nand_get_device(mtd, FL_LOCKING);
1341
1342
1343 chipnr = ofs >> chip->chip_shift;
1344
1345
1346
1347
1348
1349
1350
1351
1352 nand_reset(chip, chipnr);
1353
1354 chip->select_chip(mtd, chipnr);
1355
1356
1357 if (nand_check_wp(mtd)) {
1358 pr_debug("%s: device is write protected!\n",
1359 __func__);
1360 status = MTD_ERASE_FAILED;
1361 ret = -EIO;
1362 goto out;
1363 }
1364
1365
1366 page = ofs >> chip->page_shift;
1367 chip->cmdfunc(mtd, NAND_CMD_LOCK, -1, page & chip->pagemask);
1368
1369
1370 status = chip->waitfunc(mtd, chip);
1371
1372 if (status & NAND_STATUS_FAIL) {
1373 pr_debug("%s: error status = 0x%08x\n",
1374 __func__, status);
1375 ret = -EIO;
1376 goto out;
1377 }
1378
1379 ret = __nand_unlock(mtd, ofs, len, 0x1);
1380
1381out:
1382 chip->select_chip(mtd, -1);
1383 nand_release_device(mtd);
1384
1385 return ret;
1386}
1387EXPORT_SYMBOL(nand_lock);
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408static int nand_check_erased_buf(void *buf, int len, int bitflips_threshold)
1409{
1410 const unsigned char *bitmap = buf;
1411 int bitflips = 0;
1412 int weight;
1413
1414 for (; len && ((uintptr_t)bitmap) % sizeof(long);
1415 len--, bitmap++) {
1416 weight = hweight8(*bitmap);
1417 bitflips += BITS_PER_BYTE - weight;
1418 if (unlikely(bitflips > bitflips_threshold))
1419 return -EBADMSG;
1420 }
1421
1422 for (; len >= sizeof(long);
1423 len -= sizeof(long), bitmap += sizeof(long)) {
1424 weight = hweight_long(*((unsigned long *)bitmap));
1425 bitflips += BITS_PER_LONG - weight;
1426 if (unlikely(bitflips > bitflips_threshold))
1427 return -EBADMSG;
1428 }
1429
1430 for (; len > 0; len--, bitmap++) {
1431 weight = hweight8(*bitmap);
1432 bitflips += BITS_PER_BYTE - weight;
1433 if (unlikely(bitflips > bitflips_threshold))
1434 return -EBADMSG;
1435 }
1436
1437 return bitflips;
1438}
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
/**
 * nand_check_erased_ecc_chunk - check if an ECC chunk contains (almost) only
 *				 0xff data
 * @data: data buffer to test
 * @datalen: data length
 * @ecc: ECC buffer
 * @ecclen: ECC length
 * @extraoob: extra OOB buffer
 * @extraooblen: extra OOB length
 * @bitflips_threshold: maximum number of bitflips
 *
 * Check if a data buffer and its associated ECC and OOB data contain only
 * the 0xff pattern, meaning the underlying region has been erased and is
 * ready to be programmed. When the region is deemed erased, the buffers are
 * rewritten to a clean 0xff pattern so the caller sees erased content.
 *
 * Returns a positive number of bitflips, or -EBADMSG when the cumulative
 * number of bitflips exceeds @bitflips_threshold.
 */
int nand_check_erased_ecc_chunk(void *data, int datalen,
				void *ecc, int ecclen,
				void *extraoob, int extraooblen,
				int bitflips_threshold)
{
	int data_bitflips = 0, ecc_bitflips = 0, extraoob_bitflips = 0;

	data_bitflips = nand_check_erased_buf(data, datalen,
					      bitflips_threshold);
	if (data_bitflips < 0)
		return data_bitflips;

	/* The threshold is cumulative over data + ECC + extra OOB */
	bitflips_threshold -= data_bitflips;

	ecc_bitflips = nand_check_erased_buf(ecc, ecclen, bitflips_threshold);
	if (ecc_bitflips < 0)
		return ecc_bitflips;

	bitflips_threshold -= ecc_bitflips;

	extraoob_bitflips = nand_check_erased_buf(extraoob, extraooblen,
						  bitflips_threshold);
	if (extraoob_bitflips < 0)
		return extraoob_bitflips;

	/* Scrub the bitflips so the caller sees clean erased buffers */
	if (data_bitflips)
		memset(data, 0xff, datalen);

	if (ecc_bitflips)
		memset(ecc, 0xff, ecclen);

	if (extraoob_bitflips)
		memset(extraoob, 0xff, extraooblen);

	return data_bitflips + ecc_bitflips + extraoob_bitflips;
}
EXPORT_SYMBOL(nand_check_erased_ecc_chunk);
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527static int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1528 uint8_t *buf, int oob_required, int page)
1529{
1530 chip->read_buf(mtd, buf, mtd->writesize);
1531 if (oob_required)
1532 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1533 return 0;
1534}
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
/**
 * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc
 * @mtd: mtd info structure
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * We need a special oob layout and handling even when OOB isn't used:
 * data and OOB bytes are interleaved per ECC step (prepad, ECC, postpad).
 */
static int nand_read_page_raw_syndrome(struct mtd_info *mtd,
				       struct nand_chip *chip, uint8_t *buf,
				       int oob_required, int page)
{
	int eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	uint8_t *oob = chip->oob_poi;
	int steps, size;

	for (steps = chip->ecc.steps; steps > 0; steps--) {
		chip->read_buf(mtd, buf, eccsize);
		buf += eccsize;

		if (chip->ecc.prepad) {
			chip->read_buf(mtd, oob, chip->ecc.prepad);
			oob += chip->ecc.prepad;
		}

		chip->read_buf(mtd, oob, eccbytes);
		oob += eccbytes;

		if (chip->ecc.postpad) {
			chip->read_buf(mtd, oob, chip->ecc.postpad);
			oob += chip->ecc.postpad;
		}
	}

	/* Read whatever OOB bytes remain after the per-step regions */
	size = mtd->oobsize - (oob - chip->oob_poi);
	if (size)
		chip->read_buf(mtd, oob, size);

	return 0;
}
1579
1580
1581
1582
1583
1584
1585
1586
1587
/**
 * nand_read_page_swecc - [REPLACEABLE] software ECC based page read function
 * @mtd: mtd info structure
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * Returns the maximum number of bitflips corrected in any ECC step.
 */
static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
				uint8_t *buf, int oob_required, int page)
{
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *p = buf;
	uint8_t *ecc_calc = chip->buffers->ecccalc;
	uint8_t *ecc_code = chip->buffers->ecccode;
	unsigned int max_bitflips = 0;

	/* Raw read always fetches the OOB: ECC bytes live there */
	chip->ecc.read_page_raw(mtd, chip, buf, 1, page);

	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
		chip->ecc.calculate(mtd, p, &ecc_calc[i]);

	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	eccsteps = chip->ecc.steps;
	p = buf;

	for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int stat;

		stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}
	return max_bitflips;
}
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
1636 uint32_t data_offs, uint32_t readlen, uint8_t *bufpoi,
1637 int page)
1638{
1639 int start_step, end_step, num_steps, ret;
1640 uint8_t *p;
1641 int data_col_addr, i, gaps = 0;
1642 int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
1643 int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
1644 int index, section = 0;
1645 unsigned int max_bitflips = 0;
1646 struct mtd_oob_region oobregion = { };
1647
1648
1649 start_step = data_offs / chip->ecc.size;
1650 end_step = (data_offs + readlen - 1) / chip->ecc.size;
1651 num_steps = end_step - start_step + 1;
1652 index = start_step * chip->ecc.bytes;
1653
1654
1655 datafrag_len = num_steps * chip->ecc.size;
1656 eccfrag_len = num_steps * chip->ecc.bytes;
1657
1658 data_col_addr = start_step * chip->ecc.size;
1659
1660 if (data_col_addr != 0)
1661 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, data_col_addr, -1);
1662
1663 p = bufpoi + data_col_addr;
1664 chip->read_buf(mtd, p, datafrag_len);
1665
1666
1667 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
1668 chip->ecc.calculate(mtd, p, &chip->buffers->ecccalc[i]);
1669
1670
1671
1672
1673
1674 ret = mtd_ooblayout_find_eccregion(mtd, index, §ion, &oobregion);
1675 if (ret)
1676 return ret;
1677
1678 if (oobregion.length < eccfrag_len)
1679 gaps = 1;
1680
1681 if (gaps) {
1682 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1);
1683 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1684 } else {
1685
1686
1687
1688
1689 aligned_pos = oobregion.offset & ~(busw - 1);
1690 aligned_len = eccfrag_len;
1691 if (oobregion.offset & (busw - 1))
1692 aligned_len++;
1693 if ((oobregion.offset + (num_steps * chip->ecc.bytes)) &
1694 (busw - 1))
1695 aligned_len++;
1696
1697 chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
1698 mtd->writesize + aligned_pos, -1);
1699 chip->read_buf(mtd, &chip->oob_poi[aligned_pos], aligned_len);
1700 }
1701
1702 ret = mtd_ooblayout_get_eccbytes(mtd, chip->buffers->ecccode,
1703 chip->oob_poi, index, eccfrag_len);
1704 if (ret)
1705 return ret;
1706
1707 p = bufpoi + data_col_addr;
1708 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
1709 int stat;
1710
1711 stat = chip->ecc.correct(mtd, p,
1712 &chip->buffers->ecccode[i], &chip->buffers->ecccalc[i]);
1713 if (stat == -EBADMSG &&
1714 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
1715
1716 stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
1717 &chip->buffers->ecccode[i],
1718 chip->ecc.bytes,
1719 NULL, 0,
1720 chip->ecc.strength);
1721 }
1722
1723 if (stat < 0) {
1724 mtd->ecc_stats.failed++;
1725 } else {
1726 mtd->ecc_stats.corrected += stat;
1727 max_bitflips = max_t(unsigned int, max_bitflips, stat);
1728 }
1729 }
1730 return max_bitflips;
1731}
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
/**
 * nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function
 * @mtd: mtd info structure
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * Not for syndrome calculating ECC controllers which need a special oob
 * layout.
 */
static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
				uint8_t *buf, int oob_required, int page)
{
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *p = buf;
	uint8_t *ecc_calc = chip->buffers->ecccalc;
	uint8_t *ecc_code = chip->buffers->ecccode;
	unsigned int max_bitflips = 0;

	/* Read the page chunk by chunk, letting the controller compute ECC */
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		chip->ecc.hwctl(mtd, NAND_ECC_READ);
		chip->read_buf(mtd, p, eccsize);
		chip->ecc.calculate(mtd, p, &ecc_calc[i]);
	}
	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);

	/* Pick the on-flash ECC bytes out of the OOB area */
	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	eccsteps = chip->ecc.steps;
	p = buf;

	/* Second pass: correct each chunk against the stored ECC */
	for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int stat;

		stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
		if (stat == -EBADMSG &&
		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
			/* check for empty pages with bitflips */
			stat = nand_check_erased_ecc_chunk(p, eccsize,
						&ecc_code[i], eccbytes,
						NULL, 0,
						chip->ecc.strength);
		}

		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}
	return max_bitflips;
}
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
/**
 * nand_read_page_hwecc_oob_first - [REPLACEABLE] hw ecc, read oob first
 * @mtd: mtd info structure
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * Hardware ECC for large page chips that require the OOB (holding the
 * stored ECC bytes) to be read before the data.
 */
static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
	struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
{
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *p = buf;
	uint8_t *ecc_code = chip->buffers->ecccode;
	uint8_t *ecc_calc = chip->buffers->ecccalc;
	unsigned int max_bitflips = 0;

	/* Read the OOB area first, then reposition to the data area */
	chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
	chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);

	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int stat;

		chip->ecc.hwctl(mtd, NAND_ECC_READ);
		chip->read_buf(mtd, p, eccsize);
		chip->ecc.calculate(mtd, p, &ecc_calc[i]);

		/*
		 * NOTE(review): calc_ecc is passed as NULL here even though
		 * ecc_calc was just filled in — the controller's correct()
		 * hook is expected to use its internal state; confirm for
		 * drivers relying on the calc_ecc argument.
		 */
		stat = chip->ecc.correct(mtd, p, &ecc_code[i], NULL);
		if (stat == -EBADMSG &&
		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
			/* check for empty pages with bitflips */
			stat = nand_check_erased_ecc_chunk(p, eccsize,
						&ecc_code[i], eccbytes,
						NULL, 0,
						chip->ecc.strength);
		}

		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}
	return max_bitflips;
}
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
/**
 * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read
 * @mtd: mtd info structure
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * The hw generator calculates the error syndrome automatically. Therefore we
 * need a special oob layout and handling (data/prepad/ecc/postpad
 * interleaved per ECC step).
 */
static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
				   uint8_t *buf, int oob_required, int page)
{
	int i, eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	int eccpadbytes = eccbytes + chip->ecc.prepad + chip->ecc.postpad;
	uint8_t *p = buf;
	uint8_t *oob = chip->oob_poi;
	unsigned int max_bitflips = 0;

	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int stat;

		chip->ecc.hwctl(mtd, NAND_ECC_READ);
		chip->read_buf(mtd, p, eccsize);

		if (chip->ecc.prepad) {
			chip->read_buf(mtd, oob, chip->ecc.prepad);
			oob += chip->ecc.prepad;
		}

		chip->ecc.hwctl(mtd, NAND_ECC_READSYN);
		chip->read_buf(mtd, oob, eccbytes);
		stat = chip->ecc.correct(mtd, p, oob, NULL);

		oob += eccbytes;

		if (chip->ecc.postpad) {
			chip->read_buf(mtd, oob, chip->ecc.postpad);
			oob += chip->ecc.postpad;
		}

		if (stat == -EBADMSG &&
		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
			/* check for empty pages with bitflips */
			stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
							   oob - eccpadbytes,
							   eccpadbytes,
							   NULL, 0,
							   chip->ecc.strength);
		}

		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}

	/* Calculate remaining oob bytes */
	i = mtd->oobsize - (oob - chip->oob_poi);
	if (i)
		chip->read_buf(mtd, oob, i);

	return max_bitflips;
}
1923
1924
1925
1926
1927
1928
1929
1930
1931static uint8_t *nand_transfer_oob(struct mtd_info *mtd, uint8_t *oob,
1932 struct mtd_oob_ops *ops, size_t len)
1933{
1934 struct nand_chip *chip = mtd_to_nand(mtd);
1935 int ret;
1936
1937 switch (ops->mode) {
1938
1939 case MTD_OPS_PLACE_OOB:
1940 case MTD_OPS_RAW:
1941 memcpy(oob, chip->oob_poi + ops->ooboffs, len);
1942 return oob + len;
1943
1944 case MTD_OPS_AUTO_OOB:
1945 ret = mtd_ooblayout_get_databytes(mtd, oob, chip->oob_poi,
1946 ops->ooboffs, len);
1947 BUG_ON(ret);
1948 return oob + len;
1949
1950 default:
1951 BUG();
1952 }
1953 return NULL;
1954}
1955
1956
1957
1958
1959
1960
1961
1962
1963
1964
1965static int nand_setup_read_retry(struct mtd_info *mtd, int retry_mode)
1966{
1967 struct nand_chip *chip = mtd_to_nand(mtd);
1968
1969 pr_debug("setting READ RETRY mode %d\n", retry_mode);
1970
1971 if (retry_mode >= chip->read_retries)
1972 return -EINVAL;
1973
1974 if (!chip->setup_read_retry)
1975 return -EOPNOTSUPP;
1976
1977 return chip->setup_read_retry(mtd, retry_mode);
1978}
1979
1980
1981
1982
1983
1984
1985
1986
1987
/**
 * nand_do_read_ops - [INTERN] Read data with ECC
 * @mtd: MTD device structure
 * @from: offset to read from
 * @ops: oob ops structure
 *
 * Internal function. Called with chip held. Returns max bitflips seen in any
 * page read, a negative error code on failure, or -EBADMSG if an
 * uncorrectable ECC error occurred even after read retries.
 */
static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
			    struct mtd_oob_ops *ops)
{
	int chipnr, page, realpage, col, bytes, aligned, oob_required;
	struct nand_chip *chip = mtd_to_nand(mtd);
	int ret = 0;
	uint32_t readlen = ops->len;
	uint32_t oobreadlen = ops->ooblen;
	uint32_t max_oobsize = mtd_oobavail(mtd, ops);

	uint8_t *bufpoi, *oob, *buf;
	int use_bufpoi;
	unsigned int max_bitflips = 0;
	int retry_mode = 0;
	bool ecc_fail = false;

	chipnr = (int)(from >> chip->chip_shift);
	chip->select_chip(mtd, chipnr);

	realpage = (int)(from >> chip->page_shift);
	page = realpage & chip->pagemask;

	col = (int)(from & (mtd->writesize - 1));

	buf = ops->datbuf;
	oob = ops->oobbuf;
	oob_required = oob ? 1 : 0;

	while (1) {
		/* Snapshot failure count so per-page failures are detectable */
		unsigned int ecc_failures = mtd->ecc_stats.failed;

		bytes = min(mtd->writesize - col, readlen);
		aligned = (bytes == mtd->writesize);

		/* Decide whether the internal bounce buffer is needed */
		if (!aligned)
			use_bufpoi = 1;
		else if (chip->options & NAND_USE_BOUNCE_BUFFER)
			use_bufpoi = !virt_addr_valid(buf) ||
				     !IS_ALIGNED((unsigned long)buf,
						 chip->buf_align);
		else
			use_bufpoi = 0;

		/* Is the current page in the buffer? */
		if (realpage != chip->pagebuf || oob) {
			bufpoi = use_bufpoi ? chip->buffers->databuf : buf;

			if (use_bufpoi && aligned)
				pr_debug("%s: using read bounce buffer for buf@%p\n",
						 __func__, buf);

read_retry:
			if (nand_standard_page_accessors(&chip->ecc))
				chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);

			/*
			 * Now read the page into the buffer. Absent an error,
			 * the read methods return max bitflips per ecc step.
			 */
			if (unlikely(ops->mode == MTD_OPS_RAW))
				ret = chip->ecc.read_page_raw(mtd, chip, bufpoi,
							      oob_required,
							      page);
			else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) &&
				 !oob)
				ret = chip->ecc.read_subpage(mtd, chip,
							col, bytes, bufpoi,
							page);
			else
				ret = chip->ecc.read_page(mtd, chip, bufpoi,
							  oob_required, page);
			if (ret < 0) {
				if (use_bufpoi)
					/* Invalidate page cache */
					chip->pagebuf = -1;
				break;
			}

			/* Transfer not aligned data */
			if (use_bufpoi) {
				/* Cache the full, clean, ECC-mode page only */
				if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
				    !(mtd->ecc_stats.failed - ecc_failures) &&
				    (ops->mode != MTD_OPS_RAW)) {
					chip->pagebuf = realpage;
					chip->pagebuf_bitflips = ret;
				} else {
					/* Invalidate page cache */
					chip->pagebuf = -1;
				}
				memcpy(buf, chip->buffers->databuf + col, bytes);
			}

			if (unlikely(oob)) {
				int toread = min(oobreadlen, max_oobsize);

				if (toread) {
					oob = nand_transfer_oob(mtd,
						oob, ops, toread);
					oobreadlen -= toread;
				}
			}

			if (chip->options & NAND_NEED_READRDY) {
				/* Apply delay or wait for ready/busy pin */
				if (!chip->dev_ready)
					udelay(chip->chip_delay);
				else
					nand_wait_ready(mtd);
			}

			if (mtd->ecc_stats.failed - ecc_failures) {
				if (retry_mode + 1 < chip->read_retries) {
					/* Shift Vt threshold and re-read */
					retry_mode++;
					ret = nand_setup_read_retry(mtd,
							retry_mode);
					if (ret < 0)
						break;

					/* Reset failures; retry */
					mtd->ecc_stats.failed = ecc_failures;
					goto read_retry;
				} else {
					/* No more retry modes; real failure */
					ecc_fail = true;
				}
			}

			buf += bytes;
			max_bitflips = max_t(unsigned int, max_bitflips, ret);
		} else {
			/* Page already cached: serve it from the buffer */
			memcpy(buf, chip->buffers->databuf + col, bytes);
			buf += bytes;
			max_bitflips = max_t(unsigned int, max_bitflips,
					     chip->pagebuf_bitflips);
		}

		readlen -= bytes;

		/* Reset to retry mode 0 */
		if (retry_mode) {
			ret = nand_setup_read_retry(mtd, 0);
			if (ret < 0)
				break;
			retry_mode = 0;
		}

		if (!readlen)
			break;

		/* For subsequent reads align to page boundary */
		col = 0;
		/* Increment page address */
		realpage++;

		page = realpage & chip->pagemask;
		/* Check, if we cross a chip boundary */
		if (!page) {
			chipnr++;
			chip->select_chip(mtd, -1);
			chip->select_chip(mtd, chipnr);
		}
	}
	chip->select_chip(mtd, -1);

	ops->retlen = ops->len - (size_t) readlen;
	if (oob)
		ops->oobretlen = ops->ooblen - oobreadlen;

	if (ret < 0)
		return ret;

	if (ecc_fail)
		return -EBADMSG;

	return max_bitflips;
}
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174
2175static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
2176 size_t *retlen, uint8_t *buf)
2177{
2178 struct mtd_oob_ops ops;
2179 int ret;
2180
2181 nand_get_device(mtd, FL_READING);
2182 memset(&ops, 0, sizeof(ops));
2183 ops.len = len;
2184 ops.datbuf = buf;
2185 ops.mode = MTD_OPS_PLACE_OOB;
2186 ret = nand_do_read_ops(mtd, from, &ops);
2187 *retlen = ops.retlen;
2188 nand_release_device(mtd);
2189 return ret;
2190}
2191
2192
2193
2194
2195
2196
2197
/**
 * nand_read_oob_std - [REPLACEABLE] the most common OOB data read function
 * @mtd: mtd info structure
 * @chip: nand chip info structure
 * @page: page number to read
 *
 * Reads the full OOB area of @page into chip->oob_poi.
 */
int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page)
{
	chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
	return 0;
}
EXPORT_SYMBOL(nand_read_oob_std);
2205
2206
2207
2208
2209
2210
2211
2212
/**
 * nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC
 *			    with syndromes
 * @mtd: mtd info structure
 * @chip: nand chip info structure
 * @page: page number to read
 *
 * The OOB chunks are interleaved with the data on syndrome layouts, so hop
 * over each data chunk and collect only the spare bytes.
 */
int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
			   int page)
{
	int length = mtd->oobsize;
	int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
	int eccsize = chip->ecc.size;
	uint8_t *bufpoi = chip->oob_poi;
	int i, toread, sndrnd = 0, pos;

	/* Position at the first OOB chunk, right after the first data chunk */
	chip->cmdfunc(mtd, NAND_CMD_READ0, chip->ecc.size, page);
	for (i = 0; i < chip->ecc.steps; i++) {
		if (sndrnd) {
			/* Skip the data part of each subsequent step */
			pos = eccsize + i * (eccsize + chunk);
			if (mtd->writesize > 512)
				chip->cmdfunc(mtd, NAND_CMD_RNDOUT, pos, -1);
			else
				chip->cmdfunc(mtd, NAND_CMD_READ0, pos, page);
		} else
			sndrnd = 1;
		toread = min_t(int, length, chunk);
		chip->read_buf(mtd, bufpoi, toread);
		bufpoi += toread;
		length -= toread;
	}
	/* Read whatever trailing OOB bytes remain after the last chunk */
	if (length > 0)
		chip->read_buf(mtd, bufpoi, length);

	return 0;
}
EXPORT_SYMBOL(nand_read_oob_syndrome);
2243
2244
2245
2246
2247
2248
2249
/**
 * nand_write_oob_std - [REPLACEABLE] the most common OOB data write function
 * @mtd: mtd info structure
 * @chip: nand chip info structure
 * @page: page number to write
 *
 * Writes chip->oob_poi to the spare area of @page. Returns -EIO if the chip
 * reports a program failure.
 */
int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page)
{
	int status = 0;
	const uint8_t *buf = chip->oob_poi;
	int length = mtd->oobsize;

	/* Start programming at the OOB column (just past the data area) */
	chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
	chip->write_buf(mtd, buf, length);
	/* Send command to program the OOB data */
	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);

	status = chip->waitfunc(mtd, chip);

	return status & NAND_STATUS_FAIL ? -EIO : 0;
}
EXPORT_SYMBOL(nand_write_oob_std);
2266
2267
2268
2269
2270
2271
2272
2273
/**
 * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC
 *			     with syndrome - only for large page flash
 * @mtd: mtd info structure
 * @chip: nand chip info structure
 * @page: page number to write
 */
int nand_write_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
			    int page)
{
	int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
	int eccsize = chip->ecc.size, length = mtd->oobsize;
	int i, len, pos, status = 0, sndcmd = 0, steps = chip->ecc.steps;
	const uint8_t *bufpoi = chip->oob_poi;

	/*
	 * data-ecc-data-ecc ... ecc-oob
	 * or
	 * data-pad-ecc-pad-data-pad .... ecc-pad-oob
	 */
	if (!chip->ecc.prepad && !chip->ecc.postpad) {
		/* No padding: all OOB sits at the end, write it in one go */
		pos = steps * (eccsize + chunk);
		steps = 0;
	} else
		pos = eccsize;

	chip->cmdfunc(mtd, NAND_CMD_SEQIN, pos, page);
	for (i = 0; i < steps; i++) {
		if (sndcmd) {
			if (mtd->writesize <= 512) {
				/*
				 * Small-page chips have no random-in: pad the
				 * skipped data area with 0xFF instead.
				 */
				uint32_t fill = 0xFFFFFFFF;

				len = eccsize;
				while (len > 0) {
					int num = min_t(int, len, 4);
					chip->write_buf(mtd, (uint8_t *)&fill,
							num);
					len -= num;
				}
			} else {
				/* Jump over the data chunk of this step */
				pos = eccsize + i * (eccsize + chunk);
				chip->cmdfunc(mtd, NAND_CMD_RNDIN, pos, -1);
			}
		} else
			sndcmd = 1;
		len = min_t(int, length, chunk);
		chip->write_buf(mtd, bufpoi, len);
		bufpoi += len;
		length -= len;
	}
	if (length > 0)
		chip->write_buf(mtd, bufpoi, length);

	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
	status = chip->waitfunc(mtd, chip);

	return status & NAND_STATUS_FAIL ? -EIO : 0;
}
EXPORT_SYMBOL(nand_write_oob_syndrome);
2326
2327
2328
2329
2330
2331
2332
2333
2334
/**
 * nand_do_read_oob - [INTERN] NAND read out-of-band
 * @mtd: MTD device structure
 * @from: offset to read from
 * @ops: oob operations description structure
 *
 * NAND read out-of-band data from the spare area. Returns 0, -EUCLEAN if
 * bitflips were corrected during the read, or -EBADMSG on uncorrectable
 * errors.
 */
static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
			    struct mtd_oob_ops *ops)
{
	int page, realpage, chipnr;
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mtd_ecc_stats stats;
	int readlen = ops->ooblen;
	int len;
	uint8_t *buf = ops->oobbuf;
	int ret = 0;

	pr_debug("%s: from = 0x%08Lx, len = %i\n",
			__func__, (unsigned long long)from, readlen);

	/* Snapshot stats to detect new failures/corrections afterwards */
	stats = mtd->ecc_stats;

	len = mtd_oobavail(mtd, ops);

	if (unlikely(ops->ooboffs >= len)) {
		pr_debug("%s: attempt to start read outside oob\n",
				__func__);
		return -EINVAL;
	}

	/* Do not allow reads past end of device */
	if (unlikely(from >= mtd->size ||
		     ops->ooboffs + readlen > ((mtd->size >> chip->page_shift) -
					(from >> chip->page_shift)) * len)) {
		pr_debug("%s: attempt to read beyond end of device\n",
				__func__);
		return -EINVAL;
	}

	chipnr = (int)(from >> chip->chip_shift);
	chip->select_chip(mtd, chipnr);

	/* Shift to get page */
	realpage = (int)(from >> chip->page_shift);
	page = realpage & chip->pagemask;

	while (1) {
		if (ops->mode == MTD_OPS_RAW)
			ret = chip->ecc.read_oob_raw(mtd, chip, page);
		else
			ret = chip->ecc.read_oob(mtd, chip, page);

		if (ret < 0)
			break;

		len = min(len, readlen);
		buf = nand_transfer_oob(mtd, buf, ops, len);

		if (chip->options & NAND_NEED_READRDY) {
			/* Apply delay or wait for ready/busy pin */
			if (!chip->dev_ready)
				udelay(chip->chip_delay);
			else
				nand_wait_ready(mtd);
		}

		readlen -= len;
		if (!readlen)
			break;

		/* Increment page address */
		realpage++;

		page = realpage & chip->pagemask;
		/* Check, if we cross a chip boundary */
		if (!page) {
			chipnr++;
			chip->select_chip(mtd, -1);
			chip->select_chip(mtd, chipnr);
		}
	}
	chip->select_chip(mtd, -1);

	ops->oobretlen = ops->ooblen - readlen;

	if (ret < 0)
		return ret;

	if (mtd->ecc_stats.failed - stats.failed)
		return -EBADMSG;

	return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
}
2422
2423
2424
2425
2426
2427
2428
2429
2430
2431static int nand_read_oob(struct mtd_info *mtd, loff_t from,
2432 struct mtd_oob_ops *ops)
2433{
2434 int ret;
2435
2436 ops->retlen = 0;
2437
2438
2439 if (ops->datbuf && (from + ops->len) > mtd->size) {
2440 pr_debug("%s: attempt to read beyond end of device\n",
2441 __func__);
2442 return -EINVAL;
2443 }
2444
2445 if (ops->mode != MTD_OPS_PLACE_OOB &&
2446 ops->mode != MTD_OPS_AUTO_OOB &&
2447 ops->mode != MTD_OPS_RAW)
2448 return -ENOTSUPP;
2449
2450 nand_get_device(mtd, FL_READING);
2451
2452 if (!ops->datbuf)
2453 ret = nand_do_read_oob(mtd, from, ops);
2454 else
2455 ret = nand_do_read_ops(mtd, from, ops);
2456
2457 nand_release_device(mtd);
2458 return ret;
2459}
2460
2461
2462
2463
2464
2465
2466
2467
2468
2469
2470
2471
/**
 * nand_write_page_raw - [INTERN] raw page write function
 * @mtd: mtd info structure
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * Not for syndrome calculating ECC controllers, which use a special oob
 * layout.
 */
static int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
			       const uint8_t *buf, int oob_required, int page)
{
	chip->write_buf(mtd, buf, mtd->writesize);
	if (oob_required)
		chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);

	return 0;
}
2481
2482
2483
2484
2485
2486
2487
2488
2489
2490
2491
/**
 * nand_write_page_raw_syndrome - [INTERN] raw page write function
 * @mtd: mtd info structure
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * We need a special oob layout and handling even when ECC isn't checked:
 * data, prepad, "ECC" bytes and postpad are interleaved per step.
 */
static int nand_write_page_raw_syndrome(struct mtd_info *mtd,
					struct nand_chip *chip,
					const uint8_t *buf, int oob_required,
					int page)
{
	int eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	uint8_t *oob = chip->oob_poi;
	int steps, size;

	for (steps = chip->ecc.steps; steps > 0; steps--) {
		chip->write_buf(mtd, buf, eccsize);
		buf += eccsize;

		if (chip->ecc.prepad) {
			chip->write_buf(mtd, oob, chip->ecc.prepad);
			oob += chip->ecc.prepad;
		}

		chip->write_buf(mtd, oob, eccbytes);
		oob += eccbytes;

		if (chip->ecc.postpad) {
			chip->write_buf(mtd, oob, chip->ecc.postpad);
			oob += chip->ecc.postpad;
		}
	}

	/* Write out any remaining OOB bytes after the last step */
	size = mtd->oobsize - (oob - chip->oob_poi);
	if (size)
		chip->write_buf(mtd, oob, size);

	return 0;
}
2526
2527
2528
2529
2530
2531
2532
2533
2534static int nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
2535 const uint8_t *buf, int oob_required,
2536 int page)
2537{
2538 int i, eccsize = chip->ecc.size, ret;
2539 int eccbytes = chip->ecc.bytes;
2540 int eccsteps = chip->ecc.steps;
2541 uint8_t *ecc_calc = chip->buffers->ecccalc;
2542 const uint8_t *p = buf;
2543
2544
2545 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
2546 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
2547
2548 ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
2549 chip->ecc.total);
2550 if (ret)
2551 return ret;
2552
2553 return chip->ecc.write_page_raw(mtd, chip, buf, 1, page);
2554}
2555
2556
2557
2558
2559
2560
2561
2562
2563
/**
 * nand_write_page_hwecc - [REPLACEABLE] hardware ECC based page write function
 * @mtd: mtd info structure
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 */
static int nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
				 const uint8_t *buf, int oob_required,
				 int page)
{
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *ecc_calc = chip->buffers->ecccalc;
	const uint8_t *p = buf;

	/* Let the controller compute ECC while streaming out each chunk */
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
		chip->write_buf(mtd, p, eccsize);
		chip->ecc.calculate(mtd, p, &ecc_calc[i]);
	}

	/* Place the computed ECC bytes at their layout positions in OOB */
	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);

	return 0;
}
2589
2590
2591
2592
2593
2594
2595
2596
2597
2598
2599
2600
/**
 * nand_write_subpage_hwecc - [REPLACEABLE] hardware ECC based subpage write
 * @mtd: mtd info structure
 * @chip: nand chip info structure
 * @offset: column address of subpage within the page
 * @data_len: data length
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 */
static int nand_write_subpage_hwecc(struct mtd_info *mtd,
				struct nand_chip *chip, uint32_t offset,
				uint32_t data_len, const uint8_t *buf,
				int oob_required, int page)
{
	uint8_t *oob_buf  = chip->oob_poi;
	uint8_t *ecc_calc = chip->buffers->ecccalc;
	int ecc_size      = chip->ecc.size;
	int ecc_bytes     = chip->ecc.bytes;
	int ecc_steps     = chip->ecc.steps;
	uint32_t start_step = offset / ecc_size;
	uint32_t end_step   = (offset + data_len - 1) / ecc_size;
	int oob_bytes       = mtd->oobsize / ecc_steps;
	int step, ret;

	for (step = 0; step < ecc_steps; step++) {
		/* configure controller for WRITE access */
		chip->ecc.hwctl(mtd, NAND_ECC_WRITE);

		/* write data (untouched subpages already masked by 0xFF) */
		chip->write_buf(mtd, buf, ecc_size);

		/* mask ECC of un-touched subpages by padding 0xFF */
		if ((step < start_step) || (step > end_step))
			memset(ecc_calc, 0xff, ecc_bytes);
		else
			chip->ecc.calculate(mtd, buf, ecc_calc);

		/* mask OOB of un-touched subpages by padding 0xFF */
		/* if oob_required, preserve OOB metadata of written subpage */
		if (!oob_required || (step < start_step) || (step > end_step))
			memset(oob_buf, 0xff, oob_bytes);

		buf += ecc_size;
		ecc_calc += ecc_bytes;
		oob_buf  += oob_bytes;
	}

	/* copy calculated ECC for whole page to chip->buffer->oob */
	/* this include masked-value(0xFF) for unwritten subpages */
	ecc_calc = chip->buffers->ecccalc;
	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	/* write OOB buffer to NAND device */
	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);

	return 0;
}
2652
2653
2654
2655
2656
2657
2658
2659
2660
2661
2662
2663
2664
/**
 * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page
 *			      write
 * @mtd: mtd info structure
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * The hw generator calculates the error syndrome automatically. Therefore we
 * need a special oob layout and handling.
 */
static int nand_write_page_syndrome(struct mtd_info *mtd,
				    struct nand_chip *chip,
				    const uint8_t *buf, int oob_required,
				    int page)
{
	int i, eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	const uint8_t *p = buf;
	uint8_t *oob = chip->oob_poi;

	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {

		chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
		chip->write_buf(mtd, p, eccsize);

		if (chip->ecc.prepad) {
			chip->write_buf(mtd, oob, chip->ecc.prepad);
			oob += chip->ecc.prepad;
		}

		/* ECC bytes are written right after each data chunk */
		chip->ecc.calculate(mtd, p, oob);
		chip->write_buf(mtd, oob, eccbytes);
		oob += eccbytes;

		if (chip->ecc.postpad) {
			chip->write_buf(mtd, oob, chip->ecc.postpad);
			oob += chip->ecc.postpad;
		}
	}

	/* Calculate remaining oob bytes */
	i = mtd->oobsize - (oob - chip->oob_poi);
	if (i)
		chip->write_buf(mtd, oob, i);

	return 0;
}
2703
2704
2705
2706
2707
2708
2709
2710
2711
2712
2713
2714
2715
/**
 * nand_write_page - [REPLACEABLE] write one page
 * @mtd: MTD device structure
 * @chip: NAND chip descriptor
 * @offset: address offset within the page
 * @data_len: length of actual data to be written
 * @buf: the data to write
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 * @cached: cached programming (currently always disabled, see below)
 * @raw: use _raw version of write_page
 */
static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
		uint32_t offset, int data_len, const uint8_t *buf,
		int oob_required, int page, int cached, int raw)
{
	int status, subpage;

	/* Use the subpage path only for partial writes when it's available */
	if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
		chip->ecc.write_subpage)
		subpage = offset || (data_len < mtd->writesize);
	else
		subpage = 0;

	if (nand_standard_page_accessors(&chip->ecc))
		chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);

	if (unlikely(raw))
		status = chip->ecc.write_page_raw(mtd, chip, buf,
						  oob_required, page);
	else if (subpage)
		status = chip->ecc.write_subpage(mtd, chip, offset, data_len,
						 buf, oob_required, page);
	else
		status = chip->ecc.write_page(mtd, chip, buf, oob_required,
					      page);

	if (status < 0)
		return status;

	/*
	 * Cached programming disabled for now. Not sure if it's worth the
	 * trouble. The speed gain is not very impressive. (2.3->2.6Mib/s).
	 */
	cached = 0;

	if (!cached || !NAND_HAS_CACHEPROG(chip)) {

		if (nand_standard_page_accessors(&chip->ecc))
			chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
		status = chip->waitfunc(mtd, chip);
		/*
		 * See if operation failed and additional status checks are
		 * available.
		 */
		if ((status & NAND_STATUS_FAIL) && (chip->errstat))
			status = chip->errstat(mtd, chip, FL_WRITING, status,
					       page);

		if (status & NAND_STATUS_FAIL)
			return -EIO;
	} else {
		chip->cmdfunc(mtd, NAND_CMD_CACHEDPROG, -1, -1);
		status = chip->waitfunc(mtd, chip);
	}

	return 0;
}
2772
2773
2774
2775
2776
2777
2778
2779
2780static uint8_t *nand_fill_oob(struct mtd_info *mtd, uint8_t *oob, size_t len,
2781 struct mtd_oob_ops *ops)
2782{
2783 struct nand_chip *chip = mtd_to_nand(mtd);
2784 int ret;
2785
2786
2787
2788
2789
2790 memset(chip->oob_poi, 0xff, mtd->oobsize);
2791
2792 switch (ops->mode) {
2793
2794 case MTD_OPS_PLACE_OOB:
2795 case MTD_OPS_RAW:
2796 memcpy(chip->oob_poi + ops->ooboffs, oob, len);
2797 return oob + len;
2798
2799 case MTD_OPS_AUTO_OOB:
2800 ret = mtd_ooblayout_set_databytes(mtd, oob, chip->oob_poi,
2801 ops->ooboffs, len);
2802 BUG_ON(ret);
2803 return oob + len;
2804
2805 default:
2806 BUG();
2807 }
2808 return NULL;
2809}
2810
2811#define NOTALIGNED(x) ((x & (chip->subpagesize - 1)) != 0)
2812
2813
2814
2815
2816
2817
2818
2819
2820
/**
 * nand_do_write_ops - [INTERN] NAND write with ECC
 * @mtd: MTD device structure
 * @to: offset to write to
 * @ops: oob operations description structure
 *
 * NAND write with ECC. Called with the chip held.
 */
static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
			     struct mtd_oob_ops *ops)
{
	int chipnr, realpage, page, blockmask, column;
	struct nand_chip *chip = mtd_to_nand(mtd);
	uint32_t writelen = ops->len;

	uint32_t oobwritelen = ops->ooblen;
	uint32_t oobmaxlen = mtd_oobavail(mtd, ops);

	uint8_t *oob = ops->oobbuf;
	uint8_t *buf = ops->datbuf;
	int ret;
	int oob_required = oob ? 1 : 0;

	ops->retlen = 0;
	if (!writelen)
		return 0;

	/* Reject writes, which are not page aligned */
	if (NOTALIGNED(to) || NOTALIGNED(ops->len)) {
		pr_notice("%s: attempt to write non page aligned data\n",
			   __func__);
		return -EINVAL;
	}

	column = to & (mtd->writesize - 1);

	chipnr = (int)(to >> chip->chip_shift);
	chip->select_chip(mtd, chipnr);

	/* Check, if it is write protected */
	if (nand_check_wp(mtd)) {
		ret = -EIO;
		goto err_out;
	}

	realpage = (int)(to >> chip->page_shift);
	page = realpage & chip->pagemask;
	blockmask = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;

	/* Invalidate the page cache, when we write to the cached page */
	if (to <= ((loff_t)chip->pagebuf << chip->page_shift) &&
	    ((loff_t)chip->pagebuf << chip->page_shift) < (to + ops->len))
		chip->pagebuf = -1;

	/* Don't allow multipage oob writes with offset */
	if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) {
		ret = -EINVAL;
		goto err_out;
	}

	while (1) {
		int bytes = mtd->writesize;
		int cached = writelen > bytes && page != blockmask;
		uint8_t *wbuf = buf;
		int use_bufpoi;
		int part_pagewr = (column || writelen < mtd->writesize);

		/* Decide whether the internal bounce buffer is needed */
		if (part_pagewr)
			use_bufpoi = 1;
		else if (chip->options & NAND_USE_BOUNCE_BUFFER)
			use_bufpoi = !virt_addr_valid(buf) ||
				     !IS_ALIGNED((unsigned long)buf,
						 chip->buf_align);
		else
			use_bufpoi = 0;

		/* Partial page write?, or need to use bounce buffer */
		if (use_bufpoi) {
			pr_debug("%s: using write bounce buffer for buf@%p\n",
					 __func__, buf);
			cached = 0;
			if (part_pagewr)
				bytes = min_t(int, bytes - column, writelen);
			chip->pagebuf = -1;
			/* Untouched bytes of a partial page stay 0xFF */
			memset(chip->buffers->databuf, 0xff, mtd->writesize);
			memcpy(&chip->buffers->databuf[column], buf, bytes);
			wbuf = chip->buffers->databuf;
		}

		if (unlikely(oob)) {
			size_t len = min(oobwritelen, oobmaxlen);
			oob = nand_fill_oob(mtd, oob, len, ops);
			oobwritelen -= len;
		} else {
			/* We still need to erase leftover OOB data */
			memset(chip->oob_poi, 0xff, mtd->oobsize);
		}

		ret = nand_write_page(mtd, chip, column, bytes, wbuf,
				      oob_required, page, cached,
				      (ops->mode == MTD_OPS_RAW));
		if (ret)
			break;

		writelen -= bytes;
		if (!writelen)
			break;

		column = 0;
		buf += bytes;
		realpage++;

		page = realpage & chip->pagemask;
		/* Check, if we cross a chip boundary */
		if (!page) {
			chipnr++;
			chip->select_chip(mtd, -1);
			chip->select_chip(mtd, chipnr);
		}
	}

	ops->retlen = ops->len - writelen;
	if (unlikely(oob))
		ops->oobretlen = ops->ooblen;

err_out:
	chip->select_chip(mtd, -1);
	return ret;
}
2942
2943
2944
2945
2946
2947
2948
2949
2950
2951
2952
2953
2954static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
2955 size_t *retlen, const uint8_t *buf)
2956{
2957 struct nand_chip *chip = mtd_to_nand(mtd);
2958 struct mtd_oob_ops ops;
2959 int ret;
2960
2961
2962 panic_nand_wait(mtd, chip, 400);
2963
2964
2965 panic_nand_get_device(chip, mtd, FL_WRITING);
2966
2967 memset(&ops, 0, sizeof(ops));
2968 ops.len = len;
2969 ops.datbuf = (uint8_t *)buf;
2970 ops.mode = MTD_OPS_PLACE_OOB;
2971
2972 ret = nand_do_write_ops(mtd, to, &ops);
2973
2974 *retlen = ops.retlen;
2975 return ret;
2976}
2977
2978
2979
2980
2981
2982
2983
2984
2985
2986
2987
2988static int nand_write(struct mtd_info *mtd, loff_t to, size_t len,
2989 size_t *retlen, const uint8_t *buf)
2990{
2991 struct mtd_oob_ops ops;
2992 int ret;
2993
2994 nand_get_device(mtd, FL_WRITING);
2995 memset(&ops, 0, sizeof(ops));
2996 ops.len = len;
2997 ops.datbuf = (uint8_t *)buf;
2998 ops.mode = MTD_OPS_PLACE_OOB;
2999 ret = nand_do_write_ops(mtd, to, &ops);
3000 *retlen = ops.retlen;
3001 nand_release_device(mtd);
3002 return ret;
3003}
3004
3005
3006
3007
3008
3009
3010
3011
3012
/**
 * nand_do_write_oob - [MTD Interface] NAND write out-of-band
 * @mtd: MTD device structure
 * @to: offset to write to
 * @ops: oob operation description structure
 *
 * NAND write out-of-band data to the spare area.
 */
static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
			     struct mtd_oob_ops *ops)
{
	int chipnr, page, status, len;
	struct nand_chip *chip = mtd_to_nand(mtd);

	pr_debug("%s: to = 0x%08x, len = %i\n",
			 __func__, (unsigned int)to, (int)ops->ooblen);

	len = mtd_oobavail(mtd, ops);

	/* Do not allow write past end of page */
	if ((ops->ooboffs + ops->ooblen) > len) {
		pr_debug("%s: attempt to write past end of page\n",
				__func__);
		return -EINVAL;
	}

	if (unlikely(ops->ooboffs >= len)) {
		pr_debug("%s: attempt to start write outside oob\n",
				__func__);
		return -EINVAL;
	}

	/* Do not allow write past end of device */
	if (unlikely(to >= mtd->size ||
		     ops->ooboffs + ops->ooblen >
			((mtd->size >> chip->page_shift) -
			 (to >> chip->page_shift)) * len)) {
		pr_debug("%s: attempt to write beyond end of device\n",
				__func__);
		return -EINVAL;
	}

	chipnr = (int)(to >> chip->chip_shift);

	/*
	 * Reset the chip. Some chips (like the Toshiba TC5832DC found in one
	 * of my DiskOnChip 2000 test units) will clear the whole data page too
	 * if we don't do this. I have no clue why, but I seem to have 'fixed'
	 * it in the doc2000 driver in August 1999.  dwmw2.
	 */
	nand_reset(chip, chipnr);

	chip->select_chip(mtd, chipnr);

	/* Shift to get page */
	page = (int)(to >> chip->page_shift);

	/* Check, if it is write protected */
	if (nand_check_wp(mtd)) {
		chip->select_chip(mtd, -1);
		return -EROFS;
	}

	/* Invalidate the page cache, if we write to the cached page */
	if (page == chip->pagebuf)
		chip->pagebuf = -1;

	nand_fill_oob(mtd, ops->oobbuf, ops->ooblen, ops);

	if (ops->mode == MTD_OPS_RAW)
		status = chip->ecc.write_oob_raw(mtd, chip, page & chip->pagemask);
	else
		status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask);

	chip->select_chip(mtd, -1);

	if (status)
		return status;

	ops->oobretlen = ops->ooblen;

	return 0;
}
3088
3089
3090
3091
3092
3093
3094
3095static int nand_write_oob(struct mtd_info *mtd, loff_t to,
3096 struct mtd_oob_ops *ops)
3097{
3098 int ret = -ENOTSUPP;
3099
3100 ops->retlen = 0;
3101
3102
3103 if (ops->datbuf && (to + ops->len) > mtd->size) {
3104 pr_debug("%s: attempt to write beyond end of device\n",
3105 __func__);
3106 return -EINVAL;
3107 }
3108
3109 nand_get_device(mtd, FL_WRITING);
3110
3111 switch (ops->mode) {
3112 case MTD_OPS_PLACE_OOB:
3113 case MTD_OPS_AUTO_OOB:
3114 case MTD_OPS_RAW:
3115 break;
3116
3117 default:
3118 goto out;
3119 }
3120
3121 if (!ops->datbuf)
3122 ret = nand_do_write_oob(mtd, to, ops);
3123 else
3124 ret = nand_do_write_ops(mtd, to, ops);
3125
3126out:
3127 nand_release_device(mtd);
3128 return ret;
3129}
3130
3131
3132
3133
3134
3135
3136
3137
/**
 * single_erase - [GENERIC] NAND standard block erase command function
 * @mtd: MTD device structure
 * @page: the page address of the block which will be erased
 *
 * Standard erase command for NAND chips. Returns NAND status.
 */
static int single_erase(struct mtd_info *mtd, int page)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	/* Send commands to erase a block */
	chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
	chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);

	return chip->waitfunc(mtd, chip);
}
3147
3148
3149
3150
3151
3152
3153
3154
/**
 * nand_erase - [MTD Interface] erase block(s)
 * @mtd: MTD device structure
 * @instr: erase instruction
 *
 * Erase one or more blocks (BBT area not allowed).
 */
static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	return nand_erase_nand(mtd, instr, 0);
}
3159
3160
3161
3162
3163
3164
3165
3166
3167
/**
 * nand_erase_nand - [INTERN] erase block(s)
 * @mtd: MTD device structure
 * @instr: erase instruction
 * @allowbbt: allow erasing the bbt area
 *
 * Erase one or more blocks.
 */
int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
		    int allowbbt)
{
	int page, status, pages_per_block, ret, chipnr;
	struct nand_chip *chip = mtd_to_nand(mtd);
	loff_t len;

	pr_debug("%s: start = 0x%012llx, len = %llu\n",
			__func__, (unsigned long long)instr->addr,
			(unsigned long long)instr->len);

	if (check_offs_len(mtd, instr->addr, instr->len))
		return -EINVAL;

	/* Grab the lock and see if the device is available */
	nand_get_device(mtd, FL_ERASING);

	/* Shift to get first page */
	page = (int)(instr->addr >> chip->page_shift);
	chipnr = (int)(instr->addr >> chip->chip_shift);

	/* Calculate pages in each block */
	pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);

	/* Select the NAND device */
	chip->select_chip(mtd, chipnr);

	/* Check, if it is write protected */
	if (nand_check_wp(mtd)) {
		pr_debug("%s: device is write protected!\n",
				__func__);
		instr->state = MTD_ERASE_FAILED;
		goto erase_exit;
	}

	/* Loop through the pages */
	len = instr->len;

	instr->state = MTD_ERASING;

	while (len) {
		/* Check if we have a bad block, we do not erase bad blocks! */
		if (nand_block_checkbad(mtd, ((loff_t) page) <<
					chip->page_shift, allowbbt)) {
			pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
				    __func__, page);
			instr->state = MTD_ERASE_FAILED;
			goto erase_exit;
		}

		/*
		 * Invalidate the page cache, if we erase the block which
		 * contains the current cached page.
		 */
		if (page <= chip->pagebuf && chip->pagebuf <
		    (page + pages_per_block))
			chip->pagebuf = -1;

		status = chip->erase(mtd, page & chip->pagemask);

		/*
		 * See if operation failed and additional status checks are
		 * available
		 */
		if ((status & NAND_STATUS_FAIL) && (chip->errstat))
			status = chip->errstat(mtd, chip, FL_ERASING,
					       status, page);

		/* See if block erase succeeded */
		if (status & NAND_STATUS_FAIL) {
			pr_debug("%s: failed erase, page 0x%08x\n",
					__func__, page);
			instr->state = MTD_ERASE_FAILED;
			instr->fail_addr =
				((loff_t)page << chip->page_shift);
			goto erase_exit;
		}

		/* Increment page address and decrement length */
		len -= (1ULL << chip->phys_erase_shift);
		page += pages_per_block;

		/* Check, if we cross a chip boundary */
		if (len && !(page & chip->pagemask)) {
			chipnr++;
			chip->select_chip(mtd, -1);
			chip->select_chip(mtd, chipnr);
		}
	}
	instr->state = MTD_ERASE_DONE;

erase_exit:

	ret = instr->state == MTD_ERASE_DONE ? 0 : -EIO;

	/* Deselect and wake up anyone waiting on the device */
	chip->select_chip(mtd, -1);
	nand_release_device(mtd);

	/* Do call back function */
	if (!ret)
		mtd_erase_callback(instr);

	/* Return more or less happy */
	return ret;
}
3274
3275
3276
3277
3278
3279
3280
/**
 * nand_sync - [MTD Interface] sync
 * @mtd: MTD device structure
 *
 * Sync is actually a wait-for-chip-ready function: taking and releasing
 * the device lock guarantees any in-flight operation has finished.
 */
static void nand_sync(struct mtd_info *mtd)
{
	pr_debug("%s: called\n", __func__);

	/* Grab the lock and see if the device is available */
	nand_get_device(mtd, FL_SYNCING);
	/* Release it and go back */
	nand_release_device(mtd);
}
3290
3291
3292
3293
3294
3295
3296static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
3297{
3298 struct nand_chip *chip = mtd_to_nand(mtd);
3299 int chipnr = (int)(offs >> chip->chip_shift);
3300 int ret;
3301
3302
3303 nand_get_device(mtd, FL_READING);
3304 chip->select_chip(mtd, chipnr);
3305
3306 ret = nand_block_checkbad(mtd, offs, 0);
3307
3308 chip->select_chip(mtd, -1);
3309 nand_release_device(mtd);
3310
3311 return ret;
3312}
3313
3314
3315
3316
3317
3318
3319static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
3320{
3321 int ret;
3322
3323 ret = nand_block_isbad(mtd, ofs);
3324 if (ret) {
3325
3326 if (ret > 0)
3327 return 0;
3328 return ret;
3329 }
3330
3331 return nand_block_markbad_lowlevel(mtd, ofs);
3332}
3333
3334
3335
3336
3337
3338
3339
3340static int nand_max_bad_blocks(struct mtd_info *mtd, loff_t ofs, size_t len)
3341{
3342 struct nand_chip *chip = mtd_to_nand(mtd);
3343 u32 part_start_block;
3344 u32 part_end_block;
3345 u32 part_start_die;
3346 u32 part_end_die;
3347
3348
3349
3350
3351
3352 if (!chip->max_bb_per_die || !chip->blocks_per_die)
3353 return -ENOTSUPP;
3354
3355
3356 part_start_block = mtd_div_by_eb(ofs, mtd);
3357 part_end_block = mtd_div_by_eb(len, mtd) + part_start_block - 1;
3358
3359
3360 part_start_die = part_start_block / chip->blocks_per_die;
3361 part_end_die = part_end_block / chip->blocks_per_die;
3362
3363
3364
3365
3366
3367 return chip->max_bb_per_die * (part_end_die - part_start_die + 1);
3368}
3369
3370
3371
3372
3373
3374
3375
3376
3377static int nand_onfi_set_features(struct mtd_info *mtd, struct nand_chip *chip,
3378 int addr, uint8_t *subfeature_param)
3379{
3380 int status;
3381 int i;
3382
3383 if (!chip->onfi_version ||
3384 !(le16_to_cpu(chip->onfi_params.opt_cmd)
3385 & ONFI_OPT_CMD_SET_GET_FEATURES))
3386 return -EINVAL;
3387
3388 chip->cmdfunc(mtd, NAND_CMD_SET_FEATURES, addr, -1);
3389 for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
3390 chip->write_byte(mtd, subfeature_param[i]);
3391
3392 status = chip->waitfunc(mtd, chip);
3393 if (status & NAND_STATUS_FAIL)
3394 return -EIO;
3395 return 0;
3396}
3397
3398
3399
3400
3401
3402
3403
3404
3405static int nand_onfi_get_features(struct mtd_info *mtd, struct nand_chip *chip,
3406 int addr, uint8_t *subfeature_param)
3407{
3408 int i;
3409
3410 if (!chip->onfi_version ||
3411 !(le16_to_cpu(chip->onfi_params.opt_cmd)
3412 & ONFI_OPT_CMD_SET_GET_FEATURES))
3413 return -EINVAL;
3414
3415 chip->cmdfunc(mtd, NAND_CMD_GET_FEATURES, addr, -1);
3416 for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
3417 *subfeature_param++ = chip->read_byte(mtd);
3418 return 0;
3419}
3420
3421
3422
3423
3424
/**
 * nand_suspend - [MTD Interface] Suspend the NAND flash
 * @mtd: MTD device structure
 *
 * Takes the device lock in FL_PM_SUSPENDED state so that no further
 * operations can start until nand_resume() releases it.
 */
static int nand_suspend(struct mtd_info *mtd)
{
	return nand_get_device(mtd, FL_PM_SUSPENDED);
}
3429
3430
3431
3432
3433
3434static void nand_resume(struct mtd_info *mtd)
3435{
3436 struct nand_chip *chip = mtd_to_nand(mtd);
3437
3438 if (chip->state == FL_PM_SUSPENDED)
3439 nand_release_device(mtd);
3440 else
3441 pr_err("%s called for a chip which is not in suspended state\n",
3442 __func__);
3443}
3444
3445
3446
3447
3448
3449
/**
 * nand_shutdown - [MTD Interface] Finish the current NAND operation and
 *                 prevent further operations
 * @mtd: MTD device structure
 *
 * Grabs the lock in FL_PM_SUSPENDED state and never releases it.
 */
static void nand_shutdown(struct mtd_info *mtd)
{
	nand_get_device(mtd, FL_PM_SUSPENDED);
}
3454
3455
/* Set default functions for every chip callback the driver did not supply. */
static void nand_set_defaults(struct nand_chip *chip)
{
	unsigned int busw = chip->options & NAND_BUSWIDTH_16;

	/* check for proper chip_delay setup, set 20us if not */
	if (!chip->chip_delay)
		chip->chip_delay = 20;

	/* check, if a user supplied command function given */
	if (chip->cmdfunc == NULL)
		chip->cmdfunc = nand_command;

	/* check, if a user supplied wait function given */
	if (chip->waitfunc == NULL)
		chip->waitfunc = nand_wait;

	if (!chip->select_chip)
		chip->select_chip = nand_select_chip;

	/* set for ONFI nand */
	if (!chip->onfi_set_features)
		chip->onfi_set_features = nand_onfi_set_features;
	if (!chip->onfi_get_features)
		chip->onfi_get_features = nand_onfi_get_features;

	/*
	 * If called twice, pointers that depend on busw may need to be reset
	 * to their proper (8- or 16-bit) variant.
	 */
	if (!chip->read_byte || chip->read_byte == nand_read_byte)
		chip->read_byte = busw ? nand_read_byte16 : nand_read_byte;
	if (!chip->read_word)
		chip->read_word = nand_read_word;
	if (!chip->block_bad)
		chip->block_bad = nand_block_bad;
	if (!chip->block_markbad)
		chip->block_markbad = nand_default_block_markbad;
	if (!chip->write_buf || chip->write_buf == nand_write_buf)
		chip->write_buf = busw ? nand_write_buf16 : nand_write_buf;
	if (!chip->write_byte || chip->write_byte == nand_write_byte)
		chip->write_byte = busw ? nand_write_byte16 : nand_write_byte;
	if (!chip->read_buf || chip->read_buf == nand_read_buf)
		chip->read_buf = busw ? nand_read_buf16 : nand_read_buf;
	if (!chip->scan_bbt)
		chip->scan_bbt = nand_default_bbt;

	/* Fall back to the chip's own locking when no controller is shared. */
	if (!chip->controller) {
		chip->controller = &chip->hwcontrol;
		nand_hw_control_init(chip->controller);
	}

	if (!chip->buf_align)
		chip->buf_align = 1;
}
3507
3508
/* Sanitize ONFI strings so we can safely print them */
static void sanitize_string(uint8_t *s, size_t len)
{
	ssize_t i;

	/* Null terminate */
	s[len - 1] = 0;

	/* Remove non printable chars */
	for (i = 0; i < len - 1; i++) {
		if (s[i] < ' ' || s[i] > 127)
			s[i] = '?';
	}

	/*
	 * Remove trailing spaces. NOTE(review): strim()'s return value (which
	 * skips leading whitespace) is discarded, so leading spaces remain —
	 * presumably intentional since the buffer start must stay valid.
	 */
	strim(s);
}
3525
/*
 * Compute the parameter-page CRC16 over @len bytes: MSB-first,
 * polynomial 0x8005, starting from the caller-supplied @crc seed.
 */
static u16 onfi_crc16(u16 crc, u8 const *p, size_t len)
{
	size_t k;
	int bit;

	for (k = 0; k < len; k++) {
		crc ^= (u16)(p[k] << 8);
		for (bit = 0; bit < 8; bit++) {
			if (crc & 0x8000)
				crc = (crc << 1) ^ 0x8005;
			else
				crc <<= 1;
		}
	}

	return crc;
}
3537
3538
/* Parse the Extended Parameter Page to extract the ECC requirements. */
static int nand_flash_detect_ext_param_page(struct nand_chip *chip,
					    struct nand_onfi_params *p)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct onfi_ext_param_page *ep;
	struct onfi_ext_section *s;
	struct onfi_ext_ecc_info *ecc;
	uint8_t *cursor;
	int ret = -EINVAL;
	int len;
	int i;

	len = le16_to_cpu(p->ext_param_page_length) * 16;
	ep = kmalloc(len, GFP_KERNEL);
	if (!ep)
		return -ENOMEM;

	/* Send our own NAND_CMD_PARAM. */
	chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);

	/* Use the Change Read Column command to skip the ONFI param pages. */
	chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
			sizeof(*p) * p->num_of_param_pages , -1);

	/* Read out the Extended Parameter Page and verify its CRC. */
	chip->read_buf(mtd, (uint8_t *)ep, len);
	if ((onfi_crc16(ONFI_CRC_BASE, ((uint8_t *)ep) + 2, len - 2)
		!= le16_to_cpu(ep->crc))) {
		pr_debug("fail in the CRC.\n");
		goto ext_out;
	}

	/*
	 * Check the signature. Do not strictly follow the ONFI spec;
	 * maybe changed in future.
	 */
	if (strncmp(ep->sig, "EPPS", 4)) {
		pr_debug("The signature is invalid.\n");
		goto ext_out;
	}

	/* Walk the section descriptors to find the ECC section. */
	cursor = (uint8_t *)(ep + 1);
	for (i = 0; i < ONFI_EXT_SECTION_MAX; i++) {
		s = ep->sections + i;
		if (s->type == ONFI_SECTION_TYPE_2)
			break;
		cursor += s->length * 16;
	}
	if (i == ONFI_EXT_SECTION_MAX) {
		pr_debug("We can not find the ECC section.\n");
		goto ext_out;
	}

	/* Get the info we want. */
	ecc = (struct onfi_ext_ecc_info *)cursor;

	if (!ecc->codeword_size) {
		pr_debug("Invalid codeword size\n");
		goto ext_out;
	}

	chip->ecc_strength_ds = ecc->ecc_bits;
	chip->ecc_step_ds = 1 << ecc->codeword_size;
	ret = 0;

ext_out:
	kfree(ep);
	return ret;
}
3609
3610
3611
3612
/*
 * Check if the NAND chip is ONFI compliant, returns 1 if it is, 0 otherwise.
 */
static int nand_flash_detect_onfi(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_onfi_params *p = &chip->onfi_params;
	int i, j;
	int val;

	/* Try ONFI for unknown chip or LP: READID at 0x20 must return "ONFI" */
	chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1);
	if (chip->read_byte(mtd) != 'O' || chip->read_byte(mtd) != 'N' ||
		chip->read_byte(mtd) != 'F' || chip->read_byte(mtd) != 'I')
		return 0;

	chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
	/* The chip provides up to three redundant parameter page copies. */
	for (i = 0; i < 3; i++) {
		for (j = 0; j < sizeof(*p); j++)
			((uint8_t *)p)[j] = chip->read_byte(mtd);
		if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 254) ==
				le16_to_cpu(p->crc)) {
			break;
		}
	}

	if (i == 3) {
		pr_err("Could not find valid ONFI parameter page; aborting\n");
		return 0;
	}

	/* Check version: one bit per supported ONFI revision */
	val = le16_to_cpu(p->revision);
	if (val & (1 << 5))
		chip->onfi_version = 23;
	else if (val & (1 << 4))
		chip->onfi_version = 22;
	else if (val & (1 << 3))
		chip->onfi_version = 21;
	else if (val & (1 << 2))
		chip->onfi_version = 20;
	else if (val & (1 << 1))
		chip->onfi_version = 10;

	if (!chip->onfi_version) {
		pr_info("unsupported ONFI version: %d\n", val);
		return 0;
	}

	sanitize_string(p->manufacturer, sizeof(p->manufacturer));
	sanitize_string(p->model, sizeof(p->model));
	if (!mtd->name)
		mtd->name = p->model;

	mtd->writesize = le32_to_cpu(p->byte_per_page);

	/*
	 * pages_per_block and blocks_per_lun may not be a power-of-2 size
	 * (don't ask me who thought of this...). MTD assumes that these
	 * dimensions will be power-of-2, so just truncate the remaining area.
	 */
	mtd->erasesize = 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
	mtd->erasesize *= mtd->writesize;

	mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);

	/* See erasesize comment */
	chip->chipsize = 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
	chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
	chip->bits_per_cell = p->bits_per_cell;

	chip->max_bb_per_die = le16_to_cpu(p->bb_per_lun);
	chip->blocks_per_die = le32_to_cpu(p->blocks_per_lun);

	if (onfi_feature(chip) & ONFI_FEATURE_16_BIT_BUS)
		chip->options |= NAND_BUSWIDTH_16;

	if (p->ecc_bits != 0xff) {
		chip->ecc_strength_ds = p->ecc_bits;
		chip->ecc_step_ds = 512;
	} else if (chip->onfi_version >= 21 &&
		(onfi_feature(chip) & ONFI_FEATURE_EXT_PARAM_PAGE)) {

		/*
		 * The nand_flash_detect_ext_param_page() uses the
		 * Change Read Column command which maybe not supported
		 * by the chip->cmdfunc. So try to update the chip->cmdfunc
		 * now. We do not replace user supplied command function.
		 */
		if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
			chip->cmdfunc = nand_command_lp;

		/* The Extended Parameter Page is supported since ONFI 2.1. */
		if (nand_flash_detect_ext_param_page(chip, p))
			pr_warn("Failed to detect ONFI extended param page\n");
	} else {
		pr_warn("Could not retrieve ONFI ECC requirements\n");
	}

	return 1;
}
3711
3712
3713
3714
/*
 * Check if the NAND chip is JEDEC compliant, returns 1 if it is, 0 otherwise.
 */
static int nand_flash_detect_jedec(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_jedec_params *p = &chip->jedec_params;
	struct jedec_ecc_info *ecc;
	int val;
	int i, j;

	/* Try JEDEC for unknown chip or LP: READID at 0x40 must say "JEDEC" */
	chip->cmdfunc(mtd, NAND_CMD_READID, 0x40, -1);
	if (chip->read_byte(mtd) != 'J' || chip->read_byte(mtd) != 'E' ||
		chip->read_byte(mtd) != 'D' || chip->read_byte(mtd) != 'E' ||
		chip->read_byte(mtd) != 'C')
		return 0;

	chip->cmdfunc(mtd, NAND_CMD_PARAM, 0x40, -1);
	/* The chip provides up to three redundant parameter page copies. */
	for (i = 0; i < 3; i++) {
		for (j = 0; j < sizeof(*p); j++)
			((uint8_t *)p)[j] = chip->read_byte(mtd);

		if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 510) ==
				le16_to_cpu(p->crc))
			break;
	}

	if (i == 3) {
		pr_err("Could not find valid JEDEC parameter page; aborting\n");
		return 0;
	}

	/* Check version */
	val = le16_to_cpu(p->revision);
	if (val & (1 << 2))
		chip->jedec_version = 10;
	else if (val & (1 << 1))
		chip->jedec_version = 1;

	if (!chip->jedec_version) {
		pr_info("unsupported JEDEC version: %d\n", val);
		return 0;
	}

	sanitize_string(p->manufacturer, sizeof(p->manufacturer));
	sanitize_string(p->model, sizeof(p->model));
	if (!mtd->name)
		mtd->name = p->model;

	mtd->writesize = le32_to_cpu(p->byte_per_page);

	/* Please reference to the comment for nand_flash_detect_onfi. */
	mtd->erasesize = 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
	mtd->erasesize *= mtd->writesize;

	mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);

	/* Please reference to the comment for nand_flash_detect_onfi. */
	chip->chipsize = 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
	chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
	chip->bits_per_cell = p->bits_per_cell;

	if (jedec_feature(chip) & JEDEC_FEATURE_16_BIT_BUS)
		chip->options |= NAND_BUSWIDTH_16;

	/* ECC info */
	ecc = &p->ecc_info[0];

	if (ecc->codeword_size >= 9) {
		chip->ecc_strength_ds = ecc->ecc_bits;
		chip->ecc_step_ds = 1 << ecc->codeword_size;
	} else {
		pr_warn("Invalid codeword size\n");
	}

	return 1;
}
3790
3791
3792
3793
3794
3795
3796
3797
3798
3799
3800
3801
/*
 * nand_id_has_period - Check if an ID string is repeated with a given period
 * @id_data: the ID string
 * @arrlen: the length of the @id_data array
 * @period: the period of repitition
 *
 * Returns 1 when every byte equals the byte @period positions before it,
 * i.e. the buffer wraps around with the given period; 0 otherwise.
 */
static int nand_id_has_period(u8 *id_data, int arrlen, int period)
{
	int pos;

	for (pos = period; pos < arrlen; pos++) {
		if (id_data[pos] != id_data[pos - period])
			return 0;
	}

	return 1;
}
3811
3812
3813
3814
3815
3816
3817
3818
3819
/*
 * nand_id_len - Get the length of an ID string returned by CMD_READID
 * @id_data: the ID string
 * @arrlen: the length of the @id_data array
 *
 * Returns the shortest repeat period if the ID wraps around, otherwise the
 * position after the last non-zero byte, or @arrlen if the buffer is fully
 * used. An all-zero buffer yields 0.
 */
static int nand_id_len(u8 *id_data, int arrlen)
{
	int nonzero_idx = arrlen - 1;
	int period;

	/* Locate the last non-zero byte (-1 if all bytes are zero). */
	while (nonzero_idx >= 0 && !id_data[nonzero_idx])
		nonzero_idx--;

	/* An all-zero ID carries no information. */
	if (nonzero_idx < 0)
		return 0;

	/* Prefer the shortest wraparound period, if there is one. */
	for (period = 1; period < arrlen; period++) {
		if (nand_id_has_period(id_data, arrlen, period))
			return period;
	}

	/* Non-repeating: trailing zeros delimit the ID. */
	if (nonzero_idx < arrlen - 1)
		return nonzero_idx + 1;

	return arrlen;
}
3849
3850
3851static int nand_get_bits_per_cell(u8 cellinfo)
3852{
3853 int bits;
3854
3855 bits = cellinfo & NAND_CI_CELLTYPE_MSK;
3856 bits >>= NAND_CI_CELLTYPE_SHIFT;
3857 return bits + 1;
3858}
3859
3860
3861
3862
3863
3864
3865void nand_decode_ext_id(struct nand_chip *chip)
3866{
3867 struct mtd_info *mtd = nand_to_mtd(chip);
3868 int extid;
3869 u8 *id_data = chip->id.data;
3870
3871 chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
3872
3873 extid = id_data[3];
3874
3875
3876 mtd->writesize = 1024 << (extid & 0x03);
3877 extid >>= 2;
3878
3879 mtd->oobsize = (8 << (extid & 0x01)) * (mtd->writesize >> 9);
3880 extid >>= 2;
3881
3882 mtd->erasesize = (64 * 1024) << (extid & 0x03);
3883 extid >>= 2;
3884
3885 if (extid & 0x1)
3886 chip->options |= NAND_BUSWIDTH_16;
3887}
3888EXPORT_SYMBOL_GPL(nand_decode_ext_id);
3889
3890
3891
3892
3893
3894
3895static void nand_decode_id(struct nand_chip *chip, struct nand_flash_dev *type)
3896{
3897 struct mtd_info *mtd = nand_to_mtd(chip);
3898
3899 mtd->erasesize = type->erasesize;
3900 mtd->writesize = type->pagesize;
3901 mtd->oobsize = mtd->writesize / 32;
3902
3903
3904 chip->bits_per_cell = 1;
3905}
3906
3907
3908
3909
3910
3911
3912static void nand_decode_bbm_options(struct nand_chip *chip)
3913{
3914 struct mtd_info *mtd = nand_to_mtd(chip);
3915
3916
3917 if (mtd->writesize > 512 || (chip->options & NAND_BUSWIDTH_16))
3918 chip->badblockpos = NAND_LARGE_BADBLOCK_POS;
3919 else
3920 chip->badblockpos = NAND_SMALL_BADBLOCK_POS;
3921}
3922
/* A "full-ID" table entry carries a complete ID byte sample (id_len != 0). */
static inline bool is_full_id_nand(struct nand_flash_dev *type)
{
	return type->id_len;
}
3927
3928static bool find_full_id_nand(struct nand_chip *chip,
3929 struct nand_flash_dev *type)
3930{
3931 struct mtd_info *mtd = nand_to_mtd(chip);
3932 u8 *id_data = chip->id.data;
3933
3934 if (!strncmp(type->id, id_data, type->id_len)) {
3935 mtd->writesize = type->pagesize;
3936 mtd->erasesize = type->erasesize;
3937 mtd->oobsize = type->oobsize;
3938
3939 chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
3940 chip->chipsize = (uint64_t)type->chipsize << 20;
3941 chip->options |= type->options;
3942 chip->ecc_strength_ds = NAND_ECC_STRENGTH(type);
3943 chip->ecc_step_ds = NAND_ECC_STEP(type);
3944 chip->onfi_timing_mode_default =
3945 type->onfi_timing_mode_default;
3946
3947 if (!mtd->name)
3948 mtd->name = type->name;
3949
3950 return true;
3951 }
3952 return false;
3953}
3954
3955
3956
3957
3958
3959
3960static void nand_manufacturer_detect(struct nand_chip *chip)
3961{
3962
3963
3964
3965
3966 if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
3967 chip->manufacturer.desc->ops->detect)
3968 chip->manufacturer.desc->ops->detect(chip);
3969 else
3970 nand_decode_ext_id(chip);
3971}
3972
3973
3974
3975
3976
3977
3978
3979static int nand_manufacturer_init(struct nand_chip *chip)
3980{
3981 if (!chip->manufacturer.desc || !chip->manufacturer.desc->ops ||
3982 !chip->manufacturer.desc->ops->init)
3983 return 0;
3984
3985 return chip->manufacturer.desc->ops->init(chip);
3986}
3987
3988
3989
3990
3991
3992
3993
3994static void nand_manufacturer_cleanup(struct nand_chip *chip)
3995{
3996
3997 if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
3998 chip->manufacturer.desc->ops->cleanup)
3999 chip->manufacturer.desc->ops->cleanup(chip);
4000}
4001
4002
4003
4004
/*
 * Get the flash and manufacturer id and look up if the type is supported.
 */
static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
{
	const struct nand_manufacturer *manufacturer;
	struct mtd_info *mtd = nand_to_mtd(chip);
	int busw;
	int i, ret;
	u8 *id_data = chip->id.data;
	u8 maf_id, dev_id;

	/*
	 * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
	 * after power-up.
	 */
	nand_reset(chip, 0);

	/* Select the device */
	chip->select_chip(mtd, 0);

	/* Send the command for reading device ID */
	chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);

	/* Read manufacturer and device IDs */
	maf_id = chip->read_byte(mtd);
	dev_id = chip->read_byte(mtd);

	/*
	 * Try again to make sure, as some systems the bus-hold or other
	 * interface concerns can cause random data which looks like a
	 * possibly credible NAND flash to appear. If the two results do
	 * not match, ignore the device completely.
	 */
	chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);

	/* Read entire ID string */
	for (i = 0; i < 8; i++)
		id_data[i] = chip->read_byte(mtd);

	if (id_data[0] != maf_id || id_data[1] != dev_id) {
		pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",
			maf_id, dev_id, id_data[0], id_data[1]);
		return -ENODEV;
	}

	chip->id.len = nand_id_len(id_data, 8);

	/* Try to identify manufacturer */
	manufacturer = nand_get_manufacturer(maf_id);
	chip->manufacturer.desc = manufacturer;

	if (!type)
		type = nand_flash_ids;

	/*
	 * Save the NAND_BUSWIDTH_16 flag before letting auto-detection logic
	 * override it.
	 * This is required to make sure initial NAND bus width set by the
	 * NAND controller driver is coherent with the real NAND bus width
	 * (extracted by auto-detection code).
	 */
	busw = chip->options & NAND_BUSWIDTH_16;

	/*
	 * The flag is only set (never cleared), reset it to its default value
	 * before starting auto-detection.
	 */
	chip->options &= ~NAND_BUSWIDTH_16;

	for (; type->name != NULL; type++) {
		if (is_full_id_nand(type)) {
			if (find_full_id_nand(chip, type))
				goto ident_done;
		} else if (dev_id == type->dev_id) {
			break;
		}
	}

	chip->onfi_version = 0;
	if (!type->name || !type->pagesize) {
		/* Check if the chip is ONFI compliant */
		if (nand_flash_detect_onfi(chip))
			goto ident_done;

		/* Check if the chip is JEDEC compliant */
		if (nand_flash_detect_jedec(chip))
			goto ident_done;
	}

	if (!type->name)
		return -ENODEV;

	if (!mtd->name)
		mtd->name = type->name;

	chip->chipsize = (uint64_t)type->chipsize << 20;

	if (!type->pagesize)
		nand_manufacturer_detect(chip);
	else
		nand_decode_id(chip, type);

	/* Get chip options */
	chip->options |= type->options;

ident_done:

	if (chip->options & NAND_BUSWIDTH_AUTO) {
		WARN_ON(busw & NAND_BUSWIDTH_16);
		nand_set_defaults(chip);
	} else if (busw != (chip->options & NAND_BUSWIDTH_16)) {
		/*
		 * Check, if buswidth is correct. Hardware drivers should set
		 * chip correct!
		 */
		pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
			maf_id, dev_id);
		pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
			mtd->name);
		pr_warn("bus width %d instead of %d bits\n", busw ? 16 : 8,
			(chip->options & NAND_BUSWIDTH_16) ? 16 : 8);
		return -EINVAL;
	}

	nand_decode_bbm_options(chip);

	/* Calculate the address shift from the page size */
	chip->page_shift = ffs(mtd->writesize) - 1;
	/* Convert chipsize to number of pages per chip -1 */
	chip->pagemask = (chip->chipsize >> chip->page_shift) - 1;

	chip->bbt_erase_shift = chip->phys_erase_shift =
		ffs(mtd->erasesize) - 1;
	if (chip->chipsize & 0xffffffff)
		chip->chip_shift = ffs((unsigned)chip->chipsize) - 1;
	else {
		chip->chip_shift = ffs((unsigned)(chip->chipsize >> 32));
		chip->chip_shift += 32 - 1;
	}

	chip->badblockbits = 8;
	chip->erase = single_erase;

	/* Do not replace user supplied command function! */
	if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
		chip->cmdfunc = nand_command_lp;

	ret = nand_manufacturer_init(chip);
	if (ret)
		return ret;

	pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
		maf_id, dev_id);

	if (chip->onfi_version)
		pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
			chip->onfi_params.model);
	else if (chip->jedec_version)
		pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
			chip->jedec_params.model);
	else
		pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
			type->name);

	pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n",
		(int)(chip->chipsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
		mtd->erasesize >> 10, mtd->writesize, mtd->oobsize);
	return 0;
}
4173
/* Names indexed by nand_ecc_modes_t, used to parse the "nand-ecc-mode" DT property. */
static const char * const nand_ecc_modes[] = {
	[NAND_ECC_NONE]		= "none",
	[NAND_ECC_SOFT]		= "soft",
	[NAND_ECC_HW]		= "hw",
	[NAND_ECC_HW_SYNDROME]	= "hw_syndrome",
	[NAND_ECC_HW_OOB_FIRST]	= "hw_oob_first",
};
4181
4182static int of_get_nand_ecc_mode(struct device_node *np)
4183{
4184 const char *pm;
4185 int err, i;
4186
4187 err = of_property_read_string(np, "nand-ecc-mode", &pm);
4188 if (err < 0)
4189 return err;
4190
4191 for (i = 0; i < ARRAY_SIZE(nand_ecc_modes); i++)
4192 if (!strcasecmp(pm, nand_ecc_modes[i]))
4193 return i;
4194
4195
4196
4197
4198
4199
4200 if (!strcasecmp(pm, "soft_bch"))
4201 return NAND_ECC_SOFT;
4202
4203 return -ENODEV;
4204}
4205
/* Names indexed by ECC algorithm, used to parse the "nand-ecc-algo" DT property. */
static const char * const nand_ecc_algos[] = {
	[NAND_ECC_HAMMING]	= "hamming",
	[NAND_ECC_BCH]		= "bch",
};
4210
4211static int of_get_nand_ecc_algo(struct device_node *np)
4212{
4213 const char *pm;
4214 int err, i;
4215
4216 err = of_property_read_string(np, "nand-ecc-algo", &pm);
4217 if (!err) {
4218 for (i = NAND_ECC_HAMMING; i < ARRAY_SIZE(nand_ecc_algos); i++)
4219 if (!strcasecmp(pm, nand_ecc_algos[i]))
4220 return i;
4221 return -ENODEV;
4222 }
4223
4224
4225
4226
4227
4228 err = of_property_read_string(np, "nand-ecc-mode", &pm);
4229 if (err < 0)
4230 return err;
4231
4232 if (!strcasecmp(pm, "soft"))
4233 return NAND_ECC_HAMMING;
4234 else if (!strcasecmp(pm, "soft_bch"))
4235 return NAND_ECC_BCH;
4236
4237 return -ENODEV;
4238}
4239
4240static int of_get_nand_ecc_step_size(struct device_node *np)
4241{
4242 int ret;
4243 u32 val;
4244
4245 ret = of_property_read_u32(np, "nand-ecc-step-size", &val);
4246 return ret ? ret : val;
4247}
4248
4249static int of_get_nand_ecc_strength(struct device_node *np)
4250{
4251 int ret;
4252 u32 val;
4253
4254 ret = of_property_read_u32(np, "nand-ecc-strength", &val);
4255 return ret ? ret : val;
4256}
4257
4258static int of_get_nand_bus_width(struct device_node *np)
4259{
4260 u32 val;
4261
4262 if (of_property_read_u32(np, "nand-bus-width", &val))
4263 return 8;
4264
4265 switch (val) {
4266 case 8:
4267 case 16:
4268 return val;
4269 default:
4270 return -EIO;
4271 }
4272}
4273
/* True if the DT node requests storing the bad block table in flash. */
static bool of_get_nand_on_flash_bbt(struct device_node *np)
{
	return of_property_read_bool(np, "nand-on-flash-bbt");
}
4278
4279static int nand_dt_init(struct nand_chip *chip)
4280{
4281 struct device_node *dn = nand_get_flash_node(chip);
4282 int ecc_mode, ecc_algo, ecc_strength, ecc_step;
4283
4284 if (!dn)
4285 return 0;
4286
4287 if (of_get_nand_bus_width(dn) == 16)
4288 chip->options |= NAND_BUSWIDTH_16;
4289
4290 if (of_get_nand_on_flash_bbt(dn))
4291 chip->bbt_options |= NAND_BBT_USE_FLASH;
4292
4293 ecc_mode = of_get_nand_ecc_mode(dn);
4294 ecc_algo = of_get_nand_ecc_algo(dn);
4295 ecc_strength = of_get_nand_ecc_strength(dn);
4296 ecc_step = of_get_nand_ecc_step_size(dn);
4297
4298 if (ecc_mode >= 0)
4299 chip->ecc.mode = ecc_mode;
4300
4301 if (ecc_algo >= 0)
4302 chip->ecc.algo = ecc_algo;
4303
4304 if (ecc_strength >= 0)
4305 chip->ecc.strength = ecc_strength;
4306
4307 if (ecc_step > 0)
4308 chip->ecc.size = ecc_step;
4309
4310 if (of_property_read_bool(dn, "nand-ecc-maximize"))
4311 chip->ecc.options |= NAND_ECC_MAXIMIZE;
4312
4313 return 0;
4314}
4315
4316
4317
4318
4319
4320
4321
4322
4323
4324
4325
/**
 * nand_scan_ident - [NAND Interface] Scan for the NAND device
 * @mtd: MTD device structure
 * @maxchips: number of chips to scan for
 * @table: alternative NAND ID table
 *
 * This is the first phase of the normal nand_scan() function. It reads the
 * flash ID and sets up MTD fields accordingly.
 */
int nand_scan_ident(struct mtd_info *mtd, int maxchips,
		    struct nand_flash_dev *table)
{
	int i, nand_maf_id, nand_dev_id;
	struct nand_chip *chip = mtd_to_nand(mtd);
	int ret;

	ret = nand_dt_init(chip);
	if (ret)
		return ret;

	if (!mtd->name && mtd->dev.parent)
		mtd->name = dev_name(mtd->dev.parent);

	if ((!chip->cmdfunc || !chip->select_chip) && !chip->cmd_ctrl) {
		/*
		 * Default functions assigned for chip_select() and
		 * cmdfunc() both expect cmd_ctrl() to be populated,
		 * so we need to check that that's the case.
		 */
		pr_err("chip.cmd_ctrl() callback is not provided");
		return -EINVAL;
	}

	/* Set the default functions */
	nand_set_defaults(chip);

	/* Read the flash type */
	ret = nand_detect(chip, table);
	if (ret) {
		if (!(chip->options & NAND_SCAN_SILENT_NODEV))
			pr_warn("No NAND device found\n");
		chip->select_chip(mtd, -1);
		return ret;
	}

	/* Initialize the ->data_interface field. */
	ret = nand_init_data_interface(chip);
	if (ret)
		goto err_nand_init;

	/*
	 * Setup the data interface correctly on the chip and controller side.
	 * This explicit call to nand_setup_data_interface() is only required
	 * for the first die, because nand_reset() has been called before
	 * nand_scan_ident() and thus explicitly skipped the data interface
	 * setup. The remaining dies are handled during the reset done in
	 * the later scan phase.
	 */
	ret = nand_setup_data_interface(chip);
	if (ret)
		goto err_nand_init;

	nand_maf_id = chip->id.data[0];
	nand_dev_id = chip->id.data[1];

	chip->select_chip(mtd, -1);

	/* Check for a chip array */
	for (i = 1; i < maxchips; i++) {
		/* See comment in nand_detect for reset */
		nand_reset(chip, i);

		chip->select_chip(mtd, i);
		/* Send the command for reading device ID */
		chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
		/* Read manufacturer and device IDs */
		if (nand_maf_id != chip->read_byte(mtd) ||
		    nand_dev_id != chip->read_byte(mtd)) {
			chip->select_chip(mtd, -1);
			break;
		}
		chip->select_chip(mtd, -1);
	}
	if (i > 1)
		pr_info("%d chips detected\n", i);

	/* Store the number of chips and calc total size for mtd */
	chip->numchips = i;
	mtd->size = i * chip->chipsize;

	return 0;

err_nand_init:
	/* Free manufacturer priv data. */
	nand_manufacturer_cleanup(chip);

	return ret;
}
EXPORT_SYMBOL(nand_scan_ident);
4415
/* Fill in the software ECC callbacks and parameters for the selected algorithm. */
static int nand_set_ecc_soft_ops(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	if (WARN_ON(ecc->mode != NAND_ECC_SOFT))
		return -EINVAL;

	switch (ecc->algo) {
	case NAND_ECC_HAMMING:
		ecc->calculate = nand_calculate_ecc;
		ecc->correct = nand_correct_data;
		ecc->read_page = nand_read_page_swecc;
		ecc->read_subpage = nand_read_subpage;
		ecc->write_page = nand_write_page_swecc;
		ecc->read_page_raw = nand_read_page_raw;
		ecc->write_page_raw = nand_write_page_raw;
		ecc->read_oob = nand_read_oob_std;
		ecc->write_oob = nand_write_oob_std;
		if (!ecc->size)
			ecc->size = 256;
		ecc->bytes = 3;
		ecc->strength = 1;
		return 0;
	case NAND_ECC_BCH:
		if (!mtd_nand_has_bch()) {
			WARN(1, "CONFIG_MTD_NAND_ECC_BCH not enabled\n");
			return -EINVAL;
		}
		ecc->calculate = nand_bch_calculate_ecc;
		ecc->correct = nand_bch_correct_data;
		ecc->read_page = nand_read_page_swecc;
		ecc->read_subpage = nand_read_subpage;
		ecc->write_page = nand_write_page_swecc;
		ecc->read_page_raw = nand_read_page_raw;
		ecc->write_page_raw = nand_write_page_raw;
		ecc->read_oob = nand_read_oob_std;
		ecc->write_oob = nand_write_oob_std;

		/*
		 * Board driver should supply ecc.size and ecc.strength
		 * values to select how many bits are correctable.
		 * Otherwise, default to 4 bits for large page devices.
		 */
		if (!ecc->size && (mtd->oobsize >= 64)) {
			ecc->size = 512;
			ecc->strength = 4;
		}

		/*
		 * If no ecc placement scheme was provided, pick up the
		 * default large page one.
		 */
		if (!mtd->ooblayout) {
			/* handle large page devices only */
			if (mtd->oobsize < 64) {
				WARN(1, "OOB layout is required when using software BCH on small pages\n");
				return -EINVAL;
			}

			mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);

		}

		/*
		 * We can only maximize ECC config when the default layout is
		 * used, otherwise we don't know how many bytes can really be
		 * used.
		 */
		if (mtd->ooblayout == &nand_ooblayout_lp_ops &&
		    ecc->options & NAND_ECC_MAXIMIZE) {
			int steps, bytes;

			/* Always prefer 1k blocks over 512bytes ones */
			ecc->size = 1024;
			steps = mtd->writesize / ecc->size;

			/* Reserve 2 bytes for the BBM */
			bytes = (mtd->oobsize - 2) / steps;
			ecc->strength = bytes * 8 / fls(8 * ecc->size);
		}

		/* See nand_bch_init() for details. */
		ecc->bytes = 0;
		ecc->priv = nand_bch_init(mtd);
		if (!ecc->priv) {
			WARN(1, "BCH ECC initialization failed!\n");
			return -EINVAL;
		}
		return 0;
	default:
		WARN(1, "Unsupported ECC algorithm!\n");
		return -EINVAL;
	}
}
4511
4512
4513
4514
4515
4516
4517
4518
4519
4520
4521
4522
4523
4524
4525
4526static bool nand_ecc_strength_good(struct mtd_info *mtd)
4527{
4528 struct nand_chip *chip = mtd_to_nand(mtd);
4529 struct nand_ecc_ctrl *ecc = &chip->ecc;
4530 int corr, ds_corr;
4531
4532 if (ecc->size == 0 || chip->ecc_step_ds == 0)
4533
4534 return true;
4535
4536
4537
4538
4539
4540 corr = (mtd->writesize * ecc->strength) / ecc->size;
4541 ds_corr = (mtd->writesize * chip->ecc_strength_ds) / chip->ecc_step_ds;
4542
4543 return corr >= ds_corr && ecc->strength >= chip->ecc_strength_ds;
4544}
4545
4546static bool invalid_ecc_page_accessors(struct nand_chip *chip)
4547{
4548 struct nand_ecc_ctrl *ecc = &chip->ecc;
4549
4550 if (nand_standard_page_accessors(ecc))
4551 return false;
4552
4553
4554
4555
4556
4557
4558
4559 return (!ecc->read_page || !ecc->write_page ||
4560 !ecc->read_page_raw || !ecc->write_page_raw ||
4561 (NAND_HAS_SUBPAGE_READ(chip) && !ecc->read_subpage) ||
4562 (NAND_HAS_SUBPAGE_WRITE(chip) && !ecc->write_subpage &&
4563 ecc->hwctl && ecc->calculate));
4564}
4565
4566
4567
4568
4569
4570
4571
4572
4573
/**
 * nand_scan_tail - [NAND Interface] Second phase of the NAND scan
 * @mtd: MTD device structure
 *
 * This is the second phase of the normal nand_scan() function. It fills out
 * all the uninitialized ECC function pointers with defaults, selects an OOB
 * layout, allocates the internal data buffers and finally scans for a bad
 * block table if appropriate.
 *
 * Returns 0 on success, a negative error code otherwise. On failure the
 * resources acquired by the identification phase are released.
 */
int nand_scan_tail(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	struct nand_buffers *nbuf = NULL;
	int ret;

	/* New bad blocks should be marked in OOB, flash-based BBT, or both */
	if (WARN_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
		    !(chip->bbt_options & NAND_BBT_USE_FLASH))) {
		ret = -EINVAL;
		goto err_ident;
	}

	if (invalid_ecc_page_accessors(chip)) {
		pr_err("Invalid ECC page accessors setup\n");
		ret = -EINVAL;
		goto err_ident;
	}

	/* Allocate the internal buffers unless the driver owns its own */
	if (!(chip->options & NAND_OWN_BUFFERS)) {
		nbuf = kzalloc(sizeof(*nbuf), GFP_KERNEL);
		if (!nbuf) {
			ret = -ENOMEM;
			goto err_ident;
		}

		nbuf->ecccalc = kmalloc(mtd->oobsize, GFP_KERNEL);
		if (!nbuf->ecccalc) {
			ret = -ENOMEM;
			goto err_free;
		}

		nbuf->ecccode = kmalloc(mtd->oobsize, GFP_KERNEL);
		if (!nbuf->ecccode) {
			ret = -ENOMEM;
			goto err_free;
		}

		nbuf->databuf = kmalloc(mtd->writesize + mtd->oobsize,
					GFP_KERNEL);
		if (!nbuf->databuf) {
			ret = -ENOMEM;
			goto err_free;
		}

		chip->buffers = nbuf;
	} else {
		/* NAND_OWN_BUFFERS: the driver must have provided them */
		if (!chip->buffers) {
			ret = -ENOMEM;
			goto err_ident;
		}
	}

	/* Set the internal oob buffer location, just after the page data */
	chip->oob_poi = chip->buffers->databuf + mtd->writesize;

	/*
	 * If no default placement scheme is given, select an appropriate one.
	 * Soft BCH is excluded here: it sets its own layout later, in
	 * nand_set_ecc_soft_ops().
	 */
	if (!mtd->ooblayout &&
	    !(ecc->mode == NAND_ECC_SOFT && ecc->algo == NAND_ECC_BCH)) {
		switch (mtd->oobsize) {
		case 8:
		case 16:
			mtd_set_ooblayout(mtd, &nand_ooblayout_sp_ops);
			break;
		case 64:
		case 128:
			mtd_set_ooblayout(mtd, &nand_ooblayout_lp_hamming_ops);
			break;
		default:
			WARN(1, "No oob scheme defined for oobsize %d\n",
			     mtd->oobsize);
			ret = -EINVAL;
			goto err_free;
		}
	}

	/*
	 * Check the ECC mode and fill in any function pointers the driver
	 * left unset with sensible defaults. Falls back to software ECC when
	 * hardware ECC cannot work with the page size.
	 */
	switch (ecc->mode) {
	case NAND_ECC_HW_OOB_FIRST:
		/* Similar to NAND_ECC_HW, but a separate read_page handle */
		if (!ecc->calculate || !ecc->correct || !ecc->hwctl) {
			WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
			ret = -EINVAL;
			goto err_free;
		}
		if (!ecc->read_page)
			ecc->read_page = nand_read_page_hwecc_oob_first;
		/* fall through - remaining defaults shared with NAND_ECC_HW */

	case NAND_ECC_HW:
		/* Use standard hwecc page accessors if none were supplied */
		if (!ecc->read_page)
			ecc->read_page = nand_read_page_hwecc;
		if (!ecc->write_page)
			ecc->write_page = nand_write_page_hwecc;
		if (!ecc->read_page_raw)
			ecc->read_page_raw = nand_read_page_raw;
		if (!ecc->write_page_raw)
			ecc->write_page_raw = nand_write_page_raw;
		if (!ecc->read_oob)
			ecc->read_oob = nand_read_oob_std;
		if (!ecc->write_oob)
			ecc->write_oob = nand_write_oob_std;
		if (!ecc->read_subpage)
			ecc->read_subpage = nand_read_subpage;
		if (!ecc->write_subpage && ecc->hwctl && ecc->calculate)
			ecc->write_subpage = nand_write_subpage_hwecc;
		/* fall through - syndrome case validates the final setup */

	case NAND_ECC_HW_SYNDROME:
		/*
		 * Drivers without calculate/correct/hwctl must have replaced
		 * both full-page accessors with their own implementations.
		 */
		if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) &&
		    (!ecc->read_page ||
		     ecc->read_page == nand_read_page_hwecc ||
		     !ecc->write_page ||
		     ecc->write_page == nand_write_page_hwecc)) {
			WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
			ret = -EINVAL;
			goto err_free;
		}
		/* Use standard syndrome read/write page function? */
		if (!ecc->read_page)
			ecc->read_page = nand_read_page_syndrome;
		if (!ecc->write_page)
			ecc->write_page = nand_write_page_syndrome;
		if (!ecc->read_page_raw)
			ecc->read_page_raw = nand_read_page_raw_syndrome;
		if (!ecc->write_page_raw)
			ecc->write_page_raw = nand_write_page_raw_syndrome;
		if (!ecc->read_oob)
			ecc->read_oob = nand_read_oob_syndrome;
		if (!ecc->write_oob)
			ecc->write_oob = nand_write_oob_syndrome;

		if (mtd->writesize >= ecc->size) {
			if (!ecc->strength) {
				WARN(1, "Driver must set ecc.strength when using hardware ECC\n");
				ret = -EINVAL;
				goto err_free;
			}
			break;
		}
		/* ECC step larger than the page: fall back to software ECC */
		pr_warn("%d byte HW ECC not possible on %d byte page size, fallback to SW ECC\n",
			ecc->size, mtd->writesize);
		ecc->mode = NAND_ECC_SOFT;
		ecc->algo = NAND_ECC_HAMMING;
		/* fall through - set up the software ECC accessors */

	case NAND_ECC_SOFT:
		ret = nand_set_ecc_soft_ops(mtd);
		if (ret) {
			ret = -EINVAL;
			goto err_free;
		}
		break;

	case NAND_ECC_NONE:
		pr_warn("NAND_ECC_NONE selected by board driver. This is not recommended!\n");
		ecc->read_page = nand_read_page_raw;
		ecc->write_page = nand_write_page_raw;
		ecc->read_oob = nand_read_oob_std;
		ecc->read_page_raw = nand_read_page_raw;
		ecc->write_page_raw = nand_write_page_raw;
		ecc->write_oob = nand_write_oob_std;
		ecc->size = mtd->writesize;
		ecc->bytes = 0;
		ecc->strength = 0;
		break;

	default:
		WARN(1, "Invalid NAND_ECC_MODE %d\n", ecc->mode);
		ret = -EINVAL;
		goto err_free;
	}

	/* For many systems, the standard OOB accessors also work raw */
	if (!ecc->read_oob_raw)
		ecc->read_oob_raw = ecc->read_oob;
	if (!ecc->write_oob_raw)
		ecc->write_oob_raw = ecc->write_oob;

	/* Propagate ECC info to mtd_info */
	mtd->ecc_strength = ecc->strength;
	mtd->ecc_step_size = ecc->size;

	/*
	 * Set the number of read / write steps for one page depending on ECC
	 * mode; the page size must be a whole multiple of the ECC step size.
	 */
	ecc->steps = mtd->writesize / ecc->size;
	if (ecc->steps * ecc->size != mtd->writesize) {
		WARN(1, "Invalid ECC parameters\n");
		ret = -EINVAL;
		goto err_free;
	}
	ecc->total = ecc->steps * ecc->bytes;

	/*
	 * The number of bytes available for a client to place data into the
	 * OOB area. A negative count just means "no free bytes".
	 */
	ret = mtd_ooblayout_count_freebytes(mtd);
	if (ret < 0)
		ret = 0;

	mtd->oobavail = ret;

	/* ECC sanity check: warn if ECC is weaker than chip requirements */
	if (!nand_ecc_strength_good(mtd))
		pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
			mtd->name);

	/* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
	if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) {
		switch (ecc->steps) {
		case 2:
			mtd->subpage_sft = 1;
			break;
		case 4:
		case 8:
		case 16:
			mtd->subpage_sft = 2;
			break;
		}
	}
	chip->subpagesize = mtd->writesize >> mtd->subpage_sft;

	/* Initialize state */
	chip->state = FL_READY;

	/* Invalidate the pagebuffer reference */
	chip->pagebuf = -1;

	/* Large page NAND with SOFT_ECC should support subpage reads */
	switch (ecc->mode) {
	case NAND_ECC_SOFT:
		if (chip->page_shift > 9)
			chip->options |= NAND_SUBPAGE_READ;
		break;

	default:
		break;
	}

	/* Fill in remaining MTD driver data */
	mtd->type = nand_is_slc(chip) ? MTD_NANDFLASH : MTD_MLCNANDFLASH;
	mtd->flags = (chip->options & NAND_ROM) ? MTD_CAP_ROM :
						MTD_CAP_NANDFLASH;
	mtd->_erase = nand_erase;
	mtd->_point = NULL;
	mtd->_unpoint = NULL;
	mtd->_read = nand_read;
	mtd->_write = nand_write;
	mtd->_panic_write = panic_nand_write;
	mtd->_read_oob = nand_read_oob;
	mtd->_write_oob = nand_write_oob;
	mtd->_sync = nand_sync;
	mtd->_lock = NULL;
	mtd->_unlock = NULL;
	mtd->_suspend = nand_suspend;
	mtd->_resume = nand_resume;
	mtd->_reboot = nand_shutdown;
	mtd->_block_isreserved = nand_block_isreserved;
	mtd->_block_isbad = nand_block_isbad;
	mtd->_block_markbad = nand_block_markbad;
	mtd->_max_bad_blocks = nand_max_bad_blocks;
	mtd->writebufsize = mtd->writesize;

	/*
	 * Initialize bitflip_threshold to its default prior to calling
	 * scan_bbt, as it may invoke mtd_read(), which in turn consults this
	 * value.
	 */
	if (!mtd->bitflip_threshold)
		mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);

	/* Check, if we should skip the bad block table scan */
	if (chip->options & NAND_SKIP_BBTSCAN)
		return 0;

	/* Build bad block table */
	ret = chip->scan_bbt(mtd);
	if (ret)
		goto err_free;
	return 0;

err_free:
	if (nbuf) {
		kfree(nbuf->databuf);
		kfree(nbuf->ecccode);
		kfree(nbuf->ecccalc);
		kfree(nbuf);
	}

err_ident:
	/*
	 * Clean up the resources acquired during nand_scan_ident() so the
	 * caller does not have to on failure.
	 */
	nand_manufacturer_cleanup(chip);

	return ret;
}
4879EXPORT_SYMBOL(nand_scan_tail);
4880
4881
4882
4883
4884
4885
/*
 * caller_is_module() - whether the code calling into this core is module text.
 *
 * When this NAND core is itself built as a module, any caller is necessarily
 * module code, so the check collapses to a constant. When built in, check
 * whether the return address lies inside module text. NOTE(review): no user
 * of this macro is visible in this part of the file - presumably used to
 * decide module reference handling; verify against the full file.
 */
#ifdef MODULE
#define caller_is_module() (1)
#else
#define caller_is_module() \
	is_module_text_address((unsigned long)__builtin_return_address(0))
#endif
4892
4893
4894
4895
4896
4897
4898
4899
4900
4901
4902int nand_scan(struct mtd_info *mtd, int maxchips)
4903{
4904 int ret;
4905
4906 ret = nand_scan_ident(mtd, maxchips, NULL);
4907 if (!ret)
4908 ret = nand_scan_tail(mtd);
4909 return ret;
4910}
4911EXPORT_SYMBOL(nand_scan);
4912
4913
4914
4915
4916
4917void nand_cleanup(struct nand_chip *chip)
4918{
4919 if (chip->ecc.mode == NAND_ECC_SOFT &&
4920 chip->ecc.algo == NAND_ECC_BCH)
4921 nand_bch_free((struct nand_bch_control *)chip->ecc.priv);
4922
4923 nand_release_data_interface(chip);
4924
4925
4926 kfree(chip->bbt);
4927 if (!(chip->options & NAND_OWN_BUFFERS) && chip->buffers) {
4928 kfree(chip->buffers->databuf);
4929 kfree(chip->buffers->ecccode);
4930 kfree(chip->buffers->ecccalc);
4931 kfree(chip->buffers);
4932 }
4933
4934
4935 if (chip->badblock_pattern && chip->badblock_pattern->options
4936 & NAND_BBT_DYNAMICSTRUCT)
4937 kfree(chip->badblock_pattern);
4938
4939
4940 nand_manufacturer_cleanup(chip);
4941}
4942EXPORT_SYMBOL_GPL(nand_cleanup);
4943
4944
4945
4946
4947
4948
/**
 * nand_release - [NAND Interface] Unregister the MTD device and free resources
 * @mtd: MTD device structure
 *
 * Convenience wrapper: unregisters the MTD device from the MTD core and
 * then releases all NAND-specific resources via nand_cleanup().
 */
void nand_release(struct mtd_info *mtd)
{
	mtd_device_unregister(mtd);
	nand_cleanup(mtd_to_nand(mtd));
}
4954EXPORT_SYMBOL_GPL(nand_release);
4955
4956MODULE_LICENSE("GPL");
4957MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>");
4958MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
4959MODULE_DESCRIPTION("Generic NAND flash driver code");
4960