#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/types.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/nand_ecc.h>
#include <linux/mtd/nand_bch.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/mtd/partitions.h>
#include <linux/of.h>

static int nand_get_device(struct mtd_info *mtd, int new_state);

static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
			     struct mtd_oob_ops *ops);

57static int nand_ooblayout_ecc_sp(struct mtd_info *mtd, int section,
58 struct mtd_oob_region *oobregion)
59{
60 struct nand_chip *chip = mtd_to_nand(mtd);
61 struct nand_ecc_ctrl *ecc = &chip->ecc;
62
63 if (section > 1)
64 return -ERANGE;
65
66 if (!section) {
67 oobregion->offset = 0;
68 if (mtd->oobsize == 16)
69 oobregion->length = 4;
70 else
71 oobregion->length = 3;
72 } else {
73 if (mtd->oobsize == 8)
74 return -ERANGE;
75
76 oobregion->offset = 6;
77 oobregion->length = ecc->total - 4;
78 }
79
80 return 0;
81}
82
83static int nand_ooblayout_free_sp(struct mtd_info *mtd, int section,
84 struct mtd_oob_region *oobregion)
85{
86 if (section > 1)
87 return -ERANGE;
88
89 if (mtd->oobsize == 16) {
90 if (section)
91 return -ERANGE;
92
93 oobregion->length = 8;
94 oobregion->offset = 8;
95 } else {
96 oobregion->length = 2;
97 if (!section)
98 oobregion->offset = 3;
99 else
100 oobregion->offset = 6;
101 }
102
103 return 0;
104}
105
106const struct mtd_ooblayout_ops nand_ooblayout_sp_ops = {
107 .ecc = nand_ooblayout_ecc_sp,
108 .free = nand_ooblayout_free_sp,
109};
110EXPORT_SYMBOL_GPL(nand_ooblayout_sp_ops);
111
112static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section,
113 struct mtd_oob_region *oobregion)
114{
115 struct nand_chip *chip = mtd_to_nand(mtd);
116 struct nand_ecc_ctrl *ecc = &chip->ecc;
117
118 if (section || !ecc->total)
119 return -ERANGE;
120
121 oobregion->length = ecc->total;
122 oobregion->offset = mtd->oobsize - oobregion->length;
123
124 return 0;
125}
126
127static int nand_ooblayout_free_lp(struct mtd_info *mtd, int section,
128 struct mtd_oob_region *oobregion)
129{
130 struct nand_chip *chip = mtd_to_nand(mtd);
131 struct nand_ecc_ctrl *ecc = &chip->ecc;
132
133 if (section)
134 return -ERANGE;
135
136 oobregion->length = mtd->oobsize - ecc->total - 2;
137 oobregion->offset = 2;
138
139 return 0;
140}
141
142const struct mtd_ooblayout_ops nand_ooblayout_lp_ops = {
143 .ecc = nand_ooblayout_ecc_lp,
144 .free = nand_ooblayout_free_lp,
145};
146EXPORT_SYMBOL_GPL(nand_ooblayout_lp_ops);
147
148
149
150
151
152static int nand_ooblayout_ecc_lp_hamming(struct mtd_info *mtd, int section,
153 struct mtd_oob_region *oobregion)
154{
155 struct nand_chip *chip = mtd_to_nand(mtd);
156 struct nand_ecc_ctrl *ecc = &chip->ecc;
157
158 if (section)
159 return -ERANGE;
160
161 switch (mtd->oobsize) {
162 case 64:
163 oobregion->offset = 40;
164 break;
165 case 128:
166 oobregion->offset = 80;
167 break;
168 default:
169 return -EINVAL;
170 }
171
172 oobregion->length = ecc->total;
173 if (oobregion->offset + oobregion->length > mtd->oobsize)
174 return -ERANGE;
175
176 return 0;
177}
178
179static int nand_ooblayout_free_lp_hamming(struct mtd_info *mtd, int section,
180 struct mtd_oob_region *oobregion)
181{
182 struct nand_chip *chip = mtd_to_nand(mtd);
183 struct nand_ecc_ctrl *ecc = &chip->ecc;
184 int ecc_offset = 0;
185
186 if (section < 0 || section > 1)
187 return -ERANGE;
188
189 switch (mtd->oobsize) {
190 case 64:
191 ecc_offset = 40;
192 break;
193 case 128:
194 ecc_offset = 80;
195 break;
196 default:
197 return -EINVAL;
198 }
199
200 if (section == 0) {
201 oobregion->offset = 2;
202 oobregion->length = ecc_offset - 2;
203 } else {
204 oobregion->offset = ecc_offset + ecc->total;
205 oobregion->length = mtd->oobsize - oobregion->offset;
206 }
207
208 return 0;
209}
210
211static const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = {
212 .ecc = nand_ooblayout_ecc_lp_hamming,
213 .free = nand_ooblayout_free_lp_hamming,
214};
215
216static int check_offs_len(struct mtd_info *mtd,
217 loff_t ofs, uint64_t len)
218{
219 struct nand_chip *chip = mtd_to_nand(mtd);
220 int ret = 0;
221
222
223 if (ofs & ((1ULL << chip->phys_erase_shift) - 1)) {
224 pr_debug("%s: unaligned address\n", __func__);
225 ret = -EINVAL;
226 }
227
228
229 if (len & ((1ULL << chip->phys_erase_shift) - 1)) {
230 pr_debug("%s: length not block aligned\n", __func__);
231 ret = -EINVAL;
232 }
233
234 return ret;
235}
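/**
 * nand_release_device - [GENERIC] release chip
 * @mtd: MTD device structure
 *
 * Release the controller lock, mark the chip ready and wake up anyone
 * waiting on the device.
 */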
243static void nand_release_device(struct mtd_info *mtd)
244{
245 struct nand_chip *chip = mtd_to_nand(mtd);
246
247
248 spin_lock(&chip->controller->lock);
249 chip->controller->active = NULL;
250 chip->state = FL_READY;
251 wake_up(&chip->controller->wq);
252 spin_unlock(&chip->controller->lock);
253}
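/**
 * nand_read_byte - [DEFAULT] read one byte from the chip
 * @mtd: MTD device structure
 *
 * Default read function for 8-bit bus width.
 */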
261static uint8_t nand_read_byte(struct mtd_info *mtd)
262{
263 struct nand_chip *chip = mtd_to_nand(mtd);
264 return readb(chip->IO_ADDR_R);
265}
266
267
268
269
270
271
272
273
274static uint8_t nand_read_byte16(struct mtd_info *mtd)
275{
276 struct nand_chip *chip = mtd_to_nand(mtd);
277 return (uint8_t) cpu_to_le16(readw(chip->IO_ADDR_R));
278}
279
280
281
282
283
284
285
286static u16 nand_read_word(struct mtd_info *mtd)
287{
288 struct nand_chip *chip = mtd_to_nand(mtd);
289 return readw(chip->IO_ADDR_R);
290}
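/**
 * nand_select_chip - [DEFAULT] control CE line
 * @mtd: MTD device structure
 * @chipnr: chip number to select, -1 for deselect
 *
 * Default select function for single-chip devices.
 */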
299static void nand_select_chip(struct mtd_info *mtd, int chipnr)
300{
301 struct nand_chip *chip = mtd_to_nand(mtd);
302
303 switch (chipnr) {
304 case -1:
305 chip->cmd_ctrl(mtd, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE);
306 break;
307 case 0:
308 break;
309
310 default:
311 BUG();
312 }
313}
314
315
316
317
318
319
320
321
322static void nand_write_byte(struct mtd_info *mtd, uint8_t byte)
323{
324 struct nand_chip *chip = mtd_to_nand(mtd);
325
326 chip->write_buf(mtd, &byte, 1);
327}
328
329
330
331
332
333
334
335
336static void nand_write_byte16(struct mtd_info *mtd, uint8_t byte)
337{
338 struct nand_chip *chip = mtd_to_nand(mtd);
339 uint16_t word = byte;
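	/*
	 * On a 16-bit bus the smallest possible transfer is one word, so the
	 * byte is widened to 16 bits (upper byte zero) and written as a
	 * single word. Controllers that need different behaviour should
	 * provide their own ->write_byte() implementation.
	 */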
357 chip->write_buf(mtd, (uint8_t *)&word, 2);
358}
359
360
361
362
363
364
365
366
367
368static void nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
369{
370 struct nand_chip *chip = mtd_to_nand(mtd);
371
372 iowrite8_rep(chip->IO_ADDR_W, buf, len);
373}
374
375
376
377
378
379
380
381
382
383static void nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
384{
385 struct nand_chip *chip = mtd_to_nand(mtd);
386
387 ioread8_rep(chip->IO_ADDR_R, buf, len);
388}
389
390
391
392
393
394
395
396
397
398static void nand_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
399{
400 struct nand_chip *chip = mtd_to_nand(mtd);
401 u16 *p = (u16 *) buf;
402
403 iowrite16_rep(chip->IO_ADDR_W, p, len >> 1);
404}
405
406
407
408
409
410
411
412
413
414static void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
415{
416 struct nand_chip *chip = mtd_to_nand(mtd);
417 u16 *p = (u16 *) buf;
418
419 ioread16_rep(chip->IO_ADDR_R, p, len >> 1);
420}
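/**
 * nand_block_bad - [DEFAULT] read bad block marker from the chip
 * @mtd: MTD device structure
 * @ofs: offset from device start
 *
 * Check if a block is bad by reading the bad block marker byte in the OOB
 * area of the first (or last, with NAND_BBT_SCANLASTPAGE) page of the block,
 * and optionally of the following page as well.
 */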
429static int nand_block_bad(struct mtd_info *mtd, loff_t ofs)
430{
431 int page, page_end, res;
432 struct nand_chip *chip = mtd_to_nand(mtd);
433 u8 bad;
434
435 if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
436 ofs += mtd->erasesize - mtd->writesize;
437
438 page = (int)(ofs >> chip->page_shift) & chip->pagemask;
439 page_end = page + (chip->bbt_options & NAND_BBT_SCAN2NDPAGE ? 2 : 1);
440
441 for (; page < page_end; page++) {
442 res = chip->ecc.read_oob(mtd, chip, page);
443 if (res)
444 return res;
445
446 bad = chip->oob_poi[chip->badblockpos];
447
448 if (likely(chip->badblockbits == 8))
449 res = bad != 0xFF;
450 else
451 res = hweight8(bad) < chip->badblockbits;
452 if (res)
453 return res;
454 }
455
456 return 0;
457}
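/**
 * nand_default_block_markbad - [DEFAULT] mark a block bad via bad block marker
 * @mtd: MTD device structure
 * @ofs: offset from device start
 *
 * Default implementation, which can be overridden by a hardware specific
 * driver. It writes the bad block marker byte(s) to the OOB area of the
 * first (or last) page of the block, and optionally of the following page.
 */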
468static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
469{
470 struct nand_chip *chip = mtd_to_nand(mtd);
471 struct mtd_oob_ops ops;
472 uint8_t buf[2] = { 0, 0 };
473 int ret = 0, res, i = 0;
474
475 memset(&ops, 0, sizeof(ops));
476 ops.oobbuf = buf;
477 ops.ooboffs = chip->badblockpos;
478 if (chip->options & NAND_BUSWIDTH_16) {
479 ops.ooboffs &= ~0x01;
480 ops.len = ops.ooblen = 2;
481 } else {
482 ops.len = ops.ooblen = 1;
483 }
484 ops.mode = MTD_OPS_PLACE_OOB;
485
486
487 if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
488 ofs += mtd->erasesize - mtd->writesize;
489 do {
490 res = nand_do_write_oob(mtd, ofs, &ops);
491 if (!ret)
492 ret = res;
493
494 i++;
495 ofs += mtd->writesize;
496 } while ((chip->bbt_options & NAND_BBT_SCAN2NDPAGE) && i < 2);
497
498 return ret;
499}
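/**
 * nand_block_markbad_lowlevel - mark a block bad
 * @mtd: MTD device structure
 * @ofs: offset from device start
 *
 * Performs the generic bad block marking steps:
 *  (1) erase the affected block, so the OOB marker can be written cleanly
 *  (2) write the bad block marker to the OOB area of the block (unless
 *      NAND_BBT_NO_OOB_BBM is set)
 *  (3) update the in-memory/on-flash BBT, if one exists
 *
 * The first error encountered is retained and returned after all steps have
 * been attempted.
 */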
520static int nand_block_markbad_lowlevel(struct mtd_info *mtd, loff_t ofs)
521{
522 struct nand_chip *chip = mtd_to_nand(mtd);
523 int res, ret = 0;
524
525 if (!(chip->bbt_options & NAND_BBT_NO_OOB_BBM)) {
526 struct erase_info einfo;
527
528
529 memset(&einfo, 0, sizeof(einfo));
530 einfo.addr = ofs;
531 einfo.len = 1ULL << chip->phys_erase_shift;
532 nand_erase_nand(mtd, &einfo, 0);
533
534
535 nand_get_device(mtd, FL_WRITING);
536 ret = chip->block_markbad(mtd, ofs);
537 nand_release_device(mtd);
538 }
539
540
541 if (chip->bbt) {
542 res = nand_markbad_bbt(mtd, ofs);
543 if (!ret)
544 ret = res;
545 }
546
547 if (!ret)
548 mtd->ecc_stats.badblocks++;
549
550 return ret;
551}
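/**
 * nand_check_wp - [GENERIC] check if the chip is write protected
 * @mtd: MTD device structure
 *
 * Read the status register to check whether the device is write protected.
 * The function expects the device to be already selected. Returns 0 if the
 * chip is writable, 1 if it is write protected, or a negative error code.
 */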
560static int nand_check_wp(struct mtd_info *mtd)
561{
562 struct nand_chip *chip = mtd_to_nand(mtd);
563 u8 status;
564 int ret;
565
566
567 if (chip->options & NAND_BROKEN_XD)
568 return 0;
569
570
571 ret = nand_status_op(chip, &status);
572 if (ret)
573 return ret;
574
575 return status & NAND_STATUS_WP ? 0 : 1;
576}
577
578
579
580
581
582
583
584
585static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs)
586{
587 struct nand_chip *chip = mtd_to_nand(mtd);
588
589 if (!chip->bbt)
590 return 0;
591
592 return nand_isreserved_bbt(mtd, ofs);
593}
594
595
596
597
598
599
600
601
602
603
604static int nand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int allowbbt)
605{
606 struct nand_chip *chip = mtd_to_nand(mtd);
607
608 if (!chip->bbt)
609 return chip->block_bad(mtd, ofs);
610
611
612 return nand_isbad_bbt(mtd, ofs, allowbbt);
613}
614
615
616
617
618
619
620
621
622
623static void panic_nand_wait_ready(struct mtd_info *mtd, unsigned long timeo)
624{
625 struct nand_chip *chip = mtd_to_nand(mtd);
626 int i;
627
628
629 for (i = 0; i < timeo; i++) {
630 if (chip->dev_ready(mtd))
631 break;
632 touch_softlockup_watchdog();
633 mdelay(1);
634 }
635}
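/**
 * nand_wait_ready - [GENERIC] Wait for the ready pin after commands
 * @mtd: MTD device structure
 *
 * Wait up to ~400ms for the ready pin after a command, and warn if the chip
 * does not become ready in time. Falls back to a polling loop when called
 * from interrupt or panic context.
 */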
643void nand_wait_ready(struct mtd_info *mtd)
644{
645 struct nand_chip *chip = mtd_to_nand(mtd);
646 unsigned long timeo = 400;
647
648 if (in_interrupt() || oops_in_progress)
649 return panic_nand_wait_ready(mtd, timeo);
650
651
652 timeo = jiffies + msecs_to_jiffies(timeo);
653 do {
654 if (chip->dev_ready(mtd))
655 return;
656 cond_resched();
657 } while (time_before(jiffies, timeo));
658
659 if (!chip->dev_ready(mtd))
660 pr_warn_ratelimited("timeout while waiting for chip to become ready\n");
661}
662EXPORT_SYMBOL_GPL(nand_wait_ready);
663
664
665
666
667
668
669
670
671static void nand_wait_status_ready(struct mtd_info *mtd, unsigned long timeo)
672{
673 register struct nand_chip *chip = mtd_to_nand(mtd);
674 int ret;
675
676 timeo = jiffies + msecs_to_jiffies(timeo);
677 do {
678 u8 status;
679
680 ret = nand_read_data_op(chip, &status, sizeof(status), true);
681 if (ret)
682 return;
683
684 if (status & NAND_STATUS_READY)
685 break;
686 touch_softlockup_watchdog();
687 } while (time_before(jiffies, timeo));
688};
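/**
 * nand_soft_waitrdy - Poll the STATUS register until the RDY bit is set
 * @chip: NAND chip structure
 * @timeout_ms: timeout in milliseconds
 *
 * Poll the STATUS register using ->exec_op() until the RDY bit becomes 1,
 * then issue a READ0 command to leave status reading mode. Intended for
 * controllers that have no access to the NAND R/B pin. Note that calling
 * this helper from an ->exec_op() implementation requires ->exec_op() to be
 * re-entrant.
 *
 * Returns 0 if the chip became ready, -ETIMEDOUT otherwise.
 */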
707int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
708{
709 const struct nand_sdr_timings *timings;
710 u8 status = 0;
711 int ret;
712
713 if (!chip->exec_op)
714 return -ENOTSUPP;
715
716
717 timings = nand_get_sdr_timings(&chip->data_interface);
718 ndelay(PSEC_TO_NSEC(timings->tWB_max));
719
720 ret = nand_status_op(chip, NULL);
721 if (ret)
722 return ret;
723
724 timeout_ms = jiffies + msecs_to_jiffies(timeout_ms);
725 do {
726 ret = nand_read_data_op(chip, &status, sizeof(status), true);
727 if (ret)
728 break;
729
730 if (status & NAND_STATUS_READY)
731 break;
732
733
734
735
736
737
738 udelay(10);
739 } while (time_before(jiffies, timeout_ms));
740
741
742
743
744
745
746 nand_exit_status_op(chip);
747
748 if (ret)
749 return ret;
750
751 return status & NAND_STATUS_READY ? 0 : -ETIMEDOUT;
752};
753EXPORT_SYMBOL_GPL(nand_soft_waitrdy);
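/**
 * nand_command - [DEFAULT] Send command to NAND device
 * @mtd: MTD device structure
 * @command: the command to be sent
 * @column: the column address for this command, -1 if none
 * @page_addr: the page address for this command, -1 if none
 *
 * Send a command to the NAND device. This is the version for small page
 * devices (256/512 bytes per page), which need the READ0/READ1/READOOB
 * region selection handled in software.
 */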
765static void nand_command(struct mtd_info *mtd, unsigned int command,
766 int column, int page_addr)
767{
768 register struct nand_chip *chip = mtd_to_nand(mtd);
769 int ctrl = NAND_CTRL_CLE | NAND_CTRL_CHANGE;
770
771
772 if (command == NAND_CMD_SEQIN) {
773 int readcmd;
774
775 if (column >= mtd->writesize) {
776
777 column -= mtd->writesize;
778 readcmd = NAND_CMD_READOOB;
779 } else if (column < 256) {
780
781 readcmd = NAND_CMD_READ0;
782 } else {
783 column -= 256;
784 readcmd = NAND_CMD_READ1;
785 }
786 chip->cmd_ctrl(mtd, readcmd, ctrl);
787 ctrl &= ~NAND_CTRL_CHANGE;
788 }
789 if (command != NAND_CMD_NONE)
790 chip->cmd_ctrl(mtd, command, ctrl);
791
792
793 ctrl = NAND_CTRL_ALE | NAND_CTRL_CHANGE;
794
795 if (column != -1) {
796
797 if (chip->options & NAND_BUSWIDTH_16 &&
798 !nand_opcode_8bits(command))
799 column >>= 1;
800 chip->cmd_ctrl(mtd, column, ctrl);
801 ctrl &= ~NAND_CTRL_CHANGE;
802 }
803 if (page_addr != -1) {
804 chip->cmd_ctrl(mtd, page_addr, ctrl);
805 ctrl &= ~NAND_CTRL_CHANGE;
806 chip->cmd_ctrl(mtd, page_addr >> 8, ctrl);
807 if (chip->options & NAND_ROW_ADDR_3)
808 chip->cmd_ctrl(mtd, page_addr >> 16, ctrl);
809 }
810 chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
811
812
813
814
815
816 switch (command) {
817
818 case NAND_CMD_NONE:
819 case NAND_CMD_PAGEPROG:
820 case NAND_CMD_ERASE1:
821 case NAND_CMD_ERASE2:
822 case NAND_CMD_SEQIN:
823 case NAND_CMD_STATUS:
824 case NAND_CMD_READID:
825 case NAND_CMD_SET_FEATURES:
826 return;
827
828 case NAND_CMD_RESET:
829 if (chip->dev_ready)
830 break;
831 udelay(chip->chip_delay);
832 chip->cmd_ctrl(mtd, NAND_CMD_STATUS,
833 NAND_CTRL_CLE | NAND_CTRL_CHANGE);
834 chip->cmd_ctrl(mtd,
835 NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
836
837 nand_wait_status_ready(mtd, 250);
838 return;
839
840
841 case NAND_CMD_READ0:
842
843
844
845
846
847
848 if (column == -1 && page_addr == -1)
849 return;
850
851 default:
852
853
854
855
856 if (!chip->dev_ready) {
857 udelay(chip->chip_delay);
858 return;
859 }
860 }
861
862
863
864
865 ndelay(100);
866
867 nand_wait_ready(mtd);
868}
869
870static void nand_ccs_delay(struct nand_chip *chip)
871{
872
873
874
875
876 if (!(chip->options & NAND_WAIT_TCCS))
877 return;
878
879
880
881
882
883 if (chip->setup_data_interface)
884 ndelay(chip->data_interface.timings.sdr.tCCS_min / 1000);
885 else
886 ndelay(500);
887}
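/**
 * nand_command_lp - [DEFAULT] Send command to NAND large page device
 * @mtd: MTD device structure
 * @command: the command to be sent
 * @column: the column address for this command, -1 if none
 * @page_addr: the page address for this command, -1 if none
 *
 * Send a command to a large page NAND device. Large page devices do not have
 * the separate read regions of small page devices, so NAND_CMD_READOOB is
 * emulated by adjusting the column and issuing NAND_CMD_READ0.
 */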
900static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
901 int column, int page_addr)
902{
903 register struct nand_chip *chip = mtd_to_nand(mtd);
904
905
906 if (command == NAND_CMD_READOOB) {
907 column += mtd->writesize;
908 command = NAND_CMD_READ0;
909 }
910
911
912 if (command != NAND_CMD_NONE)
913 chip->cmd_ctrl(mtd, command,
914 NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
915
916 if (column != -1 || page_addr != -1) {
917 int ctrl = NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE;
918
919
920 if (column != -1) {
921
922 if (chip->options & NAND_BUSWIDTH_16 &&
923 !nand_opcode_8bits(command))
924 column >>= 1;
925 chip->cmd_ctrl(mtd, column, ctrl);
926 ctrl &= ~NAND_CTRL_CHANGE;
927
928
929 if (!nand_opcode_8bits(command))
930 chip->cmd_ctrl(mtd, column >> 8, ctrl);
931 }
932 if (page_addr != -1) {
933 chip->cmd_ctrl(mtd, page_addr, ctrl);
934 chip->cmd_ctrl(mtd, page_addr >> 8,
935 NAND_NCE | NAND_ALE);
936 if (chip->options & NAND_ROW_ADDR_3)
937 chip->cmd_ctrl(mtd, page_addr >> 16,
938 NAND_NCE | NAND_ALE);
939 }
940 }
941 chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
942
943
944
945
946
947 switch (command) {
948
949 case NAND_CMD_NONE:
950 case NAND_CMD_CACHEDPROG:
951 case NAND_CMD_PAGEPROG:
952 case NAND_CMD_ERASE1:
953 case NAND_CMD_ERASE2:
954 case NAND_CMD_SEQIN:
955 case NAND_CMD_STATUS:
956 case NAND_CMD_READID:
957 case NAND_CMD_SET_FEATURES:
958 return;
959
960 case NAND_CMD_RNDIN:
961 nand_ccs_delay(chip);
962 return;
963
964 case NAND_CMD_RESET:
965 if (chip->dev_ready)
966 break;
967 udelay(chip->chip_delay);
968 chip->cmd_ctrl(mtd, NAND_CMD_STATUS,
969 NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
970 chip->cmd_ctrl(mtd, NAND_CMD_NONE,
971 NAND_NCE | NAND_CTRL_CHANGE);
972
973 nand_wait_status_ready(mtd, 250);
974 return;
975
976 case NAND_CMD_RNDOUT:
977
978 chip->cmd_ctrl(mtd, NAND_CMD_RNDOUTSTART,
979 NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
980 chip->cmd_ctrl(mtd, NAND_CMD_NONE,
981 NAND_NCE | NAND_CTRL_CHANGE);
982
983 nand_ccs_delay(chip);
984 return;
985
986 case NAND_CMD_READ0:
987
988
989
990
991
992
993 if (column == -1 && page_addr == -1)
994 return;
995
996 chip->cmd_ctrl(mtd, NAND_CMD_READSTART,
997 NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
998 chip->cmd_ctrl(mtd, NAND_CMD_NONE,
999 NAND_NCE | NAND_CTRL_CHANGE);
1000
1001
1002 default:
1003
1004
1005
1006
1007 if (!chip->dev_ready) {
1008 udelay(chip->chip_delay);
1009 return;
1010 }
1011 }
1012
1013
1014
1015
1016
1017 ndelay(100);
1018
1019 nand_wait_ready(mtd);
1020}
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030static void panic_nand_get_device(struct nand_chip *chip,
1031 struct mtd_info *mtd, int new_state)
1032{
1033
1034 chip->controller->active = chip;
1035 chip->state = new_state;
1036}
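/**
 * nand_get_device - [GENERIC] Get chip for selected access
 * @mtd: MTD device structure
 * @new_state: the state which is requested
 *
 * Get the device and lock it for exclusive access.
 */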
1045static int
1046nand_get_device(struct mtd_info *mtd, int new_state)
1047{
1048 struct nand_chip *chip = mtd_to_nand(mtd);
1049 spinlock_t *lock = &chip->controller->lock;
1050 wait_queue_head_t *wq = &chip->controller->wq;
1051 DECLARE_WAITQUEUE(wait, current);
1052retry:
1053 spin_lock(lock);
1054
1055
1056 if (!chip->controller->active)
1057 chip->controller->active = chip;
1058
1059 if (chip->controller->active == chip && chip->state == FL_READY) {
1060 chip->state = new_state;
1061 spin_unlock(lock);
1062 return 0;
1063 }
1064 if (new_state == FL_PM_SUSPENDED) {
1065 if (chip->controller->active->state == FL_PM_SUSPENDED) {
1066 chip->state = FL_PM_SUSPENDED;
1067 spin_unlock(lock);
1068 return 0;
1069 }
1070 }
1071 set_current_state(TASK_UNINTERRUPTIBLE);
1072 add_wait_queue(wq, &wait);
1073 spin_unlock(lock);
1074 schedule();
1075 remove_wait_queue(wq, &wait);
1076 goto retry;
1077}
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089static void panic_nand_wait(struct mtd_info *mtd, struct nand_chip *chip,
1090 unsigned long timeo)
1091{
1092 int i;
1093 for (i = 0; i < timeo; i++) {
1094 if (chip->dev_ready) {
1095 if (chip->dev_ready(mtd))
1096 break;
1097 } else {
1098 int ret;
1099 u8 status;
1100
1101 ret = nand_read_data_op(chip, &status, sizeof(status),
1102 true);
1103 if (ret)
1104 return;
1105
1106 if (status & NAND_STATUS_READY)
1107 break;
1108 }
1109 mdelay(1);
1110 }
1111}
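/**
 * nand_wait - [DEFAULT] wait until the command is done
 * @mtd: MTD device structure
 * @chip: NAND chip structure
 *
 * Wait for the command to complete and return the status register value.
 * This applies to erase and program operations only.
 */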
1120static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
1121{
1122
1123 unsigned long timeo = 400;
1124 u8 status;
1125 int ret;
1126
1127
1128
1129
1130
1131 ndelay(100);
1132
1133 ret = nand_status_op(chip, NULL);
1134 if (ret)
1135 return ret;
1136
1137 if (in_interrupt() || oops_in_progress)
1138 panic_nand_wait(mtd, chip, timeo);
1139 else {
1140 timeo = jiffies + msecs_to_jiffies(timeo);
1141 do {
1142 if (chip->dev_ready) {
1143 if (chip->dev_ready(mtd))
1144 break;
1145 } else {
1146 ret = nand_read_data_op(chip, &status,
1147 sizeof(status), true);
1148 if (ret)
1149 return ret;
1150
1151 if (status & NAND_STATUS_READY)
1152 break;
1153 }
1154 cond_resched();
1155 } while (time_before(jiffies, timeo));
1156 }
1157
1158 ret = nand_read_data_op(chip, &status, sizeof(status), true);
1159 if (ret)
1160 return ret;
1161
1162
1163 WARN_ON(!(status & NAND_STATUS_READY));
1164 return status;
1165}
1166
1167static bool nand_supports_get_features(struct nand_chip *chip, int addr)
1168{
1169 return (chip->parameters.supports_set_get_features &&
1170 test_bit(addr, chip->parameters.get_feature_list));
1171}
1172
1173static bool nand_supports_set_features(struct nand_chip *chip, int addr)
1174{
1175 return (chip->parameters.supports_set_get_features &&
1176 test_bit(addr, chip->parameters.set_feature_list));
1177}
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188int nand_get_features(struct nand_chip *chip, int addr,
1189 u8 *subfeature_param)
1190{
1191 struct mtd_info *mtd = nand_to_mtd(chip);
1192
1193 if (!nand_supports_get_features(chip, addr))
1194 return -ENOTSUPP;
1195
1196 return chip->get_features(mtd, chip, addr, subfeature_param);
1197}
1198EXPORT_SYMBOL_GPL(nand_get_features);
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209int nand_set_features(struct nand_chip *chip, int addr,
1210 u8 *subfeature_param)
1211{
1212 struct mtd_info *mtd = nand_to_mtd(chip);
1213
1214 if (!nand_supports_set_features(chip, addr))
1215 return -ENOTSUPP;
1216
1217 return chip->set_features(mtd, chip, addr, subfeature_param);
1218}
1219EXPORT_SYMBOL_GPL(nand_set_features);
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230static int nand_reset_data_interface(struct nand_chip *chip, int chipnr)
1231{
1232 struct mtd_info *mtd = nand_to_mtd(chip);
1233 int ret;
1234
1235 if (!chip->setup_data_interface)
1236 return 0;
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252 onfi_fill_data_interface(chip, NAND_SDR_IFACE, 0);
1253 ret = chip->setup_data_interface(mtd, chipnr, &chip->data_interface);
1254 if (ret)
1255 pr_err("Failed to configure data interface to SDR timing mode 0\n");
1256
1257 return ret;
1258}
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273static int nand_setup_data_interface(struct nand_chip *chip, int chipnr)
1274{
1275 struct mtd_info *mtd = nand_to_mtd(chip);
1276 u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = {
1277 chip->onfi_timing_mode_default,
1278 };
1279 int ret;
1280
1281 if (!chip->setup_data_interface)
1282 return 0;
1283
1284
1285 if (nand_supports_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE)) {
1286 chip->select_chip(mtd, chipnr);
1287 ret = nand_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
1288 tmode_param);
1289 chip->select_chip(mtd, -1);
1290 if (ret)
1291 return ret;
1292 }
1293
1294
1295 ret = chip->setup_data_interface(mtd, chipnr, &chip->data_interface);
1296 if (ret)
1297 return ret;
1298
1299
1300 if (!nand_supports_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE))
1301 return 0;
1302
1303 memset(tmode_param, 0, ONFI_SUBFEATURE_PARAM_LEN);
1304 chip->select_chip(mtd, chipnr);
1305 ret = nand_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
1306 tmode_param);
1307 chip->select_chip(mtd, -1);
1308 if (ret)
1309 goto err_reset_chip;
1310
1311 if (tmode_param[0] != chip->onfi_timing_mode_default) {
1312 pr_warn("timing mode %d not acknowledged by the NAND chip\n",
1313 chip->onfi_timing_mode_default);
1314 goto err_reset_chip;
1315 }
1316
1317 return 0;
1318
1319err_reset_chip:
1320
1321
1322
1323
1324 nand_reset_data_interface(chip, chipnr);
1325 chip->select_chip(mtd, chipnr);
1326 nand_reset_op(chip);
1327 chip->select_chip(mtd, -1);
1328
1329 return ret;
1330}
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346static int nand_init_data_interface(struct nand_chip *chip)
1347{
1348 struct mtd_info *mtd = nand_to_mtd(chip);
1349 int modes, mode, ret;
1350
1351 if (!chip->setup_data_interface)
1352 return 0;
1353
1354
1355
1356
1357
1358
1359 modes = onfi_get_async_timing_mode(chip);
1360 if (modes == ONFI_TIMING_MODE_UNKNOWN) {
1361 if (!chip->onfi_timing_mode_default)
1362 return 0;
1363
1364 modes = GENMASK(chip->onfi_timing_mode_default, 0);
1365 }
1366
1367
1368 for (mode = fls(modes) - 1; mode >= 0; mode--) {
1369 ret = onfi_fill_data_interface(chip, NAND_SDR_IFACE, mode);
1370 if (ret)
1371 continue;
1372
1373
1374
1375
1376
1377 ret = chip->setup_data_interface(mtd,
1378 NAND_DATA_IFACE_CHECK_ONLY,
1379 &chip->data_interface);
1380 if (!ret) {
1381 chip->onfi_timing_mode_default = mode;
1382 break;
1383 }
1384 }
1385
1386 return 0;
1387}
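/**
 * nand_fill_column_cycles - fill the column cycles of an address
 * @chip: the NAND chip
 * @addrs: array of address cycles to fill
 * @offset_in_page: the offset in the page
 *
 * Fills the first one or two bytes of @addrs depending on the page size and
 * the bus width, converting OOB offsets for small page devices along the
 * way.
 *
 * Returns the number of column cycles filled in, or a negative error code.
 */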
1401static int nand_fill_column_cycles(struct nand_chip *chip, u8 *addrs,
1402 unsigned int offset_in_page)
1403{
1404 struct mtd_info *mtd = nand_to_mtd(chip);
1405
1406
1407 if (offset_in_page > mtd->writesize + mtd->oobsize)
1408 return -EINVAL;
1409
1410
1411
1412
1413
1414
1415 if (mtd->writesize <= 512 && offset_in_page >= mtd->writesize)
1416 offset_in_page -= mtd->writesize;
1417
1418
1419
1420
1421
1422 if (chip->options & NAND_BUSWIDTH_16) {
1423 if (WARN_ON(offset_in_page % 2))
1424 return -EINVAL;
1425
1426 offset_in_page /= 2;
1427 }
1428
1429 addrs[0] = offset_in_page;
1430
1431
1432
1433
1434
1435 if (mtd->writesize <= 512)
1436 return 1;
1437
1438 addrs[1] = offset_in_page >> 8;
1439
1440 return 2;
1441}
1442
1443static int nand_sp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
1444 unsigned int offset_in_page, void *buf,
1445 unsigned int len)
1446{
1447 struct mtd_info *mtd = nand_to_mtd(chip);
1448 const struct nand_sdr_timings *sdr =
1449 nand_get_sdr_timings(&chip->data_interface);
1450 u8 addrs[4];
1451 struct nand_op_instr instrs[] = {
1452 NAND_OP_CMD(NAND_CMD_READ0, 0),
1453 NAND_OP_ADDR(3, addrs, PSEC_TO_NSEC(sdr->tWB_max)),
1454 NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
1455 PSEC_TO_NSEC(sdr->tRR_min)),
1456 NAND_OP_DATA_IN(len, buf, 0),
1457 };
1458 struct nand_operation op = NAND_OPERATION(instrs);
1459 int ret;
1460
1461
1462 if (!len)
1463 op.ninstrs--;
1464
1465 if (offset_in_page >= mtd->writesize)
1466 instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
1467 else if (offset_in_page >= 256 &&
1468 !(chip->options & NAND_BUSWIDTH_16))
1469 instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;
1470
1471 ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
1472 if (ret < 0)
1473 return ret;
1474
1475 addrs[1] = page;
1476 addrs[2] = page >> 8;
1477
1478 if (chip->options & NAND_ROW_ADDR_3) {
1479 addrs[3] = page >> 16;
1480 instrs[1].ctx.addr.naddrs++;
1481 }
1482
1483 return nand_exec_op(chip, &op);
1484}
1485
1486static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
1487 unsigned int offset_in_page, void *buf,
1488 unsigned int len)
1489{
1490 const struct nand_sdr_timings *sdr =
1491 nand_get_sdr_timings(&chip->data_interface);
1492 u8 addrs[5];
1493 struct nand_op_instr instrs[] = {
1494 NAND_OP_CMD(NAND_CMD_READ0, 0),
1495 NAND_OP_ADDR(4, addrs, 0),
1496 NAND_OP_CMD(NAND_CMD_READSTART, PSEC_TO_NSEC(sdr->tWB_max)),
1497 NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
1498 PSEC_TO_NSEC(sdr->tRR_min)),
1499 NAND_OP_DATA_IN(len, buf, 0),
1500 };
1501 struct nand_operation op = NAND_OPERATION(instrs);
1502 int ret;
1503
1504
1505 if (!len)
1506 op.ninstrs--;
1507
1508 ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
1509 if (ret < 0)
1510 return ret;
1511
1512 addrs[2] = page;
1513 addrs[3] = page >> 8;
1514
1515 if (chip->options & NAND_ROW_ADDR_3) {
1516 addrs[4] = page >> 16;
1517 instrs[1].ctx.addr.naddrs++;
1518 }
1519
1520 return nand_exec_op(chip, &op);
1521}
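/**
 * nand_read_page_op - Do a READ PAGE operation
 * @chip: the NAND chip
 * @page: page to read
 * @offset_in_page: offset within the page
 * @buf: buffer used to store the data
 * @len: length of the buffer
 *
 * Issue a READ PAGE operation. This function does not select/unselect the
 * CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */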
1536int nand_read_page_op(struct nand_chip *chip, unsigned int page,
1537 unsigned int offset_in_page, void *buf, unsigned int len)
1538{
1539 struct mtd_info *mtd = nand_to_mtd(chip);
1540
1541 if (len && !buf)
1542 return -EINVAL;
1543
1544 if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1545 return -EINVAL;
1546
1547 if (chip->exec_op) {
1548 if (mtd->writesize > 512)
1549 return nand_lp_exec_read_page_op(chip, page,
1550 offset_in_page, buf,
1551 len);
1552
1553 return nand_sp_exec_read_page_op(chip, page, offset_in_page,
1554 buf, len);
1555 }
1556
1557 chip->cmdfunc(mtd, NAND_CMD_READ0, offset_in_page, page);
1558 if (len)
1559 chip->read_buf(mtd, buf, len);
1560
1561 return 0;
1562}
1563EXPORT_SYMBOL_GPL(nand_read_page_op);
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577static int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf,
1578 unsigned int len)
1579{
1580 struct mtd_info *mtd = nand_to_mtd(chip);
1581 unsigned int i;
1582 u8 *p = buf;
1583
1584 if (len && !buf)
1585 return -EINVAL;
1586
1587 if (chip->exec_op) {
1588 const struct nand_sdr_timings *sdr =
1589 nand_get_sdr_timings(&chip->data_interface);
1590 struct nand_op_instr instrs[] = {
1591 NAND_OP_CMD(NAND_CMD_PARAM, 0),
1592 NAND_OP_ADDR(1, &page, PSEC_TO_NSEC(sdr->tWB_max)),
1593 NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
1594 PSEC_TO_NSEC(sdr->tRR_min)),
1595 NAND_OP_8BIT_DATA_IN(len, buf, 0),
1596 };
1597 struct nand_operation op = NAND_OPERATION(instrs);
1598
1599
1600 if (!len)
1601 op.ninstrs--;
1602
1603 return nand_exec_op(chip, &op);
1604 }
1605
1606 chip->cmdfunc(mtd, NAND_CMD_PARAM, page, -1);
1607 for (i = 0; i < len; i++)
1608 p[i] = chip->read_byte(mtd);
1609
1610 return 0;
1611}
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626int nand_change_read_column_op(struct nand_chip *chip,
1627 unsigned int offset_in_page, void *buf,
1628 unsigned int len, bool force_8bit)
1629{
1630 struct mtd_info *mtd = nand_to_mtd(chip);
1631
1632 if (len && !buf)
1633 return -EINVAL;
1634
1635 if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1636 return -EINVAL;
1637
1638
1639 if (mtd->writesize <= 512)
1640 return -ENOTSUPP;
1641
1642 if (chip->exec_op) {
1643 const struct nand_sdr_timings *sdr =
1644 nand_get_sdr_timings(&chip->data_interface);
1645 u8 addrs[2] = {};
1646 struct nand_op_instr instrs[] = {
1647 NAND_OP_CMD(NAND_CMD_RNDOUT, 0),
1648 NAND_OP_ADDR(2, addrs, 0),
1649 NAND_OP_CMD(NAND_CMD_RNDOUTSTART,
1650 PSEC_TO_NSEC(sdr->tCCS_min)),
1651 NAND_OP_DATA_IN(len, buf, 0),
1652 };
1653 struct nand_operation op = NAND_OPERATION(instrs);
1654 int ret;
1655
1656 ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
1657 if (ret < 0)
1658 return ret;
1659
1660
1661 if (!len)
1662 op.ninstrs--;
1663
1664 instrs[3].ctx.data.force_8bit = force_8bit;
1665
1666 return nand_exec_op(chip, &op);
1667 }
1668
1669 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, offset_in_page, -1);
1670 if (len)
1671 chip->read_buf(mtd, buf, len);
1672
1673 return 0;
1674}
1675EXPORT_SYMBOL_GPL(nand_change_read_column_op);
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690int nand_read_oob_op(struct nand_chip *chip, unsigned int page,
1691 unsigned int offset_in_oob, void *buf, unsigned int len)
1692{
1693 struct mtd_info *mtd = nand_to_mtd(chip);
1694
1695 if (len && !buf)
1696 return -EINVAL;
1697
1698 if (offset_in_oob + len > mtd->oobsize)
1699 return -EINVAL;
1700
1701 if (chip->exec_op)
1702 return nand_read_page_op(chip, page,
1703 mtd->writesize + offset_in_oob,
1704 buf, len);
1705
1706 chip->cmdfunc(mtd, NAND_CMD_READOOB, offset_in_oob, page);
1707 if (len)
1708 chip->read_buf(mtd, buf, len);
1709
1710 return 0;
1711}
1712EXPORT_SYMBOL_GPL(nand_read_oob_op);
1713
1714static int nand_exec_prog_page_op(struct nand_chip *chip, unsigned int page,
1715 unsigned int offset_in_page, const void *buf,
1716 unsigned int len, bool prog)
1717{
1718 struct mtd_info *mtd = nand_to_mtd(chip);
1719 const struct nand_sdr_timings *sdr =
1720 nand_get_sdr_timings(&chip->data_interface);
1721 u8 addrs[5] = {};
1722 struct nand_op_instr instrs[] = {
1723
1724
1725
1726
1727
1728 NAND_OP_CMD(NAND_CMD_READ0, 0),
1729 NAND_OP_CMD(NAND_CMD_SEQIN, 0),
1730 NAND_OP_ADDR(0, addrs, PSEC_TO_NSEC(sdr->tADL_min)),
1731 NAND_OP_DATA_OUT(len, buf, 0),
1732 NAND_OP_CMD(NAND_CMD_PAGEPROG, PSEC_TO_NSEC(sdr->tWB_max)),
1733 NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tPROG_max), 0),
1734 };
1735 struct nand_operation op = NAND_OPERATION(instrs);
1736 int naddrs = nand_fill_column_cycles(chip, addrs, offset_in_page);
1737 int ret;
1738 u8 status;
1739
1740 if (naddrs < 0)
1741 return naddrs;
1742
1743 addrs[naddrs++] = page;
1744 addrs[naddrs++] = page >> 8;
1745 if (chip->options & NAND_ROW_ADDR_3)
1746 addrs[naddrs++] = page >> 16;
1747
1748 instrs[2].ctx.addr.naddrs = naddrs;
1749
1750
1751 if (!prog) {
1752 op.ninstrs -= 2;
1753
1754 if (!len)
1755 op.ninstrs--;
1756 }
1757
1758 if (mtd->writesize <= 512) {
1759
1760
1761
1762
1763
1764 if (offset_in_page >= mtd->writesize)
1765 instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
1766 else if (offset_in_page >= 256 &&
1767 !(chip->options & NAND_BUSWIDTH_16))
1768 instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;
1769 } else {
1770
1771
1772
1773
1774 op.instrs++;
1775 op.ninstrs--;
1776 }
1777
1778 ret = nand_exec_op(chip, &op);
1779 if (!prog || ret)
1780 return ret;
1781
1782 ret = nand_status_op(chip, &status);
1783 if (ret)
1784 return ret;
1785
1786 return status;
1787}
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page,
1803 unsigned int offset_in_page, const void *buf,
1804 unsigned int len)
1805{
1806 struct mtd_info *mtd = nand_to_mtd(chip);
1807
1808 if (len && !buf)
1809 return -EINVAL;
1810
1811 if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1812 return -EINVAL;
1813
1814 if (chip->exec_op)
1815 return nand_exec_prog_page_op(chip, page, offset_in_page, buf,
1816 len, false);
1817
1818 chip->cmdfunc(mtd, NAND_CMD_SEQIN, offset_in_page, page);
1819
1820 if (buf)
1821 chip->write_buf(mtd, buf, len);
1822
1823 return 0;
1824}
1825EXPORT_SYMBOL_GPL(nand_prog_page_begin_op);
1826
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836int nand_prog_page_end_op(struct nand_chip *chip)
1837{
1838 struct mtd_info *mtd = nand_to_mtd(chip);
1839 int ret;
1840 u8 status;
1841
1842 if (chip->exec_op) {
1843 const struct nand_sdr_timings *sdr =
1844 nand_get_sdr_timings(&chip->data_interface);
1845 struct nand_op_instr instrs[] = {
1846 NAND_OP_CMD(NAND_CMD_PAGEPROG,
1847 PSEC_TO_NSEC(sdr->tWB_max)),
1848 NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tPROG_max), 0),
1849 };
1850 struct nand_operation op = NAND_OPERATION(instrs);
1851
1852 ret = nand_exec_op(chip, &op);
1853 if (ret)
1854 return ret;
1855
1856 ret = nand_status_op(chip, &status);
1857 if (ret)
1858 return ret;
1859 } else {
1860 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
1861 ret = chip->waitfunc(mtd, chip);
1862 if (ret < 0)
1863 return ret;
1864
1865 status = ret;
1866 }
1867
1868 if (status & NAND_STATUS_FAIL)
1869 return -EIO;
1870
1871 return 0;
1872}
1873EXPORT_SYMBOL_GPL(nand_prog_page_end_op);
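/**
 * nand_prog_page_op - Do a full PROG PAGE operation
 * @chip: the NAND chip
 * @page: page to write
 * @offset_in_page: offset within the page
 * @buf: buffer containing the data to write to the page
 * @len: length of the buffer
 *
 * Issue a full PROG PAGE operation (data transfer plus page program) and
 * check the resulting status. This function does not select/unselect the
 * CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */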
1888int nand_prog_page_op(struct nand_chip *chip, unsigned int page,
1889 unsigned int offset_in_page, const void *buf,
1890 unsigned int len)
1891{
1892 struct mtd_info *mtd = nand_to_mtd(chip);
1893 int status;
1894
1895 if (!len || !buf)
1896 return -EINVAL;
1897
1898 if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1899 return -EINVAL;
1900
1901 if (chip->exec_op) {
1902 status = nand_exec_prog_page_op(chip, page, offset_in_page, buf,
1903 len, true);
1904 } else {
1905 chip->cmdfunc(mtd, NAND_CMD_SEQIN, offset_in_page, page);
1906 chip->write_buf(mtd, buf, len);
1907 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
1908 status = chip->waitfunc(mtd, chip);
1909 }
1910
1911 if (status & NAND_STATUS_FAIL)
1912 return -EIO;
1913
1914 return 0;
1915}
1916EXPORT_SYMBOL_GPL(nand_prog_page_op);
1917
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931int nand_change_write_column_op(struct nand_chip *chip,
1932 unsigned int offset_in_page,
1933 const void *buf, unsigned int len,
1934 bool force_8bit)
1935{
1936 struct mtd_info *mtd = nand_to_mtd(chip);
1937
1938 if (len && !buf)
1939 return -EINVAL;
1940
1941 if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1942 return -EINVAL;
1943
1944
1945 if (mtd->writesize <= 512)
1946 return -ENOTSUPP;
1947
1948 if (chip->exec_op) {
1949 const struct nand_sdr_timings *sdr =
1950 nand_get_sdr_timings(&chip->data_interface);
1951 u8 addrs[2];
1952 struct nand_op_instr instrs[] = {
1953 NAND_OP_CMD(NAND_CMD_RNDIN, 0),
1954 NAND_OP_ADDR(2, addrs, PSEC_TO_NSEC(sdr->tCCS_min)),
1955 NAND_OP_DATA_OUT(len, buf, 0),
1956 };
1957 struct nand_operation op = NAND_OPERATION(instrs);
1958 int ret;
1959
1960 ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
1961 if (ret < 0)
1962 return ret;
1963
1964 instrs[2].ctx.data.force_8bit = force_8bit;
1965
1966
1967 if (!len)
1968 op.ninstrs--;
1969
1970 return nand_exec_op(chip, &op);
1971 }
1972
1973 chip->cmdfunc(mtd, NAND_CMD_RNDIN, offset_in_page, -1);
1974 if (len)
1975 chip->write_buf(mtd, buf, len);
1976
1977 return 0;
1978}
1979EXPORT_SYMBOL_GPL(nand_change_write_column_op);
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989
1990
1991
1992
1993
1994int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
1995 unsigned int len)
1996{
1997 struct mtd_info *mtd = nand_to_mtd(chip);
1998 unsigned int i;
1999 u8 *id = buf;
2000
2001 if (len && !buf)
2002 return -EINVAL;
2003
2004 if (chip->exec_op) {
2005 const struct nand_sdr_timings *sdr =
2006 nand_get_sdr_timings(&chip->data_interface);
2007 struct nand_op_instr instrs[] = {
2008 NAND_OP_CMD(NAND_CMD_READID, 0),
2009 NAND_OP_ADDR(1, &addr, PSEC_TO_NSEC(sdr->tADL_min)),
2010 NAND_OP_8BIT_DATA_IN(len, buf, 0),
2011 };
2012 struct nand_operation op = NAND_OPERATION(instrs);
2013
2014
2015 if (!len)
2016 op.ninstrs--;
2017
2018 return nand_exec_op(chip, &op);
2019 }
2020
2021 chip->cmdfunc(mtd, NAND_CMD_READID, addr, -1);
2022
2023 for (i = 0; i < len; i++)
2024 id[i] = chip->read_byte(mtd);
2025
2026 return 0;
2027}
2028EXPORT_SYMBOL_GPL(nand_readid_op);
2029
2030
2031
2032
2033
2034
2035
2036
2037
2038
2039
2040
2041int nand_status_op(struct nand_chip *chip, u8 *status)
2042{
2043 struct mtd_info *mtd = nand_to_mtd(chip);
2044
2045 if (chip->exec_op) {
2046 const struct nand_sdr_timings *sdr =
2047 nand_get_sdr_timings(&chip->data_interface);
2048 struct nand_op_instr instrs[] = {
2049 NAND_OP_CMD(NAND_CMD_STATUS,
2050 PSEC_TO_NSEC(sdr->tADL_min)),
2051 NAND_OP_8BIT_DATA_IN(1, status, 0),
2052 };
2053 struct nand_operation op = NAND_OPERATION(instrs);
2054
2055 if (!status)
2056 op.ninstrs--;
2057
2058 return nand_exec_op(chip, &op);
2059 }
2060
2061 chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
2062 if (status)
2063 *status = chip->read_byte(mtd);
2064
2065 return 0;
2066}
2067EXPORT_SYMBOL_GPL(nand_status_op);
2068
2069
2070
2071
2072
2073
2074
2075
2076
2077
2078
2079
2080int nand_exit_status_op(struct nand_chip *chip)
2081{
2082 struct mtd_info *mtd = nand_to_mtd(chip);
2083
2084 if (chip->exec_op) {
2085 struct nand_op_instr instrs[] = {
2086 NAND_OP_CMD(NAND_CMD_READ0, 0),
2087 };
2088 struct nand_operation op = NAND_OPERATION(instrs);
2089
2090 return nand_exec_op(chip, &op);
2091 }
2092
2093 chip->cmdfunc(mtd, NAND_CMD_READ0, -1, -1);
2094
2095 return 0;
2096}
2097EXPORT_SYMBOL_GPL(nand_exit_status_op);
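/**
 * nand_erase_op - Do an erase operation
 * @chip: the NAND chip
 * @eraseblock: block to erase
 *
 * Issue an ERASE operation on @eraseblock, wait for the chip to become ready
 * and check the resulting status. This function does not select/unselect the
 * CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */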
2110int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock)
2111{
2112 struct mtd_info *mtd = nand_to_mtd(chip);
2113 unsigned int page = eraseblock <<
2114 (chip->phys_erase_shift - chip->page_shift);
2115 int ret;
2116 u8 status;
2117
2118 if (chip->exec_op) {
2119 const struct nand_sdr_timings *sdr =
2120 nand_get_sdr_timings(&chip->data_interface);
2121 u8 addrs[3] = { page, page >> 8, page >> 16 };
2122 struct nand_op_instr instrs[] = {
2123 NAND_OP_CMD(NAND_CMD_ERASE1, 0),
2124 NAND_OP_ADDR(2, addrs, 0),
2125 NAND_OP_CMD(NAND_CMD_ERASE2,
2126 PSEC_TO_MSEC(sdr->tWB_max)),
2127 NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tBERS_max), 0),
2128 };
2129 struct nand_operation op = NAND_OPERATION(instrs);
2130
2131 if (chip->options & NAND_ROW_ADDR_3)
2132 instrs[1].ctx.addr.naddrs++;
2133
2134 ret = nand_exec_op(chip, &op);
2135 if (ret)
2136 return ret;
2137
2138 ret = nand_status_op(chip, &status);
2139 if (ret)
2140 return ret;
2141 } else {
2142 chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
2143 chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
2144
2145 ret = chip->waitfunc(mtd, chip);
2146 if (ret < 0)
2147 return ret;
2148
2149 status = ret;
2150 }
2151
2152 if (status & NAND_STATUS_FAIL)
2153 return -EIO;
2154
2155 return 0;
2156}
2157EXPORT_SYMBOL_GPL(nand_erase_op);
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171static int nand_set_features_op(struct nand_chip *chip, u8 feature,
2172 const void *data)
2173{
2174 struct mtd_info *mtd = nand_to_mtd(chip);
2175 const u8 *params = data;
2176 int i, ret;
2177 u8 status;
2178
2179 if (chip->exec_op) {
2180 const struct nand_sdr_timings *sdr =
2181 nand_get_sdr_timings(&chip->data_interface);
2182 struct nand_op_instr instrs[] = {
2183 NAND_OP_CMD(NAND_CMD_SET_FEATURES, 0),
2184 NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tADL_min)),
2185 NAND_OP_8BIT_DATA_OUT(ONFI_SUBFEATURE_PARAM_LEN, data,
2186 PSEC_TO_NSEC(sdr->tWB_max)),
2187 NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tFEAT_max), 0),
2188 };
2189 struct nand_operation op = NAND_OPERATION(instrs);
2190
2191 ret = nand_exec_op(chip, &op);
2192 if (ret)
2193 return ret;
2194
2195 ret = nand_status_op(chip, &status);
2196 if (ret)
2197 return ret;
2198 } else {
2199 chip->cmdfunc(mtd, NAND_CMD_SET_FEATURES, feature, -1);
2200 for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
2201 chip->write_byte(mtd, params[i]);
2202
2203 ret = chip->waitfunc(mtd, chip);
2204 if (ret < 0)
2205 return ret;
2206
2207 status = ret;
2208 }
2209
2210 if (status & NAND_STATUS_FAIL)
2211 return -EIO;
2212
2213 return 0;
2214}
2215
2216
2217
2218
2219
2220
2221
2222
2223
2224
2225
2226
2227
2228static int nand_get_features_op(struct nand_chip *chip, u8 feature,
2229 void *data)
2230{
2231 struct mtd_info *mtd = nand_to_mtd(chip);
2232 u8 *params = data;
2233 int i;
2234
2235 if (chip->exec_op) {
2236 const struct nand_sdr_timings *sdr =
2237 nand_get_sdr_timings(&chip->data_interface);
2238 struct nand_op_instr instrs[] = {
2239 NAND_OP_CMD(NAND_CMD_GET_FEATURES, 0),
2240 NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tWB_max)),
2241 NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tFEAT_max),
2242 PSEC_TO_NSEC(sdr->tRR_min)),
2243 NAND_OP_8BIT_DATA_IN(ONFI_SUBFEATURE_PARAM_LEN,
2244 data, 0),
2245 };
2246 struct nand_operation op = NAND_OPERATION(instrs);
2247
2248 return nand_exec_op(chip, &op);
2249 }
2250
2251 chip->cmdfunc(mtd, NAND_CMD_GET_FEATURES, feature, -1);
2252 for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
2253 params[i] = chip->read_byte(mtd);
2254
2255 return 0;
2256}
2257
2258
2259
2260
2261
2262
2263
2264
2265
2266
2267
2268int nand_reset_op(struct nand_chip *chip)
2269{
2270 struct mtd_info *mtd = nand_to_mtd(chip);
2271
2272 if (chip->exec_op) {
2273 const struct nand_sdr_timings *sdr =
2274 nand_get_sdr_timings(&chip->data_interface);
2275 struct nand_op_instr instrs[] = {
2276 NAND_OP_CMD(NAND_CMD_RESET, PSEC_TO_NSEC(sdr->tWB_max)),
2277 NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tRST_max), 0),
2278 };
2279 struct nand_operation op = NAND_OPERATION(instrs);
2280
2281 return nand_exec_op(chip, &op);
2282 }
2283
2284 chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
2285
2286 return 0;
2287}
2288EXPORT_SYMBOL_GPL(nand_reset_op);
2289
2290
2291
2292
2293
2294
2295
2296
2297
2298
2299
2300
2301
2302
2303int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
2304 bool force_8bit)
2305{
2306 struct mtd_info *mtd = nand_to_mtd(chip);
2307
2308 if (!len || !buf)
2309 return -EINVAL;
2310
2311 if (chip->exec_op) {
2312 struct nand_op_instr instrs[] = {
2313 NAND_OP_DATA_IN(len, buf, 0),
2314 };
2315 struct nand_operation op = NAND_OPERATION(instrs);
2316
2317 instrs[0].ctx.data.force_8bit = force_8bit;
2318
2319 return nand_exec_op(chip, &op);
2320 }
2321
2322 if (force_8bit) {
2323 u8 *p = buf;
2324 unsigned int i;
2325
2326 for (i = 0; i < len; i++)
2327 p[i] = chip->read_byte(mtd);
2328 } else {
2329 chip->read_buf(mtd, buf, len);
2330 }
2331
2332 return 0;
2333}
2334EXPORT_SYMBOL_GPL(nand_read_data_op);
2335
2336
2337
2338
2339
2340
2341
2342
2343
2344
2345
2346
2347
2348
2349int nand_write_data_op(struct nand_chip *chip, const void *buf,
2350 unsigned int len, bool force_8bit)
2351{
2352 struct mtd_info *mtd = nand_to_mtd(chip);
2353
2354 if (!len || !buf)
2355 return -EINVAL;
2356
2357 if (chip->exec_op) {
2358 struct nand_op_instr instrs[] = {
2359 NAND_OP_DATA_OUT(len, buf, 0),
2360 };
2361 struct nand_operation op = NAND_OPERATION(instrs);
2362
2363 instrs[0].ctx.data.force_8bit = force_8bit;
2364
2365 return nand_exec_op(chip, &op);
2366 }
2367
2368 if (force_8bit) {
2369 const u8 *p = buf;
2370 unsigned int i;
2371
2372 for (i = 0; i < len; i++)
2373 chip->write_byte(mtd, p[i]);
2374 } else {
2375 chip->write_buf(mtd, buf, len);
2376 }
2377
2378 return 0;
2379}
2380EXPORT_SYMBOL_GPL(nand_write_data_op);
2381
2382
2383
2384
2385
2386
2387
2388
2389
2390
2391struct nand_op_parser_ctx {
2392 const struct nand_op_instr *instrs;
2393 unsigned int ninstrs;
2394 struct nand_subop subop;
2395};
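/**
 * nand_op_parser_must_split_instr - check if an instruction must be split
 * @pat: the parser pattern element that matches @instr
 * @instr: the instruction being matched
 * @start_offset: the current start offset within @instr
 *
 * Some NAND controllers cannot issue more than a fixed number of address
 * cycles or data bytes in a single operation. When @instr exceeds the
 * pattern element's limit, advance @start_offset by that limit so the next
 * pass resumes where this one stopped.
 *
 * Returns true if the instruction must be split, false otherwise.
 */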
2417static bool
2418nand_op_parser_must_split_instr(const struct nand_op_parser_pattern_elem *pat,
2419 const struct nand_op_instr *instr,
2420 unsigned int *start_offset)
2421{
2422 switch (pat->type) {
2423 case NAND_OP_ADDR_INSTR:
2424 if (!pat->ctx.addr.maxcycles)
2425 break;
2426
2427 if (instr->ctx.addr.naddrs - *start_offset >
2428 pat->ctx.addr.maxcycles) {
2429 *start_offset += pat->ctx.addr.maxcycles;
2430 return true;
2431 }
2432 break;
2433
2434 case NAND_OP_DATA_IN_INSTR:
2435 case NAND_OP_DATA_OUT_INSTR:
2436 if (!pat->ctx.data.maxlen)
2437 break;
2438
2439 if (instr->ctx.data.len - *start_offset >
2440 pat->ctx.data.maxlen) {
2441 *start_offset += pat->ctx.data.maxlen;
2442 return true;
2443 }
2444 break;
2445
2446 default:
2447 break;
2448 }
2449
2450 return false;
2451}
2452
2453
2454
2455
2456
2457
2458
2459
2460
2461
2462
2463
2464static bool
2465nand_op_parser_match_pat(const struct nand_op_parser_pattern *pat,
2466 struct nand_op_parser_ctx *ctx)
2467{
2468 unsigned int instr_offset = ctx->subop.first_instr_start_off;
2469 const struct nand_op_instr *end = ctx->instrs + ctx->ninstrs;
2470 const struct nand_op_instr *instr = ctx->subop.instrs;
2471 unsigned int i, ninstrs;
2472
2473 for (i = 0, ninstrs = 0; i < pat->nelems && instr < end; i++) {
2474
2475
2476
2477
2478
2479
2480
2481 if (instr->type != pat->elems[i].type) {
2482 if (!pat->elems[i].optional)
2483 return false;
2484
2485 continue;
2486 }
2487
2488
2489
2490
2491
2492
2493
2494
2495
2496 if (nand_op_parser_must_split_instr(&pat->elems[i], instr,
2497 &instr_offset)) {
2498 ninstrs++;
2499 i++;
2500 break;
2501 }
2502
2503 instr++;
2504 ninstrs++;
2505 instr_offset = 0;
2506 }
2507
2508
2509
2510
2511
2512
2513
2514 if (!ninstrs)
2515 return false;
2516
2517
2518
2519
2520
2521
2522 for (; i < pat->nelems; i++) {
2523 if (!pat->elems[i].optional)
2524 return false;
2525 }
2526
2527
2528
2529
2530
2531 ctx->subop.ninstrs = ninstrs;
2532 ctx->subop.last_instr_end_off = instr_offset;
2533
2534 return true;
2535}
2536
2537#if IS_ENABLED(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG)
2538static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
2539{
2540 const struct nand_op_instr *instr;
2541 char *prefix = " ";
2542 unsigned int i;
2543
2544 pr_debug("executing subop:\n");
2545
2546 for (i = 0; i < ctx->ninstrs; i++) {
2547 instr = &ctx->instrs[i];
2548
2549 if (instr == &ctx->subop.instrs[0])
2550 prefix = " ->";
2551
2552 switch (instr->type) {
2553 case NAND_OP_CMD_INSTR:
2554 pr_debug("%sCMD [0x%02x]\n", prefix,
2555 instr->ctx.cmd.opcode);
2556 break;
2557 case NAND_OP_ADDR_INSTR:
2558 pr_debug("%sADDR [%d cyc: %*ph]\n", prefix,
2559 instr->ctx.addr.naddrs,
2560 instr->ctx.addr.naddrs < 64 ?
2561 instr->ctx.addr.naddrs : 64,
2562 instr->ctx.addr.addrs);
2563 break;
2564 case NAND_OP_DATA_IN_INSTR:
2565 pr_debug("%sDATA_IN [%d B%s]\n", prefix,
2566 instr->ctx.data.len,
2567 instr->ctx.data.force_8bit ?
2568 ", force 8-bit" : "");
2569 break;
2570 case NAND_OP_DATA_OUT_INSTR:
2571 pr_debug("%sDATA_OUT [%d B%s]\n", prefix,
2572 instr->ctx.data.len,
2573 instr->ctx.data.force_8bit ?
2574 ", force 8-bit" : "");
2575 break;
2576 case NAND_OP_WAITRDY_INSTR:
2577 pr_debug("%sWAITRDY [max %d ms]\n", prefix,
2578 instr->ctx.waitrdy.timeout_ms);
2579 break;
2580 }
2581
2582 if (instr == &ctx->subop.instrs[ctx->subop.ninstrs - 1])
2583 prefix = " ";
2584 }
2585}
2586#else
2587static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
2588{
2589
2590}
2591#endif
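/**
 * nand_op_parser_exec_op - exec_op parser
 * @chip: the NAND chip
 * @parser: patterns description provided by the controller driver
 * @op: the NAND operation to address
 * @check_only: when true, only check whether @op can be handled, do not
 *		execute it
 *
 * Helper for NAND controller drivers that only support a limited set of
 * instruction sequences. The supported sequences are described in @parser;
 * this function splits @op into sub-operations as needed and passes each one
 * to the ->exec() callback of the matching pattern (unless @check_only is
 * true). Controller drivers are expected to call it from their own
 * ->exec_op() implementation.
 *
 * Returns 0 on success, a negative error code otherwise.
 */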
2615int nand_op_parser_exec_op(struct nand_chip *chip,
2616 const struct nand_op_parser *parser,
2617 const struct nand_operation *op, bool check_only)
2618{
2619 struct nand_op_parser_ctx ctx = {
2620 .subop.instrs = op->instrs,
2621 .instrs = op->instrs,
2622 .ninstrs = op->ninstrs,
2623 };
2624 unsigned int i;
2625
2626 while (ctx.subop.instrs < op->instrs + op->ninstrs) {
2627 int ret;
2628
2629 for (i = 0; i < parser->npatterns; i++) {
2630 const struct nand_op_parser_pattern *pattern;
2631
2632 pattern = &parser->patterns[i];
2633 if (!nand_op_parser_match_pat(pattern, &ctx))
2634 continue;
2635
2636 nand_op_parser_trace(&ctx);
2637
2638 if (check_only)
2639 break;
2640
2641 ret = pattern->exec(chip, &ctx.subop);
2642 if (ret)
2643 return ret;
2644
2645 break;
2646 }
2647
2648 if (i == parser->npatterns) {
2649 pr_debug("->exec_op() parser: pattern not found!\n");
2650 return -ENOTSUPP;
2651 }
2652
2653
2654
2655
2656
2657 ctx.subop.instrs = ctx.subop.instrs + ctx.subop.ninstrs;
2658 if (ctx.subop.last_instr_end_off)
2659 ctx.subop.instrs -= 1;
2660
2661 ctx.subop.first_instr_start_off = ctx.subop.last_instr_end_off;
2662 }
2663
2664 return 0;
2665}
2666EXPORT_SYMBOL_GPL(nand_op_parser_exec_op);
2667
2668static bool nand_instr_is_data(const struct nand_op_instr *instr)
2669{
2670 return instr && (instr->type == NAND_OP_DATA_IN_INSTR ||
2671 instr->type == NAND_OP_DATA_OUT_INSTR);
2672}
2673
2674static bool nand_subop_instr_is_valid(const struct nand_subop *subop,
2675 unsigned int instr_idx)
2676{
2677 return subop && instr_idx < subop->ninstrs;
2678}
2679
2680static int nand_subop_get_start_off(const struct nand_subop *subop,
2681 unsigned int instr_idx)
2682{
2683 if (instr_idx)
2684 return 0;
2685
2686 return subop->first_instr_start_off;
2687}
2688
2689
2690
2691
2692
2693
2694
2695
2696
2697
2698
2699
2700int nand_subop_get_addr_start_off(const struct nand_subop *subop,
2701 unsigned int instr_idx)
2702{
2703 if (!nand_subop_instr_is_valid(subop, instr_idx) ||
2704 subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR)
2705 return -EINVAL;
2706
2707 return nand_subop_get_start_off(subop, instr_idx);
2708}
2709EXPORT_SYMBOL_GPL(nand_subop_get_addr_start_off);
2710
2711
2712
2713
2714
2715
2716
2717
2718
2719
2720
2721
2722int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
2723 unsigned int instr_idx)
2724{
2725 int start_off, end_off;
2726
2727 if (!nand_subop_instr_is_valid(subop, instr_idx) ||
2728 subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR)
2729 return -EINVAL;
2730
2731 start_off = nand_subop_get_addr_start_off(subop, instr_idx);
2732
2733 if (instr_idx == subop->ninstrs - 1 &&
2734 subop->last_instr_end_off)
2735 end_off = subop->last_instr_end_off;
2736 else
2737 end_off = subop->instrs[instr_idx].ctx.addr.naddrs;
2738
2739 return end_off - start_off;
2740}
2741EXPORT_SYMBOL_GPL(nand_subop_get_num_addr_cyc);
2742
2743
2744
2745
2746
2747
2748
2749
2750
2751
2752
2753
2754int nand_subop_get_data_start_off(const struct nand_subop *subop,
2755 unsigned int instr_idx)
2756{
2757 if (!nand_subop_instr_is_valid(subop, instr_idx) ||
2758 !nand_instr_is_data(&subop->instrs[instr_idx]))
2759 return -EINVAL;
2760
2761 return nand_subop_get_start_off(subop, instr_idx);
2762}
2763EXPORT_SYMBOL_GPL(nand_subop_get_data_start_off);
2764
2765
2766
2767
2768
2769
2770
2771
2772
2773
2774
2775
2776int nand_subop_get_data_len(const struct nand_subop *subop,
2777 unsigned int instr_idx)
2778{
2779 int start_off = 0, end_off;
2780
2781 if (!nand_subop_instr_is_valid(subop, instr_idx) ||
2782 !nand_instr_is_data(&subop->instrs[instr_idx]))
2783 return -EINVAL;
2784
2785 start_off = nand_subop_get_data_start_off(subop, instr_idx);
2786
2787 if (instr_idx == subop->ninstrs - 1 &&
2788 subop->last_instr_end_off)
2789 end_off = subop->last_instr_end_off;
2790 else
2791 end_off = subop->instrs[instr_idx].ctx.data.len;
2792
2793 return end_off - start_off;
2794}
2795EXPORT_SYMBOL_GPL(nand_subop_get_data_len);
2796
2797
2798
2799
2800
2801
2802
2803
2804
2805
2806
2807
2808int nand_reset(struct nand_chip *chip, int chipnr)
2809{
2810 struct mtd_info *mtd = nand_to_mtd(chip);
2811 struct nand_data_interface saved_data_intf = chip->data_interface;
2812 int ret;
2813
2814 ret = nand_reset_data_interface(chip, chipnr);
2815 if (ret)
2816 return ret;
2817
2818
2819
2820
2821
2822 chip->select_chip(mtd, chipnr);
2823 ret = nand_reset_op(chip);
2824 chip->select_chip(mtd, -1);
2825 if (ret)
2826 return ret;
2827
2828
2829
2830
2831
2832
2833
2834
2835 if (!chip->onfi_timing_mode_default)
2836 return 0;
2837
2838 chip->data_interface = saved_data_intf;
2839 ret = nand_setup_data_interface(chip, chipnr);
2840 if (ret)
2841 return ret;
2842
2843 return 0;
2844}
2845EXPORT_SYMBOL_GPL(nand_reset);
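/**
 * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data
 * @buf: buffer to test
 * @len: buffer length
 * @bitflips_threshold: maximum number of bitflips
 *
 * Check whether a buffer contains only 0xff, which means the underlying
 * region has been erased and is ready to be programmed. The function bails
 * out as soon as the number of bitflips exceeds @bitflips_threshold.
 *
 * Returns the number of bitflips found, or -EBADMSG if the threshold has
 * been exceeded.
 */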
2866static int nand_check_erased_buf(void *buf, int len, int bitflips_threshold)
2867{
2868 const unsigned char *bitmap = buf;
2869 int bitflips = 0;
2870 int weight;
2871
2872 for (; len && ((uintptr_t)bitmap) % sizeof(long);
2873 len--, bitmap++) {
2874 weight = hweight8(*bitmap);
2875 bitflips += BITS_PER_BYTE - weight;
2876 if (unlikely(bitflips > bitflips_threshold))
2877 return -EBADMSG;
2878 }
2879
2880 for (; len >= sizeof(long);
2881 len -= sizeof(long), bitmap += sizeof(long)) {
2882 unsigned long d = *((unsigned long *)bitmap);
2883 if (d == ~0UL)
2884 continue;
2885 weight = hweight_long(d);
2886 bitflips += BITS_PER_LONG - weight;
2887 if (unlikely(bitflips > bitflips_threshold))
2888 return -EBADMSG;
2889 }
2890
2891 for (; len > 0; len--, bitmap++) {
2892 weight = hweight8(*bitmap);
2893 bitflips += BITS_PER_BYTE - weight;
2894 if (unlikely(bitflips > bitflips_threshold))
2895 return -EBADMSG;
2896 }
2897
2898 return bitflips;
2899}
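/**
 * nand_check_erased_ecc_chunk - check if an ECC chunk contains (almost) only
 *				 0xff data
 * @data: data buffer to test
 * @datalen: data length
 * @ecc: ECC buffer
 * @ecclen: ECC length
 * @extraoob: extra OOB buffer
 * @extraooblen: extra OOB length
 * @bitflips_threshold: maximum number of bitflips
 *
 * Check whether a data buffer and its associated ECC and extra OOB data
 * contain only 0xff, which means the underlying region has been erased and
 * is ready to be programmed. Once the chunk is known to be erased, the data,
 * ECC and extra OOB buffers are reset to 0xff so no false bitflips are
 * reported to the upper layers.
 *
 * Returns the total number of bitflips (at most @bitflips_threshold), or
 * -EBADMSG if the threshold has been exceeded.
 */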
2940int nand_check_erased_ecc_chunk(void *data, int datalen,
2941 void *ecc, int ecclen,
2942 void *extraoob, int extraooblen,
2943 int bitflips_threshold)
2944{
2945 int data_bitflips = 0, ecc_bitflips = 0, extraoob_bitflips = 0;
2946
2947 data_bitflips = nand_check_erased_buf(data, datalen,
2948 bitflips_threshold);
2949 if (data_bitflips < 0)
2950 return data_bitflips;
2951
2952 bitflips_threshold -= data_bitflips;
2953
2954 ecc_bitflips = nand_check_erased_buf(ecc, ecclen, bitflips_threshold);
2955 if (ecc_bitflips < 0)
2956 return ecc_bitflips;
2957
2958 bitflips_threshold -= ecc_bitflips;
2959
2960 extraoob_bitflips = nand_check_erased_buf(extraoob, extraooblen,
2961 bitflips_threshold);
2962 if (extraoob_bitflips < 0)
2963 return extraoob_bitflips;
2964
2965 if (data_bitflips)
2966 memset(data, 0xff, datalen);
2967
2968 if (ecc_bitflips)
2969 memset(ecc, 0xff, ecclen);
2970
2971 if (extraoob_bitflips)
2972 memset(extraoob, 0xff, extraooblen);
2973
2974 return data_bitflips + ecc_bitflips + extraoob_bitflips;
2975}
2976EXPORT_SYMBOL(nand_check_erased_ecc_chunk);
2977
2978
2979
2980
2981
2982
2983
2984
2985
2986
2987
2988int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
2989 uint8_t *buf, int oob_required, int page)
2990{
2991 int ret;
2992
2993 ret = nand_read_page_op(chip, page, 0, buf, mtd->writesize);
2994 if (ret)
2995 return ret;
2996
2997 if (oob_required) {
2998 ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize,
2999 false);
3000 if (ret)
3001 return ret;
3002 }
3003
3004 return 0;
3005}
3006EXPORT_SYMBOL(nand_read_page_raw);
3007
3008
3009
3010
3011
3012
3013
3014
3015
3016
3017
3018static int nand_read_page_raw_syndrome(struct mtd_info *mtd,
3019 struct nand_chip *chip, uint8_t *buf,
3020 int oob_required, int page)
3021{
3022 int eccsize = chip->ecc.size;
3023 int eccbytes = chip->ecc.bytes;
3024 uint8_t *oob = chip->oob_poi;
3025 int steps, size, ret;
3026
3027 ret = nand_read_page_op(chip, page, 0, NULL, 0);
3028 if (ret)
3029 return ret;
3030
3031 for (steps = chip->ecc.steps; steps > 0; steps--) {
3032 ret = nand_read_data_op(chip, buf, eccsize, false);
3033 if (ret)
3034 return ret;
3035
3036 buf += eccsize;
3037
3038 if (chip->ecc.prepad) {
3039 ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
3040 false);
3041 if (ret)
3042 return ret;
3043
3044 oob += chip->ecc.prepad;
3045 }
3046
3047 ret = nand_read_data_op(chip, oob, eccbytes, false);
3048 if (ret)
3049 return ret;
3050
3051 oob += eccbytes;
3052
3053 if (chip->ecc.postpad) {
3054 ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
3055 false);
3056 if (ret)
3057 return ret;
3058
3059 oob += chip->ecc.postpad;
3060 }
3061 }
3062
3063 size = mtd->oobsize - (oob - chip->oob_poi);
3064 if (size) {
3065 ret = nand_read_data_op(chip, oob, size, false);
3066 if (ret)
3067 return ret;
3068 }
3069
3070 return 0;
3071}
3072
3073
3074
3075
3076
3077
3078
3079
3080
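/**
 * nand_read_page_swecc - software ECC based page read function
 * @mtd: mtd info structure
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * Reads the page in raw mode, computes the ECC for each step in software and
 * corrects the data against the ECC bytes stored in the OOB area.
 */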
3081static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
3082 uint8_t *buf, int oob_required, int page)
3083{
3084 int i, eccsize = chip->ecc.size, ret;
3085 int eccbytes = chip->ecc.bytes;
3086 int eccsteps = chip->ecc.steps;
3087 uint8_t *p = buf;
3088 uint8_t *ecc_calc = chip->ecc.calc_buf;
3089 uint8_t *ecc_code = chip->ecc.code_buf;
3090 unsigned int max_bitflips = 0;
3091
3092 chip->ecc.read_page_raw(mtd, chip, buf, 1, page);
3093
3094 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
3095 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
3096
3097 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
3098 chip->ecc.total);
3099 if (ret)
3100 return ret;
3101
3102 eccsteps = chip->ecc.steps;
3103 p = buf;
3104
3105 for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
3106 int stat;
3107
3108 stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
3109 if (stat < 0) {
3110 mtd->ecc_stats.failed++;
3111 } else {
3112 mtd->ecc_stats.corrected += stat;
3113 max_bitflips = max_t(unsigned int, max_bitflips, stat);
3114 }
3115 }
3116 return max_bitflips;
3117}
3118
3119
3120
3121
3122
3123
3124
3125
3126
3127
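/**
 * nand_read_subpage - ECC based sub-page read function
 * @mtd: mtd info structure
 * @chip: nand chip info structure
 * @data_offs: offset of the requested data within the page
 * @readlen: requested data length
 * @bufpoi: buffer to store read data
 * @page: page number to read
 *
 * Reads only the ECC steps covering the requested range, then fetches the
 * corresponding ECC bytes from the OOB area and corrects the data. If the
 * ECC bytes are not contiguous in the OOB layout, the whole OOB area is read
 * instead.
 */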
3128static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
3129 uint32_t data_offs, uint32_t readlen, uint8_t *bufpoi,
3130 int page)
3131{
3132 int start_step, end_step, num_steps, ret;
3133 uint8_t *p;
3134 int data_col_addr, i, gaps = 0;
3135 int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
3136 int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
3137 int index, section = 0;
3138 unsigned int max_bitflips = 0;
3139 struct mtd_oob_region oobregion = { };
3140
3141
3142 start_step = data_offs / chip->ecc.size;
3143 end_step = (data_offs + readlen - 1) / chip->ecc.size;
3144 num_steps = end_step - start_step + 1;
3145 index = start_step * chip->ecc.bytes;
3146
3147
3148 datafrag_len = num_steps * chip->ecc.size;
3149 eccfrag_len = num_steps * chip->ecc.bytes;
3150
3151 data_col_addr = start_step * chip->ecc.size;
3152
3153 p = bufpoi + data_col_addr;
3154 ret = nand_read_page_op(chip, page, data_col_addr, p, datafrag_len);
3155 if (ret)
3156 return ret;
3157
3158
3159 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
3160 chip->ecc.calculate(mtd, p, &chip->ecc.calc_buf[i]);
3161
3162
3163
3164
3165
3166	ret = mtd_ooblayout_find_eccregion(mtd, index, &section, &oobregion);
3167 if (ret)
3168 return ret;
3169
3170 if (oobregion.length < eccfrag_len)
3171 gaps = 1;
3172
3173 if (gaps) {
3174 ret = nand_change_read_column_op(chip, mtd->writesize,
3175 chip->oob_poi, mtd->oobsize,
3176 false);
3177 if (ret)
3178 return ret;
3179 } else {
3180
3181
3182
3183
3184 aligned_pos = oobregion.offset & ~(busw - 1);
3185 aligned_len = eccfrag_len;
3186 if (oobregion.offset & (busw - 1))
3187 aligned_len++;
3188 if ((oobregion.offset + (num_steps * chip->ecc.bytes)) &
3189 (busw - 1))
3190 aligned_len++;
3191
3192 ret = nand_change_read_column_op(chip,
3193 mtd->writesize + aligned_pos,
3194 &chip->oob_poi[aligned_pos],
3195 aligned_len, false);
3196 if (ret)
3197 return ret;
3198 }
3199
3200 ret = mtd_ooblayout_get_eccbytes(mtd, chip->ecc.code_buf,
3201 chip->oob_poi, index, eccfrag_len);
3202 if (ret)
3203 return ret;
3204
3205 p = bufpoi + data_col_addr;
3206 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
3207 int stat;
3208
3209 stat = chip->ecc.correct(mtd, p, &chip->ecc.code_buf[i],
3210 &chip->ecc.calc_buf[i]);
3211 if (stat == -EBADMSG &&
3212 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
3213
3214 stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
3215 &chip->ecc.code_buf[i],
3216 chip->ecc.bytes,
3217 NULL, 0,
3218 chip->ecc.strength);
3219 }
3220
3221 if (stat < 0) {
3222 mtd->ecc_stats.failed++;
3223 } else {
3224 mtd->ecc_stats.corrected += stat;
3225 max_bitflips = max_t(unsigned int, max_bitflips, stat);
3226 }
3227 }
3228 return max_bitflips;
3229}
3230
3231
3232
3233
3234
3235
3236
3237
3238
3239
3240
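/**
 * nand_read_page_hwecc - hardware ECC based page read function
 * @mtd: mtd info structure
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * The controller computes the ECC while each data step is transferred; the
 * ECC bytes stored in the OOB area are then used to correct the data.
 */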
3241static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
3242 uint8_t *buf, int oob_required, int page)
3243{
3244 int i, eccsize = chip->ecc.size, ret;
3245 int eccbytes = chip->ecc.bytes;
3246 int eccsteps = chip->ecc.steps;
3247 uint8_t *p = buf;
3248 uint8_t *ecc_calc = chip->ecc.calc_buf;
3249 uint8_t *ecc_code = chip->ecc.code_buf;
3250 unsigned int max_bitflips = 0;
3251
3252 ret = nand_read_page_op(chip, page, 0, NULL, 0);
3253 if (ret)
3254 return ret;
3255
3256 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
3257 chip->ecc.hwctl(mtd, NAND_ECC_READ);
3258
3259 ret = nand_read_data_op(chip, p, eccsize, false);
3260 if (ret)
3261 return ret;
3262
3263 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
3264 }
3265
3266 ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false);
3267 if (ret)
3268 return ret;
3269
3270 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
3271 chip->ecc.total);
3272 if (ret)
3273 return ret;
3274
3275 eccsteps = chip->ecc.steps;
3276 p = buf;
3277
3278 for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
3279 int stat;
3280
3281 stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
3282 if (stat == -EBADMSG &&
3283 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
3284
3285 stat = nand_check_erased_ecc_chunk(p, eccsize,
3286 &ecc_code[i], eccbytes,
3287 NULL, 0,
3288 chip->ecc.strength);
3289 }
3290
3291 if (stat < 0) {
3292 mtd->ecc_stats.failed++;
3293 } else {
3294 mtd->ecc_stats.corrected += stat;
3295 max_bitflips = max_t(unsigned int, max_bitflips, stat);
3296 }
3297 }
3298 return max_bitflips;
3299}
3300
3301
3302
3303
3304
3305
3306
3307
3308
3309
3310
3311
3312
3313
3314
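/**
 * nand_read_page_hwecc_oob_first - hardware ECC page read, OOB read first
 * @mtd: mtd info structure
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * Reads the whole OOB area first so the stored ECC bytes are available before
 * the data is transferred, then corrects each ECC step as it is read.
 */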
3315static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
3316 struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
3317{
3318 int i, eccsize = chip->ecc.size, ret;
3319 int eccbytes = chip->ecc.bytes;
3320 int eccsteps = chip->ecc.steps;
3321 uint8_t *p = buf;
3322 uint8_t *ecc_code = chip->ecc.code_buf;
3323 uint8_t *ecc_calc = chip->ecc.calc_buf;
3324 unsigned int max_bitflips = 0;
3325
3326
3327 ret = nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
3328 if (ret)
3329 return ret;
3330
3331 ret = nand_read_page_op(chip, page, 0, NULL, 0);
3332 if (ret)
3333 return ret;
3334
3335 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
3336 chip->ecc.total);
3337 if (ret)
3338 return ret;
3339
3340 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
3341 int stat;
3342
3343 chip->ecc.hwctl(mtd, NAND_ECC_READ);
3344
3345 ret = nand_read_data_op(chip, p, eccsize, false);
3346 if (ret)
3347 return ret;
3348
3349 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
3350
3351 stat = chip->ecc.correct(mtd, p, &ecc_code[i], NULL);
3352 if (stat == -EBADMSG &&
3353 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
3354
3355 stat = nand_check_erased_ecc_chunk(p, eccsize,
3356 &ecc_code[i], eccbytes,
3357 NULL, 0,
3358 chip->ecc.strength);
3359 }
3360
3361 if (stat < 0) {
3362 mtd->ecc_stats.failed++;
3363 } else {
3364 mtd->ecc_stats.corrected += stat;
3365 max_bitflips = max_t(unsigned int, max_bitflips, stat);
3366 }
3367 }
3368 return max_bitflips;
3369}
3370
3371
3372
3373
3374
3375
3376
3377
3378
3379
3380
3381
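/**
 * nand_read_page_syndrome - hardware ECC syndrome based page read function
 * @mtd: mtd info structure
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * For controllers that store prepad, ECC and postpad bytes interleaved with
 * each data step: data and OOB are read in one pass per ECC step and the
 * hardware syndrome is used for correction.
 */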
3382static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
3383 uint8_t *buf, int oob_required, int page)
3384{
3385 int ret, i, eccsize = chip->ecc.size;
3386 int eccbytes = chip->ecc.bytes;
3387 int eccsteps = chip->ecc.steps;
3388 int eccpadbytes = eccbytes + chip->ecc.prepad + chip->ecc.postpad;
3389 uint8_t *p = buf;
3390 uint8_t *oob = chip->oob_poi;
3391 unsigned int max_bitflips = 0;
3392
3393 ret = nand_read_page_op(chip, page, 0, NULL, 0);
3394 if (ret)
3395 return ret;
3396
3397 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
3398 int stat;
3399
3400 chip->ecc.hwctl(mtd, NAND_ECC_READ);
3401
3402 ret = nand_read_data_op(chip, p, eccsize, false);
3403 if (ret)
3404 return ret;
3405
3406 if (chip->ecc.prepad) {
3407 ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
3408 false);
3409 if (ret)
3410 return ret;
3411
3412 oob += chip->ecc.prepad;
3413 }
3414
3415 chip->ecc.hwctl(mtd, NAND_ECC_READSYN);
3416
3417 ret = nand_read_data_op(chip, oob, eccbytes, false);
3418 if (ret)
3419 return ret;
3420
3421 stat = chip->ecc.correct(mtd, p, oob, NULL);
3422
3423 oob += eccbytes;
3424
3425 if (chip->ecc.postpad) {
3426 ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
3427 false);
3428 if (ret)
3429 return ret;
3430
3431 oob += chip->ecc.postpad;
3432 }
3433
3434 if (stat == -EBADMSG &&
3435 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
3436
3437 stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
3438 oob - eccpadbytes,
3439 eccpadbytes,
3440 NULL, 0,
3441 chip->ecc.strength);
3442 }
3443
3444 if (stat < 0) {
3445 mtd->ecc_stats.failed++;
3446 } else {
3447 mtd->ecc_stats.corrected += stat;
3448 max_bitflips = max_t(unsigned int, max_bitflips, stat);
3449 }
3450 }
3451
3452
3453 i = mtd->oobsize - (oob - chip->oob_poi);
3454 if (i) {
3455 ret = nand_read_data_op(chip, oob, i, false);
3456 if (ret)
3457 return ret;
3458 }
3459
3460 return max_bitflips;
3461}
3462
3463
3464
3465
3466
3467
3468
3469
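/**
 * nand_transfer_oob - transfer OOB data from chip->oob_poi to the client buffer
 * @mtd: mtd info structure
 * @oob: destination OOB buffer
 * @ops: OOB operation description structure
 * @len: size of OOB data to transfer
 *
 * In MTD_OPS_PLACE_OOB and MTD_OPS_RAW modes the bytes are copied verbatim
 * starting at @ops->ooboffs; in MTD_OPS_AUTO_OOB mode only the free OOB bytes
 * described by the layout are extracted. Returns a pointer just past the
 * transferred data in @oob.
 */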
3470static uint8_t *nand_transfer_oob(struct mtd_info *mtd, uint8_t *oob,
3471 struct mtd_oob_ops *ops, size_t len)
3472{
3473 struct nand_chip *chip = mtd_to_nand(mtd);
3474 int ret;
3475
3476 switch (ops->mode) {
3477
3478 case MTD_OPS_PLACE_OOB:
3479 case MTD_OPS_RAW:
3480 memcpy(oob, chip->oob_poi + ops->ooboffs, len);
3481 return oob + len;
3482
3483 case MTD_OPS_AUTO_OOB:
3484 ret = mtd_ooblayout_get_databytes(mtd, oob, chip->oob_poi,
3485 ops->ooboffs, len);
3486 BUG_ON(ret);
3487 return oob + len;
3488
3489 default:
3490 BUG();
3491 }
3492 return NULL;
3493}
3494
3495
3496
3497
3498
3499
3500
3501
3502
3503
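/**
 * nand_setup_read_retry - set the READ RETRY mode
 * @mtd: MTD device structure
 * @retry_mode: the retry mode to select
 *
 * Selects the requested read-retry mode through chip->setup_read_retry(),
 * if the chip supports it.
 */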
3504static int nand_setup_read_retry(struct mtd_info *mtd, int retry_mode)
3505{
3506 struct nand_chip *chip = mtd_to_nand(mtd);
3507
3508 pr_debug("setting READ RETRY mode %d\n", retry_mode);
3509
3510 if (retry_mode >= chip->read_retries)
3511 return -EINVAL;
3512
3513 if (!chip->setup_read_retry)
3514 return -EOPNOTSUPP;
3515
3516 return chip->setup_read_retry(mtd, retry_mode);
3517}
3518
3519
3520
3521
3522
3523
3524
3525
3526
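/**
 * nand_do_read_ops - read data with ECC
 * @mtd: MTD device structure
 * @from: offset to read from
 * @ops: oob operation description structure
 *
 * Internal read path, called with the device held. Handles unaligned
 * requests through the page buffer, caches the last page read and performs
 * read-retry when the chip supports it.
 */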
3527static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
3528 struct mtd_oob_ops *ops)
3529{
3530 int chipnr, page, realpage, col, bytes, aligned, oob_required;
3531 struct nand_chip *chip = mtd_to_nand(mtd);
3532 int ret = 0;
3533 uint32_t readlen = ops->len;
3534 uint32_t oobreadlen = ops->ooblen;
3535 uint32_t max_oobsize = mtd_oobavail(mtd, ops);
3536
3537 uint8_t *bufpoi, *oob, *buf;
3538 int use_bufpoi;
3539 unsigned int max_bitflips = 0;
3540 int retry_mode = 0;
3541 bool ecc_fail = false;
3542
3543 chipnr = (int)(from >> chip->chip_shift);
3544 chip->select_chip(mtd, chipnr);
3545
3546 realpage = (int)(from >> chip->page_shift);
3547 page = realpage & chip->pagemask;
3548
3549 col = (int)(from & (mtd->writesize - 1));
3550
3551 buf = ops->datbuf;
3552 oob = ops->oobbuf;
3553 oob_required = oob ? 1 : 0;
3554
3555 while (1) {
3556 unsigned int ecc_failures = mtd->ecc_stats.failed;
3557
3558 bytes = min(mtd->writesize - col, readlen);
3559 aligned = (bytes == mtd->writesize);
3560
3561 if (!aligned)
3562 use_bufpoi = 1;
3563 else if (chip->options & NAND_USE_BOUNCE_BUFFER)
3564 use_bufpoi = !virt_addr_valid(buf) ||
3565 !IS_ALIGNED((unsigned long)buf,
3566 chip->buf_align);
3567 else
3568 use_bufpoi = 0;
3569
3570
3571 if (realpage != chip->pagebuf || oob) {
3572 bufpoi = use_bufpoi ? chip->data_buf : buf;
3573
3574 if (use_bufpoi && aligned)
3575 pr_debug("%s: using read bounce buffer for buf@%p\n",
3576 __func__, buf);
3577
3578read_retry:
3579
3580
3581
3582
3583 if (unlikely(ops->mode == MTD_OPS_RAW))
3584 ret = chip->ecc.read_page_raw(mtd, chip, bufpoi,
3585 oob_required,
3586 page);
3587 else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) &&
3588 !oob)
3589 ret = chip->ecc.read_subpage(mtd, chip,
3590 col, bytes, bufpoi,
3591 page);
3592 else
3593 ret = chip->ecc.read_page(mtd, chip, bufpoi,
3594 oob_required, page);
3595 if (ret < 0) {
3596 if (use_bufpoi)
3597
3598 chip->pagebuf = -1;
3599 break;
3600 }
3601
3602
3603 if (use_bufpoi) {
3604 if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
3605 !(mtd->ecc_stats.failed - ecc_failures) &&
3606 (ops->mode != MTD_OPS_RAW)) {
3607 chip->pagebuf = realpage;
3608 chip->pagebuf_bitflips = ret;
3609 } else {
3610
3611 chip->pagebuf = -1;
3612 }
3613 memcpy(buf, chip->data_buf + col, bytes);
3614 }
3615
3616 if (unlikely(oob)) {
3617 int toread = min(oobreadlen, max_oobsize);
3618
3619 if (toread) {
3620 oob = nand_transfer_oob(mtd,
3621 oob, ops, toread);
3622 oobreadlen -= toread;
3623 }
3624 }
3625
3626 if (chip->options & NAND_NEED_READRDY) {
3627
3628 if (!chip->dev_ready)
3629 udelay(chip->chip_delay);
3630 else
3631 nand_wait_ready(mtd);
3632 }
3633
3634 if (mtd->ecc_stats.failed - ecc_failures) {
3635 if (retry_mode + 1 < chip->read_retries) {
3636 retry_mode++;
3637 ret = nand_setup_read_retry(mtd,
3638 retry_mode);
3639 if (ret < 0)
3640 break;
3641
3642
3643 mtd->ecc_stats.failed = ecc_failures;
3644 goto read_retry;
3645 } else {
3646
3647 ecc_fail = true;
3648 }
3649 }
3650
3651 buf += bytes;
3652 max_bitflips = max_t(unsigned int, max_bitflips, ret);
3653 } else {
3654 memcpy(buf, chip->data_buf + col, bytes);
3655 buf += bytes;
3656 max_bitflips = max_t(unsigned int, max_bitflips,
3657 chip->pagebuf_bitflips);
3658 }
3659
3660 readlen -= bytes;
3661
3662
3663 if (retry_mode) {
3664 ret = nand_setup_read_retry(mtd, 0);
3665 if (ret < 0)
3666 break;
3667 retry_mode = 0;
3668 }
3669
3670 if (!readlen)
3671 break;
3672
3673
3674 col = 0;
3675
3676 realpage++;
3677
3678 page = realpage & chip->pagemask;
3679
3680 if (!page) {
3681 chipnr++;
3682 chip->select_chip(mtd, -1);
3683 chip->select_chip(mtd, chipnr);
3684 }
3685 }
3686 chip->select_chip(mtd, -1);
3687
3688 ops->retlen = ops->len - (size_t) readlen;
3689 if (oob)
3690 ops->oobretlen = ops->ooblen - oobreadlen;
3691
3692 if (ret < 0)
3693 return ret;
3694
3695 if (ecc_fail)
3696 return -EBADMSG;
3697
3698 return max_bitflips;
3699}
3700
3701
3702
3703
3704
3705
3706
3707int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page)
3708{
3709 return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
3710}
3711EXPORT_SYMBOL(nand_read_oob_std);
3712
3713
3714
3715
3716
3717
3718
3719
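/**
 * nand_read_oob_syndrome - OOB read function for HW ECC with syndrome layout
 * @mtd: mtd info structure
 * @chip: nand chip info structure
 * @page: page number to read
 *
 * Collects the OOB chunks that are interleaved with the data on
 * syndrome-layout chips into chip->oob_poi.
 */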
3720int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
3721 int page)
3722{
3723 int length = mtd->oobsize;
3724 int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
3725 int eccsize = chip->ecc.size;
3726 uint8_t *bufpoi = chip->oob_poi;
3727 int i, toread, sndrnd = 0, pos, ret;
3728
3729 ret = nand_read_page_op(chip, page, chip->ecc.size, NULL, 0);
3730 if (ret)
3731 return ret;
3732
3733 for (i = 0; i < chip->ecc.steps; i++) {
3734 if (sndrnd) {
3735 int ret;
3736
3737 pos = eccsize + i * (eccsize + chunk);
3738 if (mtd->writesize > 512)
3739 ret = nand_change_read_column_op(chip, pos,
3740 NULL, 0,
3741 false);
3742 else
3743 ret = nand_read_page_op(chip, page, pos, NULL,
3744 0);
3745
3746 if (ret)
3747 return ret;
3748 } else
3749 sndrnd = 1;
3750 toread = min_t(int, length, chunk);
3751
3752 ret = nand_read_data_op(chip, bufpoi, toread, false);
3753 if (ret)
3754 return ret;
3755
3756 bufpoi += toread;
3757 length -= toread;
3758 }
3759 if (length > 0) {
3760 ret = nand_read_data_op(chip, bufpoi, length, false);
3761 if (ret)
3762 return ret;
3763 }
3764
3765 return 0;
3766}
3767EXPORT_SYMBOL(nand_read_oob_syndrome);
3768
3769
3770
3771
3772
3773
3774
3775int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page)
3776{
3777 return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
3778 mtd->oobsize);
3779}
3780EXPORT_SYMBOL(nand_write_oob_std);
3781
3782
3783
3784
3785
3786
3787
3788
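/**
 * nand_write_oob_syndrome - OOB write function for HW ECC with syndrome layout
 * @mtd: mtd info structure
 * @chip: nand chip info structure
 * @page: page number to write
 *
 * Writes chip->oob_poi to the OOB chunks that are interleaved with the data
 * on syndrome-layout chips, padding the skipped data areas with 0xff where
 * needed.
 */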
3789int nand_write_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
3790 int page)
3791{
3792 int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
3793 int eccsize = chip->ecc.size, length = mtd->oobsize;
3794 int ret, i, len, pos, sndcmd = 0, steps = chip->ecc.steps;
3795 const uint8_t *bufpoi = chip->oob_poi;
3796
3797
3798
3799
3800
3801
3802 if (!chip->ecc.prepad && !chip->ecc.postpad) {
3803 pos = steps * (eccsize + chunk);
3804 steps = 0;
3805 } else
3806 pos = eccsize;
3807
3808 ret = nand_prog_page_begin_op(chip, page, pos, NULL, 0);
3809 if (ret)
3810 return ret;
3811
3812 for (i = 0; i < steps; i++) {
3813 if (sndcmd) {
3814 if (mtd->writesize <= 512) {
3815 uint32_t fill = 0xFFFFFFFF;
3816
3817 len = eccsize;
3818 while (len > 0) {
3819 int num = min_t(int, len, 4);
3820
3821 ret = nand_write_data_op(chip, &fill,
3822 num, false);
3823 if (ret)
3824 return ret;
3825
3826 len -= num;
3827 }
3828 } else {
3829 pos = eccsize + i * (eccsize + chunk);
3830 ret = nand_change_write_column_op(chip, pos,
3831 NULL, 0,
3832 false);
3833 if (ret)
3834 return ret;
3835 }
3836 } else
3837 sndcmd = 1;
3838 len = min_t(int, length, chunk);
3839
3840 ret = nand_write_data_op(chip, bufpoi, len, false);
3841 if (ret)
3842 return ret;
3843
3844 bufpoi += len;
3845 length -= len;
3846 }
3847 if (length > 0) {
3848 ret = nand_write_data_op(chip, bufpoi, length, false);
3849 if (ret)
3850 return ret;
3851 }
3852
3853 return nand_prog_page_end_op(chip);
3854}
3855EXPORT_SYMBOL(nand_write_oob_syndrome);
3856
3857
3858
3859
3860
3861
3862
3863
3864
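/**
 * nand_do_read_oob - read out-of-band data
 * @mtd: MTD device structure
 * @from: offset to read from
 * @ops: oob operation description structure
 *
 * Internal OOB-only read path, called with the device held.
 */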
3865static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
3866 struct mtd_oob_ops *ops)
3867{
3868 unsigned int max_bitflips = 0;
3869 int page, realpage, chipnr;
3870 struct nand_chip *chip = mtd_to_nand(mtd);
3871 struct mtd_ecc_stats stats;
3872 int readlen = ops->ooblen;
3873 int len;
3874 uint8_t *buf = ops->oobbuf;
3875 int ret = 0;
3876
3877 pr_debug("%s: from = 0x%08Lx, len = %i\n",
3878 __func__, (unsigned long long)from, readlen);
3879
3880 stats = mtd->ecc_stats;
3881
3882 len = mtd_oobavail(mtd, ops);
3883
3884 chipnr = (int)(from >> chip->chip_shift);
3885 chip->select_chip(mtd, chipnr);
3886
3887
3888 realpage = (int)(from >> chip->page_shift);
3889 page = realpage & chip->pagemask;
3890
3891 while (1) {
3892 if (ops->mode == MTD_OPS_RAW)
3893 ret = chip->ecc.read_oob_raw(mtd, chip, page);
3894 else
3895 ret = chip->ecc.read_oob(mtd, chip, page);
3896
3897 if (ret < 0)
3898 break;
3899
3900 len = min(len, readlen);
3901 buf = nand_transfer_oob(mtd, buf, ops, len);
3902
3903 if (chip->options & NAND_NEED_READRDY) {
3904
3905 if (!chip->dev_ready)
3906 udelay(chip->chip_delay);
3907 else
3908 nand_wait_ready(mtd);
3909 }
3910
3911 max_bitflips = max_t(unsigned int, max_bitflips, ret);
3912
3913 readlen -= len;
3914 if (!readlen)
3915 break;
3916
3917
3918 realpage++;
3919
3920 page = realpage & chip->pagemask;
3921
3922 if (!page) {
3923 chipnr++;
3924 chip->select_chip(mtd, -1);
3925 chip->select_chip(mtd, chipnr);
3926 }
3927 }
3928 chip->select_chip(mtd, -1);
3929
3930 ops->oobretlen = ops->ooblen - readlen;
3931
3932 if (ret < 0)
3933 return ret;
3934
3935 if (mtd->ecc_stats.failed - stats.failed)
3936 return -EBADMSG;
3937
3938 return max_bitflips;
3939}
3940
3941
3942
3943
3944
3945
3946
3947
3948
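/**
 * nand_read_oob - [MTD Interface] read data and/or out-of-band
 * @mtd: MTD device structure
 * @from: offset to read from
 * @ops: oob operation description structure
 *
 * Dispatches to the OOB-only path when no data buffer is supplied, otherwise
 * to the full data+OOB read path.
 */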
3949static int nand_read_oob(struct mtd_info *mtd, loff_t from,
3950 struct mtd_oob_ops *ops)
3951{
3952 int ret;
3953
3954 ops->retlen = 0;
3955
3956 if (ops->mode != MTD_OPS_PLACE_OOB &&
3957 ops->mode != MTD_OPS_AUTO_OOB &&
3958 ops->mode != MTD_OPS_RAW)
3959 return -ENOTSUPP;
3960
3961 nand_get_device(mtd, FL_READING);
3962
3963 if (!ops->datbuf)
3964 ret = nand_do_read_oob(mtd, from, ops);
3965 else
3966 ret = nand_do_read_ops(mtd, from, ops);
3967
3968 nand_release_device(mtd);
3969 return ret;
3970}
3971
3972
3973
3974
3975
3976
3977
3978
3979
3980
3981
3982
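/**
 * nand_write_page_raw - raw page write function
 * @mtd: mtd info structure
 * @chip: nand chip info structure
 * @buf: data buffer to write
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * Programs the page data (and the OOB area when requested) without any ECC.
 */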
3983int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
3984 const uint8_t *buf, int oob_required, int page)
3985{
3986 int ret;
3987
3988 ret = nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
3989 if (ret)
3990 return ret;
3991
3992 if (oob_required) {
3993 ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize,
3994 false);
3995 if (ret)
3996 return ret;
3997 }
3998
3999 return nand_prog_page_end_op(chip);
4000}
4001EXPORT_SYMBOL(nand_write_page_raw);
4002
4003
4004
4005
4006
4007
4008
4009
4010
4011
4012
4013static int nand_write_page_raw_syndrome(struct mtd_info *mtd,
4014 struct nand_chip *chip,
4015 const uint8_t *buf, int oob_required,
4016 int page)
4017{
4018 int eccsize = chip->ecc.size;
4019 int eccbytes = chip->ecc.bytes;
4020 uint8_t *oob = chip->oob_poi;
4021 int steps, size, ret;
4022
4023 ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
4024 if (ret)
4025 return ret;
4026
4027 for (steps = chip->ecc.steps; steps > 0; steps--) {
4028 ret = nand_write_data_op(chip, buf, eccsize, false);
4029 if (ret)
4030 return ret;
4031
4032 buf += eccsize;
4033
4034 if (chip->ecc.prepad) {
4035 ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
4036 false);
4037 if (ret)
4038 return ret;
4039
4040 oob += chip->ecc.prepad;
4041 }
4042
4043 ret = nand_write_data_op(chip, oob, eccbytes, false);
4044 if (ret)
4045 return ret;
4046
4047 oob += eccbytes;
4048
4049 if (chip->ecc.postpad) {
4050 ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
4051 false);
4052 if (ret)
4053 return ret;
4054
4055 oob += chip->ecc.postpad;
4056 }
4057 }
4058
4059 size = mtd->oobsize - (oob - chip->oob_poi);
4060 if (size) {
4061 ret = nand_write_data_op(chip, oob, size, false);
4062 if (ret)
4063 return ret;
4064 }
4065
4066 return nand_prog_page_end_op(chip);
4067}
4068
4069
4070
4071
4072
4073
4074
4075
4076static int nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
4077 const uint8_t *buf, int oob_required,
4078 int page)
4079{
4080 int i, eccsize = chip->ecc.size, ret;
4081 int eccbytes = chip->ecc.bytes;
4082 int eccsteps = chip->ecc.steps;
4083 uint8_t *ecc_calc = chip->ecc.calc_buf;
4084 const uint8_t *p = buf;
4085
4086
4087 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
4088 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
4089
4090 ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
4091 chip->ecc.total);
4092 if (ret)
4093 return ret;
4094
4095 return chip->ecc.write_page_raw(mtd, chip, buf, 1, page);
4096}
4097
4098
4099
4100
4101
4102
4103
4104
4105
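/**
 * nand_write_page_hwecc - hardware ECC based page write function
 * @mtd: mtd info structure
 * @chip: nand chip info structure
 * @buf: data buffer to write
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * Lets the controller compute the ECC for each step, places the ECC bytes
 * into the OOB layout and programs data plus OOB in a single page program
 * operation.
 */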
4106static int nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
4107 const uint8_t *buf, int oob_required,
4108 int page)
4109{
4110 int i, eccsize = chip->ecc.size, ret;
4111 int eccbytes = chip->ecc.bytes;
4112 int eccsteps = chip->ecc.steps;
4113 uint8_t *ecc_calc = chip->ecc.calc_buf;
4114 const uint8_t *p = buf;
4115
4116 ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
4117 if (ret)
4118 return ret;
4119
4120 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
4121 chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
4122
4123 ret = nand_write_data_op(chip, p, eccsize, false);
4124 if (ret)
4125 return ret;
4126
4127 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
4128 }
4129
4130 ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
4131 chip->ecc.total);
4132 if (ret)
4133 return ret;
4134
4135 ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
4136 if (ret)
4137 return ret;
4138
4139 return nand_prog_page_end_op(chip);
4140}
4141
4142
4143
4144
4145
4146
4147
4148
4149
4150
4151
4152
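/**
 * nand_write_subpage_hwecc - hardware ECC based subpage write function
 * @mtd: mtd info structure
 * @chip: nand chip info structure
 * @offset: column address of the subpage within the page
 * @data_len: data length
 * @buf: data buffer to write
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * ECC is only calculated for the steps covered by the subpage; the ECC bytes
 * of the untouched steps are left at 0xff so they are not overwritten.
 */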
4153static int nand_write_subpage_hwecc(struct mtd_info *mtd,
4154 struct nand_chip *chip, uint32_t offset,
4155 uint32_t data_len, const uint8_t *buf,
4156 int oob_required, int page)
4157{
4158 uint8_t *oob_buf = chip->oob_poi;
4159 uint8_t *ecc_calc = chip->ecc.calc_buf;
4160 int ecc_size = chip->ecc.size;
4161 int ecc_bytes = chip->ecc.bytes;
4162 int ecc_steps = chip->ecc.steps;
4163 uint32_t start_step = offset / ecc_size;
4164 uint32_t end_step = (offset + data_len - 1) / ecc_size;
4165 int oob_bytes = mtd->oobsize / ecc_steps;
4166 int step, ret;
4167
4168 ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
4169 if (ret)
4170 return ret;
4171
4172 for (step = 0; step < ecc_steps; step++) {
4173
4174 chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
4175
4176
4177 ret = nand_write_data_op(chip, buf, ecc_size, false);
4178 if (ret)
4179 return ret;
4180
4181
4182 if ((step < start_step) || (step > end_step))
4183 memset(ecc_calc, 0xff, ecc_bytes);
4184 else
4185 chip->ecc.calculate(mtd, buf, ecc_calc);
4186
4187
4188
4189 if (!oob_required || (step < start_step) || (step > end_step))
4190 memset(oob_buf, 0xff, oob_bytes);
4191
4192 buf += ecc_size;
4193 ecc_calc += ecc_bytes;
4194 oob_buf += oob_bytes;
4195 }
4196
4197
4198
4199 ecc_calc = chip->ecc.calc_buf;
4200 ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
4201 chip->ecc.total);
4202 if (ret)
4203 return ret;
4204
4205
4206 ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
4207 if (ret)
4208 return ret;
4209
4210 return nand_prog_page_end_op(chip);
4211}
4212
4213
4214
4215
4216
4217
4218
4219
4220
4221
4222
4223
4224
4225static int nand_write_page_syndrome(struct mtd_info *mtd,
4226 struct nand_chip *chip,
4227 const uint8_t *buf, int oob_required,
4228 int page)
4229{
4230 int i, eccsize = chip->ecc.size;
4231 int eccbytes = chip->ecc.bytes;
4232 int eccsteps = chip->ecc.steps;
4233 const uint8_t *p = buf;
4234 uint8_t *oob = chip->oob_poi;
4235 int ret;
4236
4237 ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
4238 if (ret)
4239 return ret;
4240
4241 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
4242 chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
4243
4244 ret = nand_write_data_op(chip, p, eccsize, false);
4245 if (ret)
4246 return ret;
4247
4248 if (chip->ecc.prepad) {
4249 ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
4250 false);
4251 if (ret)
4252 return ret;
4253
4254 oob += chip->ecc.prepad;
4255 }
4256
4257 chip->ecc.calculate(mtd, p, oob);
4258
4259 ret = nand_write_data_op(chip, oob, eccbytes, false);
4260 if (ret)
4261 return ret;
4262
4263 oob += eccbytes;
4264
4265 if (chip->ecc.postpad) {
4266 ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
4267 false);
4268 if (ret)
4269 return ret;
4270
4271 oob += chip->ecc.postpad;
4272 }
4273 }
4274
4275
4276 i = mtd->oobsize - (oob - chip->oob_poi);
4277 if (i) {
4278 ret = nand_write_data_op(chip, oob, i, false);
4279 if (ret)
4280 return ret;
4281 }
4282
4283 return nand_prog_page_end_op(chip);
4284}
4285
4286
4287
4288
4289
4290
4291
4292
4293
4294
4295
4296
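/**
 * nand_write_page - write one page
 * @mtd: MTD device structure
 * @chip: NAND chip descriptor
 * @offset: address offset within the page
 * @data_len: length of the actual data to write
 * @buf: the data to write
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 * @raw: use the raw write path
 *
 * Dispatches to the raw, subpage or full-page write function depending on
 * the request and the chip capabilities.
 */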
4297static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
4298 uint32_t offset, int data_len, const uint8_t *buf,
4299 int oob_required, int page, int raw)
4300{
4301 int status, subpage;
4302
4303 if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
4304 chip->ecc.write_subpage)
4305 subpage = offset || (data_len < mtd->writesize);
4306 else
4307 subpage = 0;
4308
4309 if (unlikely(raw))
4310 status = chip->ecc.write_page_raw(mtd, chip, buf,
4311 oob_required, page);
4312 else if (subpage)
4313 status = chip->ecc.write_subpage(mtd, chip, offset, data_len,
4314 buf, oob_required, page);
4315 else
4316 status = chip->ecc.write_page(mtd, chip, buf, oob_required,
4317 page);
4318
4319 if (status < 0)
4320 return status;
4321
4322 return 0;
4323}
4324
4325
4326
4327
4328
4329
4330
4331
4332static uint8_t *nand_fill_oob(struct mtd_info *mtd, uint8_t *oob, size_t len,
4333 struct mtd_oob_ops *ops)
4334{
4335 struct nand_chip *chip = mtd_to_nand(mtd);
4336 int ret;
4337
4338
4339
4340
4341
4342 memset(chip->oob_poi, 0xff, mtd->oobsize);
4343
4344 switch (ops->mode) {
4345
4346 case MTD_OPS_PLACE_OOB:
4347 case MTD_OPS_RAW:
4348 memcpy(chip->oob_poi + ops->ooboffs, oob, len);
4349 return oob + len;
4350
4351 case MTD_OPS_AUTO_OOB:
4352 ret = mtd_ooblayout_set_databytes(mtd, oob, chip->oob_poi,
4353 ops->ooboffs, len);
4354 BUG_ON(ret);
4355 return oob + len;
4356
4357 default:
4358 BUG();
4359 }
4360 return NULL;
4361}
4362
4363#define NOTALIGNED(x) ((x & (chip->subpagesize - 1)) != 0)
4364
4365
4366
4367
4368
4369
4370
4371
4372
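/**
 * nand_do_write_ops - write data with ECC
 * @mtd: MTD device structure
 * @to: offset to write to
 * @ops: oob operation description structure
 *
 * Internal write path, called with the device held. Rejects writes that are
 * not aligned to the subpage size and bounces unaligned or inaccessible
 * buffers through the page buffer.
 */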
4373static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
4374 struct mtd_oob_ops *ops)
4375{
4376 int chipnr, realpage, page, column;
4377 struct nand_chip *chip = mtd_to_nand(mtd);
4378 uint32_t writelen = ops->len;
4379
4380 uint32_t oobwritelen = ops->ooblen;
4381 uint32_t oobmaxlen = mtd_oobavail(mtd, ops);
4382
4383 uint8_t *oob = ops->oobbuf;
4384 uint8_t *buf = ops->datbuf;
4385 int ret;
4386 int oob_required = oob ? 1 : 0;
4387
4388 ops->retlen = 0;
4389 if (!writelen)
4390 return 0;
4391
4392
4393 if (NOTALIGNED(to) || NOTALIGNED(ops->len)) {
4394		pr_notice("%s: attempt to write non-page-aligned data\n",
4395 __func__);
4396 return -EINVAL;
4397 }
4398
4399 column = to & (mtd->writesize - 1);
4400
4401 chipnr = (int)(to >> chip->chip_shift);
4402 chip->select_chip(mtd, chipnr);
4403
4404
4405 if (nand_check_wp(mtd)) {
4406 ret = -EIO;
4407 goto err_out;
4408 }
4409
4410 realpage = (int)(to >> chip->page_shift);
4411 page = realpage & chip->pagemask;
4412
4413
4414 if (to <= ((loff_t)chip->pagebuf << chip->page_shift) &&
4415 ((loff_t)chip->pagebuf << chip->page_shift) < (to + ops->len))
4416 chip->pagebuf = -1;
4417
4418
4419 if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) {
4420 ret = -EINVAL;
4421 goto err_out;
4422 }
4423
4424 while (1) {
4425 int bytes = mtd->writesize;
4426 uint8_t *wbuf = buf;
4427 int use_bufpoi;
4428 int part_pagewr = (column || writelen < mtd->writesize);
4429
4430 if (part_pagewr)
4431 use_bufpoi = 1;
4432 else if (chip->options & NAND_USE_BOUNCE_BUFFER)
4433 use_bufpoi = !virt_addr_valid(buf) ||
4434 !IS_ALIGNED((unsigned long)buf,
4435 chip->buf_align);
4436 else
4437 use_bufpoi = 0;
4438
4439
4440 if (use_bufpoi) {
4441 pr_debug("%s: using write bounce buffer for buf@%p\n",
4442 __func__, buf);
4443 if (part_pagewr)
4444 bytes = min_t(int, bytes - column, writelen);
4445 chip->pagebuf = -1;
4446 memset(chip->data_buf, 0xff, mtd->writesize);
4447 memcpy(&chip->data_buf[column], buf, bytes);
4448 wbuf = chip->data_buf;
4449 }
4450
4451 if (unlikely(oob)) {
4452 size_t len = min(oobwritelen, oobmaxlen);
4453 oob = nand_fill_oob(mtd, oob, len, ops);
4454 oobwritelen -= len;
4455 } else {
4456
4457 memset(chip->oob_poi, 0xff, mtd->oobsize);
4458 }
4459
4460 ret = nand_write_page(mtd, chip, column, bytes, wbuf,
4461 oob_required, page,
4462 (ops->mode == MTD_OPS_RAW));
4463 if (ret)
4464 break;
4465
4466 writelen -= bytes;
4467 if (!writelen)
4468 break;
4469
4470 column = 0;
4471 buf += bytes;
4472 realpage++;
4473
4474 page = realpage & chip->pagemask;
4475
4476 if (!page) {
4477 chipnr++;
4478 chip->select_chip(mtd, -1);
4479 chip->select_chip(mtd, chipnr);
4480 }
4481 }
4482
4483 ops->retlen = ops->len - writelen;
4484 if (unlikely(oob))
4485 ops->oobretlen = ops->ooblen;
4486
4487err_out:
4488 chip->select_chip(mtd, -1);
4489 return ret;
4490}
4491
4492
4493
4494
4495
4496
4497
4498
4499
4500
4501
4502
4503static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
4504 size_t *retlen, const uint8_t *buf)
4505{
4506 struct nand_chip *chip = mtd_to_nand(mtd);
4507 int chipnr = (int)(to >> chip->chip_shift);
4508 struct mtd_oob_ops ops;
4509 int ret;
4510
4511
4512 panic_nand_get_device(chip, mtd, FL_WRITING);
4513
4514 chip->select_chip(mtd, chipnr);
4515
4516
4517 panic_nand_wait(mtd, chip, 400);
4518
4519 memset(&ops, 0, sizeof(ops));
4520 ops.len = len;
4521 ops.datbuf = (uint8_t *)buf;
4522 ops.mode = MTD_OPS_PLACE_OOB;
4523
4524 ret = nand_do_write_ops(mtd, to, &ops);
4525
4526 *retlen = ops.retlen;
4527 return ret;
4528}
4529
4530
4531
4532
4533
4534
4535
4536
4537
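/**
 * nand_do_write_oob - write out-of-band data
 * @mtd: MTD device structure
 * @to: offset to write to
 * @ops: oob operation description structure
 *
 * Internal OOB-only write path, called with the device held. Resets the
 * target die before writing the spare area.
 */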
4538static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
4539 struct mtd_oob_ops *ops)
4540{
4541 int chipnr, page, status, len;
4542 struct nand_chip *chip = mtd_to_nand(mtd);
4543
4544 pr_debug("%s: to = 0x%08x, len = %i\n",
4545 __func__, (unsigned int)to, (int)ops->ooblen);
4546
4547 len = mtd_oobavail(mtd, ops);
4548
4549
4550 if ((ops->ooboffs + ops->ooblen) > len) {
4551 pr_debug("%s: attempt to write past end of page\n",
4552 __func__);
4553 return -EINVAL;
4554 }
4555
4556 chipnr = (int)(to >> chip->chip_shift);
4557
4558
4559
4560
4561
4562
4563
4564 nand_reset(chip, chipnr);
4565
4566 chip->select_chip(mtd, chipnr);
4567
4568
4569 page = (int)(to >> chip->page_shift);
4570
4571
4572 if (nand_check_wp(mtd)) {
4573 chip->select_chip(mtd, -1);
4574 return -EROFS;
4575 }
4576
4577
4578 if (page == chip->pagebuf)
4579 chip->pagebuf = -1;
4580
4581 nand_fill_oob(mtd, ops->oobbuf, ops->ooblen, ops);
4582
4583 if (ops->mode == MTD_OPS_RAW)
4584 status = chip->ecc.write_oob_raw(mtd, chip, page & chip->pagemask);
4585 else
4586 status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask);
4587
4588 chip->select_chip(mtd, -1);
4589
4590 if (status)
4591 return status;
4592
4593 ops->oobretlen = ops->ooblen;
4594
4595 return 0;
4596}
4597
4598
4599
4600
4601
4602
4603
4604static int nand_write_oob(struct mtd_info *mtd, loff_t to,
4605 struct mtd_oob_ops *ops)
4606{
4607 int ret = -ENOTSUPP;
4608
4609 ops->retlen = 0;
4610
4611 nand_get_device(mtd, FL_WRITING);
4612
4613 switch (ops->mode) {
4614 case MTD_OPS_PLACE_OOB:
4615 case MTD_OPS_AUTO_OOB:
4616 case MTD_OPS_RAW:
4617 break;
4618
4619 default:
4620 goto out;
4621 }
4622
4623 if (!ops->datbuf)
4624 ret = nand_do_write_oob(mtd, to, ops);
4625 else
4626 ret = nand_do_write_ops(mtd, to, ops);
4627
4628out:
4629 nand_release_device(mtd);
4630 return ret;
4631}
4632
4633
4634
4635
4636
4637
4638
4639
4640static int single_erase(struct mtd_info *mtd, int page)
4641{
4642 struct nand_chip *chip = mtd_to_nand(mtd);
4643 unsigned int eraseblock;
4644
4645
4646 eraseblock = page >> (chip->phys_erase_shift - chip->page_shift);
4647
4648 return nand_erase_op(chip, eraseblock);
4649}
4650
4651
4652
4653
4654
4655
4656
4657
4658static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
4659{
4660 return nand_erase_nand(mtd, instr, 0);
4661}
4662
4663
4664
4665
4666
4667
4668
4669
4670
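/**
 * nand_erase_nand - erase block(s)
 * @mtd: MTD device structure
 * @instr: erase instruction
 * @allowbbt: allow erasing the blocks reserved for the bad block table
 *
 * Erases one or more blocks, skipping blocks marked bad and invalidating the
 * page cache when it falls inside an erased block.
 */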
4671int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
4672 int allowbbt)
4673{
4674 int page, status, pages_per_block, ret, chipnr;
4675 struct nand_chip *chip = mtd_to_nand(mtd);
4676 loff_t len;
4677
4678 pr_debug("%s: start = 0x%012llx, len = %llu\n",
4679 __func__, (unsigned long long)instr->addr,
4680 (unsigned long long)instr->len);
4681
4682 if (check_offs_len(mtd, instr->addr, instr->len))
4683 return -EINVAL;
4684
4685
4686 nand_get_device(mtd, FL_ERASING);
4687
4688
4689 page = (int)(instr->addr >> chip->page_shift);
4690 chipnr = (int)(instr->addr >> chip->chip_shift);
4691
4692
4693 pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
4694
4695
4696 chip->select_chip(mtd, chipnr);
4697
4698
4699 if (nand_check_wp(mtd)) {
4700 pr_debug("%s: device is write protected!\n",
4701 __func__);
4702 ret = -EIO;
4703 goto erase_exit;
4704 }
4705
4706
4707 len = instr->len;
4708
4709 while (len) {
4710
4711 if (nand_block_checkbad(mtd, ((loff_t) page) <<
4712 chip->page_shift, allowbbt)) {
4713 pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
4714 __func__, page);
4715 ret = -EIO;
4716 goto erase_exit;
4717 }
4718
4719
4720
4721
4722
4723 if (page <= chip->pagebuf && chip->pagebuf <
4724 (page + pages_per_block))
4725 chip->pagebuf = -1;
4726
4727 status = chip->erase(mtd, page & chip->pagemask);
4728
4729
4730 if (status) {
4731 pr_debug("%s: failed erase, page 0x%08x\n",
4732 __func__, page);
4733 ret = -EIO;
4734 instr->fail_addr =
4735 ((loff_t)page << chip->page_shift);
4736 goto erase_exit;
4737 }
4738
4739
4740 len -= (1ULL << chip->phys_erase_shift);
4741 page += pages_per_block;
4742
4743
4744 if (len && !(page & chip->pagemask)) {
4745 chipnr++;
4746 chip->select_chip(mtd, -1);
4747 chip->select_chip(mtd, chipnr);
4748 }
4749 }
4750
4751 ret = 0;
4752erase_exit:
4753
4754
4755 chip->select_chip(mtd, -1);
4756 nand_release_device(mtd);
4757
4758
4759 return ret;
4760}
4761
4762
4763
4764
4765
4766
4767
4768static void nand_sync(struct mtd_info *mtd)
4769{
4770 pr_debug("%s: called\n", __func__);
4771
4772
4773 nand_get_device(mtd, FL_SYNCING);
4774
4775 nand_release_device(mtd);
4776}
4777
4778
4779
4780
4781
4782
4783static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
4784{
4785 struct nand_chip *chip = mtd_to_nand(mtd);
4786 int chipnr = (int)(offs >> chip->chip_shift);
4787 int ret;
4788
4789
4790 nand_get_device(mtd, FL_READING);
4791 chip->select_chip(mtd, chipnr);
4792
4793 ret = nand_block_checkbad(mtd, offs, 0);
4794
4795 chip->select_chip(mtd, -1);
4796 nand_release_device(mtd);
4797
4798 return ret;
4799}
4800
4801
4802
4803
4804
4805
4806static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
4807{
4808 int ret;
4809
4810 ret = nand_block_isbad(mtd, ofs);
4811 if (ret) {
4812
4813 if (ret > 0)
4814 return 0;
4815 return ret;
4816 }
4817
4818 return nand_block_markbad_lowlevel(mtd, ofs);
4819}
4820
4821
4822
4823
4824
4825
4826
4827static int nand_max_bad_blocks(struct mtd_info *mtd, loff_t ofs, size_t len)
4828{
4829 struct nand_chip *chip = mtd_to_nand(mtd);
4830 u32 part_start_block;
4831 u32 part_end_block;
4832 u32 part_start_die;
4833 u32 part_end_die;
4834
4835
4836
4837
4838
4839 if (!chip->max_bb_per_die || !chip->blocks_per_die)
4840 return -ENOTSUPP;
4841
4842
4843 part_start_block = mtd_div_by_eb(ofs, mtd);
4844 part_end_block = mtd_div_by_eb(len, mtd) + part_start_block - 1;
4845
4846
4847 part_start_die = part_start_block / chip->blocks_per_die;
4848 part_end_die = part_end_block / chip->blocks_per_die;
4849
4850
4851
4852
4853
4854 return chip->max_bb_per_die * (part_end_die - part_start_die + 1);
4855}
4856
4857
4858
4859
4860
4861
4862
4863
4864static int nand_default_set_features(struct mtd_info *mtd,
4865 struct nand_chip *chip, int addr,
4866 uint8_t *subfeature_param)
4867{
4868 return nand_set_features_op(chip, addr, subfeature_param);
4869}
4870
4871
4872
4873
4874
4875
4876
4877
4878static int nand_default_get_features(struct mtd_info *mtd,
4879 struct nand_chip *chip, int addr,
4880 uint8_t *subfeature_param)
4881{
4882 return nand_get_features_op(chip, addr, subfeature_param);
4883}
4884
4885
4886
4887
4888
4889
4890
4891
4892
4893
4894
4895int nand_get_set_features_notsupp(struct mtd_info *mtd, struct nand_chip *chip,
4896 int addr, u8 *subfeature_param)
4897{
4898 return -ENOTSUPP;
4899}
4900EXPORT_SYMBOL(nand_get_set_features_notsupp);
4901
4902
4903
4904
4905
4906static int nand_suspend(struct mtd_info *mtd)
4907{
4908 return nand_get_device(mtd, FL_PM_SUSPENDED);
4909}
4910
4911
4912
4913
4914
4915static void nand_resume(struct mtd_info *mtd)
4916{
4917 struct nand_chip *chip = mtd_to_nand(mtd);
4918
4919 if (chip->state == FL_PM_SUSPENDED)
4920 nand_release_device(mtd);
4921 else
4922 pr_err("%s called for a chip which is not in suspended state\n",
4923 __func__);
4924}
4925
4926
4927
4928
4929
4930
4931static void nand_shutdown(struct mtd_info *mtd)
4932{
4933 nand_get_device(mtd, FL_PM_SUSPENDED);
4934}
4935
4936
4937static void nand_set_defaults(struct nand_chip *chip)
4938{
4939 unsigned int busw = chip->options & NAND_BUSWIDTH_16;
4940
4941
4942 if (!chip->chip_delay)
4943 chip->chip_delay = 20;
4944
4945
4946 if (!chip->cmdfunc && !chip->exec_op)
4947 chip->cmdfunc = nand_command;
4948
4949
4950 if (chip->waitfunc == NULL)
4951 chip->waitfunc = nand_wait;
4952
4953 if (!chip->select_chip)
4954 chip->select_chip = nand_select_chip;
4955
4956
4957 if (!chip->set_features)
4958 chip->set_features = nand_default_set_features;
4959 if (!chip->get_features)
4960 chip->get_features = nand_default_get_features;
4961
4962
4963 if (!chip->read_byte || chip->read_byte == nand_read_byte)
4964 chip->read_byte = busw ? nand_read_byte16 : nand_read_byte;
4965 if (!chip->read_word)
4966 chip->read_word = nand_read_word;
4967 if (!chip->block_bad)
4968 chip->block_bad = nand_block_bad;
4969 if (!chip->block_markbad)
4970 chip->block_markbad = nand_default_block_markbad;
4971 if (!chip->write_buf || chip->write_buf == nand_write_buf)
4972 chip->write_buf = busw ? nand_write_buf16 : nand_write_buf;
4973 if (!chip->write_byte || chip->write_byte == nand_write_byte)
4974 chip->write_byte = busw ? nand_write_byte16 : nand_write_byte;
4975 if (!chip->read_buf || chip->read_buf == nand_read_buf)
4976 chip->read_buf = busw ? nand_read_buf16 : nand_read_buf;
4977 if (!chip->scan_bbt)
4978 chip->scan_bbt = nand_default_bbt;
4979
4980 if (!chip->controller) {
4981 chip->controller = &chip->hwcontrol;
4982 nand_hw_control_init(chip->controller);
4983 }
4984
4985 if (!chip->buf_align)
4986 chip->buf_align = 1;
4987}
4988
4989
4990static void sanitize_string(uint8_t *s, size_t len)
4991{
4992 ssize_t i;
4993
4994
4995 s[len - 1] = 0;
4996
4997
4998 for (i = 0; i < len - 1; i++) {
4999 if (s[i] < ' ' || s[i] > 127)
5000 s[i] = '?';
5001 }
5002
5003
5004 strim(s);
5005}
5006
5007static u16 onfi_crc16(u16 crc, u8 const *p, size_t len)
5008{
5009 int i;
5010 while (len--) {
5011 crc ^= *p++ << 8;
5012 for (i = 0; i < 8; i++)
5013 crc = (crc << 1) ^ ((crc & 0x8000) ? 0x8005 : 0);
5014 }
5015
5016 return crc;
5017}
5018
5019
5020static int nand_flash_detect_ext_param_page(struct nand_chip *chip,
5021 struct nand_onfi_params *p)
5022{
5023 struct onfi_ext_param_page *ep;
5024 struct onfi_ext_section *s;
5025 struct onfi_ext_ecc_info *ecc;
5026 uint8_t *cursor;
5027 int ret;
5028 int len;
5029 int i;
5030
5031 len = le16_to_cpu(p->ext_param_page_length) * 16;
5032 ep = kmalloc(len, GFP_KERNEL);
5033 if (!ep)
5034 return -ENOMEM;
5035
5036
5037 ret = nand_read_param_page_op(chip, 0, NULL, 0);
5038 if (ret)
5039 goto ext_out;
5040
5041
5042 ret = nand_change_read_column_op(chip,
5043 sizeof(*p) * p->num_of_param_pages,
5044 ep, len, true);
5045 if (ret)
5046 goto ext_out;
5047
5048 ret = -EINVAL;
5049 if ((onfi_crc16(ONFI_CRC_BASE, ((uint8_t *)ep) + 2, len - 2)
5050 != le16_to_cpu(ep->crc))) {
5051		pr_debug("extended param page CRC check failed\n");
5052 goto ext_out;
5053 }
5054
5055
5056
5057
5058
5059 if (strncmp(ep->sig, "EPPS", 4)) {
5060		pr_debug("invalid extended param page signature\n");
5061 goto ext_out;
5062 }
5063
5064
5065 cursor = (uint8_t *)(ep + 1);
5066 for (i = 0; i < ONFI_EXT_SECTION_MAX; i++) {
5067 s = ep->sections + i;
5068 if (s->type == ONFI_SECTION_TYPE_2)
5069 break;
5070 cursor += s->length * 16;
5071 }
5072 if (i == ONFI_EXT_SECTION_MAX) {
5073		pr_debug("could not find the ECC section\n");
5074 goto ext_out;
5075 }
5076
5077
5078 ecc = (struct onfi_ext_ecc_info *)cursor;
5079
5080 if (!ecc->codeword_size) {
5081 pr_debug("Invalid codeword size\n");
5082 goto ext_out;
5083 }
5084
5085 chip->ecc_strength_ds = ecc->ecc_bits;
5086 chip->ecc_step_ds = 1 << ecc->codeword_size;
5087 ret = 0;
5088
5089ext_out:
5090 kfree(ep);
5091 return ret;
5092}
5093
5094
5095
5096
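/*
 * Try to detect an ONFI-compliant chip by reading and parsing its parameter
 * page. Returns 1 if a valid ONFI parameter page was found, 0 if the chip is
 * not ONFI compliant or the page could not be read, or a negative error code
 * on allocation failure.
 */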
5097static int nand_flash_detect_onfi(struct nand_chip *chip)
5098{
5099 struct mtd_info *mtd = nand_to_mtd(chip);
5100 struct nand_onfi_params *p;
5101 char id[4];
5102 int i, ret, val;
5103
5104
5105 ret = nand_readid_op(chip, 0x20, id, sizeof(id));
5106 if (ret || strncmp(id, "ONFI", 4))
5107 return 0;
5108
5109
5110 p = kzalloc(sizeof(*p), GFP_KERNEL);
5111 if (!p)
5112 return -ENOMEM;
5113
5114 ret = nand_read_param_page_op(chip, 0, NULL, 0);
5115 if (ret) {
5116 ret = 0;
5117 goto free_onfi_param_page;
5118 }
5119
5120 for (i = 0; i < 3; i++) {
5121 ret = nand_read_data_op(chip, p, sizeof(*p), true);
5122 if (ret) {
5123 ret = 0;
5124 goto free_onfi_param_page;
5125 }
5126
5127 if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 254) ==
5128 le16_to_cpu(p->crc)) {
5129 break;
5130 }
5131 }
5132
5133 if (i == 3) {
5134 pr_err("Could not find valid ONFI parameter page; aborting\n");
5135 goto free_onfi_param_page;
5136 }
5137
5138
5139 val = le16_to_cpu(p->revision);
5140 if (val & (1 << 5))
5141 chip->parameters.onfi.version = 23;
5142 else if (val & (1 << 4))
5143 chip->parameters.onfi.version = 22;
5144 else if (val & (1 << 3))
5145 chip->parameters.onfi.version = 21;
5146 else if (val & (1 << 2))
5147 chip->parameters.onfi.version = 20;
5148 else if (val & (1 << 1))
5149 chip->parameters.onfi.version = 10;
5150
5151 if (!chip->parameters.onfi.version) {
5152 pr_info("unsupported ONFI version: %d\n", val);
5153 goto free_onfi_param_page;
5154 } else {
5155 ret = 1;
5156 }
5157
5158 sanitize_string(p->manufacturer, sizeof(p->manufacturer));
5159 sanitize_string(p->model, sizeof(p->model));
5160 strncpy(chip->parameters.model, p->model,
5161 sizeof(chip->parameters.model) - 1);
5162
5163 mtd->writesize = le32_to_cpu(p->byte_per_page);
5164
5165
5166
5167
5168
5169
5170 mtd->erasesize = 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
5171 mtd->erasesize *= mtd->writesize;
5172
5173 mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
5174
5175
5176 chip->chipsize = 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
5177 chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
5178 chip->bits_per_cell = p->bits_per_cell;
5179
5180 chip->max_bb_per_die = le16_to_cpu(p->bb_per_lun);
5181 chip->blocks_per_die = le32_to_cpu(p->blocks_per_lun);
5182
5183 if (le16_to_cpu(p->features) & ONFI_FEATURE_16_BIT_BUS)
5184 chip->options |= NAND_BUSWIDTH_16;
5185
5186 if (p->ecc_bits != 0xff) {
5187 chip->ecc_strength_ds = p->ecc_bits;
5188 chip->ecc_step_ds = 512;
5189 } else if (chip->parameters.onfi.version >= 21 &&
5190 (le16_to_cpu(p->features) & ONFI_FEATURE_EXT_PARAM_PAGE)) {
5191
5192
5193
5194
5195
5196
5197
5198 if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
5199 chip->cmdfunc = nand_command_lp;
5200
5201
5202 if (nand_flash_detect_ext_param_page(chip, p))
5203 pr_warn("Failed to detect ONFI extended param page\n");
5204 } else {
5205 pr_warn("Could not retrieve ONFI ECC requirements\n");
5206 }
5207
5208
5209 if (le16_to_cpu(p->opt_cmd) & ONFI_OPT_CMD_SET_GET_FEATURES) {
5210 chip->parameters.supports_set_get_features = true;
5211 bitmap_set(chip->parameters.get_feature_list,
5212 ONFI_FEATURE_ADDR_TIMING_MODE, 1);
5213 bitmap_set(chip->parameters.set_feature_list,
5214 ONFI_FEATURE_ADDR_TIMING_MODE, 1);
5215 }
5216 chip->parameters.onfi.tPROG = le16_to_cpu(p->t_prog);
5217 chip->parameters.onfi.tBERS = le16_to_cpu(p->t_bers);
5218 chip->parameters.onfi.tR = le16_to_cpu(p->t_r);
5219 chip->parameters.onfi.tCCS = le16_to_cpu(p->t_ccs);
5220 chip->parameters.onfi.async_timing_mode =
5221 le16_to_cpu(p->async_timing_mode);
5222 chip->parameters.onfi.vendor_revision =
5223 le16_to_cpu(p->vendor_revision);
5224 memcpy(chip->parameters.onfi.vendor, p->vendor,
5225 sizeof(p->vendor));
5226
5227free_onfi_param_page:
5228 kfree(p);
5229 return ret;
5230}
5231
5232
5233
5234
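/*
 * Try to detect a JEDEC-compliant chip by reading and parsing its parameter
 * page.
 */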
5235static int nand_flash_detect_jedec(struct nand_chip *chip)
5236{
5237 struct mtd_info *mtd = nand_to_mtd(chip);
5238 struct nand_jedec_params *p;
5239 struct jedec_ecc_info *ecc;
5240 int jedec_version = 0;
5241 char id[5];
5242 int i, val, ret;
5243
5244
5245 ret = nand_readid_op(chip, 0x40, id, sizeof(id));
5246 if (ret || strncmp(id, "JEDEC", sizeof(id)))
5247 return 0;
5248
5249
5250 p = kzalloc(sizeof(*p), GFP_KERNEL);
5251 if (!p)
5252 return -ENOMEM;
5253
5254 ret = nand_read_param_page_op(chip, 0x40, NULL, 0);
5255 if (ret) {
5256 ret = 0;
5257 goto free_jedec_param_page;
5258 }
5259
5260 for (i = 0; i < 3; i++) {
5261 ret = nand_read_data_op(chip, p, sizeof(*p), true);
5262 if (ret) {
5263 ret = 0;
5264 goto free_jedec_param_page;
5265 }
5266
5267 if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 510) ==
5268 le16_to_cpu(p->crc))
5269 break;
5270 }
5271
5272 if (i == 3) {
5273 pr_err("Could not find valid JEDEC parameter page; aborting\n");
5274 goto free_jedec_param_page;
5275 }
5276
5277
5278 val = le16_to_cpu(p->revision);
5279 if (val & (1 << 2))
5280 jedec_version = 10;
5281 else if (val & (1 << 1))
5282 jedec_version = 1;
5283
5284 if (!jedec_version) {
5285 pr_info("unsupported JEDEC version: %d\n", val);
5286 goto free_jedec_param_page;
5287 }
5288
5289 sanitize_string(p->manufacturer, sizeof(p->manufacturer));
5290 sanitize_string(p->model, sizeof(p->model));
5291 strncpy(chip->parameters.model, p->model,
5292 sizeof(chip->parameters.model) - 1);
5293
5294 mtd->writesize = le32_to_cpu(p->byte_per_page);
5295
5296
5297 mtd->erasesize = 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
5298 mtd->erasesize *= mtd->writesize;
5299
5300 mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
5301
5302
5303 chip->chipsize = 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
5304 chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
5305 chip->bits_per_cell = p->bits_per_cell;
5306
5307 if (le16_to_cpu(p->features) & JEDEC_FEATURE_16_BIT_BUS)
5308 chip->options |= NAND_BUSWIDTH_16;
5309
5310
5311 ecc = &p->ecc_info[0];
5312
5313 if (ecc->codeword_size >= 9) {
5314 chip->ecc_strength_ds = ecc->ecc_bits;
5315 chip->ecc_step_ds = 1 << ecc->codeword_size;
5316 } else {
5317 pr_warn("Invalid codeword size\n");
5318 }
5319
5320free_jedec_param_page:
5321 kfree(p);
5322 return ret;
5323}
5324
5325
5326
5327
5328
5329
5330
5331
5332
5333
5334
5335
5336static int nand_id_has_period(u8 *id_data, int arrlen, int period)
5337{
5338 int i, j;
5339 for (i = 0; i < period; i++)
5340 for (j = i + period; j < arrlen; j += period)
5341 if (id_data[i] != id_data[j])
5342 return 0;
5343 return 1;
5344}
5345
5346
5347
5348
5349
5350
5351
5352
5353
5354static int nand_id_len(u8 *id_data, int arrlen)
5355{
5356 int last_nonzero, period;
5357
5358
5359 for (last_nonzero = arrlen - 1; last_nonzero >= 0; last_nonzero--)
5360 if (id_data[last_nonzero])
5361 break;
5362
5363
5364 if (last_nonzero < 0)
5365 return 0;
5366
5367
5368 for (period = 1; period < arrlen; period++)
5369 if (nand_id_has_period(id_data, arrlen, period))
5370 break;
5371
5372
5373 if (period < arrlen)
5374 return period;
5375
5376
5377 if (last_nonzero < arrlen - 1)
5378 return last_nonzero + 1;
5379
5380
5381 return arrlen;
5382}
5383
5384
5385static int nand_get_bits_per_cell(u8 cellinfo)
5386{
5387 int bits;
5388
5389 bits = cellinfo & NAND_CI_CELLTYPE_MSK;
5390 bits >>= NAND_CI_CELLTYPE_SHIFT;
5391 return bits + 1;
5392}
5393
5394
5395
5396
5397
5398
5399void nand_decode_ext_id(struct nand_chip *chip)
5400{
5401 struct mtd_info *mtd = nand_to_mtd(chip);
5402 int extid;
5403 u8 *id_data = chip->id.data;
5404
5405 chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
5406
5407 extid = id_data[3];
5408
5409
5410 mtd->writesize = 1024 << (extid & 0x03);
5411 extid >>= 2;
5412
5413 mtd->oobsize = (8 << (extid & 0x01)) * (mtd->writesize >> 9);
5414 extid >>= 2;
5415
5416 mtd->erasesize = (64 * 1024) << (extid & 0x03);
5417 extid >>= 2;
5418
5419 if (extid & 0x1)
5420 chip->options |= NAND_BUSWIDTH_16;
5421}
5422EXPORT_SYMBOL_GPL(nand_decode_ext_id);
5423
5424
5425
5426
5427
5428
5429static void nand_decode_id(struct nand_chip *chip, struct nand_flash_dev *type)
5430{
5431 struct mtd_info *mtd = nand_to_mtd(chip);
5432
5433 mtd->erasesize = type->erasesize;
5434 mtd->writesize = type->pagesize;
5435 mtd->oobsize = mtd->writesize / 32;
5436
5437
5438 chip->bits_per_cell = 1;
5439}
5440
5441
5442
5443
5444
5445
5446static void nand_decode_bbm_options(struct nand_chip *chip)
5447{
5448 struct mtd_info *mtd = nand_to_mtd(chip);
5449
5450
5451 if (mtd->writesize > 512 || (chip->options & NAND_BUSWIDTH_16))
5452 chip->badblockpos = NAND_LARGE_BADBLOCK_POS;
5453 else
5454 chip->badblockpos = NAND_SMALL_BADBLOCK_POS;
5455}
5456
5457static inline bool is_full_id_nand(struct nand_flash_dev *type)
5458{
5459 return type->id_len;
5460}
5461
5462static bool find_full_id_nand(struct nand_chip *chip,
5463 struct nand_flash_dev *type)
5464{
5465 struct mtd_info *mtd = nand_to_mtd(chip);
5466 u8 *id_data = chip->id.data;
5467
5468 if (!strncmp(type->id, id_data, type->id_len)) {
5469 mtd->writesize = type->pagesize;
5470 mtd->erasesize = type->erasesize;
5471 mtd->oobsize = type->oobsize;
5472
5473 chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
5474 chip->chipsize = (uint64_t)type->chipsize << 20;
5475 chip->options |= type->options;
5476 chip->ecc_strength_ds = NAND_ECC_STRENGTH(type);
5477 chip->ecc_step_ds = NAND_ECC_STEP(type);
5478 chip->onfi_timing_mode_default =
5479 type->onfi_timing_mode_default;
5480
5481 strncpy(chip->parameters.model, type->name,
5482 sizeof(chip->parameters.model) - 1);
5483
5484 return true;
5485 }
5486 return false;
5487}
5488
5489
5490
5491
5492
5493
5494static void nand_manufacturer_detect(struct nand_chip *chip)
5495{
5496
5497
5498
5499
5500 if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
5501 chip->manufacturer.desc->ops->detect) {
5502
5503 chip->bits_per_cell = nand_get_bits_per_cell(chip->id.data[2]);
5504 chip->manufacturer.desc->ops->detect(chip);
5505 } else {
5506 nand_decode_ext_id(chip);
5507 }
5508}
5509
5510
5511
5512
5513
5514
5515
5516static int nand_manufacturer_init(struct nand_chip *chip)
5517{
5518 if (!chip->manufacturer.desc || !chip->manufacturer.desc->ops ||
5519 !chip->manufacturer.desc->ops->init)
5520 return 0;
5521
5522 return chip->manufacturer.desc->ops->init(chip);
5523}
5524
5525
5526
5527
5528
5529
5530
5531static void nand_manufacturer_cleanup(struct nand_chip *chip)
5532{
5533
5534 if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
5535 chip->manufacturer.desc->ops->cleanup)
5536 chip->manufacturer.desc->ops->cleanup(chip);
5537}
5538
5539
5540
5541
5542static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
5543{
5544 const struct nand_manufacturer *manufacturer;
5545 struct mtd_info *mtd = nand_to_mtd(chip);
5546 int busw, ret;
5547 u8 *id_data = chip->id.data;
5548 u8 maf_id, dev_id;
5549
5550
5551
5552
5553
5554 ret = nand_reset(chip, 0);
5555 if (ret)
5556 return ret;
5557
5558
5559 chip->select_chip(mtd, 0);
5560
5561
5562 ret = nand_readid_op(chip, 0, id_data, 2);
5563 if (ret)
5564 return ret;
5565
5566
5567 maf_id = id_data[0];
5568 dev_id = id_data[1];
5569
5570
5571
5572
5573
5574
5575
5576
5577
5578 ret = nand_readid_op(chip, 0, id_data, sizeof(chip->id.data));
5579 if (ret)
5580 return ret;
5581
5582 if (id_data[0] != maf_id || id_data[1] != dev_id) {
5583 pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",
5584 maf_id, dev_id, id_data[0], id_data[1]);
5585 return -ENODEV;
5586 }
5587
5588 chip->id.len = nand_id_len(id_data, ARRAY_SIZE(chip->id.data));
5589
5590
5591 manufacturer = nand_get_manufacturer(maf_id);
5592 chip->manufacturer.desc = manufacturer;
5593
5594 if (!type)
5595 type = nand_flash_ids;
5596
5597
5598
5599
5600
5601
5602
5603
5604 busw = chip->options & NAND_BUSWIDTH_16;
5605
5606
5607
5608
5609
5610 chip->options &= ~NAND_BUSWIDTH_16;
5611
5612 for (; type->name != NULL; type++) {
5613 if (is_full_id_nand(type)) {
5614 if (find_full_id_nand(chip, type))
5615 goto ident_done;
5616 } else if (dev_id == type->dev_id) {
5617 break;
5618 }
5619 }
5620
5621 chip->parameters.onfi.version = 0;
5622 if (!type->name || !type->pagesize) {
5623
5624 ret = nand_flash_detect_onfi(chip);
5625 if (ret < 0)
5626 return ret;
5627 else if (ret)
5628 goto ident_done;
5629
5630
5631 ret = nand_flash_detect_jedec(chip);
5632 if (ret < 0)
5633 return ret;
5634 else if (ret)
5635 goto ident_done;
5636 }
5637
5638 if (!type->name)
5639 return -ENODEV;
5640
5641 strncpy(chip->parameters.model, type->name,
5642 sizeof(chip->parameters.model) - 1);
5643
5644 chip->chipsize = (uint64_t)type->chipsize << 20;
5645
5646 if (!type->pagesize)
5647 nand_manufacturer_detect(chip);
5648 else
5649 nand_decode_id(chip, type);
5650
5651
5652 chip->options |= type->options;
5653
5654ident_done:
5655 if (!mtd->name)
5656 mtd->name = chip->parameters.model;
5657
5658 if (chip->options & NAND_BUSWIDTH_AUTO) {
5659 WARN_ON(busw & NAND_BUSWIDTH_16);
5660 nand_set_defaults(chip);
5661 } else if (busw != (chip->options & NAND_BUSWIDTH_16)) {
5662
5663
5664
5665
5666 pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
5667 maf_id, dev_id);
5668 pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
5669 mtd->name);
5670 pr_warn("bus width %d instead of %d bits\n", busw ? 16 : 8,
5671 (chip->options & NAND_BUSWIDTH_16) ? 16 : 8);
5672 return -EINVAL;
5673 }
5674
5675 nand_decode_bbm_options(chip);
5676
5677
5678 chip->page_shift = ffs(mtd->writesize) - 1;
5679
5680 chip->pagemask = (chip->chipsize >> chip->page_shift) - 1;
5681
5682 chip->bbt_erase_shift = chip->phys_erase_shift =
5683 ffs(mtd->erasesize) - 1;
5684 if (chip->chipsize & 0xffffffff)
5685 chip->chip_shift = ffs((unsigned)chip->chipsize) - 1;
5686 else {
5687 chip->chip_shift = ffs((unsigned)(chip->chipsize >> 32));
5688 chip->chip_shift += 32 - 1;
5689 }
5690
5691 if (chip->chip_shift - chip->page_shift > 16)
5692 chip->options |= NAND_ROW_ADDR_3;
5693
5694 chip->badblockbits = 8;
5695 chip->erase = single_erase;
5696
5697
5698 if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
5699 chip->cmdfunc = nand_command_lp;
5700
5701 pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
5702 maf_id, dev_id);
5703 pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
5704 chip->parameters.model);
5705 pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n",
5706 (int)(chip->chipsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
5707 mtd->erasesize >> 10, mtd->writesize, mtd->oobsize);
5708 return 0;
5709}
5710
5711static const char * const nand_ecc_modes[] = {
5712 [NAND_ECC_NONE] = "none",
5713 [NAND_ECC_SOFT] = "soft",
5714 [NAND_ECC_HW] = "hw",
5715 [NAND_ECC_HW_SYNDROME] = "hw_syndrome",
5716 [NAND_ECC_HW_OOB_FIRST] = "hw_oob_first",
5717 [NAND_ECC_ON_DIE] = "on-die",
5718};
5719
5720static int of_get_nand_ecc_mode(struct device_node *np)
5721{
5722 const char *pm;
5723 int err, i;
5724
5725 err = of_property_read_string(np, "nand-ecc-mode", &pm);
5726 if (err < 0)
5727 return err;
5728
5729 for (i = 0; i < ARRAY_SIZE(nand_ecc_modes); i++)
5730 if (!strcasecmp(pm, nand_ecc_modes[i]))
5731 return i;
5732
5733
5734
5735
5736
5737
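 /*
 * Backward compatibility: "soft_bch" used to be a mode of its own; it now
 * maps to NAND_ECC_SOFT with the BCH algorithm selected separately.
 */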
5738 if (!strcasecmp(pm, "soft_bch"))
5739 return NAND_ECC_SOFT;
5740
5741 return -ENODEV;
5742}
5743
5744static const char * const nand_ecc_algos[] = {
5745 [NAND_ECC_HAMMING] = "hamming",
5746 [NAND_ECC_BCH] = "bch",
5747};
5748
5749static int of_get_nand_ecc_algo(struct device_node *np)
5750{
5751 const char *pm;
5752 int err, i;
5753
5754 err = of_property_read_string(np, "nand-ecc-algo", &pm);
5755 if (!err) {
5756 for (i = NAND_ECC_HAMMING; i < ARRAY_SIZE(nand_ecc_algos); i++)
5757 if (!strcasecmp(pm, nand_ecc_algos[i]))
5758 return i;
5759 return -ENODEV;
5760 }
5761
5762
5763
5764
5765
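 /*
 * The "nand-ecc-algo" property is not mandatory; fall back to deducing the
 * algorithm from the legacy "nand-ecc-mode" values.
 */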
5766 err = of_property_read_string(np, "nand-ecc-mode", &pm);
5767 if (err < 0)
5768 return err;
5769
5770 if (!strcasecmp(pm, "soft"))
5771 return NAND_ECC_HAMMING;
5772 else if (!strcasecmp(pm, "soft_bch"))
5773 return NAND_ECC_BCH;
5774
5775 return -ENODEV;
5776}
5777
5778static int of_get_nand_ecc_step_size(struct device_node *np)
5779{
5780 int ret;
5781 u32 val;
5782
5783 ret = of_property_read_u32(np, "nand-ecc-step-size", &val);
5784 return ret ? ret : val;
5785}
5786
5787static int of_get_nand_ecc_strength(struct device_node *np)
5788{
5789 int ret;
5790 u32 val;
5791
5792 ret = of_property_read_u32(np, "nand-ecc-strength", &val);
5793 return ret ? ret : val;
5794}
5795
5796static int of_get_nand_bus_width(struct device_node *np)
5797{
5798 u32 val;
5799
5800 if (of_property_read_u32(np, "nand-bus-width", &val))
5801 return 8;
5802
5803 switch (val) {
5804 case 8:
5805 case 16:
5806 return val;
5807 default:
5808 return -EIO;
5809 }
5810}
5811
5812static bool of_get_nand_on_flash_bbt(struct device_node *np)
5813{
5814 return of_property_read_bool(np, "nand-on-flash-bbt");
5815}
5816
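/* Parse the generic NAND properties from the device-tree node attached to the chip. */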
5817static int nand_dt_init(struct nand_chip *chip)
5818{
5819 struct device_node *dn = nand_get_flash_node(chip);
5820 int ecc_mode, ecc_algo, ecc_strength, ecc_step;
5821
5822 if (!dn)
5823 return 0;
5824
5825 if (of_get_nand_bus_width(dn) == 16)
5826 chip->options |= NAND_BUSWIDTH_16;
5827
5828 if (of_get_nand_on_flash_bbt(dn))
5829 chip->bbt_options |= NAND_BBT_USE_FLASH;
5830
5831 ecc_mode = of_get_nand_ecc_mode(dn);
5832 ecc_algo = of_get_nand_ecc_algo(dn);
5833 ecc_strength = of_get_nand_ecc_strength(dn);
5834 ecc_step = of_get_nand_ecc_step_size(dn);
5835
5836 if (ecc_mode >= 0)
5837 chip->ecc.mode = ecc_mode;
5838
5839 if (ecc_algo >= 0)
5840 chip->ecc.algo = ecc_algo;
5841
5842 if (ecc_strength >= 0)
5843 chip->ecc.strength = ecc_strength;
5844
5845 if (ecc_step > 0)
5846 chip->ecc.size = ecc_step;
5847
5848 if (of_property_read_bool(dn, "nand-ecc-maximize"))
5849 chip->ecc.options |= NAND_ECC_MAXIMIZE;
5850
5851 return 0;
5852}
5853
5854
5855
5856
5857
5858
5859
5860
5861
5862
5863
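/**
 * nand_scan_ident - [NAND Interface] Scan for the NAND device
 * @mtd: MTD device structure
 * @maxchips: number of chips to scan for
 * @table: alternative NAND ID table
 *
 * This is the first phase of the normal nand_scan() function. It reads the
 * flash ID and sets up MTD fields accordingly. Additional chips are detected
 * by comparing their ID bytes against those of the first chip.
 */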
5864int nand_scan_ident(struct mtd_info *mtd, int maxchips,
5865 struct nand_flash_dev *table)
5866{
5867 int i, nand_maf_id, nand_dev_id;
5868 struct nand_chip *chip = mtd_to_nand(mtd);
5869 int ret;
5870
5871
5872 onfi_fill_data_interface(chip, NAND_SDR_IFACE, 0);
5873
5874 ret = nand_dt_init(chip);
5875 if (ret)
5876 return ret;
5877
5878 if (!mtd->name && mtd->dev.parent)
5879 mtd->name = dev_name(mtd->dev.parent);
5880
5881
5882
5883
5884
5885 if (!chip->exec_op) {
5886
5887
5888
5889
5890 if ((!chip->cmdfunc || !chip->select_chip) && !chip->cmd_ctrl) {
5891 pr_err("->cmd_ctrl() should be provided\n");
5892 return -EINVAL;
5893 }
5894 }
5895
5896
5897 nand_set_defaults(chip);
5898
5899
5900 ret = nand_detect(chip, table);
5901 if (ret) {
5902 if (!(chip->options & NAND_SCAN_SILENT_NODEV))
5903 pr_warn("No NAND device found\n");
5904 chip->select_chip(mtd, -1);
5905 return ret;
5906 }
5907
5908 nand_maf_id = chip->id.data[0];
5909 nand_dev_id = chip->id.data[1];
5910
5911 chip->select_chip(mtd, -1);
5912
5913
5914 for (i = 1; i < maxchips; i++) {
5915 u8 id[2];
5916
5917
5918 nand_reset(chip, i);
5919
5920 chip->select_chip(mtd, i);
5921
5922 nand_readid_op(chip, 0, id, sizeof(id));
5923
5924 if (nand_maf_id != id[0] || nand_dev_id != id[1]) {
5925 chip->select_chip(mtd, -1);
5926 break;
5927 }
5928 chip->select_chip(mtd, -1);
5929 }
5930 if (i > 1)
5931 pr_info("%d chips detected\n", i);
5932
5933
5934 chip->numchips = i;
5935 mtd->size = i * chip->chipsize;
5936
5937 return 0;
5938}
5939EXPORT_SYMBOL(nand_scan_ident);
5940
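/* Wire up the software ECC helpers (Hamming or BCH) for this chip. */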
5941static int nand_set_ecc_soft_ops(struct mtd_info *mtd)
5942{
5943 struct nand_chip *chip = mtd_to_nand(mtd);
5944 struct nand_ecc_ctrl *ecc = &chip->ecc;
5945
5946 if (WARN_ON(ecc->mode != NAND_ECC_SOFT))
5947 return -EINVAL;
5948
5949 switch (ecc->algo) {
5950 case NAND_ECC_HAMMING:
5951 ecc->calculate = nand_calculate_ecc;
5952 ecc->correct = nand_correct_data;
5953 ecc->read_page = nand_read_page_swecc;
5954 ecc->read_subpage = nand_read_subpage;
5955 ecc->write_page = nand_write_page_swecc;
5956 ecc->read_page_raw = nand_read_page_raw;
5957 ecc->write_page_raw = nand_write_page_raw;
5958 ecc->read_oob = nand_read_oob_std;
5959 ecc->write_oob = nand_write_oob_std;
5960 if (!ecc->size)
5961 ecc->size = 256;
5962 ecc->bytes = 3;
5963 ecc->strength = 1;
5964 return 0;
5965 case NAND_ECC_BCH:
5966 if (!mtd_nand_has_bch()) {
5967 WARN(1, "CONFIG_MTD_NAND_ECC_BCH not enabled\n");
5968 return -EINVAL;
5969 }
5970 ecc->calculate = nand_bch_calculate_ecc;
5971 ecc->correct = nand_bch_correct_data;
5972 ecc->read_page = nand_read_page_swecc;
5973 ecc->read_subpage = nand_read_subpage;
5974 ecc->write_page = nand_write_page_swecc;
5975 ecc->read_page_raw = nand_read_page_raw;
5976 ecc->write_page_raw = nand_write_page_raw;
5977 ecc->read_oob = nand_read_oob_std;
5978 ecc->write_oob = nand_write_oob_std;
5979
5980
5981
5982
5983
5984
5985 if (!ecc->size && (mtd->oobsize >= 64)) {
5986 ecc->size = 512;
5987 ecc->strength = 4;
5988 }
5989
5990
5991
5992
5993
5994 if (!mtd->ooblayout) {
5995
5996 if (mtd->oobsize < 64) {
5997 WARN(1, "OOB layout is required when using software BCH on small pages\n");
5998 return -EINVAL;
5999 }
6000
6001 mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
6002
6003 }
6004
6005
6006
6007
6008
6009
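 /*
 * With NAND_ECC_MAXIMIZE and the default large-page layout, use 1 KiB
 * steps and the strongest BCH setting that still fits in the spare area.
 */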
6010 if (mtd->ooblayout == &nand_ooblayout_lp_ops &&
6011 ecc->options & NAND_ECC_MAXIMIZE) {
6012 int steps, bytes;
6013
6014
6015 ecc->size = 1024;
6016 steps = mtd->writesize / ecc->size;
6017
6018
6019 bytes = (mtd->oobsize - 2) / steps;
6020 ecc->strength = bytes * 8 / fls(8 * ecc->size);
6021 }
6022
6023
6024 ecc->bytes = 0;
6025 ecc->priv = nand_bch_init(mtd);
6026 if (!ecc->priv) {
6027 WARN(1, "BCH ECC initialization failed!\n");
6028 return -EINVAL;
6029 }
6030 return 0;
6031 default:
6032 WARN(1, "Unsupported ECC algorithm!\n");
6033 return -EINVAL;
6034 }
6035}
6036
6037
6038
6039
6040
6041
6042
6043
6044
6045
6046
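/**
 * nand_check_ecc_caps - check the sanity of preset ECC settings
 * @chip: nand chip info structure
 * @caps: ECC engine caps info structure
 * @oobavail: OOB size that the ECC engine can use
 *
 * When ECC step size and strength have already been chosen (for example from
 * the device tree), check whether the controller supports the combination and
 * whether it fits into the available OOB area. On success, chip->ecc.bytes is
 * set accordingly.
 */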
6047int nand_check_ecc_caps(struct nand_chip *chip,
6048 const struct nand_ecc_caps *caps, int oobavail)
6049{
6050 struct mtd_info *mtd = nand_to_mtd(chip);
6051 const struct nand_ecc_step_info *stepinfo;
6052 int preset_step = chip->ecc.size;
6053 int preset_strength = chip->ecc.strength;
6054 int nsteps, ecc_bytes;
6055 int i, j;
6056
6057 if (WARN_ON(oobavail < 0))
6058 return -EINVAL;
6059
6060 if (!preset_step || !preset_strength)
6061 return -ENODATA;
6062
6063 nsteps = mtd->writesize / preset_step;
6064
6065 for (i = 0; i < caps->nstepinfos; i++) {
6066 stepinfo = &caps->stepinfos[i];
6067
6068 if (stepinfo->stepsize != preset_step)
6069 continue;
6070
6071 for (j = 0; j < stepinfo->nstrengths; j++) {
6072 if (stepinfo->strengths[j] != preset_strength)
6073 continue;
6074
6075 ecc_bytes = caps->calc_ecc_bytes(preset_step,
6076 preset_strength);
6077 if (WARN_ON_ONCE(ecc_bytes < 0))
6078 return ecc_bytes;
6079
6080 if (ecc_bytes * nsteps > oobavail) {
6081 pr_err("ECC (step, strength) = (%d, %d) does not fit in OOB\n",
6082 preset_step, preset_strength);
6083 return -ENOSPC;
6084 }
6085
6086 chip->ecc.bytes = ecc_bytes;
6087
6088 return 0;
6089 }
6090 }
6091
6092 pr_err("ECC (step, strength) = (%d, %d) not supported on this controller\n",
6093 preset_step, preset_strength);
6094
6095 return -ENOTSUPP;
6096}
6097EXPORT_SYMBOL_GPL(nand_check_ecc_caps);
6098
6099
6100
6101
6102
6103
6104
6105
6106
6107
6108
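/**
 * nand_match_ecc_req - meet the chip's ECC requirement with the least ECC bytes
 * @chip: nand chip info structure
 * @caps: ECC engine caps info structure
 * @oobavail: OOB size that the ECC engine can use
 *
 * If the chip's ECC requirement is known (ecc_step_ds / ecc_strength_ds),
 * choose the controller configuration that meets it while consuming the
 * fewest ECC bytes, and store it in chip->ecc.{size,strength,bytes}.
 */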
6109int nand_match_ecc_req(struct nand_chip *chip,
6110 const struct nand_ecc_caps *caps, int oobavail)
6111{
6112 struct mtd_info *mtd = nand_to_mtd(chip);
6113 const struct nand_ecc_step_info *stepinfo;
6114 int req_step = chip->ecc_step_ds;
6115 int req_strength = chip->ecc_strength_ds;
6116 int req_corr, step_size, strength, nsteps, ecc_bytes, ecc_bytes_total;
6117 int best_step, best_strength, best_ecc_bytes;
6118 int best_ecc_bytes_total = INT_MAX;
6119 int i, j;
6120
6121 if (WARN_ON(oobavail < 0))
6122 return -EINVAL;
6123
6124
6125 if (!req_step || !req_strength)
6126 return -ENOTSUPP;
6127
6128
6129 req_corr = mtd->writesize / req_step * req_strength;
6130
6131 for (i = 0; i < caps->nstepinfos; i++) {
6132 stepinfo = &caps->stepinfos[i];
6133 step_size = stepinfo->stepsize;
6134
6135 for (j = 0; j < stepinfo->nstrengths; j++) {
6136 strength = stepinfo->strengths[j];
6137
6138
6139
6140
6141
6142
6143 if (step_size < req_step && strength < req_strength)
6144 continue;
6145
6146 if (mtd->writesize % step_size)
6147 continue;
6148
6149 nsteps = mtd->writesize / step_size;
6150
6151 ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
6152 if (WARN_ON_ONCE(ecc_bytes < 0))
6153 continue;
6154 ecc_bytes_total = ecc_bytes * nsteps;
6155
6156 if (ecc_bytes_total > oobavail ||
6157 strength * nsteps < req_corr)
6158 continue;
6159
6160
6161
6162
6163
6164 if (ecc_bytes_total < best_ecc_bytes_total) {
6165 best_ecc_bytes_total = ecc_bytes_total;
6166 best_step = step_size;
6167 best_strength = strength;
6168 best_ecc_bytes = ecc_bytes;
6169 }
6170 }
6171 }
6172
6173 if (best_ecc_bytes_total == INT_MAX)
6174 return -ENOTSUPP;
6175
6176 chip->ecc.size = best_step;
6177 chip->ecc.strength = best_strength;
6178 chip->ecc.bytes = best_ecc_bytes;
6179
6180 return 0;
6181}
6182EXPORT_SYMBOL_GPL(nand_match_ecc_req);
6183
6184
6185
6186
6187
6188
6189
6190
6191
6192
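/**
 * nand_maximize_ecc - choose the max ECC strength available
 * @chip: nand chip info structure
 * @caps: ECC engine caps info structure
 * @oobavail: OOB size that the ECC engine can use
 *
 * Choose the configuration with the highest total correctability per page
 * that still fits in the available OOB area, honouring a preset step size if
 * chip->ecc.size was already set.
 */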
6193int nand_maximize_ecc(struct nand_chip *chip,
6194 const struct nand_ecc_caps *caps, int oobavail)
6195{
6196 struct mtd_info *mtd = nand_to_mtd(chip);
6197 const struct nand_ecc_step_info *stepinfo;
6198 int step_size, strength, nsteps, ecc_bytes, corr;
6199 int best_corr = 0;
6200 int best_step = 0;
6201 int best_strength, best_ecc_bytes;
6202 int i, j;
6203
6204 if (WARN_ON(oobavail < 0))
6205 return -EINVAL;
6206
6207 for (i = 0; i < caps->nstepinfos; i++) {
6208 stepinfo = &caps->stepinfos[i];
6209 step_size = stepinfo->stepsize;
6210
6211
6212 if (chip->ecc.size && step_size != chip->ecc.size)
6213 continue;
6214
6215 for (j = 0; j < stepinfo->nstrengths; j++) {
6216 strength = stepinfo->strengths[j];
6217
6218 if (mtd->writesize % step_size)
6219 continue;
6220
6221 nsteps = mtd->writesize / step_size;
6222
6223 ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
6224 if (WARN_ON_ONCE(ecc_bytes < 0))
6225 continue;
6226
6227 if (ecc_bytes * nsteps > oobavail)
6228 continue;
6229
6230 corr = strength * nsteps;
6231
6232
6233
6234
6235
6236 if (corr > best_corr ||
6237 (corr == best_corr && step_size > best_step)) {
6238 best_corr = corr;
6239 best_step = step_size;
6240 best_strength = strength;
6241 best_ecc_bytes = ecc_bytes;
6242 }
6243 }
6244 }
6245
6246 if (!best_corr)
6247 return -ENOTSUPP;
6248
6249 chip->ecc.size = best_step;
6250 chip->ecc.strength = best_strength;
6251 chip->ecc.bytes = best_ecc_bytes;
6252
6253 return 0;
6254}
6255EXPORT_SYMBOL_GPL(nand_maximize_ecc);
6256
6257
6258
6259
6260
6261
6262
6263
6264
6265
6266
6267
6268
6269
6270
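/*
 * Check whether the effective correction strength per page is at least as
 * high as the chip's datasheet requirement. Returns true when the
 * requirement (or the configured step size) is unknown.
 */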
6271static bool nand_ecc_strength_good(struct mtd_info *mtd)
6272{
6273 struct nand_chip *chip = mtd_to_nand(mtd);
6274 struct nand_ecc_ctrl *ecc = &chip->ecc;
6275 int corr, ds_corr;
6276
6277 if (ecc->size == 0 || chip->ecc_step_ds == 0)
6278
6279 return true;
6280
6281
6282
6283
6284
6285 corr = (mtd->writesize * ecc->strength) / ecc->size;
6286 ds_corr = (mtd->writesize * chip->ecc_strength_ds) / chip->ecc_step_ds;
6287
6288 return corr >= ds_corr && ecc->strength >= chip->ecc_strength_ds;
6289}
6290
6291
6292
6293
6294
6295
6296
6297
6298
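/**
 * nand_scan_tail - [NAND Interface] Scan for the NAND device
 * @mtd: MTD device structure
 *
 * This is the second phase of the normal nand_scan() function. It fills out
 * all the uninitialized function pointers with the defaults and scans for a
 * bad block table if appropriate.
 */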
6299int nand_scan_tail(struct mtd_info *mtd)
6300{
6301 struct nand_chip *chip = mtd_to_nand(mtd);
6302 struct nand_ecc_ctrl *ecc = &chip->ecc;
6303 int ret, i;
6304
6305
6306 if (WARN_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
6307 !(chip->bbt_options & NAND_BBT_USE_FLASH))) {
6308 return -EINVAL;
6309 }
6310
6311 chip->data_buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
6312 if (!chip->data_buf)
6313 return -ENOMEM;
6314
6315
6316
6317
6318
6319
6320
6321 chip->select_chip(mtd, 0);
6322 ret = nand_manufacturer_init(chip);
6323 chip->select_chip(mtd, -1);
6324 if (ret)
6325 goto err_free_buf;
6326
6327
6328 chip->oob_poi = chip->data_buf + mtd->writesize;
6329
6330
6331
6332
6333 if (!mtd->ooblayout &&
6334 !(ecc->mode == NAND_ECC_SOFT && ecc->algo == NAND_ECC_BCH)) {
6335 switch (mtd->oobsize) {
6336 case 8:
6337 case 16:
6338 mtd_set_ooblayout(mtd, &nand_ooblayout_sp_ops);
6339 break;
6340 case 64:
6341 case 128:
6342 mtd_set_ooblayout(mtd, &nand_ooblayout_lp_hamming_ops);
6343 break;
6344 default:
6345
6346
6347
6348
6349
6350
6351
6352 if (ecc->mode == NAND_ECC_NONE) {
6353 mtd_set_ooblayout(mtd,
6354 &nand_ooblayout_lp_ops);
6355 break;
6356 }
6357
6358 WARN(1, "No oob scheme defined for oobsize %d\n",
6359 mtd->oobsize);
6360 ret = -EINVAL;
6361 goto err_nand_manuf_cleanup;
6362 }
6363 }
6364
6365
6366
6367
6368
6369
6370 switch (ecc->mode) {
6371 case NAND_ECC_HW_OOB_FIRST:
6372
6373 if (!ecc->calculate || !ecc->correct || !ecc->hwctl) {
6374 WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
6375 ret = -EINVAL;
6376 goto err_nand_manuf_cleanup;
6377 }
6378 if (!ecc->read_page)
6379 ecc->read_page = nand_read_page_hwecc_oob_first;
6380
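 /* fall through to the common hardware-ECC defaults */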
6381 case NAND_ECC_HW:
6382
6383 if (!ecc->read_page)
6384 ecc->read_page = nand_read_page_hwecc;
6385 if (!ecc->write_page)
6386 ecc->write_page = nand_write_page_hwecc;
6387 if (!ecc->read_page_raw)
6388 ecc->read_page_raw = nand_read_page_raw;
6389 if (!ecc->write_page_raw)
6390 ecc->write_page_raw = nand_write_page_raw;
6391 if (!ecc->read_oob)
6392 ecc->read_oob = nand_read_oob_std;
6393 if (!ecc->write_oob)
6394 ecc->write_oob = nand_write_oob_std;
6395 if (!ecc->read_subpage)
6396 ecc->read_subpage = nand_read_subpage;
6397 if (!ecc->write_subpage && ecc->hwctl && ecc->calculate)
6398 ecc->write_subpage = nand_write_subpage_hwecc;
6399
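 /* fall through */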
6400 case NAND_ECC_HW_SYNDROME:
6401 if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) &&
6402 (!ecc->read_page ||
6403 ecc->read_page == nand_read_page_hwecc ||
6404 !ecc->write_page ||
6405 ecc->write_page == nand_write_page_hwecc)) {
6406 WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
6407 ret = -EINVAL;
6408 goto err_nand_manuf_cleanup;
6409 }
6410
6411 if (!ecc->read_page)
6412 ecc->read_page = nand_read_page_syndrome;
6413 if (!ecc->write_page)
6414 ecc->write_page = nand_write_page_syndrome;
6415 if (!ecc->read_page_raw)
6416 ecc->read_page_raw = nand_read_page_raw_syndrome;
6417 if (!ecc->write_page_raw)
6418 ecc->write_page_raw = nand_write_page_raw_syndrome;
6419 if (!ecc->read_oob)
6420 ecc->read_oob = nand_read_oob_syndrome;
6421 if (!ecc->write_oob)
6422 ecc->write_oob = nand_write_oob_syndrome;
6423
6424 if (mtd->writesize >= ecc->size) {
6425 if (!ecc->strength) {
6426 WARN(1, "Driver must set ecc.strength when using hardware ECC\n");
6427 ret = -EINVAL;
6428 goto err_nand_manuf_cleanup;
6429 }
6430 break;
6431 }
6432 pr_warn("%d byte HW ECC not possible on %d byte page size, fallback to SW ECC\n",
6433 ecc->size, mtd->writesize);
6434 ecc->mode = NAND_ECC_SOFT;
6435 ecc->algo = NAND_ECC_HAMMING;
6436
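 /* fall through to the software ECC setup */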
6437 case NAND_ECC_SOFT:
6438 ret = nand_set_ecc_soft_ops(mtd);
6439 if (ret) {
6440 ret = -EINVAL;
6441 goto err_nand_manuf_cleanup;
6442 }
6443 break;
6444
6445 case NAND_ECC_ON_DIE:
6446 if (!ecc->read_page || !ecc->write_page) {
6447 WARN(1, "No ECC functions supplied; on-die ECC not possible\n");
6448 ret = -EINVAL;
6449 goto err_nand_manuf_cleanup;
6450 }
6451 if (!ecc->read_oob)
6452 ecc->read_oob = nand_read_oob_std;
6453 if (!ecc->write_oob)
6454 ecc->write_oob = nand_write_oob_std;
6455 break;
6456
6457 case NAND_ECC_NONE:
6458 pr_warn("NAND_ECC_NONE selected by board driver. This is not recommended!\n");
6459 ecc->read_page = nand_read_page_raw;
6460 ecc->write_page = nand_write_page_raw;
6461 ecc->read_oob = nand_read_oob_std;
6462 ecc->read_page_raw = nand_read_page_raw;
6463 ecc->write_page_raw = nand_write_page_raw;
6464 ecc->write_oob = nand_write_oob_std;
6465 ecc->size = mtd->writesize;
6466 ecc->bytes = 0;
6467 ecc->strength = 0;
6468 break;
6469
6470 default:
6471 WARN(1, "Invalid NAND_ECC_MODE %d\n", ecc->mode);
6472 ret = -EINVAL;
6473 goto err_nand_manuf_cleanup;
6474 }
6475
6476 if (ecc->correct || ecc->calculate) {
6477 ecc->calc_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
6478 ecc->code_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
6479 if (!ecc->calc_buf || !ecc->code_buf) {
6480 ret = -ENOMEM;
6481 goto err_nand_manuf_cleanup;
6482 }
6483 }
6484
6485
6486 if (!ecc->read_oob_raw)
6487 ecc->read_oob_raw = ecc->read_oob;
6488 if (!ecc->write_oob_raw)
6489 ecc->write_oob_raw = ecc->write_oob;
6490
6491
6492 mtd->ecc_strength = ecc->strength;
6493 mtd->ecc_step_size = ecc->size;
6494
6495
6496
6497
6498
6499 ecc->steps = mtd->writesize / ecc->size;
6500 if (ecc->steps * ecc->size != mtd->writesize) {
6501 WARN(1, "Invalid ECC parameters\n");
6502 ret = -EINVAL;
6503 goto err_nand_manuf_cleanup;
6504 }
6505 ecc->total = ecc->steps * ecc->bytes;
6506 if (ecc->total > mtd->oobsize) {
6507 WARN(1, "Total number of ECC bytes exceeded oobsize\n");
6508 ret = -EINVAL;
6509 goto err_nand_manuf_cleanup;
6510 }
6511
6512
6513
6514
6515
6516 ret = mtd_ooblayout_count_freebytes(mtd);
6517 if (ret < 0)
6518 ret = 0;
6519
6520 mtd->oobavail = ret;
6521
6522
6523 if (!nand_ecc_strength_good(mtd))
6524 pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
6525 mtd->name);
6526
6527
6528 if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) {
6529 switch (ecc->steps) {
6530 case 2:
6531 mtd->subpage_sft = 1;
6532 break;
6533 case 4:
6534 case 8:
6535 case 16:
6536 mtd->subpage_sft = 2;
6537 break;
6538 }
6539 }
6540 chip->subpagesize = mtd->writesize >> mtd->subpage_sft;
6541
6542
6543 chip->state = FL_READY;
6544
6545
6546 chip->pagebuf = -1;
6547
6548
6549 switch (ecc->mode) {
6550 case NAND_ECC_SOFT:
6551 if (chip->page_shift > 9)
6552 chip->options |= NAND_SUBPAGE_READ;
6553 break;
6554
6555 default:
6556 break;
6557 }
6558
6559
6560 mtd->type = nand_is_slc(chip) ? MTD_NANDFLASH : MTD_MLCNANDFLASH;
6561 mtd->flags = (chip->options & NAND_ROM) ? MTD_CAP_ROM :
6562 MTD_CAP_NANDFLASH;
6563 mtd->_erase = nand_erase;
6564 mtd->_point = NULL;
6565 mtd->_unpoint = NULL;
6566 mtd->_panic_write = panic_nand_write;
6567 mtd->_read_oob = nand_read_oob;
6568 mtd->_write_oob = nand_write_oob;
6569 mtd->_sync = nand_sync;
6570 mtd->_lock = NULL;
6571 mtd->_unlock = NULL;
6572 mtd->_suspend = nand_suspend;
6573 mtd->_resume = nand_resume;
6574 mtd->_reboot = nand_shutdown;
6575 mtd->_block_isreserved = nand_block_isreserved;
6576 mtd->_block_isbad = nand_block_isbad;
6577 mtd->_block_markbad = nand_block_markbad;
6578 mtd->_max_bad_blocks = nand_max_bad_blocks;
6579 mtd->writebufsize = mtd->writesize;
6580
6581
6582
6583
6584
6585
6586 if (!mtd->bitflip_threshold)
6587 mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);
6588
6589
6590 ret = nand_init_data_interface(chip);
6591 if (ret)
6592 goto err_nand_manuf_cleanup;
6593
6594
6595 for (i = 0; i < chip->numchips; i++) {
6596 ret = nand_setup_data_interface(chip, i);
6597 if (ret)
6598 goto err_nand_manuf_cleanup;
6599 }
6600
6601
6602 if (chip->options & NAND_SKIP_BBTSCAN)
6603 return 0;
6604
6605
6606 ret = chip->scan_bbt(mtd);
6607 if (ret)
6608 goto err_nand_manuf_cleanup;
6609
6610 return 0;
6611
6612
6613err_nand_manuf_cleanup:
6614 nand_manufacturer_cleanup(chip);
6615
6616err_free_buf:
6617 kfree(chip->data_buf);
6618 kfree(ecc->code_buf);
6619 kfree(ecc->calc_buf);
6620
6621 return ret;
6622}
6623EXPORT_SYMBOL(nand_scan_tail);
6624
6625
6626
6627
6628
6629
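/*
 * When the NAND core itself is built as a module, any caller is module code
 * by definition; otherwise check the caller's return address against module
 * text.
 */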
6630#ifdef MODULE
6631#define caller_is_module() (1)
6632#else
6633#define caller_is_module() \
6634 is_module_text_address((unsigned long)__builtin_return_address(0))
6635#endif
6636
6637
6638
6639
6640
6641
6642
6643
6644
6645
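/**
 * nand_scan - [NAND Interface] Scan for the NAND device
 * @mtd: MTD device structure
 * @maxchips: number of chips to scan for
 *
 * Runs nand_scan_ident() followed by nand_scan_tail(). A controller driver
 * typically provides ->cmd_ctrl() (or ->cmdfunc() and ->select_chip())
 * before calling this, and registers the result with mtd_device_register()
 * afterwards.
 */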
6646int nand_scan(struct mtd_info *mtd, int maxchips)
6647{
6648 int ret;
6649
6650 ret = nand_scan_ident(mtd, maxchips, NULL);
6651 if (!ret)
6652 ret = nand_scan_tail(mtd);
6653 return ret;
6654}
6655EXPORT_SYMBOL(nand_scan);
6656
6657
6658
6659
6660
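/**
 * nand_cleanup - [NAND Interface] Free resources held by the NAND device
 * @chip: NAND chip object
 */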
6661void nand_cleanup(struct nand_chip *chip)
6662{
6663 if (chip->ecc.mode == NAND_ECC_SOFT &&
6664 chip->ecc.algo == NAND_ECC_BCH)
6665 nand_bch_free((struct nand_bch_control *)chip->ecc.priv);
6666
6667
6668 kfree(chip->bbt);
6669 kfree(chip->data_buf);
6670 kfree(chip->ecc.code_buf);
6671 kfree(chip->ecc.calc_buf);
6672
6673
6674 if (chip->badblock_pattern && chip->badblock_pattern->options
6675 & NAND_BBT_DYNAMICSTRUCT)
6676 kfree(chip->badblock_pattern);
6677
6678
6679 nand_manufacturer_cleanup(chip);
6680}
6681EXPORT_SYMBOL_GPL(nand_cleanup);
6682
6683
6684
6685
6686
6687
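/**
 * nand_release - [NAND Interface] Unregister the MTD device and free resources
 * @mtd: MTD device structure
 */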
6688void nand_release(struct mtd_info *mtd)
6689{
6690 mtd_device_unregister(mtd);
6691 nand_cleanup(mtd_to_nand(mtd));
6692}
6693EXPORT_SYMBOL_GPL(nand_release);
6694
6695MODULE_LICENSE("GPL");
6696MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>");
6697MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
6698MODULE_DESCRIPTION("Generic NAND flash driver code");
6699