1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
27
28#include <linux/module.h>
29#include <linux/delay.h>
30#include <linux/errno.h>
31#include <linux/err.h>
32#include <linux/sched.h>
33#include <linux/slab.h>
34#include <linux/mm.h>
35#include <linux/types.h>
36#include <linux/mtd/mtd.h>
37#include <linux/mtd/nand_ecc.h>
38#include <linux/mtd/nand_bch.h>
39#include <linux/interrupt.h>
40#include <linux/bitops.h>
41#include <linux/io.h>
42#include <linux/mtd/partitions.h>
43#include <linux/of.h>
44#include <linux/gpio/consumer.h>
45
46#include "internals.h"
47
48
49static int nand_ooblayout_ecc_sp(struct mtd_info *mtd, int section,
50 struct mtd_oob_region *oobregion)
51{
52 struct nand_chip *chip = mtd_to_nand(mtd);
53 struct nand_ecc_ctrl *ecc = &chip->ecc;
54
55 if (section > 1)
56 return -ERANGE;
57
58 if (!section) {
59 oobregion->offset = 0;
60 if (mtd->oobsize == 16)
61 oobregion->length = 4;
62 else
63 oobregion->length = 3;
64 } else {
65 if (mtd->oobsize == 8)
66 return -ERANGE;
67
68 oobregion->offset = 6;
69 oobregion->length = ecc->total - 4;
70 }
71
72 return 0;
73}
74
75static int nand_ooblayout_free_sp(struct mtd_info *mtd, int section,
76 struct mtd_oob_region *oobregion)
77{
78 if (section > 1)
79 return -ERANGE;
80
81 if (mtd->oobsize == 16) {
82 if (section)
83 return -ERANGE;
84
85 oobregion->length = 8;
86 oobregion->offset = 8;
87 } else {
88 oobregion->length = 2;
89 if (!section)
90 oobregion->offset = 3;
91 else
92 oobregion->offset = 6;
93 }
94
95 return 0;
96}
97
/* Default OOB layout operations for small-page (<= 512 byte) NAND chips. */
const struct mtd_ooblayout_ops nand_ooblayout_sp_ops = {
	.ecc = nand_ooblayout_ecc_sp,
	.free = nand_ooblayout_free_sp,
};
EXPORT_SYMBOL_GPL(nand_ooblayout_sp_ops);
103
104static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section,
105 struct mtd_oob_region *oobregion)
106{
107 struct nand_chip *chip = mtd_to_nand(mtd);
108 struct nand_ecc_ctrl *ecc = &chip->ecc;
109
110 if (section || !ecc->total)
111 return -ERANGE;
112
113 oobregion->length = ecc->total;
114 oobregion->offset = mtd->oobsize - oobregion->length;
115
116 return 0;
117}
118
119static int nand_ooblayout_free_lp(struct mtd_info *mtd, int section,
120 struct mtd_oob_region *oobregion)
121{
122 struct nand_chip *chip = mtd_to_nand(mtd);
123 struct nand_ecc_ctrl *ecc = &chip->ecc;
124
125 if (section)
126 return -ERANGE;
127
128 oobregion->length = mtd->oobsize - ecc->total - 2;
129 oobregion->offset = 2;
130
131 return 0;
132}
133
/* Default OOB layout operations for large-page (> 512 byte) NAND chips. */
const struct mtd_ooblayout_ops nand_ooblayout_lp_ops = {
	.ecc = nand_ooblayout_ecc_lp,
	.free = nand_ooblayout_free_lp,
};
EXPORT_SYMBOL_GPL(nand_ooblayout_lp_ops);
139
140
141
142
143
144static int nand_ooblayout_ecc_lp_hamming(struct mtd_info *mtd, int section,
145 struct mtd_oob_region *oobregion)
146{
147 struct nand_chip *chip = mtd_to_nand(mtd);
148 struct nand_ecc_ctrl *ecc = &chip->ecc;
149
150 if (section)
151 return -ERANGE;
152
153 switch (mtd->oobsize) {
154 case 64:
155 oobregion->offset = 40;
156 break;
157 case 128:
158 oobregion->offset = 80;
159 break;
160 default:
161 return -EINVAL;
162 }
163
164 oobregion->length = ecc->total;
165 if (oobregion->offset + oobregion->length > mtd->oobsize)
166 return -ERANGE;
167
168 return 0;
169}
170
171static int nand_ooblayout_free_lp_hamming(struct mtd_info *mtd, int section,
172 struct mtd_oob_region *oobregion)
173{
174 struct nand_chip *chip = mtd_to_nand(mtd);
175 struct nand_ecc_ctrl *ecc = &chip->ecc;
176 int ecc_offset = 0;
177
178 if (section < 0 || section > 1)
179 return -ERANGE;
180
181 switch (mtd->oobsize) {
182 case 64:
183 ecc_offset = 40;
184 break;
185 case 128:
186 ecc_offset = 80;
187 break;
188 default:
189 return -EINVAL;
190 }
191
192 if (section == 0) {
193 oobregion->offset = 2;
194 oobregion->length = ecc_offset - 2;
195 } else {
196 oobregion->offset = ecc_offset + ecc->total;
197 oobregion->length = mtd->oobsize - oobregion->offset;
198 }
199
200 return 0;
201}
202
/* OOB layout operations for large-page chips using 1-bit Hamming ECC. */
static const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = {
	.ecc = nand_ooblayout_ecc_lp_hamming,
	.free = nand_ooblayout_free_lp_hamming,
};
207
208static int check_offs_len(struct nand_chip *chip, loff_t ofs, uint64_t len)
209{
210 int ret = 0;
211
212
213 if (ofs & ((1ULL << chip->phys_erase_shift) - 1)) {
214 pr_debug("%s: unaligned address\n", __func__);
215 ret = -EINVAL;
216 }
217
218
219 if (len & ((1ULL << chip->phys_erase_shift) - 1)) {
220 pr_debug("%s: length not block aligned\n", __func__);
221 ret = -EINVAL;
222 }
223
224 return ret;
225}
226
227
228
229
230
231
232
233
234
235
236void nand_select_target(struct nand_chip *chip, unsigned int cs)
237{
238
239
240
241
242 if (WARN_ON(cs > nanddev_ntargets(&chip->base)))
243 return;
244
245 chip->cur_cs = cs;
246
247 if (chip->legacy.select_chip)
248 chip->legacy.select_chip(chip, cs);
249}
250EXPORT_SYMBOL_GPL(nand_select_target);
251
252
253
254
255
256
257
258
/**
 * nand_deselect_target() - Deselect the currently selected target
 * @chip: NAND chip object
 *
 * Deselect the currently selected NAND target. The result of operations
 * executed on @chip after the target has been deselected is undefined.
 */
void nand_deselect_target(struct nand_chip *chip)
{
	if (chip->legacy.select_chip)
		chip->legacy.select_chip(chip, -1);

	/* -1 means "no target currently selected" */
	chip->cur_cs = -1;
}
EXPORT_SYMBOL_GPL(nand_deselect_target);
267
268
269
270
271
272
273
/**
 * nand_release_device - [GENERIC] release chip
 * @chip: NAND chip object
 *
 * Release the controller and the chip locks taken by nand_get_device().
 * Unlock order is the reverse of the acquisition order in nand_get_device().
 */
static void nand_release_device(struct nand_chip *chip)
{
	/* Release the controller and the chip */
	mutex_unlock(&chip->controller->lock);
	mutex_unlock(&chip->lock);
}
280
281
282
283
284
285
286
287
288
289
290int nand_bbm_get_next_page(struct nand_chip *chip, int page)
291{
292 struct mtd_info *mtd = nand_to_mtd(chip);
293 int last_page = ((mtd->erasesize - mtd->writesize) >>
294 chip->page_shift) & chip->pagemask;
295
296 if (page == 0 && chip->options & NAND_BBM_FIRSTPAGE)
297 return 0;
298 else if (page <= 1 && chip->options & NAND_BBM_SECONDPAGE)
299 return 1;
300 else if (page <= last_page && chip->options & NAND_BBM_LASTPAGE)
301 return last_page;
302
303 return -EINVAL;
304}
305
306
307
308
309
310
311
312
/**
 * nand_block_bad - [DEFAULT] Read bad block marker from the chip
 * @chip: NAND chip object
 * @ofs: offset from device start
 *
 * Check if the block is bad by reading the bad block marker byte in the OOB
 * area of every page that may carry a factory marker.
 */
static int nand_block_bad(struct nand_chip *chip, loff_t ofs)
{
	int first_page, page_offset;
	int res;
	u8 bad;

	first_page = (int)(ofs >> chip->page_shift) & chip->pagemask;
	page_offset = nand_bbm_get_next_page(chip, 0);

	while (page_offset >= 0) {
		res = chip->ecc.read_oob(chip, first_page + page_offset);
		if (res < 0)
			return res;

		bad = chip->oob_poi[chip->badblockpos];

		/*
		 * With badblockbits == 8 any value != 0xFF marks the block
		 * bad; otherwise count the set bits and compare against the
		 * threshold to tolerate bit flips in the marker itself.
		 */
		if (likely(chip->badblockbits == 8))
			res = bad != 0xFF;
		else
			res = hweight8(bad) < chip->badblockbits;
		if (res)
			return res;

		page_offset = nand_bbm_get_next_page(chip, page_offset + 1);
	}

	return 0;
}
341
342static int nand_isbad_bbm(struct nand_chip *chip, loff_t ofs)
343{
344 if (chip->legacy.block_bad)
345 return chip->legacy.block_bad(chip, ofs);
346
347 return nand_block_bad(chip, ofs);
348}
349
350
351
352
353
354
355
356
357
/**
 * nand_get_device - [GENERIC] Get chip for selected access
 * @chip: NAND chip structure
 *
 * Lock the device and its controller for exclusive access. Returns -EBUSY if
 * the chip has been suspended, to avoid issuing commands to a powered-down
 * device.
 */
static int nand_get_device(struct nand_chip *chip)
{
	/* Chip lock first, then the (possibly shared) controller lock */
	mutex_lock(&chip->lock);
	if (chip->suspended) {
		mutex_unlock(&chip->lock);
		return -EBUSY;
	}
	mutex_lock(&chip->controller->lock);

	return 0;
}
369
370
371
372
373
374
375
376
377static int nand_check_wp(struct nand_chip *chip)
378{
379 u8 status;
380 int ret;
381
382
383 if (chip->options & NAND_BROKEN_XD)
384 return 0;
385
386
387 ret = nand_status_op(chip, &status);
388 if (ret)
389 return ret;
390
391 return status & NAND_STATUS_WP ? 0 : 1;
392}
393
394
395
396
397
398
399
400
/**
 * nand_fill_oob - [INTERN] Transfer client buffer to oob
 * @chip: NAND chip object
 * @oob: oob data buffer
 * @len: oob data write length
 * @ops: oob ops structure
 *
 * Copies @len bytes from @oob into chip->oob_poi according to the placement
 * mode in @ops. Returns a pointer just past the consumed part of @oob.
 */
static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, size_t len,
			      struct mtd_oob_ops *ops)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	/*
	 * Initialise to all 0xFF, to avoid the possibility of left-over OOB
	 * data from a previous OOB read leaking into this write.
	 */
	memset(chip->oob_poi, 0xff, mtd->oobsize);

	switch (ops->mode) {

	case MTD_OPS_PLACE_OOB:
	case MTD_OPS_RAW:
		/* Straight copy at the caller-supplied offset */
		memcpy(chip->oob_poi + ops->ooboffs, oob, len);
		return oob + len;

	case MTD_OPS_AUTO_OOB:
		/* Scatter the client bytes into the free OOB regions */
		ret = mtd_ooblayout_set_databytes(mtd, oob, chip->oob_poi,
						  ops->ooboffs, len);
		BUG_ON(ret);
		return oob + len;

	default:
		BUG();
	}
	return NULL;
}
431
432
433
434
435
436
437
438
439
/**
 * nand_do_write_oob - [MTD Interface] NAND write out-of-band
 * @chip: NAND chip object
 * @to: offset to write to
 * @ops: oob operation description structure
 *
 * NAND write out-of-band.
 */
static int nand_do_write_oob(struct nand_chip *chip, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int chipnr, page, status, len, ret;

	pr_debug("%s: to = 0x%08x, len = %i\n",
			 __func__, (unsigned int)to, (int)ops->ooblen);

	len = mtd_oobavail(mtd, ops);

	/* Do not allow write past end of page */
	if ((ops->ooboffs + ops->ooblen) > len) {
		pr_debug("%s: attempt to write past end of page\n",
				__func__);
		return -EINVAL;
	}

	chipnr = (int)(to >> chip->chip_shift);

	/*
	 * Reset the chip before the write. Some chips are known to clear the
	 * whole data page if this step is skipped.
	 */
	ret = nand_reset(chip, chipnr);
	if (ret)
		return ret;

	nand_select_target(chip, chipnr);

	/* Shift to get page */
	page = (int)(to >> chip->page_shift);

	/* Check, if it is write protected */
	if (nand_check_wp(chip)) {
		nand_deselect_target(chip);
		return -EROFS;
	}

	/* Invalidate the page cache, if we write to the cached page */
	if (page == chip->pagecache.page)
		chip->pagecache.page = -1;

	nand_fill_oob(chip, ops->oobbuf, ops->ooblen, ops);

	if (ops->mode == MTD_OPS_RAW)
		status = chip->ecc.write_oob_raw(chip, page & chip->pagemask);
	else
		status = chip->ecc.write_oob(chip, page & chip->pagemask);

	nand_deselect_target(chip);

	if (status)
		return status;

	ops->oobretlen = ops->ooblen;

	return 0;
}
501
502
503
504
505
506
507
508
509
510
/**
 * nand_default_block_markbad - [DEFAULT] mark a block bad via bad block marker
 * @chip: NAND chip object
 * @ofs: offset from device start
 *
 * This is the default implementation, which can be overridden by a hardware
 * specific driver. It writes a bad block marker to every OOB page location
 * reported by nand_bbm_get_next_page().
 */
static int nand_default_block_markbad(struct nand_chip *chip, loff_t ofs)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct mtd_oob_ops ops;
	uint8_t buf[2] = { 0, 0 };
	int ret = 0, res, page_offset;

	memset(&ops, 0, sizeof(ops));
	ops.oobbuf = buf;
	ops.ooboffs = chip->badblockpos;
	if (chip->options & NAND_BUSWIDTH_16) {
		/* 16-bit chips: the marker must be written 16-bit aligned */
		ops.ooboffs &= ~0x01;
		ops.len = ops.ooblen = 2;
	} else {
		ops.len = ops.ooblen = 1;
	}
	ops.mode = MTD_OPS_PLACE_OOB;

	/* Write to first/second/last page(s) as configured for this chip */
	page_offset = nand_bbm_get_next_page(chip, 0);

	while (page_offset >= 0) {
		res = nand_do_write_oob(chip,
					ofs + (page_offset * mtd->writesize),
					&ops);

		/* Remember the first error but keep trying the other pages */
		if (!ret)
			ret = res;

		page_offset = nand_bbm_get_next_page(chip, page_offset + 1);
	}

	return ret;
}
544
545
546
547
548
549
550int nand_markbad_bbm(struct nand_chip *chip, loff_t ofs)
551{
552 if (chip->legacy.block_markbad)
553 return chip->legacy.block_markbad(chip, ofs);
554
555 return nand_default_block_markbad(chip, ofs);
556}
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
/**
 * nand_block_markbad_lowlevel - mark a block bad
 * @chip: NAND chip object
 * @ofs: offset from device start
 *
 * This function performs the generic NAND bad block marking steps (i.e., bad
 * block table(s) and/or marker(s)):
 *
 *  (1) erase the affected block, to allow OOB marker to be written cleanly
 *  (2) write bad block marker to OOB area of affected block (unless flag
 *      NAND_BBT_NO_OOB_BBM is present)
 *  (3) update the BBT
 *
 * The first error encountered in (2) or (3) is retained and returned at the
 * end, but all steps are still attempted.
 */
static int nand_block_markbad_lowlevel(struct nand_chip *chip, loff_t ofs)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int res, ret = 0;

	if (!(chip->bbt_options & NAND_BBT_NO_OOB_BBM)) {
		struct erase_info einfo;

		/* Attempt erase before marking OOB */
		memset(&einfo, 0, sizeof(einfo));
		einfo.addr = ofs;
		einfo.len = 1ULL << chip->phys_erase_shift;
		nand_erase_nand(chip, &einfo, 0);

		/* Write bad block marker to OOB */
		ret = nand_get_device(chip);
		if (ret)
			return ret;

		ret = nand_markbad_bbm(chip, ofs);
		nand_release_device(chip);
	}

	/* Mark block bad in BBT */
	if (chip->bbt) {
		res = nand_markbad_bbt(chip, ofs);
		if (!ret)
			ret = res;
	}

	if (!ret)
		mtd->ecc_stats.badblocks++;

	return ret;
}
612
613
614
615
616
617
618
619
620static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs)
621{
622 struct nand_chip *chip = mtd_to_nand(mtd);
623
624 if (!chip->bbt)
625 return 0;
626
627 return nand_isreserved_bbt(chip, ofs);
628}
629
630
631
632
633
634
635
636
637
638
639static int nand_block_checkbad(struct nand_chip *chip, loff_t ofs, int allowbbt)
640{
641
642 if (chip->bbt)
643 return nand_isbad_bbt(chip, ofs, allowbbt);
644
645 return nand_isbad_bbm(chip, ofs);
646}
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
666{
667 const struct nand_sdr_timings *timings;
668 u8 status = 0;
669 int ret;
670
671 if (!nand_has_exec_op(chip))
672 return -ENOTSUPP;
673
674
675 timings = nand_get_sdr_timings(&chip->data_interface);
676 ndelay(PSEC_TO_NSEC(timings->tWB_max));
677
678 ret = nand_status_op(chip, NULL);
679 if (ret)
680 return ret;
681
682 timeout_ms = jiffies + msecs_to_jiffies(timeout_ms);
683 do {
684 ret = nand_read_data_op(chip, &status, sizeof(status), true);
685 if (ret)
686 break;
687
688 if (status & NAND_STATUS_READY)
689 break;
690
691
692
693
694
695
696 udelay(10);
697 } while (time_before(jiffies, timeout_ms));
698
699
700
701
702
703
704 nand_exit_status_op(chip);
705
706 if (ret)
707 return ret;
708
709 return status & NAND_STATUS_READY ? 0 : -ETIMEDOUT;
710};
711EXPORT_SYMBOL_GPL(nand_soft_waitrdy);
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod,
728 unsigned long timeout_ms)
729{
730
731 timeout_ms = jiffies + msecs_to_jiffies(timeout_ms);
732 do {
733 if (gpiod_get_value_cansleep(gpiod))
734 return 0;
735
736 cond_resched();
737 } while (time_before(jiffies, timeout_ms));
738
739 return gpiod_get_value_cansleep(gpiod) ? 0 : -ETIMEDOUT;
740};
741EXPORT_SYMBOL_GPL(nand_gpio_waitrdy);
742
743
744
745
746
747
748
749
750
751
/**
 * panic_nand_wait - [GENERIC] wait until the command is done
 * @chip: NAND chip structure
 * @timeo: timeout in ms
 *
 * Wait for command done. This is a helper function for nand_wait used when
 * we are in interrupt context. May happen when in panic and trying to write
 * an oops through mtdoops.
 */
void panic_nand_wait(struct nand_chip *chip, unsigned long timeo)
{
	int i;
	for (i = 0; i < timeo; i++) {
		if (chip->legacy.dev_ready) {
			if (chip->legacy.dev_ready(chip))
				break;
		} else {
			int ret;
			u8 status;

			/*
			 * NOTE(review): a plain data read is used here to
			 * fetch the status byte, which presumes the chip is
			 * already in STATUS-read mode - verify against
			 * callers.
			 */
			ret = nand_read_data_op(chip, &status, sizeof(status),
						true);
			if (ret)
				return;

			if (status & NAND_STATUS_READY)
				break;
		}
		mdelay(1);
	}
}
774
775static bool nand_supports_get_features(struct nand_chip *chip, int addr)
776{
777 return (chip->parameters.supports_set_get_features &&
778 test_bit(addr, chip->parameters.get_feature_list));
779}
780
781static bool nand_supports_set_features(struct nand_chip *chip, int addr)
782{
783 return (chip->parameters.supports_set_get_features &&
784 test_bit(addr, chip->parameters.set_feature_list));
785}
786
787
788
789
790
791
792
793
794
795
/**
 * nand_reset_data_interface - Reset data interface and timings
 * @chip: The NAND chip
 * @chipnr: Internal die id
 *
 * Reset the data interface and timings to ONFI SDR timing mode 0.
 *
 * Returns 0 for success or negative error code otherwise.
 */
static int nand_reset_data_interface(struct nand_chip *chip, int chipnr)
{
	int ret;

	if (!nand_has_setup_data_iface(chip))
		return 0;

	/*
	 * A device in any timing mode is required to recognize a Reset (FFh)
	 * command issued in SDR timing mode 0, so configure the controller
	 * side for SDR mode 0 before the reset is sent.
	 */
	onfi_fill_data_interface(chip, NAND_SDR_IFACE, 0);
	ret = chip->controller->ops->setup_data_interface(chip, chipnr,
							&chip->data_interface);
	if (ret)
		pr_err("Failed to configure data interface to SDR timing mode 0\n");

	return ret;
}
825
826
827
828
829
830
831
832
833
834
835
836
837
838
/**
 * nand_setup_data_interface - Setup the best data interface and timings
 * @chip: The NAND chip
 * @chipnr: Internal die id
 *
 * Configure the data interface and timings on both the chip side (via the
 * ONFI TIMING_MODE feature, when supported) and the controller side, then
 * read the feature back to verify the chip accepted the requested mode.
 * On disagreement, both sides are reverted to SDR timing mode 0.
 *
 * Returns 0 for success or negative error code otherwise.
 */
static int nand_setup_data_interface(struct nand_chip *chip, int chipnr)
{
	u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = {
		chip->onfi_timing_mode_default,
	};
	int ret;

	if (!nand_has_setup_data_iface(chip))
		return 0;

	/* Change the mode on the chip side (if supported by the NAND chip) */
	if (nand_supports_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE)) {
		nand_select_target(chip, chipnr);
		ret = nand_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
					tmode_param);
		nand_deselect_target(chip);
		if (ret)
			return ret;
	}

	/* Change the mode on the controller side */
	ret = chip->controller->ops->setup_data_interface(chip, chipnr,
							&chip->data_interface);
	if (ret)
		return ret;

	/* Check the mode has been accepted by the chip, if supported */
	if (!nand_supports_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE))
		return 0;

	memset(tmode_param, 0, ONFI_SUBFEATURE_PARAM_LEN);
	nand_select_target(chip, chipnr);
	ret = nand_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
				tmode_param);
	nand_deselect_target(chip);
	if (ret)
		goto err_reset_chip;

	if (tmode_param[0] != chip->onfi_timing_mode_default) {
		pr_warn("timing mode %d not acknowledged by the NAND chip\n",
			chip->onfi_timing_mode_default);
		goto err_reset_chip;
	}

	return 0;

err_reset_chip:
	/*
	 * Fallback to mode 0 if the chip explicitly did not ack the requested
	 * timing mode: reset both sides and issue a RESET to the die.
	 */
	nand_reset_data_interface(chip, chipnr);
	nand_select_target(chip, chipnr);
	nand_reset_op(chip);
	nand_deselect_target(chip);

	return ret;
}
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
/**
 * nand_init_data_interface - find the best data interface and timings
 * @chip: The NAND chip
 *
 * Find the best data interface and NAND timings supported by the chip
 * and the driver. Try the highest supported timing mode first and fall
 * back toward mode 0 until the controller accepts one. The chosen mode is
 * recorded in chip->onfi_timing_mode_default.
 *
 * Returns 0 for success or negative error code otherwise.
 */
static int nand_init_data_interface(struct nand_chip *chip)
{
	int modes, mode, ret;

	if (!nand_has_setup_data_iface(chip))
		return 0;

	/*
	 * Modes advertised by the ONFI parameter page take precedence;
	 * otherwise fall back to the default mode configured for the chip
	 * (all modes up to and including it are considered).
	 */
	if (chip->parameters.onfi) {
		modes = chip->parameters.onfi->async_timing_mode;
	} else {
		if (!chip->onfi_timing_mode_default)
			return 0;

		modes = GENMASK(chip->onfi_timing_mode_default, 0);
	}

	for (mode = fls(modes) - 1; mode >= 0; mode--) {
		ret = onfi_fill_data_interface(chip, NAND_SDR_IFACE, mode);
		if (ret)
			continue;

		/*
		 * Pass NAND_DATA_IFACE_CHECK_ONLY to only check if the
		 * controller supports the requested timings.
		 */
		ret = chip->controller->ops->setup_data_interface(chip,
						 NAND_DATA_IFACE_CHECK_ONLY,
						 &chip->data_interface);
		if (!ret) {
			chip->onfi_timing_mode_default = mode;
			break;
		}
	}

	return 0;
}
953
954
955
956
957
958
959
960
961
962
963
964
965
/**
 * nand_fill_column_cycles - fill the column cycles of an address
 * @chip: The NAND chip
 * @addrs: Array of address cycles to fill
 * @offset_in_page: The offset in the page
 *
 * Fills the first or the first two bytes of the @addrs field depending
 * on the NAND bus width and the page size. Returns the number of cycles
 * needed to encode the address, or a negative error code in case one of the
 * arguments is invalid.
 */
static int nand_fill_column_cycles(struct nand_chip *chip, u8 *addrs,
				   unsigned int offset_in_page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	/* Make sure the offset is less than the actual page size. */
	if (offset_in_page > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	/*
	 * On small page NANDs, there's a dedicated command to access the OOB
	 * area, and the column address is relative to the start of the OOB
	 * area, not the start of the page. Asjust the address accordingly.
	 */
	if (mtd->writesize <= 512 && offset_in_page >= mtd->writesize)
		offset_in_page -= mtd->writesize;

	/*
	 * The offset in page is expressed in bytes, if the NAND bus is 16-bit
	 * wide, then it must be divided by 2.
	 */
	if (chip->options & NAND_BUSWIDTH_16) {
		if (WARN_ON(offset_in_page % 2))
			return -EINVAL;

		offset_in_page /= 2;
	}

	addrs[0] = offset_in_page;

	/*
	 * Small page NANDs use 1 cycle for the columns, while large page NANDs
	 * need 2
	 */
	if (mtd->writesize <= 512)
		return 1;

	addrs[1] = offset_in_page >> 8;

	return 2;
}
1007
/* READ PAGE sequence for small-page chips, built on ->exec_op(). */
static int nand_sp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
				     unsigned int offset_in_page, void *buf,
				     unsigned int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	const struct nand_sdr_timings *sdr =
		nand_get_sdr_timings(&chip->data_interface);
	u8 addrs[4];
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_ADDR(3, addrs, PSEC_TO_NSEC(sdr->tWB_max)),
		NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
				 PSEC_TO_NSEC(sdr->tRR_min)),
		NAND_OP_DATA_IN(len, buf, 0),
	};
	struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
	int ret;

	/* Drop the DATA_IN instruction if len is 0. */
	if (!len)
		op.ninstrs--;

	/*
	 * Small-page chips select the column range through the opcode:
	 * READOOB for the OOB area, READ1 for the second half of the page
	 * (8-bit bus only), READ0 otherwise.
	 */
	if (offset_in_page >= mtd->writesize)
		instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
	else if (offset_in_page >= 256 &&
		 !(chip->options & NAND_BUSWIDTH_16))
		instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;

	ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
	if (ret < 0)
		return ret;

	/* One column cycle, then two (or three) row cycles */
	addrs[1] = page;
	addrs[2] = page >> 8;

	if (chip->options & NAND_ROW_ADDR_3) {
		addrs[3] = page >> 16;
		instrs[1].ctx.addr.naddrs++;
	}

	return nand_exec_op(chip, &op);
}
1050
/* READ PAGE sequence for large-page chips, built on ->exec_op(). */
static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
				     unsigned int offset_in_page, void *buf,
				     unsigned int len)
{
	const struct nand_sdr_timings *sdr =
		nand_get_sdr_timings(&chip->data_interface);
	u8 addrs[5];
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_ADDR(4, addrs, 0),
		NAND_OP_CMD(NAND_CMD_READSTART, PSEC_TO_NSEC(sdr->tWB_max)),
		NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
				 PSEC_TO_NSEC(sdr->tRR_min)),
		NAND_OP_DATA_IN(len, buf, 0),
	};
	struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
	int ret;

	/* Drop the DATA_IN instruction if len is 0. */
	if (!len)
		op.ninstrs--;

	ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
	if (ret < 0)
		return ret;

	/* Two column cycles, then two (or three) row cycles */
	addrs[2] = page;
	addrs[3] = page >> 8;

	if (chip->options & NAND_ROW_ADDR_3) {
		addrs[4] = page >> 16;
		instrs[1].ctx.addr.naddrs++;
	}

	return nand_exec_op(chip, &op);
}
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
/**
 * nand_read_page_op - Do a READ PAGE operation
 * @chip: The NAND chip
 * @page: page to read
 * @offset_in_page: offset within the page
 * @buf: buffer used to store the data
 * @len: length of the buffer
 *
 * This function issues a READ PAGE operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_read_page_op(struct nand_chip *chip, unsigned int page,
		      unsigned int offset_in_page, void *buf, unsigned int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (len && !buf)
		return -EINVAL;

	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	if (nand_has_exec_op(chip)) {
		/* Large and small page chips use different sequences */
		if (mtd->writesize > 512)
			return nand_lp_exec_read_page_op(chip, page,
							 offset_in_page, buf,
							 len);

		return nand_sp_exec_read_page_op(chip, page, offset_in_page,
						 buf, len);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_READ0, offset_in_page, page);
	if (len)
		chip->legacy.read_buf(chip, buf, len);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_read_page_op);
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
/**
 * nand_read_param_page_op - Do a READ PARAMETER PAGE operation
 * @chip: The NAND chip
 * @page: parameter page to read
 * @buf: buffer used to store the data
 * @len: length of the buffer
 *
 * This function issues a READ PARAMETER PAGE operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf,
			    unsigned int len)
{
	unsigned int i;
	u8 *p = buf;

	if (len && !buf)
		return -EINVAL;

	if (nand_has_exec_op(chip)) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_PARAM, 0),
			NAND_OP_ADDR(1, &page, PSEC_TO_NSEC(sdr->tWB_max)),
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
					 PSEC_TO_NSEC(sdr->tRR_min)),
			NAND_OP_8BIT_DATA_IN(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		/* Drop the DATA_IN instruction if len is 0. */
		if (!len)
			op.ninstrs--;

		return nand_exec_op(chip, &op);
	}

	/* Parameter pages are always read byte-by-byte on the legacy path */
	chip->legacy.cmdfunc(chip, NAND_CMD_PARAM, page, -1);
	for (i = 0; i < len; i++)
		p[i] = chip->legacy.read_byte(chip);

	return 0;
}
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
/**
 * nand_change_read_column_op - Do a CHANGE READ COLUMN operation
 * @chip: The NAND chip
 * @offset_in_page: offset within the page
 * @buf: buffer used to store the data
 * @len: length of the buffer
 * @force_8bit: force 8-bit bus access
 *
 * This function issues a CHANGE READ COLUMN operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_change_read_column_op(struct nand_chip *chip,
			       unsigned int offset_in_page, void *buf,
			       unsigned int len, bool force_8bit)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (len && !buf)
		return -EINVAL;

	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	/* Small page NANDs do not support column change. */
	if (mtd->writesize <= 512)
		return -ENOTSUPP;

	if (nand_has_exec_op(chip)) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		u8 addrs[2] = {};
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_RNDOUT, 0),
			NAND_OP_ADDR(2, addrs, 0),
			NAND_OP_CMD(NAND_CMD_RNDOUTSTART,
				    PSEC_TO_NSEC(sdr->tCCS_min)),
			NAND_OP_DATA_IN(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
		int ret;

		ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
		if (ret < 0)
			return ret;

		/* Drop the DATA_IN instruction if len is 0. */
		if (!len)
			op.ninstrs--;

		instrs[3].ctx.data.force_8bit = force_8bit;

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_RNDOUT, offset_in_page, -1);
	if (len)
		chip->legacy.read_buf(chip, buf, len);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_change_read_column_op);
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
/**
 * nand_read_oob_op - Do a READ OOB operation
 * @chip: The NAND chip
 * @page: page to read
 * @offset_in_oob: offset within the OOB area
 * @buf: buffer used to store the data
 * @len: length of the buffer
 *
 * This function issues a READ OOB operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_read_oob_op(struct nand_chip *chip, unsigned int page,
		     unsigned int offset_in_oob, void *buf, unsigned int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (len && !buf)
		return -EINVAL;

	if (offset_in_oob + len > mtd->oobsize)
		return -EINVAL;

	/* OOB is addressed as a column offset past the page data */
	if (nand_has_exec_op(chip))
		return nand_read_page_op(chip, page,
					 mtd->writesize + offset_in_oob,
					 buf, len);

	chip->legacy.cmdfunc(chip, NAND_CMD_READOOB, offset_in_oob, page);
	if (len)
		chip->legacy.read_buf(chip, buf, len);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_read_oob_op);
1277
/*
 * Core PROG PAGE sequence built on ->exec_op(). When @prog is false only the
 * SEQIN + data-out part is issued (the PAGEPROG + wait is left for a later
 * nand_prog_page_end_op() call). Returns the NAND status byte when @prog is
 * true, 0 or a negative error code otherwise.
 */
static int nand_exec_prog_page_op(struct nand_chip *chip, unsigned int page,
				  unsigned int offset_in_page, const void *buf,
				  unsigned int len, bool prog)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	const struct nand_sdr_timings *sdr =
		nand_get_sdr_timings(&chip->data_interface);
	u8 addrs[5] = {};
	struct nand_op_instr instrs[] = {
		/*
		 * The first instruction will be dropped if we're dealing
		 * with a large page NAND and adjusted if we're dealing
		 * with a small page NAND and the page offset is > 255.
		 */
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_CMD(NAND_CMD_SEQIN, 0),
		NAND_OP_ADDR(0, addrs, PSEC_TO_NSEC(sdr->tADL_min)),
		NAND_OP_DATA_OUT(len, buf, 0),
		NAND_OP_CMD(NAND_CMD_PAGEPROG, PSEC_TO_NSEC(sdr->tWB_max)),
		NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tPROG_max), 0),
	};
	struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
	int naddrs = nand_fill_column_cycles(chip, addrs, offset_in_page);
	int ret;
	u8 status;

	if (naddrs < 0)
		return naddrs;

	/* Append the row cycles after the column cycles */
	addrs[naddrs++] = page;
	addrs[naddrs++] = page >> 8;
	if (chip->options & NAND_ROW_ADDR_3)
		addrs[naddrs++] = page >> 16;

	instrs[2].ctx.addr.naddrs = naddrs;

	/* Drop the last two instructions if we're not programming the page. */
	if (!prog) {
		op.ninstrs -= 2;
		/* Also drop the DATA_OUT instruction if empty. */
		if (!len)
			op.ninstrs--;
	}

	if (mtd->writesize <= 512) {
		/*
		 * Small pages need some more tweaking: we have to adjust the
		 * first instruction depending on the page offset we're trying
		 * to access.
		 */
		if (offset_in_page >= mtd->writesize)
			instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
		else if (offset_in_page >= 256 &&
			 !(chip->options & NAND_BUSWIDTH_16))
			instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;
	} else {
		/*
		 * Drop the first command if we're dealing with a large page
		 * NAND.
		 */
		op.instrs++;
		op.ninstrs--;
	}

	ret = nand_exec_op(chip, &op);
	if (!prog || ret)
		return ret;

	ret = nand_status_op(chip, &status);
	if (ret)
		return ret;

	return status;
}
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
/**
 * nand_prog_page_begin_op - starts a PROG PAGE operation
 * @chip: The NAND chip
 * @page: page to write
 * @offset_in_page: offset within the page
 * @buf: buffer containing the data to write to the page
 * @len: length of the buffer
 *
 * This function issues the first half of a PROG PAGE operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page,
			    unsigned int offset_in_page, const void *buf,
			    unsigned int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (len && !buf)
		return -EINVAL;

	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	/* prog=false: the PAGEPROG half is issued by nand_prog_page_end_op() */
	if (nand_has_exec_op(chip))
		return nand_exec_prog_page_op(chip, page, offset_in_page, buf,
					      len, false);

	chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page, page);

	if (buf)
		chip->legacy.write_buf(chip, buf, len);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_prog_page_begin_op);
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
/**
 * nand_prog_page_end_op - ends a PROG PAGE operation
 * @chip: The NAND chip
 *
 * This function issues the second half of a PROG PAGE operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_prog_page_end_op(struct nand_chip *chip)
{
	int ret;
	u8 status;

	if (nand_has_exec_op(chip)) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_PAGEPROG,
				    PSEC_TO_NSEC(sdr->tWB_max)),
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tPROG_max), 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		ret = nand_exec_op(chip, &op);
		if (ret)
			return ret;

		ret = nand_status_op(chip, &status);
		if (ret)
			return ret;
	} else {
		chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1);
		ret = chip->legacy.waitfunc(chip);
		if (ret < 0)
			return ret;

		status = ret;
	}

	/* Inspect the NAND status byte for a program failure */
	if (status & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(nand_prog_page_end_op);
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451int nand_prog_page_op(struct nand_chip *chip, unsigned int page,
1452 unsigned int offset_in_page, const void *buf,
1453 unsigned int len)
1454{
1455 struct mtd_info *mtd = nand_to_mtd(chip);
1456 int status;
1457
1458 if (!len || !buf)
1459 return -EINVAL;
1460
1461 if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1462 return -EINVAL;
1463
1464 if (nand_has_exec_op(chip)) {
1465 status = nand_exec_prog_page_op(chip, page, offset_in_page, buf,
1466 len, true);
1467 } else {
1468 chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page,
1469 page);
1470 chip->legacy.write_buf(chip, buf, len);
1471 chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1);
1472 status = chip->legacy.waitfunc(chip);
1473 }
1474
1475 if (status & NAND_STATUS_FAIL)
1476 return -EIO;
1477
1478 return 0;
1479}
1480EXPORT_SYMBOL_GPL(nand_prog_page_op);
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
/**
 * nand_change_write_column_op - Do a CHANGE WRITE COLUMN operation
 * @chip: The NAND chip
 * @offset_in_page: offset within the page
 * @buf: buffer containing the data to send to the NAND
 * @len: length of the buffer
 * @force_8bit: force 8-bit bus access
 *
 * This function issues a CHANGE WRITE COLUMN operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_change_write_column_op(struct nand_chip *chip,
				unsigned int offset_in_page,
				const void *buf, unsigned int len,
				bool force_8bit)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (len && !buf)
		return -EINVAL;

	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	/* Small page NANDs do not support column change. */
	if (mtd->writesize <= 512)
		return -ENOTSUPP;

	if (nand_has_exec_op(chip)) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		u8 addrs[2];
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_RNDIN, 0),
			NAND_OP_ADDR(2, addrs, PSEC_TO_NSEC(sdr->tCCS_min)),
			NAND_OP_DATA_OUT(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
		int ret;

		ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
		if (ret < 0)
			return ret;

		instrs[2].ctx.data.force_8bit = force_8bit;

		/* Drop the DATA_OUT instruction if len is 0. */
		if (!len)
			op.ninstrs--;

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_RNDIN, offset_in_page, -1);
	if (len)
		chip->legacy.write_buf(chip, buf, len);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_change_write_column_op);
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
/**
 * nand_readid_op - Do a READID operation
 * @chip: The NAND chip
 * @addr: address cycle to pass after the READID command
 * @buf: buffer used to store the ID
 * @len: length of the buffer
 *
 * This function sends a READID command and reads back the ID returned by the
 * NAND.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
		   unsigned int len)
{
	unsigned int i;
	u8 *id = buf;

	if (len && !buf)
		return -EINVAL;

	if (nand_has_exec_op(chip)) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_READID, 0),
			NAND_OP_ADDR(1, &addr, PSEC_TO_NSEC(sdr->tADL_min)),
			NAND_OP_8BIT_DATA_IN(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		/* Drop the DATA_IN instruction if len is 0. */
		if (!len)
			op.ninstrs--;

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_READID, addr, -1);

	/* ID bytes are always transferred over an 8-bit bus */
	for (i = 0; i < len; i++)
		id[i] = chip->legacy.read_byte(chip);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_readid_op);
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
/**
 * nand_status_op - Do a STATUS operation
 * @chip: The NAND chip
 * @status: out variable to store the NAND status
 *
 * This function sends a STATUS command and reads back the status returned by
 * the NAND. Pass a NULL @status to only issue the command (the caller then
 * reads the status byte itself, see nand_soft_waitrdy()).
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_status_op(struct nand_chip *chip, u8 *status)
{
	if (nand_has_exec_op(chip)) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_STATUS,
				    PSEC_TO_NSEC(sdr->tADL_min)),
			NAND_OP_8BIT_DATA_IN(1, status, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		/* Drop the DATA_IN instruction when no readback is wanted */
		if (!status)
			op.ninstrs--;

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_STATUS, -1, -1);
	if (status)
		*status = chip->legacy.read_byte(chip);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_status_op);
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
/**
 * nand_exit_status_op - Exit a STATUS operation
 * @chip: The NAND chip
 *
 * This function sends a READ0 command to cancel the effect of the STATUS
 * command and to avoid reading only the status bytes on the data bus.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_exit_status_op(struct nand_chip *chip)
{
	if (nand_has_exec_op(chip)) {
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_READ0, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_READ0, -1, -1);

	return 0;
}
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667
/**
 * nand_erase_op - Do an erase operation
 * @chip: The NAND chip
 * @eraseblock: block to erase
 *
 * Send an ERASE command for @eraseblock, wait for the chip to be ready again
 * and check the resulting status byte for a failure report.
 *
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, -EIO if the chip reported an erase failure, or
 * another negative error code otherwise.
 */
int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock)
{
	/* Convert the eraseblock index into a (row) page address. */
	unsigned int page = eraseblock <<
			    (chip->phys_erase_shift - chip->page_shift);
	int ret;
	u8 status;

	if (nand_has_exec_op(chip)) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		u8 addrs[3] = { page, page >> 8, page >> 16 };
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_ERASE1, 0),
			NAND_OP_ADDR(2, addrs, 0),
			NAND_OP_CMD(NAND_CMD_ERASE2,
				    PSEC_TO_MSEC(sdr->tWB_max)),
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tBERS_max), 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		/* Large chips need a third row-address cycle (addrs[2]). */
		if (chip->options & NAND_ROW_ADDR_3)
			instrs[1].ctx.addr.naddrs++;

		ret = nand_exec_op(chip, &op);
		if (ret)
			return ret;

		/* Read back the status byte to detect erase failures. */
		ret = nand_status_op(chip, &status);
		if (ret)
			return ret;
	} else {
		chip->legacy.cmdfunc(chip, NAND_CMD_ERASE1, -1, page);
		chip->legacy.cmdfunc(chip, NAND_CMD_ERASE2, -1, -1);

		/* waitfunc() returns the status byte on success. */
		ret = chip->legacy.waitfunc(chip);
		if (ret < 0)
			return ret;

		status = ret;
	}

	if (status & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(nand_erase_op);
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725
1726
1727
/**
 * nand_set_features_op - Do a SET FEATURES operation
 * @chip: The NAND chip
 * @feature: feature id
 * @data: 4 bytes of data (ONFI_SUBFEATURE_PARAM_LEN)
 *
 * Send the SET FEATURES command with the feature address and the subfeature
 * parameter bytes, then wait for the chip to be ready again.
 *
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, -EIO if the chip reported a failure, or another
 * negative error code otherwise.
 */
static int nand_set_features_op(struct nand_chip *chip, u8 feature,
				const void *data)
{
	const u8 *params = data;
	int i, ret;

	if (nand_has_exec_op(chip)) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_SET_FEATURES, 0),
			NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tADL_min)),
			NAND_OP_8BIT_DATA_OUT(ONFI_SUBFEATURE_PARAM_LEN, data,
					      PSEC_TO_NSEC(sdr->tWB_max)),
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tFEAT_max), 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		return nand_exec_op(chip, &op);
	}

	/* Legacy path: write the parameter bytes one by one. */
	chip->legacy.cmdfunc(chip, NAND_CMD_SET_FEATURES, feature, -1);
	for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
		chip->legacy.write_byte(chip, params[i]);

	/* waitfunc() returns the status byte on success. */
	ret = chip->legacy.waitfunc(chip);
	if (ret < 0)
		return ret;

	if (ret & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772
1773
1774
/**
 * nand_get_features_op - Do a GET FEATURES operation
 * @chip: The NAND chip
 * @feature: feature id
 * @data: 4 bytes of data (ONFI_SUBFEATURE_PARAM_LEN)
 *
 * Send the GET FEATURES command with the feature address, wait for the chip
 * to be ready, then read back the subfeature parameter bytes into @data.
 *
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
static int nand_get_features_op(struct nand_chip *chip, u8 feature,
				void *data)
{
	u8 *params = data;
	int i;

	if (nand_has_exec_op(chip)) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_GET_FEATURES, 0),
			NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tWB_max)),
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tFEAT_max),
					 PSEC_TO_NSEC(sdr->tRR_min)),
			NAND_OP_8BIT_DATA_IN(ONFI_SUBFEATURE_PARAM_LEN,
					     data, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		return nand_exec_op(chip, &op);
	}

	/* Legacy path: read the parameter bytes one by one. */
	chip->legacy.cmdfunc(chip, NAND_CMD_GET_FEATURES, feature, -1);
	for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
		params[i] = chip->legacy.read_byte(chip);

	return 0;
}
1803
1804static int nand_wait_rdy_op(struct nand_chip *chip, unsigned int timeout_ms,
1805 unsigned int delay_ns)
1806{
1807 if (nand_has_exec_op(chip)) {
1808 struct nand_op_instr instrs[] = {
1809 NAND_OP_WAIT_RDY(PSEC_TO_MSEC(timeout_ms),
1810 PSEC_TO_NSEC(delay_ns)),
1811 };
1812 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1813
1814 return nand_exec_op(chip, &op);
1815 }
1816
1817
1818 if (!chip->legacy.dev_ready)
1819 udelay(chip->legacy.chip_delay);
1820 else
1821 nand_wait_ready(chip);
1822
1823 return 0;
1824}
1825
1826
1827
1828
1829
1830
1831
1832
1833
1834
1835
/**
 * nand_reset_op - Do a reset operation
 * @chip: The NAND chip
 *
 * Send a RESET command and wait for the chip to be ready again.
 *
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_reset_op(struct nand_chip *chip)
{
	if (nand_has_exec_op(chip)) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_RESET, PSEC_TO_NSEC(sdr->tWB_max)),
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tRST_max), 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_RESET, -1, -1);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_reset_op);
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
1870 bool force_8bit)
1871{
1872 if (!len || !buf)
1873 return -EINVAL;
1874
1875 if (nand_has_exec_op(chip)) {
1876 struct nand_op_instr instrs[] = {
1877 NAND_OP_DATA_IN(len, buf, 0),
1878 };
1879 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1880
1881 instrs[0].ctx.data.force_8bit = force_8bit;
1882
1883 return nand_exec_op(chip, &op);
1884 }
1885
1886 if (force_8bit) {
1887 u8 *p = buf;
1888 unsigned int i;
1889
1890 for (i = 0; i < len; i++)
1891 p[i] = chip->legacy.read_byte(chip);
1892 } else {
1893 chip->legacy.read_buf(chip, buf, len);
1894 }
1895
1896 return 0;
1897}
1898EXPORT_SYMBOL_GPL(nand_read_data_op);
1899
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909
1910
1911
1912
1913int nand_write_data_op(struct nand_chip *chip, const void *buf,
1914 unsigned int len, bool force_8bit)
1915{
1916 if (!len || !buf)
1917 return -EINVAL;
1918
1919 if (nand_has_exec_op(chip)) {
1920 struct nand_op_instr instrs[] = {
1921 NAND_OP_DATA_OUT(len, buf, 0),
1922 };
1923 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1924
1925 instrs[0].ctx.data.force_8bit = force_8bit;
1926
1927 return nand_exec_op(chip, &op);
1928 }
1929
1930 if (force_8bit) {
1931 const u8 *p = buf;
1932 unsigned int i;
1933
1934 for (i = 0; i < len; i++)
1935 chip->legacy.write_byte(chip, p[i]);
1936 } else {
1937 chip->legacy.write_buf(chip, buf, len);
1938 }
1939
1940 return 0;
1941}
1942EXPORT_SYMBOL_GPL(nand_write_data_op);
1943
1944
1945
1946
1947
1948
1949
1950
1951
1952
/**
 * struct nand_op_parser_ctx - Context used by the parser
 * @instrs: array of all the instructions that must be addressed
 * @ninstrs: length of the @instrs array
 * @subop: sub-operation to be passed to the NAND controller; its instrs
 *	   pointer and offsets advance through @instrs as patterns match
 *
 * This structure is used by the core to split NAND operations into
 * sub-operations that can be handled by the NAND controller.
 */
struct nand_op_parser_ctx {
	const struct nand_op_instr *instrs;
	unsigned int ninstrs;
	struct nand_subop subop;
};
1958
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
/**
 * nand_op_parser_must_split_instr - Checks if an instruction must be split
 * @pat: the parser pattern element that matches @instr
 * @instr: pointer to the instruction to check
 * @start_offset: in/out parameter. On input, the offset (address cycle or
 *		  byte in the data buffer) at which this instruction starts,
 *		  in case it has already been split. On a true return it is
 *		  updated to the first address/data cycle that has not been
 *		  taken care of yet.
 *
 * Some NAND controllers are limited and cannot send X address cycles with a
 * unique operation, or cannot read/write more than Y bytes at the same time.
 * In this case, split the instruction that does not fit in a single
 * controller-operation into two or more chunks.
 *
 * Returns true if the instruction must be split, false otherwise.
 */
static bool
nand_op_parser_must_split_instr(const struct nand_op_parser_pattern_elem *pat,
				const struct nand_op_instr *instr,
				unsigned int *start_offset)
{
	switch (pat->type) {
	case NAND_OP_ADDR_INSTR:
		/* maxcycles == 0 means "no limit on address cycles". */
		if (!pat->ctx.addr.maxcycles)
			break;

		if (instr->ctx.addr.naddrs - *start_offset >
		    pat->ctx.addr.maxcycles) {
			*start_offset += pat->ctx.addr.maxcycles;
			return true;
		}
		break;

	case NAND_OP_DATA_IN_INSTR:
	case NAND_OP_DATA_OUT_INSTR:
		/* maxlen == 0 means "no limit on data length". */
		if (!pat->ctx.data.maxlen)
			break;

		if (instr->ctx.data.len - *start_offset >
		    pat->ctx.data.maxlen) {
			*start_offset += pat->ctx.data.maxlen;
			return true;
		}
		break;

	default:
		break;
	}

	return false;
}
2014
2015
2016
2017
2018
2019
2020
2021
2022
2023
2024
2025
/**
 * nand_op_parser_match_pat - Checks if a pattern matches the instructions
 *			      remaining in the parser context
 * @pat: the pattern to test
 * @ctx: the parser context structure to match with the pattern @pat
 *
 * Check if @pat matches the set or a sub-set of instructions remaining in
 * @ctx. Returns true if this is the case, false otherwise. When true is
 * returned, @ctx->subop is updated with the set of instructions to be passed
 * to the controller driver.
 */
static bool
nand_op_parser_match_pat(const struct nand_op_parser_pattern *pat,
			 struct nand_op_parser_ctx *ctx)
{
	unsigned int instr_offset = ctx->subop.first_instr_start_off;
	const struct nand_op_instr *end = ctx->instrs + ctx->ninstrs;
	const struct nand_op_instr *instr = ctx->subop.instrs;
	unsigned int i, ninstrs;

	for (i = 0, ninstrs = 0; i < pat->nelems && instr < end; i++) {
		/*
		 * The pattern instruction does not match the instruction we
		 * are trying to match: if this pattern element is optional we
		 * can skip it, otherwise the whole pattern fails.
		 */
		if (instr->type != pat->elems[i].type) {
			if (!pat->elems[i].optional)
				return false;

			continue;
		}

		/*
		 * Now that we know the pattern element matches, check whether
		 * the instruction exceeds the controller's limits
		 * (maxcycles/maxlen) and must be split. If so, this
		 * sub-operation ends in the middle of the instruction; the
		 * remainder will be consumed by the next sub-operation.
		 */
		if (nand_op_parser_must_split_instr(&pat->elems[i], instr,
						    &instr_offset)) {
			ninstrs++;
			i++;
			break;
		}

		instr++;
		ninstrs++;
		instr_offset = 0;
	}

	/*
	 * This can happen if all instructions of a pattern are optional.
	 * Still, if there's not at least one instruction handled by this
	 * pattern, this is not a match, and we should try the next one (if
	 * any).
	 */
	if (!ninstrs)
		return false;

	/*
	 * We had a match on the pattern head, but the pattern may be longer
	 * than the instructions we're asked to execute. We need to make sure
	 * there's no mandatory elements in the pattern tail.
	 */
	for (; i < pat->nelems; i++) {
		if (!pat->elems[i].optional)
			return false;
	}

	/*
	 * We have a match: update the subop structure accordingly and return
	 * true.
	 */
	ctx->subop.ninstrs = ninstrs;
	ctx->subop.last_instr_end_off = instr_offset;

	return true;
}
2098
#if IS_ENABLED(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG)
/*
 * nand_op_parser_trace - Dump the full operation via pr_debug(), marking with
 * "->" the instructions that belong to the sub-operation currently being
 * executed.
 */
static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
{
	const struct nand_op_instr *instr;
	char *prefix = "      ";
	unsigned int i;

	pr_debug("executing subop:\n");

	for (i = 0; i < ctx->ninstrs; i++) {
		instr = &ctx->instrs[i];

		/* Switch to the arrow prefix on the subop's first instr. */
		if (instr == &ctx->subop.instrs[0])
			prefix = "    ->";

		nand_op_trace(prefix, instr);

		/* Back to plain prefix after the subop's last instr. */
		if (instr == &ctx->subop.instrs[ctx->subop.ninstrs - 1])
			prefix = "      ";
	}
}
#else
/* No-op when debug tracing is compiled out. */
static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
{
	/* NOP */
}
#endif
2126
2127static int nand_op_parser_cmp_ctx(const struct nand_op_parser_ctx *a,
2128 const struct nand_op_parser_ctx *b)
2129{
2130 if (a->subop.ninstrs < b->subop.ninstrs)
2131 return -1;
2132 else if (a->subop.ninstrs > b->subop.ninstrs)
2133 return 1;
2134
2135 if (a->subop.last_instr_end_off < b->subop.last_instr_end_off)
2136 return -1;
2137 else if (a->subop.last_instr_end_off > b->subop.last_instr_end_off)
2138 return 1;
2139
2140 return 0;
2141}
2142
2143
2144
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
/**
 * nand_op_parser_exec_op - exec_op parser
 * @chip: the NAND chip
 * @parser: patterns description provided by the controller driver
 * @op: the NAND operation to address
 * @check_only: when true, the function only checks if @op can be handled but
 *		does not execute the operation
 *
 * Helper function designed to ease integration of NAND controller drivers
 * that only support a limited set of instruction sequences. The supported
 * sequences are described in @parser, and the core walks @op, greedily
 * matching the pattern that consumes the most instructions, and calls the
 * matching pattern's ->exec() hook on each resulting sub-operation.
 *
 * Returns 0 on success, -ENOTSUPP if the operation cannot be handled, or a
 * negative error code returned by a pattern's ->exec() hook.
 */
int nand_op_parser_exec_op(struct nand_chip *chip,
			   const struct nand_op_parser *parser,
			   const struct nand_operation *op, bool check_only)
{
	struct nand_op_parser_ctx ctx = {
		.subop.instrs = op->instrs,
		.instrs = op->instrs,
		.ninstrs = op->ninstrs,
	};
	unsigned int i;

	while (ctx.subop.instrs < op->instrs + op->ninstrs) {
		const struct nand_op_parser_pattern *pattern;
		struct nand_op_parser_ctx best_ctx;
		int ret, best_pattern = -1;

		/* Try every pattern and keep the one consuming the most. */
		for (i = 0; i < parser->npatterns; i++) {
			struct nand_op_parser_ctx test_ctx = ctx;

			pattern = &parser->patterns[i];
			if (!nand_op_parser_match_pat(pattern, &test_ctx))
				continue;

			if (best_pattern >= 0 &&
			    nand_op_parser_cmp_ctx(&test_ctx, &best_ctx) <= 0)
				continue;

			best_pattern = i;
			best_ctx = test_ctx;
		}

		if (best_pattern < 0) {
			pr_debug("->exec_op() parser: pattern not found!\n");
			return -ENOTSUPP;
		}

		ctx = best_ctx;
		nand_op_parser_trace(&ctx);

		if (!check_only) {
			pattern = &parser->patterns[best_pattern];
			ret = pattern->exec(chip, &ctx.subop);
			if (ret)
				return ret;
		}

		/*
		 * Advance the subop window. If the last instruction was only
		 * partially consumed (last_instr_end_off != 0), keep it in
		 * the next sub-operation and resume from that offset.
		 */
		ctx.subop.instrs = ctx.subop.instrs + ctx.subop.ninstrs;
		if (ctx.subop.last_instr_end_off)
			ctx.subop.instrs -= 1;

		ctx.subop.first_instr_start_off = ctx.subop.last_instr_end_off;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nand_op_parser_exec_op);
2225
2226static bool nand_instr_is_data(const struct nand_op_instr *instr)
2227{
2228 return instr && (instr->type == NAND_OP_DATA_IN_INSTR ||
2229 instr->type == NAND_OP_DATA_OUT_INSTR);
2230}
2231
2232static bool nand_subop_instr_is_valid(const struct nand_subop *subop,
2233 unsigned int instr_idx)
2234{
2235 return subop && instr_idx < subop->ninstrs;
2236}
2237
2238static unsigned int nand_subop_get_start_off(const struct nand_subop *subop,
2239 unsigned int instr_idx)
2240{
2241 if (instr_idx)
2242 return 0;
2243
2244 return subop->first_instr_start_off;
2245}
2246
2247
2248
2249
2250
2251
2252
2253
2254
2255
2256
2257
/**
 * nand_subop_get_addr_start_off - Get the start offset in an address array
 * @subop: The entire sub-operation
 * @instr_idx: Index of the instruction inside the sub-operation
 *
 * During driver development, one could be tempted to directly use the
 * ->addr.addrs field of address instructions. This is wrong as address
 * instructions might be split.
 *
 * Given an address instruction, returns the offset of the first cycle to
 * issue. Returns 0 (and WARNs) if @instr_idx does not designate an address
 * instruction of @subop.
 */
unsigned int nand_subop_get_addr_start_off(const struct nand_subop *subop,
					   unsigned int instr_idx)
{
	if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
		    subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR))
		return 0;

	return nand_subop_get_start_off(subop, instr_idx);
}
EXPORT_SYMBOL_GPL(nand_subop_get_addr_start_off);
2268
2269
2270
2271
2272
2273
2274
2275
2276
2277
2278
2279
/**
 * nand_subop_get_num_addr_cyc - Get the remaining address cycles to assert
 * @subop: The entire sub-operation
 * @instr_idx: Index of the instruction inside the sub-operation
 *
 * During driver development, one could be tempted to directly use the
 * ->addr->naddrs field of an address instruction. This is wrong as address
 * instructions might be split.
 *
 * Given an address instruction, returns the number of address cycles to
 * issue for this sub-operation. Returns 0 (and WARNs) if @instr_idx does not
 * designate an address instruction of @subop.
 */
unsigned int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
					 unsigned int instr_idx)
{
	int start_off, end_off;

	if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
		    subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR))
		return 0;

	start_off = nand_subop_get_addr_start_off(subop, instr_idx);

	/*
	 * If this is the sub-operation's last instruction and it was split,
	 * stop at the recorded end offset instead of the full cycle count.
	 */
	if (instr_idx == subop->ninstrs - 1 &&
	    subop->last_instr_end_off)
		end_off = subop->last_instr_end_off;
	else
		end_off = subop->instrs[instr_idx].ctx.addr.naddrs;

	return end_off - start_off;
}
EXPORT_SYMBOL_GPL(nand_subop_get_num_addr_cyc);
2300
2301
2302
2303
2304
2305
2306
2307
2308
2309
2310
2311
/**
 * nand_subop_get_data_start_off - Get the start offset in a data array
 * @subop: The entire sub-operation
 * @instr_idx: Index of the instruction inside the sub-operation
 *
 * During driver development, one could be tempted to directly use the
 * ->data->buf.{in,out} field of data instructions. This is wrong as data
 * instructions might be split.
 *
 * Given a data (in or out) instruction, returns the offset of the first byte
 * to transfer. Returns 0 (and WARNs) if @instr_idx does not designate a data
 * instruction of @subop.
 */
unsigned int nand_subop_get_data_start_off(const struct nand_subop *subop,
					   unsigned int instr_idx)
{
	if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
		    !nand_instr_is_data(&subop->instrs[instr_idx])))
		return 0;

	return nand_subop_get_start_off(subop, instr_idx);
}
EXPORT_SYMBOL_GPL(nand_subop_get_data_start_off);
2322
2323
2324
2325
2326
2327
2328
2329
2330
2331
2332
2333
/**
 * nand_subop_get_data_len - Get the number of bytes to retrieve
 * @subop: The entire sub-operation
 * @instr_idx: Index of the instruction inside the sub-operation
 *
 * During driver development, one could be tempted to directly use the
 * ->data->len field of a data instruction. This is wrong as data
 * instructions might be split.
 *
 * Given a data (in or out) instruction, returns the number of bytes to
 * transfer for this sub-operation. Returns 0 (and WARNs) if @instr_idx does
 * not designate a data instruction of @subop.
 */
unsigned int nand_subop_get_data_len(const struct nand_subop *subop,
				     unsigned int instr_idx)
{
	int start_off = 0, end_off;

	if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
		    !nand_instr_is_data(&subop->instrs[instr_idx])))
		return 0;

	start_off = nand_subop_get_data_start_off(subop, instr_idx);

	/*
	 * If this is the sub-operation's last instruction and it was split,
	 * stop at the recorded end offset instead of the full data length.
	 */
	if (instr_idx == subop->ninstrs - 1 &&
	    subop->last_instr_end_off)
		end_off = subop->last_instr_end_off;
	else
		end_off = subop->instrs[instr_idx].ctx.data.len;

	return end_off - start_off;
}
EXPORT_SYMBOL_GPL(nand_subop_get_data_len);
2354
2355
2356
2357
2358
2359
2360
2361
2362
2363
2364
2365
/**
 * nand_reset - Reset and initialize a NAND device
 * @chip: The NAND chip
 * @chipnr: Internal die id
 *
 * Save the timings data structure, then apply SDR timings mode 0 (see
 * nand_reset_data_interface for details), send the reset operation, and
 * apply back the previous timings.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_reset(struct nand_chip *chip, int chipnr)
{
	/* Keep a copy: the reset below reverts the interface to mode 0. */
	struct nand_data_interface saved_data_intf = chip->data_interface;
	int ret;

	ret = nand_reset_data_interface(chip, chipnr);
	if (ret)
		return ret;

	/*
	 * The CS line has to be released before we can apply the new NAND
	 * interface settings, hence this weird nand_select_target()
	 * nand_deselect_target() dance.
	 */
	nand_select_target(chip, chipnr);
	ret = nand_reset_op(chip);
	nand_deselect_target(chip);
	if (ret)
		return ret;

	/*
	 * A nand_reset_data_interface() put both the NAND chip and the NAND
	 * controller in timings mode 0. If the default mode for this chip is
	 * also 0, no need to proceed to the change again. Plus, at probe
	 * time, nand_setup_data_interface() uses ->set/get_features() which
	 * would fail anyway as the parameter page is not available yet.
	 */
	if (!chip->onfi_timing_mode_default)
		return 0;

	/* Restore the faster timings that were in use before the reset. */
	chip->data_interface = saved_data_intf;
	ret = nand_setup_data_interface(chip, chipnr);
	if (ret)
		return ret;

	return 0;
}
EXPORT_SYMBOL_GPL(nand_reset);
2404
2405
2406
2407
2408
2409
2410
2411
2412
2413
2414int nand_get_features(struct nand_chip *chip, int addr,
2415 u8 *subfeature_param)
2416{
2417 if (!nand_supports_get_features(chip, addr))
2418 return -ENOTSUPP;
2419
2420 if (chip->legacy.get_features)
2421 return chip->legacy.get_features(chip, addr, subfeature_param);
2422
2423 return nand_get_features_op(chip, addr, subfeature_param);
2424}
2425
2426
2427
2428
2429
2430
2431
2432
2433
2434
2435int nand_set_features(struct nand_chip *chip, int addr,
2436 u8 *subfeature_param)
2437{
2438 if (!nand_supports_set_features(chip, addr))
2439 return -ENOTSUPP;
2440
2441 if (chip->legacy.set_features)
2442 return chip->legacy.set_features(chip, addr, subfeature_param);
2443
2444 return nand_set_features_op(chip, addr, subfeature_param);
2445}
2446
2447
2448
2449
2450
2451
2452
2453
2454
2455
2456
2457
2458
2459
2460
2461
2462
2463
2464
2465
2466static int nand_check_erased_buf(void *buf, int len, int bitflips_threshold)
2467{
2468 const unsigned char *bitmap = buf;
2469 int bitflips = 0;
2470 int weight;
2471
2472 for (; len && ((uintptr_t)bitmap) % sizeof(long);
2473 len--, bitmap++) {
2474 weight = hweight8(*bitmap);
2475 bitflips += BITS_PER_BYTE - weight;
2476 if (unlikely(bitflips > bitflips_threshold))
2477 return -EBADMSG;
2478 }
2479
2480 for (; len >= sizeof(long);
2481 len -= sizeof(long), bitmap += sizeof(long)) {
2482 unsigned long d = *((unsigned long *)bitmap);
2483 if (d == ~0UL)
2484 continue;
2485 weight = hweight_long(d);
2486 bitflips += BITS_PER_LONG - weight;
2487 if (unlikely(bitflips > bitflips_threshold))
2488 return -EBADMSG;
2489 }
2490
2491 for (; len > 0; len--, bitmap++) {
2492 weight = hweight8(*bitmap);
2493 bitflips += BITS_PER_BYTE - weight;
2494 if (unlikely(bitflips > bitflips_threshold))
2495 return -EBADMSG;
2496 }
2497
2498 return bitflips;
2499}
2500
2501
2502
2503
2504
2505
2506
2507
2508
2509
2510
2511
2512
2513
2514
2515
2516
2517
2518
2519
2520
2521
2522
2523
2524
2525
2526
2527
2528
2529
2530
2531
2532
2533
2534
2535
2536
2537
2538
2539
/**
 * nand_check_erased_ecc_chunk - check if an ECC chunk contains (almost) only
 *				 0xff data
 * @data: data buffer to test
 * @datalen: data length
 * @ecc: ECC buffer
 * @ecclen: ECC length
 * @extraoob: extra OOB buffer
 * @extraooblen: extra OOB length
 * @bitflips_threshold: maximum number of bitflips before the chunk is
 *			considered not erased
 *
 * Check if a data + ECC (+ extra OOB) region is erased, tolerating up to
 * @bitflips_threshold flipped bits overall. If the region qualifies, every
 * buffer is rewritten to its ideal 0xff erased state so the caller returns
 * clean data.
 *
 * Returns the total number of bitflips, or -EBADMSG if the threshold is
 * exceeded.
 */
int nand_check_erased_ecc_chunk(void *data, int datalen,
				void *ecc, int ecclen,
				void *extraoob, int extraooblen,
				int bitflips_threshold)
{
	int data_flips, ecc_flips, extraoob_flips;

	/* Each region consumes part of the shared threshold budget. */
	data_flips = nand_check_erased_buf(data, datalen, bitflips_threshold);
	if (data_flips < 0)
		return data_flips;

	ecc_flips = nand_check_erased_buf(ecc, ecclen,
					  bitflips_threshold - data_flips);
	if (ecc_flips < 0)
		return ecc_flips;

	extraoob_flips = nand_check_erased_buf(extraoob, extraooblen,
					       bitflips_threshold -
					       data_flips - ecc_flips);
	if (extraoob_flips < 0)
		return extraoob_flips;

	/* The region is erased: scrub the flipped bits back to 0xff. */
	if (data_flips)
		memset(data, 0xff, datalen);

	if (ecc_flips)
		memset(ecc, 0xff, ecclen);

	if (extraoob_flips)
		memset(extraoob, 0xff, extraooblen);

	return data_flips + ecc_flips + extraoob_flips;
}
EXPORT_SYMBOL(nand_check_erased_ecc_chunk);
2577
2578
2579
2580
2581
2582
2583
2584
2585
2586
/**
 * nand_read_page_raw_notsupp - dummy read raw page function
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * Placeholder for controllers that cannot perform raw page reads: always
 * returns -ENOTSUPP.
 */
int nand_read_page_raw_notsupp(struct nand_chip *chip, u8 *buf,
			       int oob_required, int page)
{
	return -ENOTSUPP;
}
2592
2593
2594
2595
2596
2597
2598
2599
2600
2601
2602int nand_read_page_raw(struct nand_chip *chip, uint8_t *buf, int oob_required,
2603 int page)
2604{
2605 struct mtd_info *mtd = nand_to_mtd(chip);
2606 int ret;
2607
2608 ret = nand_read_page_op(chip, page, 0, buf, mtd->writesize);
2609 if (ret)
2610 return ret;
2611
2612 if (oob_required) {
2613 ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize,
2614 false);
2615 if (ret)
2616 return ret;
2617 }
2618
2619 return 0;
2620}
2621EXPORT_SYMBOL(nand_read_page_raw);
2622
2623
2624
2625
2626
2627
2628
2629
2630
2631
/**
 * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * We need a special oob layout and handling even when OOB isn't used.
 * Each ECC step's data is followed on the bus by its prepad, ECC bytes and
 * postpad, so data and OOB have to be read interleaved.
 */
static int nand_read_page_raw_syndrome(struct nand_chip *chip, uint8_t *buf,
				       int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	uint8_t *oob = chip->oob_poi;
	int steps, size, ret;

	ret = nand_read_page_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	for (steps = chip->ecc.steps; steps > 0; steps--) {
		/* Data chunk for this ECC step. */
		ret = nand_read_data_op(chip, buf, eccsize, false);
		if (ret)
			return ret;

		buf += eccsize;

		/* Optional prepad bytes stored before the ECC bytes. */
		if (chip->ecc.prepad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
						false);
			if (ret)
				return ret;

			oob += chip->ecc.prepad;
		}

		/* The ECC bytes themselves. */
		ret = nand_read_data_op(chip, oob, eccbytes, false);
		if (ret)
			return ret;

		oob += eccbytes;

		/* Optional postpad bytes stored after the ECC bytes. */
		if (chip->ecc.postpad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
						false);
			if (ret)
				return ret;

			oob += chip->ecc.postpad;
		}
	}

	/* Read whatever OOB bytes remain after the last ECC step. */
	size = mtd->oobsize - (oob - chip->oob_poi);
	if (size) {
		ret = nand_read_data_op(chip, oob, size, false);
		if (ret)
			return ret;
	}

	return 0;
}
2686
2687
2688
2689
2690
2691
2692
2693
2694static int nand_read_page_swecc(struct nand_chip *chip, uint8_t *buf,
2695 int oob_required, int page)
2696{
2697 struct mtd_info *mtd = nand_to_mtd(chip);
2698 int i, eccsize = chip->ecc.size, ret;
2699 int eccbytes = chip->ecc.bytes;
2700 int eccsteps = chip->ecc.steps;
2701 uint8_t *p = buf;
2702 uint8_t *ecc_calc = chip->ecc.calc_buf;
2703 uint8_t *ecc_code = chip->ecc.code_buf;
2704 unsigned int max_bitflips = 0;
2705
2706 chip->ecc.read_page_raw(chip, buf, 1, page);
2707
2708 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
2709 chip->ecc.calculate(chip, p, &ecc_calc[i]);
2710
2711 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
2712 chip->ecc.total);
2713 if (ret)
2714 return ret;
2715
2716 eccsteps = chip->ecc.steps;
2717 p = buf;
2718
2719 for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
2720 int stat;
2721
2722 stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
2723 if (stat < 0) {
2724 mtd->ecc_stats.failed++;
2725 } else {
2726 mtd->ecc_stats.corrected += stat;
2727 max_bitflips = max_t(unsigned int, max_bitflips, stat);
2728 }
2729 }
2730 return max_bitflips;
2731}
2732
2733
2734
2735
2736
2737
2738
2739
2740
2741static int nand_read_subpage(struct nand_chip *chip, uint32_t data_offs,
2742 uint32_t readlen, uint8_t *bufpoi, int page)
2743{
2744 struct mtd_info *mtd = nand_to_mtd(chip);
2745 int start_step, end_step, num_steps, ret;
2746 uint8_t *p;
2747 int data_col_addr, i, gaps = 0;
2748 int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
2749 int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
2750 int index, section = 0;
2751 unsigned int max_bitflips = 0;
2752 struct mtd_oob_region oobregion = { };
2753
2754
2755 start_step = data_offs / chip->ecc.size;
2756 end_step = (data_offs + readlen - 1) / chip->ecc.size;
2757 num_steps = end_step - start_step + 1;
2758 index = start_step * chip->ecc.bytes;
2759
2760
2761 datafrag_len = num_steps * chip->ecc.size;
2762 eccfrag_len = num_steps * chip->ecc.bytes;
2763
2764 data_col_addr = start_step * chip->ecc.size;
2765
2766 p = bufpoi + data_col_addr;
2767 ret = nand_read_page_op(chip, page, data_col_addr, p, datafrag_len);
2768 if (ret)
2769 return ret;
2770
2771
2772 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
2773 chip->ecc.calculate(chip, p, &chip->ecc.calc_buf[i]);
2774
2775
2776
2777
2778
2779 ret = mtd_ooblayout_find_eccregion(mtd, index, §ion, &oobregion);
2780 if (ret)
2781 return ret;
2782
2783 if (oobregion.length < eccfrag_len)
2784 gaps = 1;
2785
2786 if (gaps) {
2787 ret = nand_change_read_column_op(chip, mtd->writesize,
2788 chip->oob_poi, mtd->oobsize,
2789 false);
2790 if (ret)
2791 return ret;
2792 } else {
2793
2794
2795
2796
2797 aligned_pos = oobregion.offset & ~(busw - 1);
2798 aligned_len = eccfrag_len;
2799 if (oobregion.offset & (busw - 1))
2800 aligned_len++;
2801 if ((oobregion.offset + (num_steps * chip->ecc.bytes)) &
2802 (busw - 1))
2803 aligned_len++;
2804
2805 ret = nand_change_read_column_op(chip,
2806 mtd->writesize + aligned_pos,
2807 &chip->oob_poi[aligned_pos],
2808 aligned_len, false);
2809 if (ret)
2810 return ret;
2811 }
2812
2813 ret = mtd_ooblayout_get_eccbytes(mtd, chip->ecc.code_buf,
2814 chip->oob_poi, index, eccfrag_len);
2815 if (ret)
2816 return ret;
2817
2818 p = bufpoi + data_col_addr;
2819 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
2820 int stat;
2821
2822 stat = chip->ecc.correct(chip, p, &chip->ecc.code_buf[i],
2823 &chip->ecc.calc_buf[i]);
2824 if (stat == -EBADMSG &&
2825 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
2826
2827 stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
2828 &chip->ecc.code_buf[i],
2829 chip->ecc.bytes,
2830 NULL, 0,
2831 chip->ecc.strength);
2832 }
2833
2834 if (stat < 0) {
2835 mtd->ecc_stats.failed++;
2836 } else {
2837 mtd->ecc_stats.corrected += stat;
2838 max_bitflips = max_t(unsigned int, max_bitflips, stat);
2839 }
2840 }
2841 return max_bitflips;
2842}
2843
2844
2845
2846
2847
2848
2849
2850
2851
2852
/**
 * nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * Not for syndrome calculating ECC controllers which need a special oob
 * layout.
 *
 * Returns the maximum number of bitflips seen in any ECC step, or a negative
 * error code.
 */
static int nand_read_page_hwecc(struct nand_chip *chip, uint8_t *buf,
				int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *p = buf;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	uint8_t *ecc_code = chip->ecc.code_buf;
	unsigned int max_bitflips = 0;

	ret = nand_read_page_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/* Read each ECC step with the HW ECC engine armed. */
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		chip->ecc.hwctl(chip, NAND_ECC_READ);

		ret = nand_read_data_op(chip, p, eccsize, false);
		if (ret)
			return ret;

		chip->ecc.calculate(chip, p, &ecc_calc[i]);
	}

	/* Then fetch the whole OOB area holding the stored ECC bytes. */
	ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false);
	if (ret)
		return ret;

	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	eccsteps = chip->ecc.steps;
	p = buf;

	/* Correct each ECC step and accumulate the statistics. */
	for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int stat;

		stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
		if (stat == -EBADMSG &&
		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
			/* check for empty pages with bitflips */
			stat = nand_check_erased_ecc_chunk(p, eccsize,
						&ecc_code[i], eccbytes,
						NULL, 0,
						chip->ecc.strength);
		}

		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}
	return max_bitflips;
}
2913
2914
2915
2916
2917
2918
2919
2920
2921
2922
2923
2924
2925
2926
/**
 * nand_read_page_hwecc_oob_first - [REPLACEABLE] hw ecc, read oob first
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * Hardware ECC for large page chips, which requires the OOB area (holding
 * the stored ECC bytes) to be read before the data. The ECC controller
 * needs the ECC code available while reading the data.
 *
 * Returns the maximum number of bitflips seen in any ECC step, or a negative
 * error code.
 */
static int nand_read_page_hwecc_oob_first(struct nand_chip *chip, uint8_t *buf,
					  int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *p = buf;
	uint8_t *ecc_code = chip->ecc.code_buf;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	unsigned int max_bitflips = 0;

	/* Read the OOB area first */
	ret = nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
	if (ret)
		return ret;

	ret = nand_read_page_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	/* Read and correct one ECC step at a time. */
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int stat;

		chip->ecc.hwctl(chip, NAND_ECC_READ);

		ret = nand_read_data_op(chip, p, eccsize, false);
		if (ret)
			return ret;

		chip->ecc.calculate(chip, p, &ecc_calc[i]);

		stat = chip->ecc.correct(chip, p, &ecc_code[i], NULL);
		if (stat == -EBADMSG &&
		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
			/* check for empty pages with bitflips */
			stat = nand_check_erased_ecc_chunk(p, eccsize,
						&ecc_code[i], eccbytes,
						NULL, 0,
						chip->ecc.strength);
		}

		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}
	return max_bitflips;
}
2983
2984
2985
2986
2987
2988
2989
2990
2991
2992
2993
/**
 * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * The hw generator calculates the error syndrome automatically. Therefore we
 * need a special oob layout and handling: each ECC step's data is followed on
 * the bus by its prepad, ECC bytes and postpad.
 *
 * Returns the maximum number of bitflips seen in any ECC step, or a negative
 * error code.
 */
static int nand_read_page_syndrome(struct nand_chip *chip, uint8_t *buf,
				   int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret, i, eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	/* Full per-step OOB footprint: prepad + ECC + postpad. */
	int eccpadbytes = eccbytes + chip->ecc.prepad + chip->ecc.postpad;
	uint8_t *p = buf;
	uint8_t *oob = chip->oob_poi;
	unsigned int max_bitflips = 0;

	ret = nand_read_page_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int stat;

		chip->ecc.hwctl(chip, NAND_ECC_READ);

		ret = nand_read_data_op(chip, p, eccsize, false);
		if (ret)
			return ret;

		if (chip->ecc.prepad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
						false);
			if (ret)
				return ret;

			oob += chip->ecc.prepad;
		}

		/* Switch the engine to syndrome-read mode for the ECC bytes. */
		chip->ecc.hwctl(chip, NAND_ECC_READSYN);

		ret = nand_read_data_op(chip, oob, eccbytes, false);
		if (ret)
			return ret;

		stat = chip->ecc.correct(chip, p, oob, NULL);

		oob += eccbytes;

		if (chip->ecc.postpad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
						false);
			if (ret)
				return ret;

			oob += chip->ecc.postpad;
		}

		if (stat == -EBADMSG &&
		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
			/* check for empty pages with bitflips; oob has
			 * advanced past this step's bytes, hence the
			 * oob - eccpadbytes rewind. */
			stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
							   oob - eccpadbytes,
							   eccpadbytes,
							   NULL, 0,
							   chip->ecc.strength);
		}

		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}

	/* Calculate remaining oob bytes */
	i = mtd->oobsize - (oob - chip->oob_poi);
	if (i) {
		ret = nand_read_data_op(chip, oob, i, false);
		if (ret)
			return ret;
	}

	return max_bitflips;
}
3075
3076
3077
3078
3079
3080
3081
3082
/**
 * nand_transfer_oob - [INTERN] Transfer oob to client buffer
 * @chip: NAND chip object
 * @oob: oob destination address
 * @ops: oob ops structure
 * @len: size of oob to transfer
 *
 * Copy @len OOB bytes from chip->oob_poi to @oob, honouring the requested
 * OOB mode. Returns the advanced destination pointer (oob + len).
 */
static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
				  struct mtd_oob_ops *ops, size_t len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	switch (ops->mode) {

	/* PLACE/RAW: straight copy at the requested OOB offset. */
	case MTD_OPS_PLACE_OOB:
	case MTD_OPS_RAW:
		memcpy(oob, chip->oob_poi + ops->ooboffs, len);
		return oob + len;

	/* AUTO: copy only the free (non-ECC) OOB regions. */
	case MTD_OPS_AUTO_OOB:
		ret = mtd_ooblayout_get_databytes(mtd, oob, chip->oob_poi,
						  ops->ooboffs, len);
		BUG_ON(ret);
		return oob + len;

	default:
		/* Unknown OOB mode: caller bug. */
		BUG();
	}
	return NULL;
}
3107
3108
3109
3110
3111
3112
3113
3114
3115
3116
3117static int nand_setup_read_retry(struct nand_chip *chip, int retry_mode)
3118{
3119 pr_debug("setting READ RETRY mode %d\n", retry_mode);
3120
3121 if (retry_mode >= chip->read_retries)
3122 return -EINVAL;
3123
3124 if (!chip->setup_read_retry)
3125 return -EOPNOTSUPP;
3126
3127 return chip->setup_read_retry(chip, retry_mode);
3128}
3129
/*
 * nand_wait_readrdy - Wait for the chip to become ready after a page read,
 * but only on chips flagged NAND_NEED_READRDY (mostly small-page legacy
 * parts). The tR_max timing (in ps) is converted to ms for the wait helper.
 */
static void nand_wait_readrdy(struct nand_chip *chip)
{
	const struct nand_sdr_timings *sdr;

	if (!(chip->options & NAND_NEED_READRDY))
		return;

	sdr = nand_get_sdr_timings(&chip->data_interface);
	WARN_ON(nand_wait_rdy_op(chip, PSEC_TO_MSEC(sdr->tR_max), 0));
}
3140
3141
3142
3143
3144
3145
3146
3147
3148
/**
 * nand_do_read_ops - [INTERN] Read data with ECC
 * @chip: NAND chip object
 * @from: offset to read from
 * @ops: oob ops structure (data/oob buffers, lengths, mode)
 *
 * Internal function. Called with the device held. On success, returns the
 * maximum number of bitflips seen in any ECC step, or a negative error.
 * Returns -EBADMSG when uncorrectable ECC errors remain after all read-retry
 * modes have been tried.
 */
static int nand_do_read_ops(struct nand_chip *chip, loff_t from,
			    struct mtd_oob_ops *ops)
{
	int chipnr, page, realpage, col, bytes, aligned, oob_required;
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret = 0;
	uint32_t readlen = ops->len;
	uint32_t oobreadlen = ops->ooblen;
	uint32_t max_oobsize = mtd_oobavail(mtd, ops);

	uint8_t *bufpoi, *oob, *buf;
	int use_bufpoi;
	unsigned int max_bitflips = 0;
	int retry_mode = 0;
	bool ecc_fail = false;

	/* Select the NAND device containing @from. */
	chipnr = (int)(from >> chip->chip_shift);
	nand_select_target(chip, chipnr);

	realpage = (int)(from >> chip->page_shift);
	page = realpage & chip->pagemask;

	/* Column offset within the first page. */
	col = (int)(from & (mtd->writesize - 1));

	buf = ops->datbuf;
	oob = ops->oobbuf;
	oob_required = oob ? 1 : 0;

	while (1) {
		/* Snapshot ECC failure count to detect new failures below. */
		unsigned int ecc_failures = mtd->ecc_stats.failed;

		bytes = min(mtd->writesize - col, readlen);
		aligned = (bytes == mtd->writesize);

		/*
		 * Bounce through the internal buffer for partial pages, or
		 * when the caller's buffer is unsuitable (not virt-addressable
		 * or misaligned) for the controller.
		 */
		if (!aligned)
			use_bufpoi = 1;
		else if (chip->options & NAND_USE_BOUNCE_BUFFER)
			use_bufpoi = !virt_addr_valid(buf) ||
				     !IS_ALIGNED((unsigned long)buf,
						 chip->buf_align);
		else
			use_bufpoi = 0;

		/* Is the current page in the buffer? */
		if (realpage != chip->pagecache.page || oob) {
			bufpoi = use_bufpoi ? chip->data_buf : buf;

			if (use_bufpoi && aligned)
				pr_debug("%s: using read bounce buffer for buf@%p\n",
					 __func__, buf);

read_retry:
			/*
			 * Now read the page into the buffer. Absent an error,
			 * the read methods return the maximum number of
			 * bitflips per ECC step.
			 */
			if (unlikely(ops->mode == MTD_OPS_RAW))
				ret = chip->ecc.read_page_raw(chip, bufpoi,
							      oob_required,
							      page);
			else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) &&
				 !oob)
				ret = chip->ecc.read_subpage(chip, col, bytes,
							     bufpoi, page);
			else
				ret = chip->ecc.read_page(chip, bufpoi,
							  oob_required, page);
			if (ret < 0) {
				if (use_bufpoi)
					/* Invalidate page cache */
					chip->pagecache.page = -1;
				break;
			}

			/* Transfer not aligned data */
			if (use_bufpoi) {
				/* Only cache clean, full-ECC page reads. */
				if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
				    !(mtd->ecc_stats.failed - ecc_failures) &&
				    (ops->mode != MTD_OPS_RAW)) {
					chip->pagecache.page = realpage;
					chip->pagecache.bitflips = ret;
				} else {
					/* Invalidate page cache */
					chip->pagecache.page = -1;
				}
				memcpy(buf, chip->data_buf + col, bytes);
			}

			if (unlikely(oob)) {
				int toread = min(oobreadlen, max_oobsize);

				if (toread) {
					oob = nand_transfer_oob(chip, oob, ops,
								toread);
					oobreadlen -= toread;
				}
			}

			nand_wait_readrdy(chip);

			if (mtd->ecc_stats.failed - ecc_failures) {
				if (retry_mode + 1 < chip->read_retries) {
					/* Try the next read-retry mode. */
					retry_mode++;
					ret = nand_setup_read_retry(chip,
							retry_mode);
					if (ret < 0)
						break;

					/* Reset failure count and re-read. */
					mtd->ecc_stats.failed = ecc_failures;
					goto read_retry;
				} else {
					/* No more retry modes: hard failure */
					ecc_fail = true;
				}
			}

			buf += bytes;
			max_bitflips = max_t(unsigned int, max_bitflips, ret);
		} else {
			/* Page cached: serve the read from the buffer. */
			memcpy(buf, chip->data_buf + col, bytes);
			buf += bytes;
			max_bitflips = max_t(unsigned int, max_bitflips,
					     chip->pagecache.bitflips);
		}

		readlen -= bytes;

		/* Reset to retry mode 0 for the next page. */
		if (retry_mode) {
			ret = nand_setup_read_retry(chip, 0);
			if (ret < 0)
				break;
			retry_mode = 0;
		}

		if (!readlen)
			break;

		/* For subsequent reads align to page boundary */
		col = 0;
		/* Increment page address */
		realpage++;

		page = realpage & chip->pagemask;
		/* Check, if we cross a chip boundary */
		if (!page) {
			chipnr++;
			nand_deselect_target(chip);
			nand_select_target(chip, chipnr);
		}
	}
	nand_deselect_target(chip);

	ops->retlen = ops->len - (size_t) readlen;
	if (oob)
		ops->oobretlen = ops->ooblen - oobreadlen;

	if (ret < 0)
		return ret;

	if (ecc_fail)
		return -EBADMSG;

	return max_bitflips;
}
3315
3316
3317
3318
3319
3320
3321int nand_read_oob_std(struct nand_chip *chip, int page)
3322{
3323 struct mtd_info *mtd = nand_to_mtd(chip);
3324
3325 return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
3326}
3327EXPORT_SYMBOL(nand_read_oob_std);
3328
3329
3330
3331
3332
3333
3334
/*
 * nand_read_oob_syndrome - [REPLACEABLE] OOB read for HW ECC with syndromes
 * @chip: nand chip info structure
 * @page: page number to read
 *
 * With the syndrome layout each ECC step's spare bytes (prepad + ECC +
 * postpad = @chunk) are interleaved with the data, so the OOB is gathered
 * chunk by chunk, seeking past each data step.
 */
static int nand_read_oob_syndrome(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int length = mtd->oobsize;
	int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
	int eccsize = chip->ecc.size;
	uint8_t *bufpoi = chip->oob_poi;
	int i, toread, sndrnd = 0, pos, ret;

	/* Start the read at the first spare chunk (just past step 0 data). */
	ret = nand_read_page_op(chip, page, chip->ecc.size, NULL, 0);
	if (ret)
		return ret;

	for (i = 0; i < chip->ecc.steps; i++) {
		if (sndrnd) {
			int ret;

			/* Seek to the spare bytes of step @i. */
			pos = eccsize + i * (eccsize + chunk);
			if (mtd->writesize > 512)
				/* Large page: reposition the read column. */
				ret = nand_change_read_column_op(chip, pos,
								 NULL, 0,
								 false);
			else
				/* Small page: re-issue the read command. */
				ret = nand_read_page_op(chip, page, pos, NULL,
							0);

			if (ret)
				return ret;
		} else
			/* First chunk already positioned by the op above. */
			sndrnd = 1;
		toread = min_t(int, length, chunk);

		ret = nand_read_data_op(chip, bufpoi, toread, false);
		if (ret)
			return ret;

		bufpoi += toread;
		length -= toread;
	}
	/* Any remaining free OOB bytes at the end of the page. */
	if (length > 0) {
		ret = nand_read_data_op(chip, bufpoi, length, false);
		if (ret)
			return ret;
	}

	return 0;
}
3382
3383
3384
3385
3386
3387
3388int nand_write_oob_std(struct nand_chip *chip, int page)
3389{
3390 struct mtd_info *mtd = nand_to_mtd(chip);
3391
3392 return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
3393 mtd->oobsize);
3394}
3395EXPORT_SYMBOL(nand_write_oob_std);
3396
3397
3398
3399
3400
3401
3402
/*
 * nand_write_oob_syndrome - [REPLACEABLE] OOB write for HW ECC with syndrome
 * @chip: nand chip info structure
 * @page: page number to write
 *
 * Writes chip->oob_poi using the interleaved syndrome layout, skipping over
 * the data portion of each ECC step.
 */
static int nand_write_oob_syndrome(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
	int eccsize = chip->ecc.size, length = mtd->oobsize;
	int ret, i, len, pos, sndcmd = 0, steps = chip->ecc.steps;
	const uint8_t *bufpoi = chip->oob_poi;

	/*
	 * Page layout is either:
	 *   data-ecc-data-ecc ... ecc-oob
	 * or:
	 *   data-pad-ecc-pad-data-pad ... ecc-pad-oob
	 */
	if (!chip->ecc.prepad && !chip->ecc.postpad) {
		/* No padding: spare bytes are grouped at the page end. */
		pos = steps * (eccsize + chunk);
		steps = 0;
	} else
		pos = eccsize;

	ret = nand_prog_page_begin_op(chip, page, pos, NULL, 0);
	if (ret)
		return ret;

	for (i = 0; i < steps; i++) {
		if (sndcmd) {
			if (mtd->writesize <= 512) {
				uint32_t fill = 0xFFFFFFFF;

				/*
				 * Small-page chips cannot reposition the
				 * write column, so skip the data area by
				 * writing 0xff, which leaves those cells
				 * unprogrammed.
				 */
				len = eccsize;
				while (len > 0) {
					int num = min_t(int, len, 4);

					ret = nand_write_data_op(chip, &fill,
								 num, false);
					if (ret)
						return ret;

					len -= num;
				}
			} else {
				/* Large page: jump to step @i's spare area. */
				pos = eccsize + i * (eccsize + chunk);
				ret = nand_change_write_column_op(chip, pos,
								  NULL, 0,
								  false);
				if (ret)
					return ret;
			}
		} else
			sndcmd = 1;
		len = min_t(int, length, chunk);

		ret = nand_write_data_op(chip, bufpoi, len, false);
		if (ret)
			return ret;

		bufpoi += len;
		length -= len;
	}
	/* Remaining free OOB bytes at the end of the page. */
	if (length > 0) {
		ret = nand_write_data_op(chip, bufpoi, length, false);
		if (ret)
			return ret;
	}

	return nand_prog_page_end_op(chip);
}
3469
3470
3471
3472
3473
3474
3475
3476
3477
/**
 * nand_do_read_oob - [INTERN] NAND read out-of-band
 * @chip: NAND chip object
 * @from: offset to read from
 * @ops: oob operations description structure
 *
 * NAND read out-of-band data from the spare area. Called with the device
 * held. Returns max bitflips seen, or -EBADMSG on new uncorrectable errors.
 */
static int nand_do_read_oob(struct nand_chip *chip, loff_t from,
			    struct mtd_oob_ops *ops)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int max_bitflips = 0;
	int page, realpage, chipnr;
	struct mtd_ecc_stats stats;
	int readlen = ops->ooblen;
	int len;
	uint8_t *buf = ops->oobbuf;
	int ret = 0;

	pr_debug("%s: from = 0x%08Lx, len = %i\n",
			__func__, (unsigned long long)from, readlen);

	/* Snapshot ECC stats so new failures can be detected at the end. */
	stats = mtd->ecc_stats;

	/* OOB bytes available per page in this mode. */
	len = mtd_oobavail(mtd, ops);

	chipnr = (int)(from >> chip->chip_shift);
	nand_select_target(chip, chipnr);

	/* Shift to get page */
	realpage = (int)(from >> chip->page_shift);
	page = realpage & chip->pagemask;

	while (1) {
		if (ops->mode == MTD_OPS_RAW)
			ret = chip->ecc.read_oob_raw(chip, page);
		else
			ret = chip->ecc.read_oob(chip, page);

		if (ret < 0)
			break;

		len = min(len, readlen);
		buf = nand_transfer_oob(chip, buf, ops, len);

		nand_wait_readrdy(chip);

		max_bitflips = max_t(unsigned int, max_bitflips, ret);

		readlen -= len;
		if (!readlen)
			break;

		/* Increment page address */
		realpage++;

		page = realpage & chip->pagemask;
		/* Check, if we cross a chip boundary */
		if (!page) {
			chipnr++;
			nand_deselect_target(chip);
			nand_select_target(chip, chipnr);
		}
	}
	nand_deselect_target(chip);

	ops->oobretlen = ops->ooblen - readlen;

	if (ret < 0)
		return ret;

	if (mtd->ecc_stats.failed - stats.failed)
		return -EBADMSG;

	return max_bitflips;
}
3547
3548
3549
3550
3551
3552
3553
3554
3555
3556static int nand_read_oob(struct mtd_info *mtd, loff_t from,
3557 struct mtd_oob_ops *ops)
3558{
3559 struct nand_chip *chip = mtd_to_nand(mtd);
3560 int ret;
3561
3562 ops->retlen = 0;
3563
3564 if (ops->mode != MTD_OPS_PLACE_OOB &&
3565 ops->mode != MTD_OPS_AUTO_OOB &&
3566 ops->mode != MTD_OPS_RAW)
3567 return -ENOTSUPP;
3568
3569 ret = nand_get_device(chip);
3570 if (ret)
3571 return ret;
3572
3573 if (!ops->datbuf)
3574 ret = nand_do_read_oob(chip, from, ops);
3575 else
3576 ret = nand_do_read_ops(chip, from, ops);
3577
3578 nand_release_device(chip);
3579 return ret;
3580}
3581
3582
3583
3584
3585
3586
3587
3588
3589
3590
/**
 * nand_write_page_raw_notsupp - dummy raw page write function
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * Returns -ENOTSUPP unconditionally; for controllers that cannot perform
 * raw page accesses.
 */
int nand_write_page_raw_notsupp(struct nand_chip *chip, const u8 *buf,
				int oob_required, int page)
{
	return -ENOTSUPP;
}
3596
3597
3598
3599
3600
3601
3602
3603
3604
3605
/**
 * nand_write_page_raw - [INTERN] raw page write function
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * Default raw page write: program the data area and, when requested, the
 * OOB area, with no ECC involvement.
 */
int nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
			int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	ret = nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
	if (ret)
		return ret;

	if (oob_required) {
		ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize,
					 false);
		if (ret)
			return ret;
	}

	return nand_prog_page_end_op(chip);
}
EXPORT_SYMBOL(nand_write_page_raw);
3626
3627
3628
3629
3630
3631
3632
3633
3634
3635
/**
 * nand_write_page_raw_syndrome - [INTERN] raw page write function
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * We need a special oob layout and handling even when ECC isn't checked:
 * data and spare bytes are interleaved per ECC step.
 */
static int nand_write_page_raw_syndrome(struct nand_chip *chip,
					const uint8_t *buf, int oob_required,
					int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	uint8_t *oob = chip->oob_poi;
	int steps, size, ret;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/* Interleave each data step with its prepad/ECC/postpad bytes. */
	for (steps = chip->ecc.steps; steps > 0; steps--) {
		ret = nand_write_data_op(chip, buf, eccsize, false);
		if (ret)
			return ret;

		buf += eccsize;

		if (chip->ecc.prepad) {
			ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
						 false);
			if (ret)
				return ret;

			oob += chip->ecc.prepad;
		}

		ret = nand_write_data_op(chip, oob, eccbytes, false);
		if (ret)
			return ret;

		oob += eccbytes;

		if (chip->ecc.postpad) {
			ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
						 false);
			if (ret)
				return ret;

			oob += chip->ecc.postpad;
		}
	}

	/* Write out any remaining free OOB bytes. */
	size = mtd->oobsize - (oob - chip->oob_poi);
	if (size) {
		ret = nand_write_data_op(chip, oob, size, false);
		if (ret)
			return ret;
	}

	return nand_prog_page_end_op(chip);
}
3691
3692
3693
3694
3695
3696
3697
/**
 * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 */
static int nand_write_page_swecc(struct nand_chip *chip, const uint8_t *buf,
				 int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	const uint8_t *p = buf;

	/* Software ECC calculation, one step at a time */
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
		chip->ecc.calculate(chip, p, &ecc_calc[i]);

	/* Place the computed ECC bytes into the OOB layout's ECC region. */
	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	/* Write raw: ECC is already embedded in oob_poi. */
	return chip->ecc.write_page_raw(chip, buf, 1, page);
}
3719
3720
3721
3722
3723
3724
3725
3726
/**
 * nand_write_page_hwecc - [REPLACEABLE] hardware ECC based page write function
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 */
static int nand_write_page_hwecc(struct nand_chip *chip, const uint8_t *buf,
				 int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	const uint8_t *p = buf;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		/* Arm the ECC engine, then stream one data step through it. */
		chip->ecc.hwctl(chip, NAND_ECC_WRITE);

		ret = nand_write_data_op(chip, p, eccsize, false);
		if (ret)
			return ret;

		chip->ecc.calculate(chip, p, &ecc_calc[i]);
	}

	/* Copy the computed ECC into the OOB layout's ECC region. */
	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
	if (ret)
		return ret;

	return nand_prog_page_end_op(chip);
}
3762
3763
3764
3765
3766
3767
3768
3769
3770
3771
3772
/**
 * nand_write_subpage_hwecc - [REPLACEABLE] hardware ECC based subpage write
 * @chip: nand chip info structure
 * @offset: column address of subpage within the page
 * @data_len: data length
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * The whole page is streamed to the chip, but ECC is only computed for the
 * steps covered by [offset, offset + data_len); the other steps get 0xff
 * ECC/OOB so those flash cells are left unprogrammed.
 */
static int nand_write_subpage_hwecc(struct nand_chip *chip, uint32_t offset,
				    uint32_t data_len, const uint8_t *buf,
				    int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	uint8_t *oob_buf = chip->oob_poi;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	int ecc_steps = chip->ecc.steps;
	uint32_t start_step = offset / ecc_size;
	uint32_t end_step = (offset + data_len - 1) / ecc_size;
	int oob_bytes = mtd->oobsize / ecc_steps;
	int step, ret;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	for (step = 0; step < ecc_steps; step++) {
		/* Configure controller for WRITE access */
		chip->ecc.hwctl(chip, NAND_ECC_WRITE);

		/* Write data (untouched subpages already masked by 0xff) */
		ret = nand_write_data_op(chip, buf, ecc_size, false);
		if (ret)
			return ret;

		/* Mask ECC of un-touched subpages by padding 0xFF */
		if ((step < start_step) || (step > end_step))
			memset(ecc_calc, 0xff, ecc_bytes);
		else
			chip->ecc.calculate(chip, buf, ecc_calc);

		/*
		 * Mask OOB of un-touched subpages by padding 0xFF;
		 * only write to OOB data when necessary.
		 */
		if (!oob_required || (step < start_step) || (step > end_step))
			memset(oob_buf, 0xff, oob_bytes);

		buf += ecc_size;
		ecc_calc += ecc_bytes;
		oob_buf += oob_bytes;
	}

	/*
	 * Copy calculated ECC for the whole page into the OOB layout's ECC
	 * region; this includes the 0xFF mask for unwritten subpages.
	 */
	ecc_calc = chip->ecc.calc_buf;
	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	/* Write OOB buffer to NAND device */
	ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
	if (ret)
		return ret;

	return nand_prog_page_end_op(chip);
}
3832
3833
3834
3835
3836
3837
3838
3839
3840
3841
3842
3843
/**
 * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page
 *			      write
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * The hardware generator calculates the error syndrome automatically, so a
 * special interleaved oob layout and handling is required.
 */
static int nand_write_page_syndrome(struct nand_chip *chip, const uint8_t *buf,
				    int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	const uint8_t *p = buf;
	uint8_t *oob = chip->oob_poi;
	int ret;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		chip->ecc.hwctl(chip, NAND_ECC_WRITE);

		ret = nand_write_data_op(chip, p, eccsize, false);
		if (ret)
			return ret;

		if (chip->ecc.prepad) {
			ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
						 false);
			if (ret)
				return ret;

			oob += chip->ecc.prepad;
		}

		/* The ECC bytes follow the (pre-padded) data step. */
		chip->ecc.calculate(chip, p, oob);

		ret = nand_write_data_op(chip, oob, eccbytes, false);
		if (ret)
			return ret;

		oob += eccbytes;

		if (chip->ecc.postpad) {
			ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
						 false);
			if (ret)
				return ret;

			oob += chip->ecc.postpad;
		}
	}

	/* Calculate remaining oob bytes */
	i = mtd->oobsize - (oob - chip->oob_poi);
	if (i) {
		ret = nand_write_data_op(chip, oob, i, false);
		if (ret)
			return ret;
	}

	return nand_prog_page_end_op(chip);
}
3903
3904
3905
3906
3907
3908
3909
3910
3911
3912
3913
3914static int nand_write_page(struct nand_chip *chip, uint32_t offset,
3915 int data_len, const uint8_t *buf, int oob_required,
3916 int page, int raw)
3917{
3918 struct mtd_info *mtd = nand_to_mtd(chip);
3919 int status, subpage;
3920
3921 if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
3922 chip->ecc.write_subpage)
3923 subpage = offset || (data_len < mtd->writesize);
3924 else
3925 subpage = 0;
3926
3927 if (unlikely(raw))
3928 status = chip->ecc.write_page_raw(chip, buf, oob_required,
3929 page);
3930 else if (subpage)
3931 status = chip->ecc.write_subpage(chip, offset, data_len, buf,
3932 oob_required, page);
3933 else
3934 status = chip->ecc.write_page(chip, buf, oob_required, page);
3935
3936 if (status < 0)
3937 return status;
3938
3939 return 0;
3940}
3941
/* True when @x is not aligned to the chip's subpage size (a power of two).
 * @x is parenthesized so expression arguments expand correctly. */
#define NOTALIGNED(x)	(((x) & (chip->subpagesize - 1)) != 0)
3943
3944
3945
3946
3947
3948
3949
3950
3951
/**
 * nand_do_write_ops - [INTERN] NAND write with ECC
 * @chip: NAND chip object
 * @to: offset to write to
 * @ops: oob operations description structure
 *
 * NAND write with ECC. Called with the device held. Returns 0 on success
 * or a negative error; ops->retlen reflects the bytes actually written.
 */
static int nand_do_write_ops(struct nand_chip *chip, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int chipnr, realpage, page, column;
	uint32_t writelen = ops->len;

	uint32_t oobwritelen = ops->ooblen;
	uint32_t oobmaxlen = mtd_oobavail(mtd, ops);

	uint8_t *oob = ops->oobbuf;
	uint8_t *buf = ops->datbuf;
	int ret;
	int oob_required = oob ? 1 : 0;

	ops->retlen = 0;
	if (!writelen)
		return 0;

	/* Reject writes, which are not page aligned */
	if (NOTALIGNED(to) || NOTALIGNED(ops->len)) {
		pr_notice("%s: attempt to write non page aligned data\n",
			   __func__);
		return -EINVAL;
	}

	column = to & (mtd->writesize - 1);

	chipnr = (int)(to >> chip->chip_shift);
	nand_select_target(chip, chipnr);

	/* Check, if it is write protected */
	if (nand_check_wp(chip)) {
		ret = -EIO;
		goto err_out;
	}

	realpage = (int)(to >> chip->page_shift);
	page = realpage & chip->pagemask;

	/* Invalidate the page cache, when we write to the cached page */
	if (to <= ((loff_t)chip->pagecache.page << chip->page_shift) &&
	    ((loff_t)chip->pagecache.page << chip->page_shift) < (to + ops->len))
		chip->pagecache.page = -1;

	/* Don't allow multipage oob writes with offset */
	if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) {
		ret = -EINVAL;
		goto err_out;
	}

	while (1) {
		int bytes = mtd->writesize;
		uint8_t *wbuf = buf;
		int use_bufpoi;
		int part_pagewr = (column || writelen < mtd->writesize);

		/* Bounce partial-page writes and unsuitable caller buffers. */
		if (part_pagewr)
			use_bufpoi = 1;
		else if (chip->options & NAND_USE_BOUNCE_BUFFER)
			use_bufpoi = !virt_addr_valid(buf) ||
				     !IS_ALIGNED((unsigned long)buf,
						 chip->buf_align);
		else
			use_bufpoi = 0;

		/* Partial page write?, or need to use bounce buffer */
		if (use_bufpoi) {
			pr_debug("%s: using write bounce buffer for buf@%p\n",
					 __func__, buf);
			if (part_pagewr)
				bytes = min_t(int, bytes - column, writelen);
			wbuf = nand_get_data_buf(chip);
			/* 0xff padding leaves untouched cells unprogrammed. */
			memset(wbuf, 0xff, mtd->writesize);
			memcpy(&wbuf[column], buf, bytes);
		}

		if (unlikely(oob)) {
			size_t len = min(oobwritelen, oobmaxlen);
			oob = nand_fill_oob(chip, oob, len, ops);
			oobwritelen -= len;
		} else {
			/* We still need to erase leftover OOB data */
			memset(chip->oob_poi, 0xff, mtd->oobsize);
		}

		ret = nand_write_page(chip, column, bytes, wbuf,
				      oob_required, page,
				      (ops->mode == MTD_OPS_RAW));
		if (ret)
			break;

		writelen -= bytes;
		if (!writelen)
			break;

		column = 0;
		buf += bytes;
		realpage++;

		page = realpage & chip->pagemask;
		/* Check, if we cross a chip boundary */
		if (!page) {
			chipnr++;
			nand_deselect_target(chip);
			nand_select_target(chip, chipnr);
		}
	}

	ops->retlen = ops->len - writelen;
	if (unlikely(oob))
		ops->oobretlen = ops->ooblen;

err_out:
	nand_deselect_target(chip);
	return ret;
}
4069
4070
4071
4072
4073
4074
4075
4076
4077
4078
4079
4080
4081static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
4082 size_t *retlen, const uint8_t *buf)
4083{
4084 struct nand_chip *chip = mtd_to_nand(mtd);
4085 int chipnr = (int)(to >> chip->chip_shift);
4086 struct mtd_oob_ops ops;
4087 int ret;
4088
4089 nand_select_target(chip, chipnr);
4090
4091
4092 panic_nand_wait(chip, 400);
4093
4094 memset(&ops, 0, sizeof(ops));
4095 ops.len = len;
4096 ops.datbuf = (uint8_t *)buf;
4097 ops.mode = MTD_OPS_PLACE_OOB;
4098
4099 ret = nand_do_write_ops(chip, to, &ops);
4100
4101 *retlen = ops.retlen;
4102 return ret;
4103}
4104
4105
4106
4107
4108
4109
4110
4111static int nand_write_oob(struct mtd_info *mtd, loff_t to,
4112 struct mtd_oob_ops *ops)
4113{
4114 struct nand_chip *chip = mtd_to_nand(mtd);
4115 int ret = -ENOTSUPP;
4116
4117 ops->retlen = 0;
4118
4119 ret = nand_get_device(chip);
4120 if (ret)
4121 return ret;
4122
4123 switch (ops->mode) {
4124 case MTD_OPS_PLACE_OOB:
4125 case MTD_OPS_AUTO_OOB:
4126 case MTD_OPS_RAW:
4127 break;
4128
4129 default:
4130 goto out;
4131 }
4132
4133 if (!ops->datbuf)
4134 ret = nand_do_write_oob(chip, to, ops);
4135 else
4136 ret = nand_do_write_ops(chip, to, ops);
4137
4138out:
4139 nand_release_device(chip);
4140 return ret;
4141}
4142
4143
4144
4145
4146
4147
4148
4149
/**
 * nand_erase - [MTD Interface] erase block(s)
 * @mtd: MTD device structure
 * @instr: erase instruction
 *
 * Erase one or more blocks; thin wrapper that forbids erasing the BBT area.
 */
static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	return nand_erase_nand(chip, instr, 0);
}
4154
4155
4156
4157
4158
4159
4160
4161
4162
/**
 * nand_erase_nand - [INTERN] erase block(s)
 * @chip: NAND chip object
 * @instr: erase instruction
 * @allowbbt: allow erasing the bbt area
 *
 * Erase one or more blocks.
 */
int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr,
		    int allowbbt)
{
	int page, pages_per_block, ret, chipnr;
	loff_t len;

	pr_debug("%s: start = 0x%012llx, len = %llu\n",
			__func__, (unsigned long long)instr->addr,
			(unsigned long long)instr->len);

	if (check_offs_len(chip, instr->addr, instr->len))
		return -EINVAL;

	/* Grab the lock and see if the device is available */
	ret = nand_get_device(chip);
	if (ret)
		return ret;

	/* Shift to get first page */
	page = (int)(instr->addr >> chip->page_shift);
	chipnr = (int)(instr->addr >> chip->chip_shift);

	/* Calculate pages in each block */
	pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);

	/* Select the NAND device */
	nand_select_target(chip, chipnr);

	/* Check, if it is write protected */
	if (nand_check_wp(chip)) {
		pr_debug("%s: device is write protected!\n",
				__func__);
		ret = -EIO;
		goto erase_exit;
	}

	/* Loop through the blocks */
	len = instr->len;

	while (len) {
		/* Check if we have a bad block, we do not erase bad blocks! */
		if (nand_block_checkbad(chip, ((loff_t) page) <<
					chip->page_shift, allowbbt)) {
			pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
				    __func__, page);
			ret = -EIO;
			goto erase_exit;
		}

		/*
		 * Invalidate the page cache, if we erase the block which
		 * contains the current cached page.
		 */
		if (page <= chip->pagecache.page && chip->pagecache.page <
		    (page + pages_per_block))
			chip->pagecache.page = -1;

		ret = nand_erase_op(chip, (page & chip->pagemask) >>
				    (chip->phys_erase_shift - chip->page_shift));
		if (ret) {
			pr_debug("%s: failed erase, page 0x%08x\n",
					__func__, page);
			instr->fail_addr =
				((loff_t)page << chip->page_shift);
			goto erase_exit;
		}

		/* Increment page address and decrement length */
		len -= (1ULL << chip->phys_erase_shift);
		page += pages_per_block;

		/* Check, if we cross a chip boundary */
		if (len && !(page & chip->pagemask)) {
			chipnr++;
			nand_deselect_target(chip);
			nand_select_target(chip, chipnr);
		}
	}

	ret = 0;
erase_exit:

	/* Deselect and wake up anyone waiting on the device */
	nand_deselect_target(chip);
	nand_release_device(chip);

	/* Return more or less happy */
	return ret;
}
4252
4253
4254
4255
4256
4257
4258
/**
 * nand_sync - [MTD Interface] sync
 * @mtd: MTD device structure
 *
 * Sync is actually a wait for chip ready function: grab the device lock
 * (which waits for all pending operations), then drop it again.
 */
static void nand_sync(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int ret;

	pr_debug("%s: called\n", __func__);

	/* Grab the lock and see if the device is available. */
	ret = nand_get_device(chip);
	WARN_ON(ret);
	nand_release_device(chip);
}
4270
4271
4272
4273
4274
4275
4276static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
4277{
4278 struct nand_chip *chip = mtd_to_nand(mtd);
4279 int chipnr = (int)(offs >> chip->chip_shift);
4280 int ret;
4281
4282
4283 ret = nand_get_device(chip);
4284 if (ret)
4285 return ret;
4286
4287 nand_select_target(chip, chipnr);
4288
4289 ret = nand_block_checkbad(chip, offs, 0);
4290
4291 nand_deselect_target(chip);
4292 nand_release_device(chip);
4293
4294 return ret;
4295}
4296
4297
4298
4299
4300
4301
4302static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
4303{
4304 int ret;
4305
4306 ret = nand_block_isbad(mtd, ofs);
4307 if (ret) {
4308
4309 if (ret > 0)
4310 return 0;
4311 return ret;
4312 }
4313
4314 return nand_block_markbad_lowlevel(mtd_to_nand(mtd), ofs);
4315}
4316
4317
4318
4319
4320
/**
 * nand_suspend - [MTD Interface] Suspend the NAND flash
 * @mtd: MTD device structure
 *
 * Marks the chip as suspended under chip->lock. Always returns 0.
 */
static int nand_suspend(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	mutex_lock(&chip->lock);
	chip->suspended = 1;
	mutex_unlock(&chip->lock);

	return 0;
}
4331
4332
4333
4334
4335
4336static void nand_resume(struct mtd_info *mtd)
4337{
4338 struct nand_chip *chip = mtd_to_nand(mtd);
4339
4340 mutex_lock(&chip->lock);
4341 if (chip->suspended)
4342 chip->suspended = 0;
4343 else
4344 pr_err("%s called for a chip which is not in suspended state\n",
4345 __func__);
4346 mutex_unlock(&chip->lock);
4347}
4348
4349
4350
4351
4352
4353
/**
 * nand_shutdown - [MTD Interface] Finish the current NAND operation and
 *                 prevent further operations
 * @mtd: MTD device structure
 */
static void nand_shutdown(struct mtd_info *mtd)
{
	nand_suspend(mtd);
}
4358
4359
/* Set default functions and fallback values on the chip structure. */
static void nand_set_defaults(struct nand_chip *chip)
{
	/* If no controller is provided, use the chip-local dummy one. */
	if (!chip->controller) {
		chip->controller = &chip->legacy.dummy_controller;
		nand_controller_init(chip->controller);
	}

	nand_legacy_set_defaults(chip);

	/* Buffer alignment defaults to byte alignment. */
	if (!chip->buf_align)
		chip->buf_align = 1;
}
4373
4374
4375void sanitize_string(uint8_t *s, size_t len)
4376{
4377 ssize_t i;
4378
4379
4380 s[len - 1] = 0;
4381
4382
4383 for (i = 0; i < len - 1; i++) {
4384 if (s[i] < ' ' || s[i] > 127)
4385 s[i] = '?';
4386 }
4387
4388
4389 strim(s);
4390}
4391
4392
4393
4394
4395
4396
4397
4398
4399
4400
4401
4402
/*
 * nand_id_has_period - Check if an ID string is repeated within a given
 * distance
 * @id_data: the ID string
 * @arrlen: the length of the @id_data array
 * @period: the distance to check for repetitions
 *
 * Returns non-zero if the first @period bytes of @id_data repeat over the
 * whole array, zero otherwise.
 */
static int nand_id_has_period(u8 *id_data, int arrlen, int period)
{
	int j;

	/* The array has period P iff every byte equals the one P before it. */
	for (j = period; j < arrlen; j++) {
		if (id_data[j] != id_data[j - period])
			return 0;
	}

	return 1;
}
4412
4413
4414
4415
4416
4417
4418
4419
4420
/*
 * nand_id_len - Get the length of an ID string returned by CMD_READID
 * @id_data: the ID string
 * @arrlen: the length of the @id_data array
 *
 * Returns the length of the ID string: the shortest wraparound period if the
 * bytes repeat, else the count up to the last non-zero byte, else @arrlen.
 * Returns zero if the array is all zeros.
 */
static int nand_id_len(u8 *id_data, int arrlen)
{
	int tail = arrlen;
	int p;

	/* Trim trailing zero bytes. */
	while (tail > 0 && !id_data[tail - 1])
		tail--;

	/* Entirely zero: no usable ID. */
	if (tail == 0)
		return 0;

	/* A repeated pattern wins: return the shortest wraparound period. */
	for (p = 1; p < arrlen; p++) {
		if (nand_id_has_period(id_data, arrlen, p))
			return p;
	}

	/* Otherwise trailing zeros delimit the ID. */
	if (tail < arrlen)
		return tail;

	/* No pattern detected. */
	return arrlen;
}
4450
4451
4452static int nand_get_bits_per_cell(u8 cellinfo)
4453{
4454 int bits;
4455
4456 bits = cellinfo & NAND_CI_CELLTYPE_MSK;
4457 bits >>= NAND_CI_CELLTYPE_SHIFT;
4458 return bits + 1;
4459}
4460
4461
4462
4463
4464
4465
/*
 * Many new NAND chips share similar device ID codes. The rest of the
 * parameters are decoded from the generic "extended ID" pattern in the
 * 3rd and 4th ID bytes.
 */
void nand_decode_ext_id(struct nand_chip *chip)
{
	struct nand_memory_organization *memorg;
	struct mtd_info *mtd = nand_to_mtd(chip);
	int extid;
	u8 *id_data = chip->id.data;

	memorg = nanddev_get_memorg(&chip->base);

	/* The 3rd id byte holds MLC / multichip data */
	memorg->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
	/* The 4th id byte is the important one */
	extid = id_data[3];

	/* Calc pagesize */
	memorg->pagesize = 1024 << (extid & 0x03);
	mtd->writesize = memorg->pagesize;
	extid >>= 2;
	/* Calc oobsize */
	memorg->oobsize = (8 << (extid & 0x01)) * (mtd->writesize >> 9);
	mtd->oobsize = memorg->oobsize;
	extid >>= 2;
	/* Calc blocksize. Blocksize is multiples of 64KiB */
	memorg->pages_per_eraseblock = ((64 * 1024) << (extid & 0x03)) /
				       memorg->pagesize;
	mtd->erasesize = (64 * 1024) << (extid & 0x03);
	extid >>= 2;
	/* Get buswidth information */
	if (extid & 0x1)
		chip->options |= NAND_BUSWIDTH_16;
}
EXPORT_SYMBOL_GPL(nand_decode_ext_id);
4498
4499
4500
4501
4502
4503
4504static void nand_decode_id(struct nand_chip *chip, struct nand_flash_dev *type)
4505{
4506 struct mtd_info *mtd = nand_to_mtd(chip);
4507 struct nand_memory_organization *memorg;
4508
4509 memorg = nanddev_get_memorg(&chip->base);
4510
4511 memorg->pages_per_eraseblock = type->erasesize / type->pagesize;
4512 mtd->erasesize = type->erasesize;
4513 memorg->pagesize = type->pagesize;
4514 mtd->writesize = memorg->pagesize;
4515 memorg->oobsize = memorg->pagesize / 32;
4516 mtd->oobsize = memorg->oobsize;
4517
4518
4519 memorg->bits_per_cell = 1;
4520}
4521
4522
4523
4524
4525
4526
4527static void nand_decode_bbm_options(struct nand_chip *chip)
4528{
4529 struct mtd_info *mtd = nand_to_mtd(chip);
4530
4531
4532 if (mtd->writesize > 512 || (chip->options & NAND_BUSWIDTH_16))
4533 chip->badblockpos = NAND_BBM_POS_LARGE;
4534 else
4535 chip->badblockpos = NAND_BBM_POS_SMALL;
4536}
4537
4538static inline bool is_full_id_nand(struct nand_flash_dev *type)
4539{
4540 return type->id_len;
4541}
4542
/*
 * find_full_id_nand - match @type's full ID string against the chip's ID
 * bytes and, on a match, populate chip geometry from the table entry.
 *
 * Returns true on a successful match. NOTE(review): when kstrdup() fails,
 * false is returned even though memorg/mtd were already updated — callers
 * appear to treat false purely as "no match"; verify this is benign.
 */
static bool find_full_id_nand(struct nand_chip *chip,
			      struct nand_flash_dev *type)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_memory_organization *memorg;
	u8 *id_data = chip->id.data;

	memorg = nanddev_get_memorg(&chip->base);

	if (!strncmp(type->id, id_data, type->id_len)) {
		/* Geometry comes straight from the table entry. */
		memorg->pagesize = type->pagesize;
		mtd->writesize = memorg->pagesize;
		memorg->pages_per_eraseblock = type->erasesize /
					       type->pagesize;
		mtd->erasesize = type->erasesize;
		memorg->oobsize = type->oobsize;
		mtd->oobsize = memorg->oobsize;

		memorg->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
		memorg->eraseblocks_per_lun =
			DIV_ROUND_DOWN_ULL((u64)type->chipsize << 20,
					   memorg->pagesize *
					   memorg->pages_per_eraseblock);
		chip->options |= type->options;
		chip->base.eccreq.strength = NAND_ECC_STRENGTH(type);
		chip->base.eccreq.step_size = NAND_ECC_STEP(type);
		chip->onfi_timing_mode_default =
					type->onfi_timing_mode_default;

		chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
		if (!chip->parameters.model)
			return false;

		return true;
	}
	return false;
}
4580
4581
4582
4583
4584
4585
/*
 * Manufacturer detection. Only used when the NAND is not ONFI or JEDEC
 * compliant and could not be detected based on the full-ID method.
 */
static void nand_manufacturer_detect(struct nand_chip *chip)
{
	/*
	 * Try manufacturer detection if available and use
	 * nand_decode_ext_id() otherwise.
	 */
	if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
	    chip->manufacturer.desc->ops->detect) {
		struct nand_memory_organization *memorg;

		memorg = nanddev_get_memorg(&chip->base);

		/* The 3rd id byte holds MLC / multichip data */
		memorg->bits_per_cell = nand_get_bits_per_cell(chip->id.data[2]);
		chip->manufacturer.desc->ops->detect(chip);
	} else {
		nand_decode_ext_id(chip);
	}
}
4605
4606
4607
4608
4609
4610
4611
4612static int nand_manufacturer_init(struct nand_chip *chip)
4613{
4614 if (!chip->manufacturer.desc || !chip->manufacturer.desc->ops ||
4615 !chip->manufacturer.desc->ops->init)
4616 return 0;
4617
4618 return chip->manufacturer.desc->ops->init(chip);
4619}
4620
4621
4622
4623
4624
4625
4626
4627static void nand_manufacturer_cleanup(struct nand_chip *chip)
4628{
4629
4630 if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
4631 chip->manufacturer.desc->ops->cleanup)
4632 chip->manufacturer.desc->ops->cleanup(chip);
4633}
4634
4635static const char *
4636nand_manufacturer_name(const struct nand_manufacturer *manufacturer)
4637{
4638 return manufacturer ? manufacturer->name : "Unknown";
4639}
4640
4641
4642
4643
/*
 * Get the flash and manufacturer id and look up if the type is supported.
 */
static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
{
	const struct nand_manufacturer *manufacturer;
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_memory_organization *memorg;
	int busw, ret;
	u8 *id_data = chip->id.data;
	u8 maf_id, dev_id;
	u64 targetsize;

	/*
	 * Start by initializing memorg fields that might be left unassigned by
	 * the ID-based detection logic.
	 */
	memorg = nanddev_get_memorg(&chip->base);
	memorg->planes_per_lun = 1;
	memorg->luns_per_target = 1;

	/*
	 * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
	 * after power-up.
	 */
	ret = nand_reset(chip, 0);
	if (ret)
		return ret;

	/* Select the device */
	nand_select_target(chip, 0);

	/* Send the command for reading device ID */
	ret = nand_readid_op(chip, 0, id_data, 2);
	if (ret)
		return ret;

	/* Read manufacturer and device IDs */
	maf_id = id_data[0];
	dev_id = id_data[1];

	/*
	 * Try again to make sure, as some systems the bus-hold or other
	 * interface concerns can cause random data which looks like a
	 * possibly credible NAND flash to appear. If the two results do
	 * not match, ignore the device completely.
	 */

	/* Read the whole ID string this time */
	ret = nand_readid_op(chip, 0, id_data, sizeof(chip->id.data));
	if (ret)
		return ret;

	if (id_data[0] != maf_id || id_data[1] != dev_id) {
		pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",
			maf_id, dev_id, id_data[0], id_data[1]);
		return -ENODEV;
	}

	chip->id.len = nand_id_len(id_data, ARRAY_SIZE(chip->id.data));

	/* Try to identify manufacturer */
	manufacturer = nand_get_manufacturer(maf_id);
	chip->manufacturer.desc = manufacturer;

	if (!type)
		type = nand_flash_ids;

	/*
	 * Save the NAND_BUSWIDTH_16 flag before letting auto-detection logic
	 * override it.
	 * This is required to make sure initial NAND bus width set by the
	 * NAND controller driver is coherent with the real NAND bus width
	 * (extracted by auto-detection code).
	 */
	busw = chip->options & NAND_BUSWIDTH_16;

	/*
	 * The flag is only set (never cleared), reset it to its default value
	 * before starting auto-detection.
	 */
	chip->options &= ~NAND_BUSWIDTH_16;

	for (; type->name != NULL; type++) {
		if (is_full_id_nand(type)) {
			if (find_full_id_nand(chip, type))
				goto ident_done;
		} else if (dev_id == type->dev_id) {
			break;
		}
	}

	if (!type->name || !type->pagesize) {
		/* Check if the chip is ONFI compliant */
		ret = nand_onfi_detect(chip);
		if (ret < 0)
			return ret;
		else if (ret)
			goto ident_done;

		/* Check if the chip is JEDEC compliant */
		ret = nand_jedec_detect(chip);
		if (ret < 0)
			return ret;
		else if (ret)
			goto ident_done;
	}

	if (!type->name)
		return -ENODEV;

	chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
	if (!chip->parameters.model)
		return -ENOMEM;

	if (!type->pagesize)
		nand_manufacturer_detect(chip);
	else
		nand_decode_id(chip, type);

	/* Get chip options */
	chip->options |= type->options;

	memorg->eraseblocks_per_lun =
			DIV_ROUND_DOWN_ULL((u64)type->chipsize << 20,
					   memorg->pagesize *
					   memorg->pages_per_eraseblock);

ident_done:
	if (!mtd->name)
		mtd->name = chip->parameters.model;

	if (chip->options & NAND_BUSWIDTH_AUTO) {
		WARN_ON(busw & NAND_BUSWIDTH_16);
		nand_set_defaults(chip);
	} else if (busw != (chip->options & NAND_BUSWIDTH_16)) {
		/*
		 * Check, if buswidth is correct. Hardware drivers should set
		 * chip correct!
		 */
		pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
			maf_id, dev_id);
		pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
			mtd->name);
		pr_warn("bus width %d instead of %d bits\n", busw ? 16 : 8,
			(chip->options & NAND_BUSWIDTH_16) ? 16 : 8);
		ret = -EINVAL;

		goto free_detect_allocation;
	}

	nand_decode_bbm_options(chip);

	/* Calculate the address shift from the page size */
	chip->page_shift = ffs(mtd->writesize) - 1;
	/* Convert chipsize to number of pages per chip -1 */
	targetsize = nanddev_target_size(&chip->base);
	chip->pagemask = (targetsize >> chip->page_shift) - 1;

	chip->bbt_erase_shift = chip->phys_erase_shift =
		ffs(mtd->erasesize) - 1;
	if (targetsize & 0xffffffff)
		chip->chip_shift = ffs((unsigned)targetsize) - 1;
	else {
		chip->chip_shift = ffs((unsigned)(targetsize >> 32));
		chip->chip_shift += 32 - 1;
	}

	if (chip->chip_shift - chip->page_shift > 16)
		chip->options |= NAND_ROW_ADDR_3;

	chip->badblockbits = 8;

	nand_legacy_adjust_cmdfunc(chip);

	pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
		maf_id, dev_id);
	pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
		chip->parameters.model);
	pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n",
		(int)(targetsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
		mtd->erasesize >> 10, mtd->writesize, mtd->oobsize);
	return 0;

free_detect_allocation:
	kfree(chip->parameters.model);

	return ret;
}
4830
/* DT "nand-ecc-mode" property values, indexed by the NAND_ECC_* mode enum */
static const char * const nand_ecc_modes[] = {
	[NAND_ECC_NONE]		= "none",
	[NAND_ECC_SOFT]		= "soft",
	[NAND_ECC_HW]		= "hw",
	[NAND_ECC_HW_SYNDROME]	= "hw_syndrome",
	[NAND_ECC_HW_OOB_FIRST]	= "hw_oob_first",
	[NAND_ECC_ON_DIE]	= "on-die",
};
4839
4840static int of_get_nand_ecc_mode(struct device_node *np)
4841{
4842 const char *pm;
4843 int err, i;
4844
4845 err = of_property_read_string(np, "nand-ecc-mode", &pm);
4846 if (err < 0)
4847 return err;
4848
4849 for (i = 0; i < ARRAY_SIZE(nand_ecc_modes); i++)
4850 if (!strcasecmp(pm, nand_ecc_modes[i]))
4851 return i;
4852
4853
4854
4855
4856
4857
4858 if (!strcasecmp(pm, "soft_bch"))
4859 return NAND_ECC_SOFT;
4860
4861 return -ENODEV;
4862}
4863
/* DT "nand-ecc-algo" property values, indexed by the NAND_ECC_* algo enum */
static const char * const nand_ecc_algos[] = {
	[NAND_ECC_HAMMING]	= "hamming",
	[NAND_ECC_BCH]		= "bch",
	[NAND_ECC_RS]		= "rs",
};
4869
4870static int of_get_nand_ecc_algo(struct device_node *np)
4871{
4872 const char *pm;
4873 int err, i;
4874
4875 err = of_property_read_string(np, "nand-ecc-algo", &pm);
4876 if (!err) {
4877 for (i = NAND_ECC_HAMMING; i < ARRAY_SIZE(nand_ecc_algos); i++)
4878 if (!strcasecmp(pm, nand_ecc_algos[i]))
4879 return i;
4880 return -ENODEV;
4881 }
4882
4883
4884
4885
4886
4887 err = of_property_read_string(np, "nand-ecc-mode", &pm);
4888 if (err < 0)
4889 return err;
4890
4891 if (!strcasecmp(pm, "soft"))
4892 return NAND_ECC_HAMMING;
4893 else if (!strcasecmp(pm, "soft_bch"))
4894 return NAND_ECC_BCH;
4895
4896 return -ENODEV;
4897}
4898
4899static int of_get_nand_ecc_step_size(struct device_node *np)
4900{
4901 int ret;
4902 u32 val;
4903
4904 ret = of_property_read_u32(np, "nand-ecc-step-size", &val);
4905 return ret ? ret : val;
4906}
4907
4908static int of_get_nand_ecc_strength(struct device_node *np)
4909{
4910 int ret;
4911 u32 val;
4912
4913 ret = of_property_read_u32(np, "nand-ecc-strength", &val);
4914 return ret ? ret : val;
4915}
4916
4917static int of_get_nand_bus_width(struct device_node *np)
4918{
4919 u32 val;
4920
4921 if (of_property_read_u32(np, "nand-bus-width", &val))
4922 return 8;
4923
4924 switch (val) {
4925 case 8:
4926 case 16:
4927 return val;
4928 default:
4929 return -EIO;
4930 }
4931}
4932
/* True when DT requests the bad block table be stored on flash. */
static bool of_get_nand_on_flash_bbt(struct device_node *np)
{
	return of_property_read_bool(np, "nand-on-flash-bbt");
}
4937
4938static int nand_dt_init(struct nand_chip *chip)
4939{
4940 struct device_node *dn = nand_get_flash_node(chip);
4941 int ecc_mode, ecc_algo, ecc_strength, ecc_step;
4942
4943 if (!dn)
4944 return 0;
4945
4946 if (of_get_nand_bus_width(dn) == 16)
4947 chip->options |= NAND_BUSWIDTH_16;
4948
4949 if (of_property_read_bool(dn, "nand-is-boot-medium"))
4950 chip->options |= NAND_IS_BOOT_MEDIUM;
4951
4952 if (of_get_nand_on_flash_bbt(dn))
4953 chip->bbt_options |= NAND_BBT_USE_FLASH;
4954
4955 ecc_mode = of_get_nand_ecc_mode(dn);
4956 ecc_algo = of_get_nand_ecc_algo(dn);
4957 ecc_strength = of_get_nand_ecc_strength(dn);
4958 ecc_step = of_get_nand_ecc_step_size(dn);
4959
4960 if (ecc_mode >= 0)
4961 chip->ecc.mode = ecc_mode;
4962
4963 if (ecc_algo >= 0)
4964 chip->ecc.algo = ecc_algo;
4965
4966 if (ecc_strength >= 0)
4967 chip->ecc.strength = ecc_strength;
4968
4969 if (ecc_step > 0)
4970 chip->ecc.size = ecc_step;
4971
4972 if (of_property_read_bool(dn, "nand-ecc-maximize"))
4973 chip->ecc.options |= NAND_ECC_MAXIMIZE;
4974
4975 return 0;
4976}
4977
4978
4979
4980
4981
4982
4983
4984
4985
4986
4987
4988
4989
4990
4991
/*
 * nand_scan_ident - Scan for the NAND device
 * @chip: NAND chip object
 * @maxchips: number of chips to scan for
 * @table: alternative NAND ID table
 *
 * This is the first phase of the normal nand_scan() function. It reads the
 * flash ID and sets up MTD fields accordingly.
 */
static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips,
			   struct nand_flash_dev *table)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_memory_organization *memorg;
	int nand_maf_id, nand_dev_id;
	unsigned int i;
	int ret;

	memorg = nanddev_get_memorg(&chip->base);

	/* Assume all dies are deselected when we enter nand_scan_ident(). */
	chip->cur_cs = -1;

	mutex_init(&chip->lock);

	/* Enforce the right timings for reset/detection */
	onfi_fill_data_interface(chip, NAND_SDR_IFACE, 0);

	ret = nand_dt_init(chip);
	if (ret)
		return ret;

	if (!mtd->name && mtd->dev.parent)
		mtd->name = dev_name(mtd->dev.parent);

	/* Set the default functions */
	nand_set_defaults(chip);

	ret = nand_legacy_check_hooks(chip);
	if (ret)
		return ret;

	memorg->ntargets = maxchips;

	/* Read the flash type */
	ret = nand_detect(chip, table);
	if (ret) {
		if (!(chip->options & NAND_SCAN_SILENT_NODEV))
			pr_warn("No NAND device found\n");
		nand_deselect_target(chip);
		return ret;
	}

	nand_maf_id = chip->id.data[0];
	nand_dev_id = chip->id.data[1];

	nand_deselect_target(chip);

	/* Check for a chip array */
	for (i = 1; i < maxchips; i++) {
		u8 id[2];

		/* See comment in nand_detect for reset */
		ret = nand_reset(chip, i);
		if (ret)
			break;

		nand_select_target(chip, i);
		/* Send the command for reading device ID */
		ret = nand_readid_op(chip, 0, id, sizeof(id));
		if (ret)
			break;
		/* Read manufacturer and device IDs */
		if (nand_maf_id != id[0] || nand_dev_id != id[1]) {
			nand_deselect_target(chip);
			break;
		}
		nand_deselect_target(chip);
	}
	if (i > 1)
		pr_info("%d chips detected\n", i);

	/* Store the number of chips and calc total size for mtd */
	memorg->ntargets = i;
	mtd->size = i * nanddev_target_size(&chip->base);

	return 0;
}
5071
/* Free everything allocated during the identification phase. */
static void nand_scan_ident_cleanup(struct nand_chip *chip)
{
	kfree(chip->parameters.model);
	kfree(chip->parameters.onfi);
}
5077
/*
 * Fill in the software ECC helpers (Hamming or BCH) and pick sane default
 * step size/strength when the board driver didn't supply them.
 */
static int nand_set_ecc_soft_ops(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	if (WARN_ON(ecc->mode != NAND_ECC_SOFT))
		return -EINVAL;

	switch (ecc->algo) {
	case NAND_ECC_HAMMING:
		ecc->calculate = nand_calculate_ecc;
		ecc->correct = nand_correct_data;
		ecc->read_page = nand_read_page_swecc;
		ecc->read_subpage = nand_read_subpage;
		ecc->write_page = nand_write_page_swecc;
		ecc->read_page_raw = nand_read_page_raw;
		ecc->write_page_raw = nand_write_page_raw;
		ecc->read_oob = nand_read_oob_std;
		ecc->write_oob = nand_write_oob_std;
		if (!ecc->size)
			ecc->size = 256;
		ecc->bytes = 3;
		ecc->strength = 1;

		if (IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC))
			ecc->options |= NAND_ECC_SOFT_HAMMING_SM_ORDER;

		return 0;
	case NAND_ECC_BCH:
		if (!mtd_nand_has_bch()) {
			WARN(1, "CONFIG_MTD_NAND_ECC_SW_BCH not enabled\n");
			return -EINVAL;
		}
		ecc->calculate = nand_bch_calculate_ecc;
		ecc->correct = nand_bch_correct_data;
		ecc->read_page = nand_read_page_swecc;
		ecc->read_subpage = nand_read_subpage;
		ecc->write_page = nand_write_page_swecc;
		ecc->read_page_raw = nand_read_page_raw;
		ecc->write_page_raw = nand_write_page_raw;
		ecc->read_oob = nand_read_oob_std;
		ecc->write_oob = nand_write_oob_std;

		/*
		 * Board driver should supply ecc.size and ecc.strength
		 * values to select how many bits are correctable.
		 * Otherwise, default to 4 bits for large page devices.
		 */
		if (!ecc->size && (mtd->oobsize >= 64)) {
			ecc->size = 512;
			ecc->strength = 4;
		}

		/*
		 * If no ECC placement scheme was provided, pick up the
		 * default large page one.
		 */
		if (!mtd->ooblayout) {
			/* handle large page devices only */
			if (mtd->oobsize < 64) {
				WARN(1, "OOB layout is required when using software BCH on small pages\n");
				return -EINVAL;
			}

			mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);

		}

		/*
		 * We can only maximize ECC config when the default layout is
		 * used, otherwise we don't know how many bytes can really be
		 * used.
		 */
		if (mtd->ooblayout == &nand_ooblayout_lp_ops &&
		    ecc->options & NAND_ECC_MAXIMIZE) {
			int steps, bytes;

			/* Always prefer 1k blocks over 512bytes ones */
			ecc->size = 1024;
			steps = mtd->writesize / ecc->size;

			/* Reserve 2 bytes for the BBM */
			bytes = (mtd->oobsize - 2) / steps;
			ecc->strength = bytes * 8 / fls(8 * ecc->size);
		}

		/* ecc->bytes is recomputed by nand_bch_init() */
		ecc->bytes = 0;
		ecc->priv = nand_bch_init(mtd);
		if (!ecc->priv) {
			WARN(1, "BCH ECC initialization failed!\n");
			return -EINVAL;
		}
		return 0;
	default:
		WARN(1, "Unsupported ECC algorithm!\n");
		return -EINVAL;
	}
}
5177
5178
5179
5180
5181
5182
5183
5184
5185
5186
5187
/*
 * nand_check_ecc_caps - check the sanity of preset ECC settings
 * @chip: nand chip info structure
 * @caps: ECC caps info structure
 * @oobavail: OOB size that the ECC engine can use
 *
 * When ECC step size and strength are already set, check if they are supported
 * by the controller and if the calculated ECC bytes fit within the chip's OOB.
 * On success, the calculated ECC bytes are set. Caller (nand_ecc_choose_conf)
 * guarantees chip->ecc.size is non-zero before the division below.
 */
static int
nand_check_ecc_caps(struct nand_chip *chip,
		    const struct nand_ecc_caps *caps, int oobavail)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	const struct nand_ecc_step_info *stepinfo;
	int preset_step = chip->ecc.size;
	int preset_strength = chip->ecc.strength;
	int ecc_bytes, nsteps = mtd->writesize / preset_step;
	int i, j;

	for (i = 0; i < caps->nstepinfos; i++) {
		stepinfo = &caps->stepinfos[i];

		if (stepinfo->stepsize != preset_step)
			continue;

		for (j = 0; j < stepinfo->nstrengths; j++) {
			if (stepinfo->strengths[j] != preset_strength)
				continue;

			ecc_bytes = caps->calc_ecc_bytes(preset_step,
							 preset_strength);
			if (WARN_ON_ONCE(ecc_bytes < 0))
				return ecc_bytes;

			if (ecc_bytes * nsteps > oobavail) {
				pr_err("ECC (step, strength) = (%d, %d) does not fit in OOB",
				       preset_step, preset_strength);
				return -ENOSPC;
			}

			chip->ecc.bytes = ecc_bytes;

			return 0;
		}
	}

	pr_err("ECC (step, strength) = (%d, %d) not supported on this controller",
	       preset_step, preset_strength);

	return -ENOTSUPP;
}
5231
5232
5233
5234
5235
5236
5237
5238
5239
5240
5241
/*
 * nand_match_ecc_req - meet the chip's requirement with least ECC bytes
 * @chip: nand chip info structure
 * @caps: ECC engine caps info structure
 * @oobavail: OOB size that the ECC engine can use
 *
 * If a chip's ECC requirement is provided, try to meet it with the least
 * number of ECC bytes (i.e. with the largest number of OOB-free bytes).
 * On success, the chosen ECC settings are set.
 */
static int
nand_match_ecc_req(struct nand_chip *chip,
		   const struct nand_ecc_caps *caps, int oobavail)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	const struct nand_ecc_step_info *stepinfo;
	int req_step = chip->base.eccreq.step_size;
	int req_strength = chip->base.eccreq.strength;
	int req_corr, step_size, strength, nsteps, ecc_bytes, ecc_bytes_total;
	/* best_* are only read when best_ecc_bytes_total was lowered */
	int best_step, best_strength, best_ecc_bytes;
	int best_ecc_bytes_total = INT_MAX;
	int i, j;

	/* No information provided by the NAND chip */
	if (!req_step || !req_strength)
		return -ENOTSUPP;

	/* number of correctable bits the chip requires in a page */
	req_corr = mtd->writesize / req_step * req_strength;

	for (i = 0; i < caps->nstepinfos; i++) {
		stepinfo = &caps->stepinfos[i];
		step_size = stepinfo->stepsize;

		for (j = 0; j < stepinfo->nstrengths; j++) {
			strength = stepinfo->strengths[j];

			/*
			 * If both step size and strength are smaller than the
			 * chip's requirement, it is not easy to compare the
			 * resulted reliability.
			 */
			if (step_size < req_step && strength < req_strength)
				continue;

			if (mtd->writesize % step_size)
				continue;

			nsteps = mtd->writesize / step_size;

			ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
			if (WARN_ON_ONCE(ecc_bytes < 0))
				continue;
			ecc_bytes_total = ecc_bytes * nsteps;

			if (ecc_bytes_total > oobavail ||
			    strength * nsteps < req_corr)
				continue;

			/*
			 * We assume the best is to meet the chip's requirement
			 * with the least number of ECC bytes.
			 */
			if (ecc_bytes_total < best_ecc_bytes_total) {
				best_ecc_bytes_total = ecc_bytes_total;
				best_step = step_size;
				best_strength = strength;
				best_ecc_bytes = ecc_bytes;
			}
		}
	}

	if (best_ecc_bytes_total == INT_MAX)
		return -ENOTSUPP;

	chip->ecc.size = best_step;
	chip->ecc.strength = best_strength;
	chip->ecc.bytes = best_ecc_bytes;

	return 0;
}
5313
5314
5315
5316
5317
5318
5319
5320
5321
5322
/*
 * nand_maximize_ecc - choose the max ECC strength available
 * @chip: nand chip info structure
 * @caps: ECC engine caps info structure
 * @oobavail: OOB size that the ECC engine can use
 *
 * Choose the max ECC strength that is supported on the controller, and can fit
 * within the chip's OOB. On success, the chosen ECC settings are set.
 */
static int
nand_maximize_ecc(struct nand_chip *chip,
		  const struct nand_ecc_caps *caps, int oobavail)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	const struct nand_ecc_step_info *stepinfo;
	int step_size, strength, nsteps, ecc_bytes, corr;
	int best_corr = 0;
	int best_step = 0;
	/* only read when best_corr was raised above 0 */
	int best_strength, best_ecc_bytes;
	int i, j;

	for (i = 0; i < caps->nstepinfos; i++) {
		stepinfo = &caps->stepinfos[i];
		step_size = stepinfo->stepsize;

		/* If chip->ecc.size is already set, respect it */
		if (chip->ecc.size && step_size != chip->ecc.size)
			continue;

		for (j = 0; j < stepinfo->nstrengths; j++) {
			strength = stepinfo->strengths[j];

			if (mtd->writesize % step_size)
				continue;

			nsteps = mtd->writesize / step_size;

			ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
			if (WARN_ON_ONCE(ecc_bytes < 0))
				continue;

			if (ecc_bytes * nsteps > oobavail)
				continue;

			corr = strength * nsteps;

			/*
			 * If the number of correctable bitflips is the same,
			 * bigger step_size has more reliability.
			 */
			if (corr > best_corr ||
			    (corr == best_corr && step_size > best_step)) {
				best_corr = corr;
				best_step = step_size;
				best_strength = strength;
				best_ecc_bytes = ecc_bytes;
			}
		}
	}

	if (!best_corr)
		return -ENOTSUPP;

	chip->ecc.size = best_step;
	chip->ecc.strength = best_strength;
	chip->ecc.bytes = best_ecc_bytes;

	return 0;
}
5383
5384
5385
5386
5387
5388
5389
5390
5391
5392
5393
5394
5395
5396
5397
5398
5399
5400
5401int nand_ecc_choose_conf(struct nand_chip *chip,
5402 const struct nand_ecc_caps *caps, int oobavail)
5403{
5404 struct mtd_info *mtd = nand_to_mtd(chip);
5405
5406 if (WARN_ON(oobavail < 0 || oobavail > mtd->oobsize))
5407 return -EINVAL;
5408
5409 if (chip->ecc.size && chip->ecc.strength)
5410 return nand_check_ecc_caps(chip, caps, oobavail);
5411
5412 if (chip->ecc.options & NAND_ECC_MAXIMIZE)
5413 return nand_maximize_ecc(chip, caps, oobavail);
5414
5415 if (!nand_match_ecc_req(chip, caps, oobavail))
5416 return 0;
5417
5418 return nand_maximize_ecc(chip, caps, oobavail);
5419}
5420EXPORT_SYMBOL_GPL(nand_ecc_choose_conf);
5421
5422
5423
5424
5425
5426
5427
5428
5429
5430
5431
5432
5433
5434
5435
5436static bool nand_ecc_strength_good(struct nand_chip *chip)
5437{
5438 struct mtd_info *mtd = nand_to_mtd(chip);
5439 struct nand_ecc_ctrl *ecc = &chip->ecc;
5440 int corr, ds_corr;
5441
5442 if (ecc->size == 0 || chip->base.eccreq.step_size == 0)
5443
5444 return true;
5445
5446
5447
5448
5449
5450 corr = (mtd->writesize * ecc->strength) / ecc->size;
5451 ds_corr = (mtd->writesize * chip->base.eccreq.strength) /
5452 chip->base.eccreq.step_size;
5453
5454 return corr >= ds_corr && ecc->strength >= chip->base.eccreq.strength;
5455}
5456
5457static int rawnand_erase(struct nand_device *nand, const struct nand_pos *pos)
5458{
5459 struct nand_chip *chip = container_of(nand, struct nand_chip,
5460 base);
5461 unsigned int eb = nanddev_pos_to_row(nand, pos);
5462 int ret;
5463
5464 eb >>= nand->rowconv.eraseblock_addr_shift;
5465
5466 nand_select_target(chip, pos->target);
5467 ret = nand_erase_op(chip, eb);
5468 nand_deselect_target(chip);
5469
5470 return ret;
5471}
5472
5473static int rawnand_markbad(struct nand_device *nand,
5474 const struct nand_pos *pos)
5475{
5476 struct nand_chip *chip = container_of(nand, struct nand_chip,
5477 base);
5478
5479 return nand_markbad_bbm(chip, nanddev_pos_to_offs(nand, pos));
5480}
5481
5482static bool rawnand_isbad(struct nand_device *nand, const struct nand_pos *pos)
5483{
5484 struct nand_chip *chip = container_of(nand, struct nand_chip,
5485 base);
5486 int ret;
5487
5488 nand_select_target(chip, pos->target);
5489 ret = nand_isbad_bbm(chip, nanddev_pos_to_offs(nand, pos));
5490 nand_deselect_target(chip);
5491
5492 return ret;
5493}
5494
/* Generic NAND framework hooks for raw NAND chips */
static const struct nand_ops rawnand_ops = {
	.erase = rawnand_erase,
	.markbad = rawnand_markbad,
	.isbad = rawnand_isbad,
};
5500
5501
5502
5503
5504
5505
5506
5507
5508
/**
 * nand_scan_tail - Scan for the NAND device
 * @chip: NAND chip object
 *
 * This is the second phase of the normal nand_scan() function. It fills out
 * all the uninitialized function pointers with the defaults and scans for a
 * bad block table if appropriate.
 */
static int nand_scan_tail(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int ret, i;

	/* New bad blocks should be marked in OOB, flash-based BBT, or both */
	if (WARN_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
		    !(chip->bbt_options & NAND_BBT_USE_FLASH))) {
		return -EINVAL;
	}

	chip->data_buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
	if (!chip->data_buf)
		return -ENOMEM;

	/*
	 * FIXME: some NAND manufacturer drivers expect the first die to be
	 * selected when manufacturer->init() is called. They should be fixed
	 * to explicitly select the relevant die when interacting with the
	 * NAND chip.
	 */
	nand_select_target(chip, 0);
	ret = nand_manufacturer_init(chip);
	nand_deselect_target(chip);
	if (ret)
		goto err_free_buf;

	/* Set the internal oob buffer location, just after the page data */
	chip->oob_poi = chip->data_buf + mtd->writesize;

	/*
	 * If no default placement scheme is given, select an appropriate one.
	 */
	if (!mtd->ooblayout &&
	    !(ecc->mode == NAND_ECC_SOFT && ecc->algo == NAND_ECC_BCH)) {
		switch (mtd->oobsize) {
		case 8:
		case 16:
			mtd_set_ooblayout(mtd, &nand_ooblayout_sp_ops);
			break;
		case 64:
		case 128:
			mtd_set_ooblayout(mtd, &nand_ooblayout_lp_hamming_ops);
			break;
		default:
			/*
			 * Expose the whole OOB area to users if ECC_NONE
			 * is passed. We could do that for all kind of
			 * ->oobsize, but we must keep the old large/small
			 * page with ECC layout when ->oobsize <= 128 for
			 * compatibility reasons.
			 */
			if (ecc->mode == NAND_ECC_NONE) {
				mtd_set_ooblayout(mtd,
						&nand_ooblayout_lp_ops);
				break;
			}

			WARN(1, "No oob scheme defined for oobsize %d\n",
				mtd->oobsize);
			ret = -EINVAL;
			goto err_nand_manuf_cleanup;
		}
	}

	/*
	 * Check ECC mode, default to software if 3byte/512byte hardware ECC is
	 * selected and we have 256 byte pagesize fallback to software ECC
	 */

	switch (ecc->mode) {
	case NAND_ECC_HW_OOB_FIRST:
		/* Similar to NAND_ECC_HW, but a separate read_page handle */
		if (!ecc->calculate || !ecc->correct || !ecc->hwctl) {
			WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
			ret = -EINVAL;
			goto err_nand_manuf_cleanup;
		}
		if (!ecc->read_page)
			ecc->read_page = nand_read_page_hwecc_oob_first;

		/* fall through */
	case NAND_ECC_HW:
		/* Use standard hwecc read page function? */
		if (!ecc->read_page)
			ecc->read_page = nand_read_page_hwecc;
		if (!ecc->write_page)
			ecc->write_page = nand_write_page_hwecc;
		if (!ecc->read_page_raw)
			ecc->read_page_raw = nand_read_page_raw;
		if (!ecc->write_page_raw)
			ecc->write_page_raw = nand_write_page_raw;
		if (!ecc->read_oob)
			ecc->read_oob = nand_read_oob_std;
		if (!ecc->write_oob)
			ecc->write_oob = nand_write_oob_std;
		if (!ecc->read_subpage)
			ecc->read_subpage = nand_read_subpage;
		if (!ecc->write_subpage && ecc->hwctl && ecc->calculate)
			ecc->write_subpage = nand_write_subpage_hwecc;

		/* fall through */
	case NAND_ECC_HW_SYNDROME:
		if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) &&
		    (!ecc->read_page ||
		     ecc->read_page == nand_read_page_hwecc ||
		     !ecc->write_page ||
		     ecc->write_page == nand_write_page_hwecc)) {
			WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
			ret = -EINVAL;
			goto err_nand_manuf_cleanup;
		}
		/* Use standard syndrome read/write page function? */
		if (!ecc->read_page)
			ecc->read_page = nand_read_page_syndrome;
		if (!ecc->write_page)
			ecc->write_page = nand_write_page_syndrome;
		if (!ecc->read_page_raw)
			ecc->read_page_raw = nand_read_page_raw_syndrome;
		if (!ecc->write_page_raw)
			ecc->write_page_raw = nand_write_page_raw_syndrome;
		if (!ecc->read_oob)
			ecc->read_oob = nand_read_oob_syndrome;
		if (!ecc->write_oob)
			ecc->write_oob = nand_write_oob_syndrome;

		if (mtd->writesize >= ecc->size) {
			if (!ecc->strength) {
				WARN(1, "Driver must set ecc.strength when using hardware ECC\n");
				ret = -EINVAL;
				goto err_nand_manuf_cleanup;
			}
			break;
		}
		pr_warn("%d byte HW ECC not possible on %d byte page size, fallback to SW ECC\n",
			ecc->size, mtd->writesize);
		ecc->mode = NAND_ECC_SOFT;
		ecc->algo = NAND_ECC_HAMMING;

		/* fall through */
	case NAND_ECC_SOFT:
		/* note: nand_set_ecc_soft_ops() only fails with -EINVAL */
		ret = nand_set_ecc_soft_ops(chip);
		if (ret) {
			ret = -EINVAL;
			goto err_nand_manuf_cleanup;
		}
		break;

	case NAND_ECC_ON_DIE:
		if (!ecc->read_page || !ecc->write_page) {
			WARN(1, "No ECC functions supplied; on-die ECC not possible\n");
			ret = -EINVAL;
			goto err_nand_manuf_cleanup;
		}
		if (!ecc->read_oob)
			ecc->read_oob = nand_read_oob_std;
		if (!ecc->write_oob)
			ecc->write_oob = nand_write_oob_std;
		break;

	case NAND_ECC_NONE:
		pr_warn("NAND_ECC_NONE selected by board driver. This is not recommended!\n");
		ecc->read_page = nand_read_page_raw;
		ecc->write_page = nand_write_page_raw;
		ecc->read_oob = nand_read_oob_std;
		ecc->read_page_raw = nand_read_page_raw;
		ecc->write_page_raw = nand_write_page_raw;
		ecc->write_oob = nand_write_oob_std;
		ecc->size = mtd->writesize;
		ecc->bytes = 0;
		ecc->strength = 0;
		break;

	default:
		WARN(1, "Invalid NAND_ECC_MODE %d\n", ecc->mode);
		ret = -EINVAL;
		goto err_nand_manuf_cleanup;
	}

	if (ecc->correct || ecc->calculate) {
		ecc->calc_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
		ecc->code_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
		if (!ecc->calc_buf || !ecc->code_buf) {
			ret = -ENOMEM;
			goto err_nand_manuf_cleanup;
		}
	}

	/* For many systems, the standard OOB write also works for raw */
	if (!ecc->read_oob_raw)
		ecc->read_oob_raw = ecc->read_oob;
	if (!ecc->write_oob_raw)
		ecc->write_oob_raw = ecc->write_oob;

	/* propagate ecc info to mtd_info */
	mtd->ecc_strength = ecc->strength;
	mtd->ecc_step_size = ecc->size;

	/*
	 * Set the number of read / write steps for one page depending on ECC
	 * mode.
	 */
	ecc->steps = mtd->writesize / ecc->size;
	if (ecc->steps * ecc->size != mtd->writesize) {
		WARN(1, "Invalid ECC parameters\n");
		ret = -EINVAL;
		goto err_nand_manuf_cleanup;
	}
	ecc->total = ecc->steps * ecc->bytes;
	if (ecc->total > mtd->oobsize) {
		WARN(1, "Total number of ECC bytes exceeded oobsize\n");
		ret = -EINVAL;
		goto err_nand_manuf_cleanup;
	}

	/*
	 * The number of bytes available for a client to place data into
	 * the out of band area.
	 */
	ret = mtd_ooblayout_count_freebytes(mtd);
	if (ret < 0)
		ret = 0;

	mtd->oobavail = ret;

	/* ECC sanity check: warn if it's too weak */
	if (!nand_ecc_strength_good(chip))
		pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
			mtd->name);

	/* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
	if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) {
		switch (ecc->steps) {
		case 2:
			mtd->subpage_sft = 1;
			break;
		case 4:
		case 8:
		case 16:
			mtd->subpage_sft = 2;
			break;
		}
	}
	chip->subpagesize = mtd->writesize >> mtd->subpage_sft;

	/* Invalidate the pagebuffer reference */
	chip->pagecache.page = -1;

	/* Large page NAND with SOFT_ECC should support subpage reads */
	switch (ecc->mode) {
	case NAND_ECC_SOFT:
		if (chip->page_shift > 9)
			chip->options |= NAND_SUBPAGE_READ;
		break;

	default:
		break;
	}

	ret = nanddev_init(&chip->base, &rawnand_ops, mtd->owner);
	if (ret)
		goto err_nand_manuf_cleanup;

	/* Adjust the MTD_CAP_ flags when NAND_ROM is set. */
	if (chip->options & NAND_ROM)
		mtd->flags = MTD_CAP_ROM;

	/* Fill in remaining MTD driver data */
	mtd->_erase = nand_erase;
	mtd->_point = NULL;
	mtd->_unpoint = NULL;
	mtd->_panic_write = panic_nand_write;
	mtd->_read_oob = nand_read_oob;
	mtd->_write_oob = nand_write_oob;
	mtd->_sync = nand_sync;
	mtd->_lock = NULL;
	mtd->_unlock = NULL;
	mtd->_suspend = nand_suspend;
	mtd->_resume = nand_resume;
	mtd->_reboot = nand_shutdown;
	mtd->_block_isreserved = nand_block_isreserved;
	mtd->_block_isbad = nand_block_isbad;
	mtd->_block_markbad = nand_block_markbad;
	mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;

	/*
	 * Initialize bitflip_threshold to its default prior scan_bbt() call.
	 * scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be
	 * properly set.
	 */
	if (!mtd->bitflip_threshold)
		mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);

	/* Initialize the ->data_interface field. */
	ret = nand_init_data_interface(chip);
	if (ret)
		goto err_nanddev_cleanup;

	/* Enter fastest possible mode on all dies. */
	for (i = 0; i < nanddev_ntargets(&chip->base); i++) {
		ret = nand_setup_data_interface(chip, i);
		if (ret)
			goto err_nanddev_cleanup;
	}

	/* Check, if we should skip the bad block table scan */
	if (chip->options & NAND_SKIP_BBTSCAN)
		return 0;

	/* Build bad block table */
	ret = nand_create_bbt(chip);
	if (ret)
		goto err_nanddev_cleanup;

	return 0;


err_nanddev_cleanup:
	nanddev_cleanup(&chip->base);

err_nand_manuf_cleanup:
	nand_manufacturer_cleanup(chip);

err_free_buf:
	kfree(chip->data_buf);
	kfree(ecc->code_buf);
	kfree(ecc->calc_buf);

	return ret;
}
5840
5841static int nand_attach(struct nand_chip *chip)
5842{
5843 if (chip->controller->ops && chip->controller->ops->attach_chip)
5844 return chip->controller->ops->attach_chip(chip);
5845
5846 return 0;
5847}
5848
5849static void nand_detach(struct nand_chip *chip)
5850{
5851 if (chip->controller->ops && chip->controller->ops->detach_chip)
5852 chip->controller->ops->detach_chip(chip);
5853}
5854
5855
5856
5857
5858
5859
5860
5861
5862
5863
5864
/**
 * nand_scan_with_ids - [NAND Interface] Scan for the NAND device
 * @chip: NAND chip object
 * @maxchips: number of chips to scan for.
 * @ids: optional flash IDs table
 *
 * This fills out all the uninitialized function pointers with the defaults.
 * The flash ID is read and the mtd/chip structures are filled with the
 * appropriate values.
 */
int nand_scan_with_ids(struct nand_chip *chip, unsigned int maxchips,
		       struct nand_flash_dev *ids)
{
	int ret;

	if (!maxchips)
		return -EINVAL;

	ret = nand_scan_ident(chip, maxchips, ids);
	if (ret)
		return ret;

	ret = nand_attach(chip);
	if (ret)
		goto cleanup_ident;

	ret = nand_scan_tail(chip);
	if (ret)
		goto detach_chip;

	return 0;

detach_chip:
	nand_detach(chip);
cleanup_ident:
	nand_scan_ident_cleanup(chip);

	return ret;
}
EXPORT_SYMBOL(nand_scan_with_ids);
5895
5896
5897
5898
5899
/**
 * nand_cleanup - [NAND Interface] Free resources held by the NAND device
 * @chip: NAND chip object
 */
void nand_cleanup(struct nand_chip *chip)
{
	if (chip->ecc.mode == NAND_ECC_SOFT &&
	    chip->ecc.algo == NAND_ECC_BCH)
		nand_bch_free((struct nand_bch_control *)chip->ecc.priv);

	/* Free bad block table memory */
	kfree(chip->bbt);
	kfree(chip->data_buf);
	kfree(chip->ecc.code_buf);
	kfree(chip->ecc.calc_buf);

	/* Free bad block descriptor memory */
	if (chip->badblock_pattern && chip->badblock_pattern->options
			& NAND_BBT_DYNAMICSTRUCT)
		kfree(chip->badblock_pattern);

	/* Free manufacturer priv data. */
	nand_manufacturer_cleanup(chip);

	/* Free controller specific allocations after chip identification */
	nand_detach(chip);

	/* Free identification phase allocations */
	nand_scan_ident_cleanup(chip);
}

EXPORT_SYMBOL_GPL(nand_cleanup);
5928
5929
5930
5931
5932
5933
/**
 * nand_release - [NAND Interface] Unregister the MTD device and free resources
 *		  held by the NAND device
 * @chip: NAND chip object
 */
void nand_release(struct nand_chip *chip)
{
	mtd_device_unregister(nand_to_mtd(chip));
	nand_cleanup(chip);
}
EXPORT_SYMBOL_GPL(nand_release);
5940
5941MODULE_LICENSE("GPL");
5942MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>");
5943MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
5944MODULE_DESCRIPTION("Generic NAND flash driver code");
5945