1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
27
28#include <linux/module.h>
29#include <linux/delay.h>
30#include <linux/errno.h>
31#include <linux/err.h>
32#include <linux/sched.h>
33#include <linux/slab.h>
34#include <linux/mm.h>
35#include <linux/types.h>
36#include <linux/mtd/mtd.h>
37#include <linux/mtd/nand_ecc.h>
38#include <linux/mtd/nand_bch.h>
39#include <linux/interrupt.h>
40#include <linux/bitops.h>
41#include <linux/io.h>
42#include <linux/mtd/partitions.h>
43#include <linux/of.h>
44#include <linux/gpio/consumer.h>
45
46#include "internals.h"
47
48
49static int nand_ooblayout_ecc_sp(struct mtd_info *mtd, int section,
50 struct mtd_oob_region *oobregion)
51{
52 struct nand_chip *chip = mtd_to_nand(mtd);
53 struct nand_ecc_ctrl *ecc = &chip->ecc;
54
55 if (section > 1)
56 return -ERANGE;
57
58 if (!section) {
59 oobregion->offset = 0;
60 if (mtd->oobsize == 16)
61 oobregion->length = 4;
62 else
63 oobregion->length = 3;
64 } else {
65 if (mtd->oobsize == 8)
66 return -ERANGE;
67
68 oobregion->offset = 6;
69 oobregion->length = ecc->total - 4;
70 }
71
72 return 0;
73}
74
75static int nand_ooblayout_free_sp(struct mtd_info *mtd, int section,
76 struct mtd_oob_region *oobregion)
77{
78 if (section > 1)
79 return -ERANGE;
80
81 if (mtd->oobsize == 16) {
82 if (section)
83 return -ERANGE;
84
85 oobregion->length = 8;
86 oobregion->offset = 8;
87 } else {
88 oobregion->length = 2;
89 if (!section)
90 oobregion->offset = 3;
91 else
92 oobregion->offset = 6;
93 }
94
95 return 0;
96}
97
/* Default OOB layout for small-page (<= 512-byte) NAND chips. */
const struct mtd_ooblayout_ops nand_ooblayout_sp_ops = {
	.ecc = nand_ooblayout_ecc_sp,
	.free = nand_ooblayout_free_sp,
};
EXPORT_SYMBOL_GPL(nand_ooblayout_sp_ops);
103
104static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section,
105 struct mtd_oob_region *oobregion)
106{
107 struct nand_chip *chip = mtd_to_nand(mtd);
108 struct nand_ecc_ctrl *ecc = &chip->ecc;
109
110 if (section || !ecc->total)
111 return -ERANGE;
112
113 oobregion->length = ecc->total;
114 oobregion->offset = mtd->oobsize - oobregion->length;
115
116 return 0;
117}
118
119static int nand_ooblayout_free_lp(struct mtd_info *mtd, int section,
120 struct mtd_oob_region *oobregion)
121{
122 struct nand_chip *chip = mtd_to_nand(mtd);
123 struct nand_ecc_ctrl *ecc = &chip->ecc;
124
125 if (section)
126 return -ERANGE;
127
128 oobregion->length = mtd->oobsize - ecc->total - 2;
129 oobregion->offset = 2;
130
131 return 0;
132}
133
/* Default OOB layout for large-page (> 512-byte) NAND chips. */
const struct mtd_ooblayout_ops nand_ooblayout_lp_ops = {
	.ecc = nand_ooblayout_ecc_lp,
	.free = nand_ooblayout_free_lp,
};
EXPORT_SYMBOL_GPL(nand_ooblayout_lp_ops);
139
140
141
142
143
144static int nand_ooblayout_ecc_lp_hamming(struct mtd_info *mtd, int section,
145 struct mtd_oob_region *oobregion)
146{
147 struct nand_chip *chip = mtd_to_nand(mtd);
148 struct nand_ecc_ctrl *ecc = &chip->ecc;
149
150 if (section)
151 return -ERANGE;
152
153 switch (mtd->oobsize) {
154 case 64:
155 oobregion->offset = 40;
156 break;
157 case 128:
158 oobregion->offset = 80;
159 break;
160 default:
161 return -EINVAL;
162 }
163
164 oobregion->length = ecc->total;
165 if (oobregion->offset + oobregion->length > mtd->oobsize)
166 return -ERANGE;
167
168 return 0;
169}
170
171static int nand_ooblayout_free_lp_hamming(struct mtd_info *mtd, int section,
172 struct mtd_oob_region *oobregion)
173{
174 struct nand_chip *chip = mtd_to_nand(mtd);
175 struct nand_ecc_ctrl *ecc = &chip->ecc;
176 int ecc_offset = 0;
177
178 if (section < 0 || section > 1)
179 return -ERANGE;
180
181 switch (mtd->oobsize) {
182 case 64:
183 ecc_offset = 40;
184 break;
185 case 128:
186 ecc_offset = 80;
187 break;
188 default:
189 return -EINVAL;
190 }
191
192 if (section == 0) {
193 oobregion->offset = 2;
194 oobregion->length = ecc_offset - 2;
195 } else {
196 oobregion->offset = ecc_offset + ecc->total;
197 oobregion->length = mtd->oobsize - oobregion->offset;
198 }
199
200 return 0;
201}
202
/* OOB layout for large-page chips using 1-bit (Hamming) software ECC. */
static const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = {
	.ecc = nand_ooblayout_ecc_lp_hamming,
	.free = nand_ooblayout_free_lp_hamming,
};
207
/*
 * Pairing scheme where pages are paired at a distance of 3 (page N with
 * page N+3), shrinking to 2 at the end of the block. Maps an absolute
 * page number to its (pair, group) descriptor.
 */
static int nand_pairing_dist3_get_info(struct mtd_info *mtd, int page,
				       struct mtd_pairing_info *info)
{
	int lastpage = (mtd->erasesize / mtd->writesize) - 1;
	int dist = 3;

	/* The last page of the block pairs at distance 2, not 3. */
	if (page == lastpage)
		dist = 2;

	/* Page 0 and all odd pages form the first half of a pair. */
	if (!page || (page & 1)) {
		info->group = 0;
		info->pair = (page + 1) / 2;
	} else {
		/* Even pages (except 0) are the second half of an earlier pair. */
		info->group = 1;
		info->pair = (page + 1 - dist) / 2;
	}

	return 0;
}
227
/*
 * Inverse of nand_pairing_dist3_get_info(): convert a (pair, group)
 * descriptor back to an absolute page number inside the eraseblock.
 * Returns the page number, or -EINVAL if the descriptor points past the
 * end of the block.
 */
static int nand_pairing_dist3_get_wunit(struct mtd_info *mtd,
					const struct mtd_pairing_info *info)
{
	int lastpair = ((mtd->erasesize / mtd->writesize) - 1) / 2;
	int page = info->pair * 2;
	int dist = 3;

	/* Pair 0, group 0 is always page 0. */
	if (!info->group && !info->pair)
		return 0;

	/* Mirror of get_info(): the last pair's second page sits at distance 2. */
	if (info->pair == lastpair && info->group)
		dist = 2;

	if (!info->group)
		page--;
	else if (info->pair)
		page += dist - 1;

	/* Reject descriptors that fall past the end of the block. */
	if (page >= mtd->erasesize / mtd->writesize)
		return -EINVAL;

	return page;
}
251
/* Two-group pairing scheme with distance-3 page pairs (see helpers above). */
const struct mtd_pairing_scheme dist3_pairing_scheme = {
	.ngroups = 2,
	.get_info = nand_pairing_dist3_get_info,
	.get_wunit = nand_pairing_dist3_get_wunit,
};
257
258static int check_offs_len(struct nand_chip *chip, loff_t ofs, uint64_t len)
259{
260 int ret = 0;
261
262
263 if (ofs & ((1ULL << chip->phys_erase_shift) - 1)) {
264 pr_debug("%s: unaligned address\n", __func__);
265 ret = -EINVAL;
266 }
267
268
269 if (len & ((1ULL << chip->phys_erase_shift) - 1)) {
270 pr_debug("%s: length not block aligned\n", __func__);
271 ret = -EINVAL;
272 }
273
274 return ret;
275}
276
277
278
279
280
281
282
283
284
285
286
/**
 * nand_extract_bits - Copy an unaligned run of bits between two buffers
 * @dst: destination buffer
 * @dst_off: bit offset (from the start of @dst) at which to start writing
 * @src: source buffer
 * @src_off: bit offset (from the start of @src) at which to start reading
 * @nbits: number of bits to copy
 *
 * Copies @nbits bits from @src to @dst; neither offset needs to be
 * byte-aligned. Bits of @dst outside the written range are preserved.
 */
void nand_extract_bits(u8 *dst, unsigned int dst_off, const u8 *src,
		       unsigned int src_off, unsigned int nbits)
{
	unsigned int tmp, n;

	/* Normalize each offset into a byte pointer plus a sub-byte shift. */
	dst += dst_off / 8;
	dst_off %= 8;
	src += src_off / 8;
	src_off %= 8;

	while (nbits) {
		/* Largest chunk that stays within one byte on both sides. */
		n = min3(8 - dst_off, 8 - src_off, nbits);

		/* Extract n bits from src and merge them into dst. */
		tmp = (*src >> src_off) & GENMASK(n - 1, 0);
		*dst &= ~GENMASK(n - 1 + dst_off, dst_off);
		*dst |= tmp << dst_off;

		dst_off += n;
		if (dst_off >= 8) {
			dst++;
			dst_off -= 8;
		}

		src_off += n;
		if (src_off >= 8) {
			src++;
			src_off -= 8;
		}

		nbits -= n;
	}
}
EXPORT_SYMBOL_GPL(nand_extract_bits);
320
321
322
323
324
325
326
327
328
329
/**
 * nand_select_target() - Select a NAND target (A.K.A. die)
 * @chip: NAND chip object
 * @cs: the CS line to select. Note that this CS id is always from the chip
 *	PoV, not the controller one
 *
 * Select a NAND target so that further operations executed on @chip go to
 * the selected NAND target.
 */
void nand_select_target(struct nand_chip *chip, unsigned int cs)
{
	/*
	 * NOTE(review): valid CS ids are 0..ntargets-1, so this bound looks
	 * off by one (cs == ntargets slips through). Confirm no caller relies
	 * on the laxer check before tightening it to >=.
	 */
	if (WARN_ON(cs > nanddev_ntargets(&chip->base)))
		return;

	chip->cur_cs = cs;

	if (chip->legacy.select_chip)
		chip->legacy.select_chip(chip, cs);
}
EXPORT_SYMBOL_GPL(nand_select_target);
345
346
347
348
349
350
351
352
/**
 * nand_deselect_target() - Deselect the currently selected target
 * @chip: NAND chip object
 *
 * Deselect the currently selected NAND target, then record that no CS is
 * active by setting ->cur_cs to -1.
 */
void nand_deselect_target(struct nand_chip *chip)
{
	if (chip->legacy.select_chip)
		chip->legacy.select_chip(chip, -1);

	chip->cur_cs = -1;
}
EXPORT_SYMBOL_GPL(nand_deselect_target);
361
362
363
364
365
366
367
/**
 * nand_release_device - [GENERIC] release chip
 * @chip: NAND chip object
 *
 * Release the controller and the chip locks taken by nand_get_device(),
 * in the reverse order of acquisition.
 */
static void nand_release_device(struct nand_chip *chip)
{
	/* Release the controller and the chip */
	mutex_unlock(&chip->controller->lock);
	mutex_unlock(&chip->lock);
}
374
375
376
377
378
379
380
381
382
383
/**
 * nand_bbm_get_next_page - Get the next page for bad block markers
 * @chip: NAND chip object
 * @page: First page to start checking for bad block marker usage
 *
 * Returns an integer that corresponds to the page offset within a block,
 * for a page that is used to store bad block markers. If no more pages
 * remain, -EINVAL is returned. The ordered checks below enumerate the
 * candidate pages (first, second, last) in ascending order.
 */
int nand_bbm_get_next_page(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int last_page = ((mtd->erasesize - mtd->writesize) >>
			 chip->page_shift) & chip->pagemask;
	unsigned int bbm_flags = NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE
		| NAND_BBM_LASTPAGE;

	/* No BBM placement flag set: default to scanning the first page only. */
	if (page == 0 && !(chip->options & bbm_flags))
		return 0;
	if (page == 0 && chip->options & NAND_BBM_FIRSTPAGE)
		return 0;
	if (page <= 1 && chip->options & NAND_BBM_SECONDPAGE)
		return 1;
	if (page <= last_page && chip->options & NAND_BBM_LASTPAGE)
		return last_page;

	return -EINVAL;
}
403
404
405
406
407
408
409
410
/**
 * nand_block_bad - [DEFAULT] Read bad block marker from the chip
 * @chip: NAND chip object
 * @ofs: offset from device start
 *
 * Check if the block is bad by reading the bad block marker byte in the
 * OOB area of every page that may carry one (see nand_bbm_get_next_page()).
 * Returns non-zero if bad, 0 if good, or a negative error code.
 */
static int nand_block_bad(struct nand_chip *chip, loff_t ofs)
{
	int first_page, page_offset;
	int res;
	u8 bad;

	first_page = (int)(ofs >> chip->page_shift) & chip->pagemask;
	page_offset = nand_bbm_get_next_page(chip, 0);

	while (page_offset >= 0) {
		res = chip->ecc.read_oob(chip, first_page + page_offset);
		if (res < 0)
			return res;

		bad = chip->oob_poi[chip->badblockpos];

		/*
		 * A marker of anything but 0xFF means bad. Chips with
		 * badblockbits != 8 tolerate a few flipped bits in the
		 * marker before declaring the block bad.
		 */
		if (likely(chip->badblockbits == 8))
			res = bad != 0xFF;
		else
			res = hweight8(bad) < chip->badblockbits;
		if (res)
			return res;

		page_offset = nand_bbm_get_next_page(chip, page_offset + 1);
	}

	return 0;
}
439
440static int nand_isbad_bbm(struct nand_chip *chip, loff_t ofs)
441{
442 if (chip->options & NAND_NO_BBM_QUIRK)
443 return 0;
444
445 if (chip->legacy.block_bad)
446 return chip->legacy.block_bad(chip, ofs);
447
448 return nand_block_bad(chip, ofs);
449}
450
451
452
453
454
455
456
457
458
/**
 * nand_get_device - [GENERIC] Get chip for selected access
 * @chip: NAND chip structure
 *
 * Lock the device and its controller for exclusive access. The chip lock
 * is taken first, then the controller lock — nand_release_device() unlocks
 * in the reverse order.
 *
 * Return: -EBUSY if the chip has been suspended, 0 otherwise
 */
static int nand_get_device(struct nand_chip *chip)
{
	mutex_lock(&chip->lock);
	if (chip->suspended) {
		/* Refuse access while suspended; drop the chip lock again. */
		mutex_unlock(&chip->lock);
		return -EBUSY;
	}
	mutex_lock(&chip->controller->lock);

	return 0;
}
470
471
472
473
474
475
476
477
478static int nand_check_wp(struct nand_chip *chip)
479{
480 u8 status;
481 int ret;
482
483
484 if (chip->options & NAND_BROKEN_XD)
485 return 0;
486
487
488 ret = nand_status_op(chip, &status);
489 if (ret)
490 return ret;
491
492 return status & NAND_STATUS_WP ? 0 : 1;
493}
494
495
496
497
498
499
500
501
/**
 * nand_fill_oob - [INTERN] Transfer client buffer to oob
 * @chip: NAND chip object
 * @oob: oob data buffer
 * @len: oob data write length
 * @ops: oob ops structure
 *
 * Copy @len OOB bytes from @oob into chip->oob_poi according to the
 * placement mode in @ops, and return the advanced client-buffer pointer.
 */
static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, size_t len,
			      struct mtd_oob_ops *ops)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	/*
	 * Initialise to all 0xFF so that untouched OOB bytes keep their
	 * erased value and do not disturb data already on flash.
	 */
	memset(chip->oob_poi, 0xff, mtd->oobsize);

	switch (ops->mode) {

	case MTD_OPS_PLACE_OOB:
	case MTD_OPS_RAW:
		/* Direct placement at the caller-requested offset. */
		memcpy(chip->oob_poi + ops->ooboffs, oob, len);
		return oob + len;

	case MTD_OPS_AUTO_OOB:
		/* Scatter the bytes into the layout's free regions. */
		ret = mtd_ooblayout_set_databytes(mtd, oob, chip->oob_poi,
						  ops->ooboffs, len);
		BUG_ON(ret);
		return oob + len;

	default:
		/* Unknown mode: programming error in the caller. */
		BUG();
	}
	/* Not reached — BUG() above does not return. */
	return NULL;
}
532
533
534
535
536
537
538
539
540
/**
 * nand_do_write_oob - [MTD Interface] NAND write out-of-band
 * @chip: NAND chip object
 * @to: offset to write to
 * @ops: oob operation description structure
 *
 * Write OOB data to one page. Resets and selects the right die, checks
 * write protection, invalidates the page cache if needed, then delegates
 * to the (raw) OOB write hook.
 */
static int nand_do_write_oob(struct nand_chip *chip, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int chipnr, page, status, len, ret;

	pr_debug("%s: to = 0x%08x, len = %i\n",
			 __func__, (unsigned int)to, (int)ops->ooblen);

	len = mtd_oobavail(mtd, ops);

	/* Do not allow write past end of page */
	if ((ops->ooboffs + ops->ooblen) > len) {
		pr_debug("%s: attempt to write past end of page\n",
				__func__);
		return -EINVAL;
	}

	chipnr = (int)(to >> chip->chip_shift);

	/*
	 * Reset the chip before the write so it starts from a known state.
	 * NOTE(review): presumably works around chips that misbehave on OOB
	 * writes without a prior reset — confirm before removing.
	 */
	ret = nand_reset(chip, chipnr);
	if (ret)
		return ret;

	nand_select_target(chip, chipnr);

	/* Shift to get page */
	page = (int)(to >> chip->page_shift);

	/* Check, if it is write protected */
	if (nand_check_wp(chip)) {
		nand_deselect_target(chip);
		return -EROFS;
	}

	/* Invalidate the page cache, if we write to the cached page */
	if (page == chip->pagecache.page)
		chip->pagecache.page = -1;

	nand_fill_oob(chip, ops->oobbuf, ops->ooblen, ops);

	if (ops->mode == MTD_OPS_RAW)
		status = chip->ecc.write_oob_raw(chip, page & chip->pagemask);
	else
		status = chip->ecc.write_oob(chip, page & chip->pagemask);

	nand_deselect_target(chip);

	if (status)
		return status;

	ops->oobretlen = ops->ooblen;

	return 0;
}
602
603
604
605
606
607
608
609
610
611
/**
 * nand_default_block_markbad - [DEFAULT] mark a block bad via bad block marker
 * @chip: NAND chip object
 * @ofs: offset from device start
 *
 * This is the default implementation, which can be overridden by a hardware
 * specific driver. It writes a zeroed bad block marker to the OOB area of
 * every page that carries a marker in the block.
 */
static int nand_default_block_markbad(struct nand_chip *chip, loff_t ofs)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct mtd_oob_ops ops;
	uint8_t buf[2] = { 0, 0 };
	int ret = 0, res, page_offset;

	memset(&ops, 0, sizeof(ops));
	ops.oobbuf = buf;
	ops.ooboffs = chip->badblockpos;
	if (chip->options & NAND_BUSWIDTH_16) {
		/* 16-bit bus: write the whole even-aligned marker word. */
		ops.ooboffs &= ~0x01;
		ops.len = ops.ooblen = 2;
	} else {
		ops.len = ops.ooblen = 1;
	}
	ops.mode = MTD_OPS_PLACE_OOB;

	/* Write to first/second/last page(s), as configured for this chip. */
	page_offset = nand_bbm_get_next_page(chip, 0);

	while (page_offset >= 0) {
		res = nand_do_write_oob(chip,
					ofs + (page_offset * mtd->writesize),
					&ops);

		/* Remember the first error but keep trying the other pages. */
		if (!ret)
			ret = res;

		page_offset = nand_bbm_get_next_page(chip, page_offset + 1);
	}

	return ret;
}
645
646
647
648
649
650
651int nand_markbad_bbm(struct nand_chip *chip, loff_t ofs)
652{
653 if (chip->legacy.block_markbad)
654 return chip->legacy.block_markbad(chip, ofs);
655
656 return nand_default_block_markbad(chip, ofs);
657}
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
/**
 * nand_block_markbad_lowlevel - mark a block bad
 * @chip: NAND chip object
 * @ofs: offset from device start
 *
 * Perform the generic bad block marking steps: write the OOB bad block
 * marker (unless NAND_BBT_NO_OOB_BBM) and/or update the bad block table,
 * then bump the MTD bad block statistics on success.
 */
static int nand_block_markbad_lowlevel(struct nand_chip *chip, loff_t ofs)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int res, ret = 0;

	if (!(chip->bbt_options & NAND_BBT_NO_OOB_BBM)) {
		struct erase_info einfo;

		/*
		 * Erase the block first; the result is deliberately ignored
		 * (a truly bad block may well be unerasable).
		 */
		memset(&einfo, 0, sizeof(einfo));
		einfo.addr = ofs;
		einfo.len = 1ULL << chip->phys_erase_shift;
		nand_erase_nand(chip, &einfo, 0);

		/* Write bad block marker to OOB */
		ret = nand_get_device(chip);
		if (ret)
			return ret;

		ret = nand_markbad_bbm(chip, ofs);
		nand_release_device(chip);
	}

	/* Mark block bad in BBT */
	if (chip->bbt) {
		res = nand_markbad_bbt(chip, ofs);
		/* Keep the OOB-marking error if there was one. */
		if (!ret)
			ret = res;
	}

	if (!ret)
		mtd->ecc_stats.badblocks++;

	return ret;
}
713
714
715
716
717
718
719
720
721static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs)
722{
723 struct nand_chip *chip = mtd_to_nand(mtd);
724
725 if (!chip->bbt)
726 return 0;
727
728 return nand_isreserved_bbt(chip, ofs);
729}
730
731
732
733
734
735
736
737
738
739
740static int nand_block_checkbad(struct nand_chip *chip, loff_t ofs, int allowbbt)
741{
742
743 if (chip->bbt)
744 return nand_isbad_bbt(chip, ofs, allowbbt);
745
746 return nand_isbad_bbm(chip, ofs);
747}
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
/**
 * nand_soft_waitrdy - Poll STATUS reg until RDY bit is set to 1
 * @chip: NAND chip structure
 * @timeout_ms: Timeout in ms
 *
 * Poll the STATUS register using ->exec_op() until the RDY bit becomes 1.
 * If that does not happen within the specified timeout, -ETIMEDOUT is
 * returned. Intended for controllers that have no access to the R/B pin.
 *
 * Return 0 if the NAND chip is ready, a negative error otherwise.
 */
int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
{
	const struct nand_sdr_timings *timings;
	u8 status = 0;
	int ret;

	if (!nand_has_exec_op(chip))
		return -ENOTSUPP;

	/* Wait tWB before polling the STATUS reg. */
	timings = nand_get_sdr_timings(nand_get_interface_config(chip));
	ndelay(PSEC_TO_NSEC(timings->tWB_max));

	ret = nand_status_op(chip, NULL);
	if (ret)
		return ret;

	/*
	 * +1 below compensates for starting in the last fraction of the
	 * current jiffy: without it, a 1-jiffy timeout could expire almost
	 * immediately and trigger a false timeout.
	 */
	timeout_ms = jiffies + msecs_to_jiffies(timeout_ms) + 1;
	do {
		ret = nand_read_data_op(chip, &status, sizeof(status), true,
					false);
		if (ret)
			break;

		if (status & NAND_STATUS_READY)
			break;

		/*
		 * Back off between polls so we do not pollute the bus;
		 * 10us keeps latency small relative to typical tR times.
		 */
		udelay(10);
	} while (time_before(jiffies, timeout_ms));

	/*
	 * Exit READ_STATUS mode so any following DATA_IN instruction reads
	 * real data from the array instead of the status byte.
	 */
	nand_exit_status_op(chip);

	if (ret)
		return ret;

	return status & NAND_STATUS_READY ? 0 : -ETIMEDOUT;
};
EXPORT_SYMBOL_GPL(nand_soft_waitrdy);
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod,
835 unsigned long timeout_ms)
836{
837
838
839
840
841
842
843
844 timeout_ms = jiffies + msecs_to_jiffies(timeout_ms) + 1;
845 do {
846 if (gpiod_get_value_cansleep(gpiod))
847 return 0;
848
849 cond_resched();
850 } while (time_before(jiffies, timeout_ms));
851
852 return gpiod_get_value_cansleep(gpiod) ? 0 : -ETIMEDOUT;
853};
854EXPORT_SYMBOL_GPL(nand_gpio_waitrdy);
855
856
857
858
859
860
861
862
863
864
/**
 * panic_nand_wait - [GENERIC] wait until the command is done
 * @chip: NAND chip structure
 * @timeo: timeout in ms
 *
 * Wait for the chip to become ready by busy-polling (mdelay), so it is
 * usable from atomic/panic context where sleeping is not possible.
 * Errors from the status read are silently swallowed — best-effort only.
 */
void panic_nand_wait(struct nand_chip *chip, unsigned long timeo)
{
	int i;
	for (i = 0; i < timeo; i++) {
		if (chip->legacy.dev_ready) {
			if (chip->legacy.dev_ready(chip))
				break;
		} else {
			int ret;
			u8 status;

			/* Read the status byte left on the bus by the caller. */
			ret = nand_read_data_op(chip, &status, sizeof(status),
						true, false);
			if (ret)
				return;

			if (status & NAND_STATUS_READY)
				break;
		}
		mdelay(1);
	}
}
887
888static bool nand_supports_get_features(struct nand_chip *chip, int addr)
889{
890 return (chip->parameters.supports_set_get_features &&
891 test_bit(addr, chip->parameters.get_feature_list));
892}
893
894static bool nand_supports_set_features(struct nand_chip *chip, int addr)
895{
896 return (chip->parameters.supports_set_get_features &&
897 test_bit(addr, chip->parameters.set_feature_list));
898}
899
900
901
902
903
904
905
906
907
908
/**
 * nand_reset_interface - Reset data interface and timings
 * @chip: The NAND chip
 * @chipnr: Internal die id
 *
 * Reset the data interface and timings to SDR timing mode 0.
 *
 * Returns 0 for success or negative error code otherwise.
 */
static int nand_reset_interface(struct nand_chip *chip, int chipnr)
{
	const struct nand_controller_ops *ops = chip->controller->ops;
	int ret;

	if (!nand_controller_can_setup_interface(chip))
		return 0;

	/*
	 * Fall back to the reset (SDR timing mode 0) interface config before
	 * issuing further commands: a chip in any timing mode is required to
	 * recognize a Reset issued in SDR timing mode 0.
	 */
	chip->current_interface_config = nand_get_reset_interface_config();
	ret = ops->setup_interface(chip, chipnr,
				   chip->current_interface_config);
	if (ret)
		pr_err("Failed to configure data interface to SDR timing mode 0\n");

	return ret;
}
939
940
941
942
943
944
945
946
947
948
949
/**
 * nand_setup_interface - Setup the best data interface and timings
 * @chip: The NAND chip
 * @chipnr: Internal die id
 *
 * Configure what has been reported to be the best data interface and NAND
 * timings supported by the chip and the driver, programming the chip side
 * via SET FEATURES (when supported) and the controller side via
 * ->setup_interface().
 *
 * Returns 0 for success or negative error code otherwise.
 */
static int nand_setup_interface(struct nand_chip *chip, int chipnr)
{
	const struct nand_controller_ops *ops = chip->controller->ops;
	u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = { };
	int ret;

	if (!nand_controller_can_setup_interface(chip))
		return 0;

	/* No "best" config chosen: stay in the current (reset) mode. */
	if (!chip->best_interface_config)
		return 0;

	tmode_param[0] = chip->best_interface_config->timings.mode;

	/* Change the mode on the chip side (if supported by the NAND chip) */
	if (nand_supports_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE)) {
		nand_select_target(chip, chipnr);
		ret = nand_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
					tmode_param);
		nand_deselect_target(chip);
		if (ret)
			return ret;
	}

	/* Change the mode on the controller side */
	ret = ops->setup_interface(chip, chipnr, chip->best_interface_config);
	if (ret)
		return ret;

	/* Read back the mode to check the chip really switched, if possible */
	if (!nand_supports_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE))
		goto update_interface_config;

	memset(tmode_param, 0, ONFI_SUBFEATURE_PARAM_LEN);
	nand_select_target(chip, chipnr);
	ret = nand_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
				tmode_param);
	nand_deselect_target(chip);
	if (ret)
		goto err_reset_chip;

	if (tmode_param[0] != chip->best_interface_config->timings.mode) {
		pr_warn("timing mode %d not acknowledged by the NAND chip\n",
			chip->best_interface_config->timings.mode);
		/*
		 * ret is 0 here, so this fallback path returns success:
		 * the chip keeps working, just in the reset timings.
		 */
		goto err_reset_chip;
	}

update_interface_config:
	chip->current_interface_config = chip->best_interface_config;

	return 0;

err_reset_chip:
	/*
	 * Fall back to timing mode 0: reset the interface on the controller
	 * side, then reset the chip itself so both ends agree again.
	 */
	nand_reset_interface(chip, chipnr);
	nand_select_target(chip, chipnr);
	nand_reset_op(chip);
	nand_deselect_target(chip);

	return ret;
}
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032int nand_choose_best_sdr_timings(struct nand_chip *chip,
1033 struct nand_interface_config *iface,
1034 struct nand_sdr_timings *spec_timings)
1035{
1036 const struct nand_controller_ops *ops = chip->controller->ops;
1037 int best_mode = 0, mode, ret;
1038
1039 iface->type = NAND_SDR_IFACE;
1040
1041 if (spec_timings) {
1042 iface->timings.sdr = *spec_timings;
1043 iface->timings.mode = onfi_find_closest_sdr_mode(spec_timings);
1044
1045
1046 ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
1047 iface);
1048 if (!ret) {
1049 chip->best_interface_config = iface;
1050 return ret;
1051 }
1052
1053
1054 best_mode = iface->timings.mode;
1055 } else if (chip->parameters.onfi) {
1056 best_mode = fls(chip->parameters.onfi->async_timing_mode) - 1;
1057 }
1058
1059 for (mode = best_mode; mode >= 0; mode--) {
1060 onfi_fill_interface_config(chip, iface, NAND_SDR_IFACE, mode);
1061
1062 ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
1063 iface);
1064 if (!ret)
1065 break;
1066 }
1067
1068 chip->best_interface_config = iface;
1069
1070 return 0;
1071}
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
/**
 * nand_choose_interface_config - find the best data interface and timings
 * @chip: The NAND chip
 *
 * Find the best data interface and NAND timings supported by the chip and
 * the controller, letting a chip-specific hook propose its own config when
 * one is provided.
 *
 * The allocated config is owned by @chip once chosen; it is only freed
 * here when the selection fails.
 *
 * Returns 0 for success or negative error code otherwise.
 */
static int nand_choose_interface_config(struct nand_chip *chip)
{
	struct nand_interface_config *iface;
	int ret;

	if (!nand_controller_can_setup_interface(chip))
		return 0;

	iface = kzalloc(sizeof(*iface), GFP_KERNEL);
	if (!iface)
		return -ENOMEM;

	if (chip->ops.choose_interface_config)
		ret = chip->ops.choose_interface_config(chip, iface);
	else
		ret = nand_choose_best_sdr_timings(chip, iface, NULL);

	if (ret)
		kfree(iface);

	return ret;
}
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
/**
 * nand_fill_column_cycles - fill the column cycles of an address
 * @chip: The NAND chip
 * @addrs: Array of address cycles to fill
 * @offset_in_page: The offset in the page
 *
 * Fills the first one (small-page) or two (large-page) bytes of @addrs
 * with the column address, adjusted for OOB-relative addressing and bus
 * width.
 *
 * Returns the number of column cycles written, or a negative error code
 * when the offset is invalid.
 */
static int nand_fill_column_cycles(struct nand_chip *chip, u8 *addrs,
				   unsigned int offset_in_page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	/* Make sure the offset is inside the page (data + OOB). */
	if (offset_in_page > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	/*
	 * On small-page NANDs the OOB area is accessed with a dedicated
	 * command and the column is relative to the start of the OOB area,
	 * not the start of the page. Adjust the offset accordingly.
	 */
	if (mtd->writesize <= 512 && offset_in_page >= mtd->writesize)
		offset_in_page -= mtd->writesize;

	/*
	 * The offset is expressed in bytes; on a 16-bit bus the column
	 * counts words, so it must be even and is divided by 2.
	 */
	if (chip->options & NAND_BUSWIDTH_16) {
		if (WARN_ON(offset_in_page % 2))
			return -EINVAL;

		offset_in_page /= 2;
	}

	addrs[0] = offset_in_page;

	/*
	 * Small-page NANDs use 1 column cycle; large-page NANDs need 2.
	 */
	if (mtd->writesize <= 512)
		return 1;

	addrs[1] = offset_in_page >> 8;

	return 2;
}
1162
/*
 * nand_sp_exec_read_page_op - READ PAGE on small-page chips via ->exec_op().
 *
 * Small-page chips select the column range through the opcode itself:
 * READ0 for the first 256 bytes, READ1 for the second half (8-bit bus
 * only), READOOB for the OOB area — there is no READSTART cycle.
 */
static int nand_sp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
				     unsigned int offset_in_page, void *buf,
				     unsigned int len)
{
	const struct nand_sdr_timings *sdr =
		nand_get_sdr_timings(nand_get_interface_config(chip));
	struct mtd_info *mtd = nand_to_mtd(chip);
	u8 addrs[4];
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_ADDR(3, addrs, PSEC_TO_NSEC(sdr->tWB_max)),
		NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
				 PSEC_TO_NSEC(sdr->tRR_min)),
		NAND_OP_DATA_IN(len, buf, 0),
	};
	struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
	int ret;

	/* Drop the DATA_IN instruction if there is nothing to read. */
	if (!len)
		op.ninstrs--;

	/* Pick the opcode matching the requested column range. */
	if (offset_in_page >= mtd->writesize)
		instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
	else if (offset_in_page >= 256 &&
		 !(chip->options & NAND_BUSWIDTH_16))
		instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;

	ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
	if (ret < 0)
		return ret;

	/* Row (page) address cycles follow the single column cycle. */
	addrs[1] = page;
	addrs[2] = page >> 8;

	if (chip->options & NAND_ROW_ADDR_3) {
		addrs[3] = page >> 16;
		instrs[1].ctx.addr.naddrs++;
	}

	return nand_exec_op(chip, &op);
}
1205
/*
 * nand_lp_exec_read_page_op - READ PAGE on large-page chips via ->exec_op().
 *
 * Large-page chips use the READ0 / address cycles / READSTART sequence,
 * with two column cycles followed by two or three row cycles.
 */
static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
				     unsigned int offset_in_page, void *buf,
				     unsigned int len)
{
	const struct nand_sdr_timings *sdr =
		nand_get_sdr_timings(nand_get_interface_config(chip));
	u8 addrs[5];
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_ADDR(4, addrs, 0),
		NAND_OP_CMD(NAND_CMD_READSTART, PSEC_TO_NSEC(sdr->tWB_max)),
		NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
				 PSEC_TO_NSEC(sdr->tRR_min)),
		NAND_OP_DATA_IN(len, buf, 0),
	};
	struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
	int ret;

	/* Drop the DATA_IN instruction if there is nothing to read. */
	if (!len)
		op.ninstrs--;

	ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
	if (ret < 0)
		return ret;

	/* Row (page) address cycles follow the two column cycles. */
	addrs[2] = page;
	addrs[3] = page >> 8;

	if (chip->options & NAND_ROW_ADDR_3) {
		addrs[4] = page >> 16;
		instrs[1].ctx.addr.naddrs++;
	}

	return nand_exec_op(chip, &op);
}
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256int nand_read_page_op(struct nand_chip *chip, unsigned int page,
1257 unsigned int offset_in_page, void *buf, unsigned int len)
1258{
1259 struct mtd_info *mtd = nand_to_mtd(chip);
1260
1261 if (len && !buf)
1262 return -EINVAL;
1263
1264 if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1265 return -EINVAL;
1266
1267 if (nand_has_exec_op(chip)) {
1268 if (mtd->writesize > 512)
1269 return nand_lp_exec_read_page_op(chip, page,
1270 offset_in_page, buf,
1271 len);
1272
1273 return nand_sp_exec_read_page_op(chip, page, offset_in_page,
1274 buf, len);
1275 }
1276
1277 chip->legacy.cmdfunc(chip, NAND_CMD_READ0, offset_in_page, page);
1278 if (len)
1279 chip->legacy.read_buf(chip, buf, len);
1280
1281 return 0;
1282}
1283EXPORT_SYMBOL_GPL(nand_read_page_op);
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
/**
 * nand_read_param_page_op - Do a READ PARAMETER PAGE operation
 * @chip: The NAND chip
 * @page: parameter page to read
 * @buf: buffer used to store the data
 * @len: length of the buffer
 *
 * This function issues a READ PARAMETER PAGE operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf,
			    unsigned int len)
{
	unsigned int i;
	u8 *p = buf;

	if (len && !buf)
		return -EINVAL;

	if (nand_has_exec_op(chip)) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(nand_get_interface_config(chip));
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_PARAM, 0),
			NAND_OP_ADDR(1, &page, PSEC_TO_NSEC(sdr->tWB_max)),
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
					 PSEC_TO_NSEC(sdr->tRR_min)),
			/* Parameter pages are always read 8 bits at a time. */
			NAND_OP_8BIT_DATA_IN(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		/* Drop the DATA_IN instruction if there is nothing to read. */
		if (!len)
			op.ninstrs--;

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_PARAM, page, -1);
	for (i = 0; i < len; i++)
		p[i] = chip->legacy.read_byte(chip);

	return 0;
}
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
/**
 * nand_change_read_column_op - Do a CHANGE READ COLUMN operation
 * @chip: The NAND chip
 * @offset_in_page: The offset in the page
 * @buf: buffer used to store the data
 * @len: length of the buffer
 * @force_8bit: force 8-bit bus access
 *
 * This function issues a CHANGE READ COLUMN operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_change_read_column_op(struct nand_chip *chip,
			       unsigned int offset_in_page, void *buf,
			       unsigned int len, bool force_8bit)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (len && !buf)
		return -EINVAL;

	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	/* Small page NANDs do not support the CHANGE READ COLUMN command. */
	if (mtd->writesize <= 512)
		return -ENOTSUPP;

	if (nand_has_exec_op(chip)) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(nand_get_interface_config(chip));
		u8 addrs[2] = {};
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_RNDOUT, 0),
			NAND_OP_ADDR(2, addrs, 0),
			NAND_OP_CMD(NAND_CMD_RNDOUTSTART,
				    PSEC_TO_NSEC(sdr->tCCS_min)),
			NAND_OP_DATA_IN(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
		int ret;

		ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
		if (ret < 0)
			return ret;

		/* Drop the DATA_IN instruction if there is nothing to read. */
		if (!len)
			op.ninstrs--;

		instrs[3].ctx.data.force_8bit = force_8bit;

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_RNDOUT, offset_in_page, -1);
	if (len)
		chip->legacy.read_buf(chip, buf, len);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_change_read_column_op);
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409int nand_read_oob_op(struct nand_chip *chip, unsigned int page,
1410 unsigned int offset_in_oob, void *buf, unsigned int len)
1411{
1412 struct mtd_info *mtd = nand_to_mtd(chip);
1413
1414 if (len && !buf)
1415 return -EINVAL;
1416
1417 if (offset_in_oob + len > mtd->oobsize)
1418 return -EINVAL;
1419
1420 if (nand_has_exec_op(chip))
1421 return nand_read_page_op(chip, page,
1422 mtd->writesize + offset_in_oob,
1423 buf, len);
1424
1425 chip->legacy.cmdfunc(chip, NAND_CMD_READOOB, offset_in_oob, page);
1426 if (len)
1427 chip->legacy.read_buf(chip, buf, len);
1428
1429 return 0;
1430}
1431EXPORT_SYMBOL_GPL(nand_read_oob_op);
1432
/*
 * nand_exec_prog_page_op - core of PROG PAGE through ->exec_op().
 *
 * Builds a SEQIN + address + data (+ PAGEPROG + wait) sequence. When
 * @prog is false, only the SEQIN/data part is executed so the caller can
 * append more data before finishing the program with PAGEPROG.
 *
 * Returns the NAND status byte after programming (when @prog is true),
 * 0 on success otherwise, or a negative error code.
 */
static int nand_exec_prog_page_op(struct nand_chip *chip, unsigned int page,
				  unsigned int offset_in_page, const void *buf,
				  unsigned int len, bool prog)
{
	const struct nand_sdr_timings *sdr =
		nand_get_sdr_timings(nand_get_interface_config(chip));
	struct mtd_info *mtd = nand_to_mtd(chip);
	u8 addrs[5] = {};
	struct nand_op_instr instrs[] = {
		/*
		 * The first instruction (READ0) is only used on small-page
		 * chips, where it selects the column range (data/OOB) before
		 * SEQIN; it is skipped entirely on large-page chips (see the
		 * op.instrs++ adjustment below).
		 */
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_CMD(NAND_CMD_SEQIN, 0),
		NAND_OP_ADDR(0, addrs, PSEC_TO_NSEC(sdr->tADL_min)),
		NAND_OP_DATA_OUT(len, buf, 0),
		NAND_OP_CMD(NAND_CMD_PAGEPROG, PSEC_TO_NSEC(sdr->tWB_max)),
		NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tPROG_max), 0),
	};
	struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
	int naddrs = nand_fill_column_cycles(chip, addrs, offset_in_page);
	int ret;
	u8 status;

	if (naddrs < 0)
		return naddrs;

	/* Append the row (page) address cycles after the column cycles. */
	addrs[naddrs++] = page;
	addrs[naddrs++] = page >> 8;
	if (chip->options & NAND_ROW_ADDR_3)
		addrs[naddrs++] = page >> 16;

	instrs[2].ctx.addr.naddrs = naddrs;

	/* Drop the last two instructions (PAGEPROG + wait) if !prog. */
	if (!prog) {
		op.ninstrs -= 2;

		/* Also drop the DATA_OUT instruction if empty. */
		if (!len)
			op.ninstrs--;
	}

	if (mtd->writesize <= 512) {
		/*
		 * Small page: adjust the leading READ0 to point at the
		 * requested column range (second half / OOB area).
		 */
		if (offset_in_page >= mtd->writesize)
			instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
		else if (offset_in_page >= 256 &&
			 !(chip->options & NAND_BUSWIDTH_16))
			instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;
	} else {
		/*
		 * Large page: skip the leading READ0 instruction — the
		 * column is fully encoded in the address cycles.
		 */
		op.instrs++;
		op.ninstrs--;
	}

	ret = nand_exec_op(chip, &op);
	if (!prog || ret)
		return ret;

	ret = nand_status_op(chip, &status);
	if (ret)
		return ret;

	return status;
}
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
/**
 * nand_prog_page_begin_op - starts a PROG PAGE operation
 * @chip: The NAND chip
 * @page: page to write
 * @offset_in_page: offset within the page
 * @buf: buffer containing the data to write to the page
 * @len: length of the buffer
 *
 * This function issues the first half of a PROG PAGE operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page,
			    unsigned int offset_in_page, const void *buf,
			    unsigned int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (len && !buf)
		return -EINVAL;

	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	if (nand_has_exec_op(chip))
		return nand_exec_prog_page_op(chip, page, offset_in_page, buf,
					      len, false);

	chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page, page);

	/*
	 * NOTE(review): this tests buf, while the read-side helpers test
	 * len. With buf != NULL and len == 0 write_buf() is called with a
	 * zero length — presumably harmless, but confirm before changing.
	 */
	if (buf)
		chip->legacy.write_buf(chip, buf, len);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_prog_page_begin_op);
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
/**
 * nand_prog_page_end_op - ends a PROG PAGE operation
 * @chip: The NAND chip
 *
 * This function issues the second half of a PROG PAGE operation
 * (PAGEPROG + ready wait), then checks the resulting NAND status.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, -EIO if the chip reports a program failure,
 * or another negative error code.
 */
int nand_prog_page_end_op(struct nand_chip *chip)
{
	int ret;
	u8 status;

	if (nand_has_exec_op(chip)) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(nand_get_interface_config(chip));
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_PAGEPROG,
				    PSEC_TO_NSEC(sdr->tWB_max)),
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tPROG_max), 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		ret = nand_exec_op(chip, &op);
		if (ret)
			return ret;

		ret = nand_status_op(chip, &status);
		if (ret)
			return ret;
	} else {
		chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1);
		ret = chip->legacy.waitfunc(chip);
		if (ret < 0)
			return ret;

		/* waitfunc() returns the status byte on success. */
		status = ret;
	}

	if (status & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(nand_prog_page_end_op);
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
/**
 * nand_prog_page_op - Do a full PROG PAGE operation
 * @chip: The NAND chip
 * @page: page to write
 * @offset_in_page: offset within the page
 * @buf: buffer containing the data to write to the page
 * @len: length of the buffer
 *
 * This function issues a full PROG PAGE operation (address + data +
 * PAGEPROG + status check).
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, -EIO if the chip reports a program failure,
 * or another negative error code.
 */
int nand_prog_page_op(struct nand_chip *chip, unsigned int page,
		      unsigned int offset_in_page, const void *buf,
		      unsigned int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int status;

	/* A full page program makes no sense without data. */
	if (!len || !buf)
		return -EINVAL;

	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	if (nand_has_exec_op(chip)) {
		/* Returns the status byte (or a negative error). */
		status = nand_exec_prog_page_op(chip, page, offset_in_page, buf,
						len, true);
	} else {
		chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page,
				     page);
		chip->legacy.write_buf(chip, buf, len);
		chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1);
		status = chip->legacy.waitfunc(chip);
	}

	if (status & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(nand_prog_page_op);
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
/**
 * nand_change_write_column_op - Do a CHANGE WRITE COLUMN operation
 * @chip: The NAND chip
 * @offset_in_page: The offset in the page
 * @buf: buffer containing the data to send to the NAND
 * @len: length of the buffer
 * @force_8bit: force 8-bit bus access
 *
 * This function issues a CHANGE WRITE COLUMN operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_change_write_column_op(struct nand_chip *chip,
				unsigned int offset_in_page,
				const void *buf, unsigned int len,
				bool force_8bit)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (len && !buf)
		return -EINVAL;

	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	/* Small page NANDs do not support the CHANGE WRITE COLUMN command. */
	if (mtd->writesize <= 512)
		return -ENOTSUPP;

	if (nand_has_exec_op(chip)) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(nand_get_interface_config(chip));
		u8 addrs[2];
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_RNDIN, 0),
			NAND_OP_ADDR(2, addrs, PSEC_TO_NSEC(sdr->tCCS_min)),
			NAND_OP_DATA_OUT(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
		int ret;

		ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
		if (ret < 0)
			return ret;

		instrs[2].ctx.data.force_8bit = force_8bit;

		/* Drop the DATA_OUT instruction if there is nothing to send. */
		if (!len)
			op.ninstrs--;

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_RNDIN, offset_in_page, -1);
	if (len)
		chip->legacy.write_buf(chip, buf, len);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_change_write_column_op);
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
/**
 * nand_readid_op - Do a READID operation
 * @chip: The NAND chip
 * @addr: address cycle to pass after the READID command
 * @buf: buffer used to store the ID
 * @len: length of the buffer
 *
 * This function sends a READID command and reads back the ID returned by the
 * NAND. This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
		   unsigned int len)
{
	unsigned int i;
	u8 *id = buf;

	if (len && !buf)
		return -EINVAL;

	if (nand_has_exec_op(chip)) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(nand_get_interface_config(chip));
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_READID, 0),
			NAND_OP_ADDR(1, &addr, PSEC_TO_NSEC(sdr->tADL_min)),
			NAND_OP_8BIT_DATA_IN(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		/* Drop the DATA_IN instruction if len is set to 0. */
		if (!len)
			op.ninstrs--;

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_READID, addr, -1);

	for (i = 0; i < len; i++)
		id[i] = chip->legacy.read_byte(chip);

	return 0;
}
1746EXPORT_SYMBOL_GPL(nand_readid_op);
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
/**
 * nand_status_op - Do a STATUS operation
 * @chip: The NAND chip
 * @status: out variable to store the NAND status
 *
 * This function sends a STATUS command and reads back the status returned by
 * the NAND. This function does not select/unselect the CS line.
 *
 * Note: the chip stays in status-output mode afterwards; use
 * nand_exit_status_op() to return it to normal data reads.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_status_op(struct nand_chip *chip, u8 *status)
{
	if (nand_has_exec_op(chip)) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(nand_get_interface_config(chip));
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_STATUS,
				    PSEC_TO_NSEC(sdr->tADL_min)),
			NAND_OP_8BIT_DATA_IN(1, status, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		/* Drop the DATA_IN instruction if status is NULL. */
		if (!status)
			op.ninstrs--;

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_STATUS, -1, -1);
	if (status)
		*status = chip->legacy.read_byte(chip);

	return 0;
}
1783EXPORT_SYMBOL_GPL(nand_status_op);
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796int nand_exit_status_op(struct nand_chip *chip)
1797{
1798 if (nand_has_exec_op(chip)) {
1799 struct nand_op_instr instrs[] = {
1800 NAND_OP_CMD(NAND_CMD_READ0, 0),
1801 };
1802 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1803
1804 return nand_exec_op(chip, &op);
1805 }
1806
1807 chip->legacy.cmdfunc(chip, NAND_CMD_READ0, -1, -1);
1808
1809 return 0;
1810}
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock)
1824{
1825 unsigned int page = eraseblock <<
1826 (chip->phys_erase_shift - chip->page_shift);
1827 int ret;
1828 u8 status;
1829
1830 if (nand_has_exec_op(chip)) {
1831 const struct nand_sdr_timings *sdr =
1832 nand_get_sdr_timings(nand_get_interface_config(chip));
1833 u8 addrs[3] = { page, page >> 8, page >> 16 };
1834 struct nand_op_instr instrs[] = {
1835 NAND_OP_CMD(NAND_CMD_ERASE1, 0),
1836 NAND_OP_ADDR(2, addrs, 0),
1837 NAND_OP_CMD(NAND_CMD_ERASE2,
1838 PSEC_TO_MSEC(sdr->tWB_max)),
1839 NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tBERS_max), 0),
1840 };
1841 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1842
1843 if (chip->options & NAND_ROW_ADDR_3)
1844 instrs[1].ctx.addr.naddrs++;
1845
1846 ret = nand_exec_op(chip, &op);
1847 if (ret)
1848 return ret;
1849
1850 ret = nand_status_op(chip, &status);
1851 if (ret)
1852 return ret;
1853 } else {
1854 chip->legacy.cmdfunc(chip, NAND_CMD_ERASE1, -1, page);
1855 chip->legacy.cmdfunc(chip, NAND_CMD_ERASE2, -1, -1);
1856
1857 ret = chip->legacy.waitfunc(chip);
1858 if (ret < 0)
1859 return ret;
1860
1861 status = ret;
1862 }
1863
1864 if (status & NAND_STATUS_FAIL)
1865 return -EIO;
1866
1867 return 0;
1868}
1869EXPORT_SYMBOL_GPL(nand_erase_op);
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
/**
 * nand_set_features_op - Do a SET FEATURES operation
 * @chip: The NAND chip
 * @feature: feature id
 * @data: 4 bytes of data
 *
 * This function sends a SET FEATURES command and waits for the NAND to be
 * ready before returning. This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
static int nand_set_features_op(struct nand_chip *chip, u8 feature,
				const void *data)
{
	const u8 *params = data;
	int i, ret;

	if (nand_has_exec_op(chip)) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(nand_get_interface_config(chip));
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_SET_FEATURES, 0),
			NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tADL_min)),
			NAND_OP_8BIT_DATA_OUT(ONFI_SUBFEATURE_PARAM_LEN, data,
					      PSEC_TO_NSEC(sdr->tWB_max)),
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tFEAT_max), 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_SET_FEATURES, feature, -1);
	for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
		chip->legacy.write_byte(chip, params[i]);

	ret = chip->legacy.waitfunc(chip);
	if (ret < 0)
		return ret;

	if (ret & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}
1917
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928
1929
/**
 * nand_get_features_op - Do a GET FEATURES operation
 * @chip: The NAND chip
 * @feature: feature id
 * @data: 4 bytes of data
 *
 * This function sends a GET FEATURES command and reads back the feature
 * parameters. This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
static int nand_get_features_op(struct nand_chip *chip, u8 feature,
				void *data)
{
	u8 *params = data;
	int i;

	if (nand_has_exec_op(chip)) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(nand_get_interface_config(chip));
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_GET_FEATURES, 0),
			NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tWB_max)),
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tFEAT_max),
					 PSEC_TO_NSEC(sdr->tRR_min)),
			NAND_OP_8BIT_DATA_IN(ONFI_SUBFEATURE_PARAM_LEN,
					     data, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_GET_FEATURES, feature, -1);
	for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
		params[i] = chip->legacy.read_byte(chip);

	return 0;
}
1958
1959static int nand_wait_rdy_op(struct nand_chip *chip, unsigned int timeout_ms,
1960 unsigned int delay_ns)
1961{
1962 if (nand_has_exec_op(chip)) {
1963 struct nand_op_instr instrs[] = {
1964 NAND_OP_WAIT_RDY(PSEC_TO_MSEC(timeout_ms),
1965 PSEC_TO_NSEC(delay_ns)),
1966 };
1967 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1968
1969 return nand_exec_op(chip, &op);
1970 }
1971
1972
1973 if (!chip->legacy.dev_ready)
1974 udelay(chip->legacy.chip_delay);
1975 else
1976 nand_wait_ready(chip);
1977
1978 return 0;
1979}
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989
1990
/**
 * nand_reset_op - Do a reset operation
 * @chip: The NAND chip
 *
 * This function sends a RESET command and waits for the NAND to be ready
 * before returning. This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_reset_op(struct nand_chip *chip)
{
	if (nand_has_exec_op(chip)) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(nand_get_interface_config(chip));
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_RESET, PSEC_TO_NSEC(sdr->tWB_max)),
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tRST_max), 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_RESET, -1, -1);

	return 0;
}
2009EXPORT_SYMBOL_GPL(nand_reset_op);
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019
2020
2021
2022
2023
2024
2025
/**
 * nand_read_data_op - Read data from the NAND
 * @chip: The NAND chip
 * @buf: buffer used to store the data
 * @len: length of the buffer
 * @force_8bit: force 8-bit bus access
 * @check_only: do not actually run the command, only check if it is supported
 *
 * This function does a raw data read on the bus. Usually used after launching
 * another NAND operation like nand_read_page_op().
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
		      bool force_8bit, bool check_only)
{
	if (!len || !buf)
		return -EINVAL;

	if (nand_has_exec_op(chip)) {
		struct nand_op_instr instrs[] = {
			NAND_OP_DATA_IN(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		instrs[0].ctx.data.force_8bit = force_8bit;

		/* check_only: validate the op against the controller only. */
		if (check_only)
			return nand_check_op(chip, &op);

		return nand_exec_op(chip, &op);
	}

	/* Legacy path cannot validate, so a check-only call always succeeds. */
	if (check_only)
		return 0;

	if (force_8bit) {
		u8 *p = buf;
		unsigned int i;

		/* Byte-at-a-time reads force an 8-bit bus access. */
		for (i = 0; i < len; i++)
			p[i] = chip->legacy.read_byte(chip);
	} else {
		chip->legacy.read_buf(chip, buf, len);
	}

	return 0;
}
2061EXPORT_SYMBOL_GPL(nand_read_data_op);
2062
2063
2064
2065
2066
2067
2068
2069
2070
2071
2072
2073
2074
2075
/**
 * nand_write_data_op - Write data to the NAND
 * @chip: The NAND chip
 * @buf: buffer containing the data to send
 * @len: length of the buffer
 * @force_8bit: force 8-bit bus access
 *
 * This function does a raw data write on the bus. Usually used after
 * launching another NAND operation like nand_write_page_begin_op().
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_write_data_op(struct nand_chip *chip, const void *buf,
		       unsigned int len, bool force_8bit)
{
	if (!len || !buf)
		return -EINVAL;

	if (nand_has_exec_op(chip)) {
		struct nand_op_instr instrs[] = {
			NAND_OP_DATA_OUT(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		instrs[0].ctx.data.force_8bit = force_8bit;

		return nand_exec_op(chip, &op);
	}

	if (force_8bit) {
		const u8 *p = buf;
		unsigned int i;

		/* Byte-at-a-time writes force an 8-bit bus access. */
		for (i = 0; i < len; i++)
			chip->legacy.write_byte(chip, p[i]);
	} else {
		chip->legacy.write_buf(chip, buf, len);
	}

	return 0;
}
2105EXPORT_SYMBOL_GPL(nand_write_data_op);
2106
2107
2108
2109
2110
2111
2112
2113
2114
2115
/**
 * struct nand_op_parser_ctx - Context used by the instruction parser
 * @instrs: array of all the instructions that must be addressed
 * @ninstrs: length of the @instrs array
 * @subop: current sub-operation being built from @instrs
 *
 * Used by the parser to keep track of progress while splitting a full NAND
 * operation into controller-sized sub-operations.
 */
struct nand_op_parser_ctx {
	const struct nand_op_instr *instrs;
	unsigned int ninstrs;
	struct nand_subop subop;
};
2121
2122
2123
2124
2125
2126
2127
2128
2129
2130
2131
2132
2133
2134
2135
2136
2137
2138
2139
2140
2141
2142static bool
2143nand_op_parser_must_split_instr(const struct nand_op_parser_pattern_elem *pat,
2144 const struct nand_op_instr *instr,
2145 unsigned int *start_offset)
2146{
2147 switch (pat->type) {
2148 case NAND_OP_ADDR_INSTR:
2149 if (!pat->ctx.addr.maxcycles)
2150 break;
2151
2152 if (instr->ctx.addr.naddrs - *start_offset >
2153 pat->ctx.addr.maxcycles) {
2154 *start_offset += pat->ctx.addr.maxcycles;
2155 return true;
2156 }
2157 break;
2158
2159 case NAND_OP_DATA_IN_INSTR:
2160 case NAND_OP_DATA_OUT_INSTR:
2161 if (!pat->ctx.data.maxlen)
2162 break;
2163
2164 if (instr->ctx.data.len - *start_offset >
2165 pat->ctx.data.maxlen) {
2166 *start_offset += pat->ctx.data.maxlen;
2167 return true;
2168 }
2169 break;
2170
2171 default:
2172 break;
2173 }
2174
2175 return false;
2176}
2177
2178
2179
2180
2181
2182
2183
2184
2185
2186
2187
2188
/**
 * nand_op_parser_match_pat - Checks if a pattern matches the instructions
 *			      remaining in the parser context
 * @pat: the pattern to test
 * @ctx: the parser context structure to match with the pattern @pat
 *
 * Check if @pat matches the set or a sub-set of instructions remaining in
 * @ctx. Returns true if this is the case, false otherwise. When true is
 * returned, @ctx->subop is updated with the set of instructions to be passed
 * to the controller driver.
 */
static bool
nand_op_parser_match_pat(const struct nand_op_parser_pattern *pat,
			 struct nand_op_parser_ctx *ctx)
{
	unsigned int instr_offset = ctx->subop.first_instr_start_off;
	const struct nand_op_instr *end = ctx->instrs + ctx->ninstrs;
	const struct nand_op_instr *instr = ctx->subop.instrs;
	unsigned int i, ninstrs;

	for (i = 0, ninstrs = 0; i < pat->nelems && instr < end; i++) {
		/*
		 * The pattern instruction does not match the operation
		 * instruction. If the instruction is marked optional in the
		 * pattern definition, we skip the pattern element and examine
		 * the same instruction again.
		 */
		if (instr->type != pat->elems[i].type) {
			if (!pat->elems[i].optional)
				return false;

			continue;
		}

		/*
		 * Now check the pattern element constraints. If the pattern is
		 * not able to handle the whole instruction in a single step,
		 * we have to split it.
		 * The last_instr_end_off value comes back positive if this is
		 * the case, and ctx->subop.instrs will then point to the
		 * instruction extra data (like the page offset or the data
		 * offset of a split data instruction).
		 */
		if (nand_op_parser_must_split_instr(&pat->elems[i], instr,
						    &instr_offset)) {
			ninstrs++;
			i++;
			break;
		}

		instr++;
		ninstrs++;
		instr_offset = 0;
	}

	/*
	 * This can happen if all instructions of a pattern are optional.
	 * Still, if there's not at least one instruction handled by this
	 * pattern, this is not a match, and we should try the next one (if
	 * any).
	 */
	if (!ninstrs)
		return false;

	/*
	 * We had a match on the pattern head, but the pattern may be longer
	 * than the instructions we're asked to execute. We need to make sure
	 * there's no mandatory elements in the pattern tail.
	 */
	for (; i < pat->nelems; i++) {
		if (!pat->elems[i].optional)
			return false;
	}

	/*
	 * We have a match: update the subop structure accordingly and return
	 * true.
	 */
	ctx->subop.ninstrs = ninstrs;
	ctx->subop.last_instr_end_off = instr_offset;

	return true;
}
2261
#if IS_ENABLED(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG)
/*
 * nand_op_parser_trace - Dump the current sub-operation for debugging.
 *
 * Prints every instruction of the full operation, marking with "->" the
 * instructions that belong to the sub-operation about to be executed.
 */
static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
{
	const struct nand_op_instr *instr;
	char *prefix = "      ";
	unsigned int i;

	pr_debug("executing subop (CS%d):\n", ctx->subop.cs);

	for (i = 0; i < ctx->ninstrs; i++) {
		instr = &ctx->instrs[i];

		/* Mark the start of the sub-operation. */
		if (instr == &ctx->subop.instrs[0])
			prefix = "    ->";

		nand_op_trace(prefix, instr);

		/* Reset the marker past the last subop instruction. */
		if (instr == &ctx->subop.instrs[ctx->subop.ninstrs - 1])
			prefix = "      ";
	}
}
#else
/* No-op stub when dynamic debug / DEBUG is disabled. */
static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
{
	/* NOP */
}
#endif
2289
2290static int nand_op_parser_cmp_ctx(const struct nand_op_parser_ctx *a,
2291 const struct nand_op_parser_ctx *b)
2292{
2293 if (a->subop.ninstrs < b->subop.ninstrs)
2294 return -1;
2295 else if (a->subop.ninstrs > b->subop.ninstrs)
2296 return 1;
2297
2298 if (a->subop.last_instr_end_off < b->subop.last_instr_end_off)
2299 return -1;
2300 else if (a->subop.last_instr_end_off > b->subop.last_instr_end_off)
2301 return 1;
2302
2303 return 0;
2304}
2305
2306
2307
2308
2309
2310
2311
2312
2313
2314
2315
2316
2317
2318
2319
2320
2321
2322
2323
2324
2325
2326
2327
/**
 * nand_op_parser_exec_op - exec_op parser
 * @chip: the NAND chip
 * @parser: patterns description provided by the controller driver
 * @op: the NAND operation to address
 * @check_only: when true, the function only checks if @op can be handled but
 *		does not execute the operation
 *
 * Helper function designed to ease integration of NAND controller drivers
 * that only support a limited set of instruction sequences. The supported
 * sequences are described in @parser, and the framework takes care of
 * splitting @op into multiple sub-operations (if required) and pass them
 * back to the ->exec() callback of the matching pattern.
 *
 * NAND controller drivers should call this function from their own ->exec_op()
 * implementation.
 *
 * Returns 0 on success, a negative error code otherwise. A failure can be
 * caused by an unsupported operation (none of the supported patterns is able
 * to handle the requested operation), or an error returned by one of the
 * matching pattern->exec() hook.
 */
int nand_op_parser_exec_op(struct nand_chip *chip,
			   const struct nand_op_parser *parser,
			   const struct nand_operation *op, bool check_only)
{
	struct nand_op_parser_ctx ctx = {
		.subop.cs = op->cs,
		.subop.instrs = op->instrs,
		.instrs = op->instrs,
		.ninstrs = op->ninstrs,
	};
	unsigned int i;

	while (ctx.subop.instrs < op->instrs + op->ninstrs) {
		const struct nand_op_parser_pattern *pattern;
		struct nand_op_parser_ctx best_ctx;
		int ret, best_pattern = -1;

		/* Try every pattern and keep the one consuming the most. */
		for (i = 0; i < parser->npatterns; i++) {
			struct nand_op_parser_ctx test_ctx = ctx;

			pattern = &parser->patterns[i];
			if (!nand_op_parser_match_pat(pattern, &test_ctx))
				continue;

			if (best_pattern >= 0 &&
			    nand_op_parser_cmp_ctx(&test_ctx, &best_ctx) <= 0)
				continue;

			best_pattern = i;
			best_ctx = test_ctx;
		}

		if (best_pattern < 0) {
			pr_debug("->exec_op() parser: pattern not found!\n");
			return -ENOTSUPP;
		}

		ctx = best_ctx;
		nand_op_parser_trace(&ctx);

		if (!check_only) {
			pattern = &parser->patterns[best_pattern];
			ret = pattern->exec(chip, &ctx.subop);
			if (ret)
				return ret;
		}

		/*
		 * Update the context structure by pointing to the start of the
		 * next subop.
		 */
		ctx.subop.instrs = ctx.subop.instrs + ctx.subop.ninstrs;
		if (ctx.subop.last_instr_end_off)
			ctx.subop.instrs -= 1;

		ctx.subop.first_instr_start_off = ctx.subop.last_instr_end_off;
	}

	return 0;
}
2388EXPORT_SYMBOL_GPL(nand_op_parser_exec_op);
2389
2390static bool nand_instr_is_data(const struct nand_op_instr *instr)
2391{
2392 return instr && (instr->type == NAND_OP_DATA_IN_INSTR ||
2393 instr->type == NAND_OP_DATA_OUT_INSTR);
2394}
2395
2396static bool nand_subop_instr_is_valid(const struct nand_subop *subop,
2397 unsigned int instr_idx)
2398{
2399 return subop && instr_idx < subop->ninstrs;
2400}
2401
2402static unsigned int nand_subop_get_start_off(const struct nand_subop *subop,
2403 unsigned int instr_idx)
2404{
2405 if (instr_idx)
2406 return 0;
2407
2408 return subop->first_instr_start_off;
2409}
2410
2411
2412
2413
2414
2415
2416
2417
2418
2419
2420
2421
2422unsigned int nand_subop_get_addr_start_off(const struct nand_subop *subop,
2423 unsigned int instr_idx)
2424{
2425 if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2426 subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR))
2427 return 0;
2428
2429 return nand_subop_get_start_off(subop, instr_idx);
2430}
2431EXPORT_SYMBOL_GPL(nand_subop_get_addr_start_off);
2432
2433
2434
2435
2436
2437
2438
2439
2440
2441
2442
2443
2444unsigned int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
2445 unsigned int instr_idx)
2446{
2447 int start_off, end_off;
2448
2449 if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2450 subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR))
2451 return 0;
2452
2453 start_off = nand_subop_get_addr_start_off(subop, instr_idx);
2454
2455 if (instr_idx == subop->ninstrs - 1 &&
2456 subop->last_instr_end_off)
2457 end_off = subop->last_instr_end_off;
2458 else
2459 end_off = subop->instrs[instr_idx].ctx.addr.naddrs;
2460
2461 return end_off - start_off;
2462}
2463EXPORT_SYMBOL_GPL(nand_subop_get_num_addr_cyc);
2464
2465
2466
2467
2468
2469
2470
2471
2472
2473
2474
2475
2476unsigned int nand_subop_get_data_start_off(const struct nand_subop *subop,
2477 unsigned int instr_idx)
2478{
2479 if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2480 !nand_instr_is_data(&subop->instrs[instr_idx])))
2481 return 0;
2482
2483 return nand_subop_get_start_off(subop, instr_idx);
2484}
2485EXPORT_SYMBOL_GPL(nand_subop_get_data_start_off);
2486
2487
2488
2489
2490
2491
2492
2493
2494
2495
2496
2497
2498unsigned int nand_subop_get_data_len(const struct nand_subop *subop,
2499 unsigned int instr_idx)
2500{
2501 int start_off = 0, end_off;
2502
2503 if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2504 !nand_instr_is_data(&subop->instrs[instr_idx])))
2505 return 0;
2506
2507 start_off = nand_subop_get_data_start_off(subop, instr_idx);
2508
2509 if (instr_idx == subop->ninstrs - 1 &&
2510 subop->last_instr_end_off)
2511 end_off = subop->last_instr_end_off;
2512 else
2513 end_off = subop->instrs[instr_idx].ctx.data.len;
2514
2515 return end_off - start_off;
2516}
2517EXPORT_SYMBOL_GPL(nand_subop_get_data_len);
2518
2519
2520
2521
2522
2523
2524
2525
2526
2527
2528
2529
/**
 * nand_reset - Reset and initialize a NAND device
 * @chip: The NAND chip
 * @chipnr: Internal die id
 *
 * Save the timings data structure, then apply SDR timings mode 0 (see
 * nand_reset_interface for details), do the reset operation, and apply
 * back the previous timings.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_reset(struct nand_chip *chip, int chipnr)
{
	int ret;

	ret = nand_reset_interface(chip, chipnr);
	if (ret)
		return ret;

	/*
	 * The CS line has to be released before we can apply the new NAND
	 * interface settings, hence this weird nand_select_target()
	 * nand_deselect_target() dance.
	 */
	nand_select_target(chip, chipnr);
	ret = nand_reset_op(chip);
	nand_deselect_target(chip);
	if (ret)
		return ret;

	ret = nand_setup_interface(chip, chipnr);
	if (ret)
		return ret;

	return 0;
}
2555EXPORT_SYMBOL_GPL(nand_reset);
2556
2557
2558
2559
2560
2561
2562
2563
2564
2565
2566int nand_get_features(struct nand_chip *chip, int addr,
2567 u8 *subfeature_param)
2568{
2569 if (!nand_supports_get_features(chip, addr))
2570 return -ENOTSUPP;
2571
2572 if (chip->legacy.get_features)
2573 return chip->legacy.get_features(chip, addr, subfeature_param);
2574
2575 return nand_get_features_op(chip, addr, subfeature_param);
2576}
2577
2578
2579
2580
2581
2582
2583
2584
2585
2586
2587int nand_set_features(struct nand_chip *chip, int addr,
2588 u8 *subfeature_param)
2589{
2590 if (!nand_supports_set_features(chip, addr))
2591 return -ENOTSUPP;
2592
2593 if (chip->legacy.set_features)
2594 return chip->legacy.set_features(chip, addr, subfeature_param);
2595
2596 return nand_set_features_op(chip, addr, subfeature_param);
2597}
2598
2599
2600
2601
2602
2603
2604
2605
2606
2607
2608
2609
2610
2611
2612
2613
2614
2615
2616
2617
/**
 * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data
 * @buf: buffer to test
 * @len: buffer length
 * @bitflips_threshold: maximum number of bitflips
 *
 * Check if a buffer contains only 0xff, which means the underlying region
 * has been erased and is ready to be programmed.
 * The bitflips_threshold specify the maximum number of bitflips before
 * considering the region is not erased.
 *
 * Returns a positive number of bitflips less than or equal to
 * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
 * threshold.
 */
static int nand_check_erased_buf(void *buf, int len, int bitflips_threshold)
{
	const unsigned char *bitmap = buf;
	int bitflips = 0;
	int weight;

	/* Head: process byte-by-byte until the pointer is long-aligned. */
	for (; len && ((uintptr_t)bitmap) % sizeof(long);
	     len--, bitmap++) {
		weight = hweight8(*bitmap);
		bitflips += BITS_PER_BYTE - weight;
		if (unlikely(bitflips > bitflips_threshold))
			return -EBADMSG;
	}

	/* Body: scan a long at a time; an all-ones word has no flips. */
	for (; len >= sizeof(long);
	     len -= sizeof(long), bitmap += sizeof(long)) {
		unsigned long d = *((unsigned long *)bitmap);
		if (d == ~0UL)
			continue;
		weight = hweight_long(d);
		bitflips += BITS_PER_LONG - weight;
		if (unlikely(bitflips > bitflips_threshold))
			return -EBADMSG;
	}

	/* Tail: remaining bytes that do not fill a whole long. */
	for (; len > 0; len--, bitmap++) {
		weight = hweight8(*bitmap);
		bitflips += BITS_PER_BYTE - weight;
		if (unlikely(bitflips > bitflips_threshold))
			return -EBADMSG;
	}

	return bitflips;
}
2652
2653
2654
2655
2656
2657
2658
2659
2660
2661
2662
2663
2664
2665
2666
2667
2668
2669
2670
2671
2672
2673
2674
2675
2676
2677
2678
2679
2680
2681
2682
2683
2684
2685
2686
2687
2688
2689
2690
2691
/**
 * nand_check_erased_ecc_chunk - check if an ECC chunk contains (almost) only
 *				 0xff data
 * @data: data buffer to test
 * @datalen: data length
 * @ecc: ECC buffer
 * @ecclen: ECC length
 * @extraoob: extra OOB buffer
 * @extraooblen: extra OOB length
 * @bitflips_threshold: maximum number of bitflips
 *
 * Check if a data buffer and its associated ECC and OOB data contains only
 * 0xff pattern, which means the underlying region has been erased and is
 * ready to be programmed.
 * Each buffer shares the remaining bitflip budget: flips already counted in
 * one buffer reduce the threshold applied to the next.
 * Once bitflips are detected, all the buffers are rewritten with 0xff so the
 * caller sees a perfectly erased chunk.
 *
 * Returns a positive number of bitflips less than or equal to
 * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
 * threshold.
 */
int nand_check_erased_ecc_chunk(void *data, int datalen,
				void *ecc, int ecclen,
				void *extraoob, int extraooblen,
				int bitflips_threshold)
{
	int flips_data, flips_ecc, flips_extra;

	flips_data = nand_check_erased_buf(data, datalen, bitflips_threshold);
	if (flips_data < 0)
		return flips_data;

	flips_ecc = nand_check_erased_buf(ecc, ecclen,
					  bitflips_threshold - flips_data);
	if (flips_ecc < 0)
		return flips_ecc;

	flips_extra = nand_check_erased_buf(extraoob, extraooblen,
					    bitflips_threshold - flips_data -
					    flips_ecc);
	if (flips_extra < 0)
		return flips_extra;

	/* Scrub any flipped buffer back to the all-0xff erased pattern. */
	if (flips_data)
		memset(data, 0xff, datalen);

	if (flips_ecc)
		memset(ecc, 0xff, ecclen);

	if (flips_extra)
		memset(extraoob, 0xff, extraooblen);

	return flips_data + flips_ecc + flips_extra;
}
EXPORT_SYMBOL(nand_check_erased_ecc_chunk);
2728EXPORT_SYMBOL(nand_check_erased_ecc_chunk);
2729
2730
2731
2732
2733
2734
2735
2736
2737
2738
/**
 * nand_read_page_raw_notsupp - dummy read raw page function
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * Returns -ENOTSUPP unconditionally.
 */
int nand_read_page_raw_notsupp(struct nand_chip *chip, u8 *buf,
			       int oob_required, int page)
{
	return -ENOTSUPP;
}
2744
2745
2746
2747
2748
2749
2750
2751
2752
2753
/**
 * nand_read_page_raw - [INTERN] read raw page data without ecc
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * Not for syndrome calculating ECC controllers, which use a special oob
 * layout.
 */
int nand_read_page_raw(struct nand_chip *chip, uint8_t *buf, int oob_required,
		       int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	ret = nand_read_page_op(chip, page, 0, buf, mtd->writesize);
	if (ret)
		return ret;

	if (oob_required) {
		ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize,
					false, false);
		if (ret)
			return ret;
	}

	return 0;
}
2773EXPORT_SYMBOL(nand_read_page_raw);
2774
2775
2776
2777
2778
2779
2780
2781
2782
2783
2784
2785
2786
2787
2788
2789
/**
 * nand_monolithic_read_page_raw - Monolithic page read in raw mode
 * @chip: NAND chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * This is a raw page read, ie. without any error detection/correction.
 * Monolithic means we are requesting all the relevant data (main plus
 * eventually OOB) to be loaded in the NAND cache and sent over the
 * bus (from the NAND chip to the NAND controller) in a single
 * operation. This is an alternative to nand_read_page_raw(), which
 * first reads the main data, and if the OOB data is requested too,
 * then reads more data on the bus.
 */
int nand_monolithic_read_page_raw(struct nand_chip *chip, u8 *buf,
				  int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int size = mtd->writesize;
	u8 *read_buf = buf;
	int ret;

	if (oob_required) {
		size += mtd->oobsize;

		/*
		 * Use the internal bounce buffer so the main data and OOB
		 * land contiguously; copied back to the caller below.
		 */
		if (buf != chip->data_buf)
			read_buf = nand_get_data_buf(chip);
	}

	ret = nand_read_page_op(chip, page, 0, read_buf, size);
	if (ret)
		return ret;

	if (buf != chip->data_buf)
		memcpy(buf, read_buf, mtd->writesize);

	return 0;
}
2814EXPORT_SYMBOL(nand_monolithic_read_page_raw);
2815
2816
2817
2818
2819
2820
2821
2822
2823
2824
/**
 * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * We need a special oob layout and handling even when OOB isn't used.
 * Per ECC step the on-flash layout is: prepad | ECC bytes | postpad,
 * interleaved with the data chunks.
 */
static int nand_read_page_raw_syndrome(struct nand_chip *chip, uint8_t *buf,
				       int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	uint8_t *oob = chip->oob_poi;
	int steps, size, ret;

	ret = nand_read_page_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	for (steps = chip->ecc.steps; steps > 0; steps--) {
		/* Data chunk for this ECC step. */
		ret = nand_read_data_op(chip, buf, eccsize, false, false);
		if (ret)
			return ret;

		buf += eccsize;

		if (chip->ecc.prepad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
						false, false);
			if (ret)
				return ret;

			oob += chip->ecc.prepad;
		}

		/* The ECC bytes themselves. */
		ret = nand_read_data_op(chip, oob, eccbytes, false, false);
		if (ret)
			return ret;

		oob += eccbytes;

		if (chip->ecc.postpad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
						false, false);
			if (ret)
				return ret;

			oob += chip->ecc.postpad;
		}
	}

	/* Read whatever OOB bytes remain after the interleaved layout. */
	size = mtd->oobsize - (oob - chip->oob_poi);
	if (size) {
		ret = nand_read_data_op(chip, oob, size, false, false);
		if (ret)
			return ret;
	}

	return 0;
}
2879
2880
2881
2882
2883
2884
2885
2886
/**
 * nand_read_page_swecc - [REPLACEABLE] software ECC based page read function
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * Reads the whole page raw, computes the ECC in software, and corrects the
 * data against the ECC bytes stored in the OOB area.
 */
static int nand_read_page_swecc(struct nand_chip *chip, uint8_t *buf,
				int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *p = buf;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	uint8_t *ecc_code = chip->ecc.code_buf;
	unsigned int max_bitflips = 0;

	/* Raw read always fetches the OOB too (ECC bytes live there). */
	chip->ecc.read_page_raw(chip, buf, 1, page);

	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
		chip->ecc.calculate(chip, p, &ecc_calc[i]);

	/* Extract the stored ECC bytes from the OOB buffer. */
	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	eccsteps = chip->ecc.steps;
	p = buf;

	for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int stat;

		stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}
	return max_bitflips;
}
2925
2926
2927
2928
2929
2930
2931
2932
2933
2934static int nand_read_subpage(struct nand_chip *chip, uint32_t data_offs,
2935 uint32_t readlen, uint8_t *bufpoi, int page)
2936{
2937 struct mtd_info *mtd = nand_to_mtd(chip);
2938 int start_step, end_step, num_steps, ret;
2939 uint8_t *p;
2940 int data_col_addr, i, gaps = 0;
2941 int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
2942 int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
2943 int index, section = 0;
2944 unsigned int max_bitflips = 0;
2945 struct mtd_oob_region oobregion = { };
2946
2947
2948 start_step = data_offs / chip->ecc.size;
2949 end_step = (data_offs + readlen - 1) / chip->ecc.size;
2950 num_steps = end_step - start_step + 1;
2951 index = start_step * chip->ecc.bytes;
2952
2953
2954 datafrag_len = num_steps * chip->ecc.size;
2955 eccfrag_len = num_steps * chip->ecc.bytes;
2956
2957 data_col_addr = start_step * chip->ecc.size;
2958
2959 p = bufpoi + data_col_addr;
2960 ret = nand_read_page_op(chip, page, data_col_addr, p, datafrag_len);
2961 if (ret)
2962 return ret;
2963
2964
2965 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
2966 chip->ecc.calculate(chip, p, &chip->ecc.calc_buf[i]);
2967
2968
2969
2970
2971
2972 ret = mtd_ooblayout_find_eccregion(mtd, index, §ion, &oobregion);
2973 if (ret)
2974 return ret;
2975
2976 if (oobregion.length < eccfrag_len)
2977 gaps = 1;
2978
2979 if (gaps) {
2980 ret = nand_change_read_column_op(chip, mtd->writesize,
2981 chip->oob_poi, mtd->oobsize,
2982 false);
2983 if (ret)
2984 return ret;
2985 } else {
2986
2987
2988
2989
2990 aligned_pos = oobregion.offset & ~(busw - 1);
2991 aligned_len = eccfrag_len;
2992 if (oobregion.offset & (busw - 1))
2993 aligned_len++;
2994 if ((oobregion.offset + (num_steps * chip->ecc.bytes)) &
2995 (busw - 1))
2996 aligned_len++;
2997
2998 ret = nand_change_read_column_op(chip,
2999 mtd->writesize + aligned_pos,
3000 &chip->oob_poi[aligned_pos],
3001 aligned_len, false);
3002 if (ret)
3003 return ret;
3004 }
3005
3006 ret = mtd_ooblayout_get_eccbytes(mtd, chip->ecc.code_buf,
3007 chip->oob_poi, index, eccfrag_len);
3008 if (ret)
3009 return ret;
3010
3011 p = bufpoi + data_col_addr;
3012 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
3013 int stat;
3014
3015 stat = chip->ecc.correct(chip, p, &chip->ecc.code_buf[i],
3016 &chip->ecc.calc_buf[i]);
3017 if (stat == -EBADMSG &&
3018 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
3019
3020 stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
3021 &chip->ecc.code_buf[i],
3022 chip->ecc.bytes,
3023 NULL, 0,
3024 chip->ecc.strength);
3025 }
3026
3027 if (stat < 0) {
3028 mtd->ecc_stats.failed++;
3029 } else {
3030 mtd->ecc_stats.corrected += stat;
3031 max_bitflips = max_t(unsigned int, max_bitflips, stat);
3032 }
3033 }
3034 return max_bitflips;
3035}
3036
3037
3038
3039
3040
3041
3042
3043
3044
3045
/**
 * nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * Not for syndrome calculating ECC controllers which need a special oob
 * layout.
 */
static int nand_read_page_hwecc(struct nand_chip *chip, uint8_t *buf,
				int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *p = buf;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	uint8_t *ecc_code = chip->ecc.code_buf;
	unsigned int max_bitflips = 0;

	ret = nand_read_page_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		/* Arm the hardware ECC engine before each data chunk. */
		chip->ecc.hwctl(chip, NAND_ECC_READ);

		ret = nand_read_data_op(chip, p, eccsize, false, false);
		if (ret)
			return ret;

		chip->ecc.calculate(chip, p, &ecc_calc[i]);
	}

	ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false,
				false);
	if (ret)
		return ret;

	/* Extract the stored ECC bytes from the OOB buffer. */
	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	eccsteps = chip->ecc.steps;
	p = buf;

	for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int stat;

		stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
		if (stat == -EBADMSG &&
		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
			/* check for empty pages with bitflips */
			stat = nand_check_erased_ecc_chunk(p, eccsize,
						&ecc_code[i], eccbytes,
						NULL, 0,
						chip->ecc.strength);
		}

		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}
	return max_bitflips;
}
3107
3108
3109
3110
3111
3112
3113
3114
3115
3116
3117
/**
 * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * The hw generator calculates the error syndrome automatically. Therefore we
 * need a special oob layout and handling.
 */
static int nand_read_page_syndrome(struct nand_chip *chip, uint8_t *buf,
				   int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret, i, eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	int eccpadbytes = eccbytes + chip->ecc.prepad + chip->ecc.postpad;
	uint8_t *p = buf;
	uint8_t *oob = chip->oob_poi;
	unsigned int max_bitflips = 0;

	ret = nand_read_page_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int stat;

		/* Arm the hardware ECC engine before each data chunk. */
		chip->ecc.hwctl(chip, NAND_ECC_READ);

		ret = nand_read_data_op(chip, p, eccsize, false, false);
		if (ret)
			return ret;

		if (chip->ecc.prepad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
						false, false);
			if (ret)
				return ret;

			oob += chip->ecc.prepad;
		}

		/* Switch engine to syndrome-read mode for the ECC bytes. */
		chip->ecc.hwctl(chip, NAND_ECC_READSYN);

		ret = nand_read_data_op(chip, oob, eccbytes, false, false);
		if (ret)
			return ret;

		/* Syndrome engines correct from the raw OOB bytes directly. */
		stat = chip->ecc.correct(chip, p, oob, NULL);

		oob += eccbytes;

		if (chip->ecc.postpad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
						false, false);
			if (ret)
				return ret;

			oob += chip->ecc.postpad;
		}

		if (stat == -EBADMSG &&
		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
			/* check for empty pages with bitflips */
			stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
							   oob - eccpadbytes,
							   eccpadbytes,
							   NULL, 0,
							   chip->ecc.strength);
		}

		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}

	/* Calculate remaining oob bytes */
	i = mtd->oobsize - (oob - chip->oob_poi);
	if (i) {
		ret = nand_read_data_op(chip, oob, i, false, false);
		if (ret)
			return ret;
	}

	return max_bitflips;
}
3199
3200
3201
3202
3203
3204
3205
3206
/**
 * nand_transfer_oob - [INTERN] Transfer oob to client buffer
 * @chip: NAND chip object
 * @oob: oob destination address
 * @ops: oob ops structure
 * @len: size of oob to transfer
 *
 * Returns the address past the last transferred OOB byte.
 */
static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
				  struct mtd_oob_ops *ops, size_t len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	switch (ops->mode) {

	case MTD_OPS_PLACE_OOB:
	case MTD_OPS_RAW:
		/* Straight copy from the requested OOB offset. */
		memcpy(oob, chip->oob_poi + ops->ooboffs, len);
		return oob + len;

	case MTD_OPS_AUTO_OOB:
		/* Gather only the free (non-ECC) bytes per the OOB layout. */
		ret = mtd_ooblayout_get_databytes(mtd, oob, chip->oob_poi,
						  ops->ooboffs, len);
		BUG_ON(ret);
		return oob + len;

	default:
		/* Any other mode is a caller bug. */
		BUG();
	}
	return NULL;
}
3231
3232
3233
3234
3235
3236
3237
3238
3239
3240
/*
 * nand_setup_read_retry - switch the chip into the given READ RETRY mode.
 * @chip: NAND chip structure
 * @retry_mode: retry mode index, 0 .. chip->read_retries - 1
 *
 * Delegates to the chip's setup_read_retry() hook when one is provided.
 * Returns 0 on success, -EINVAL for an out-of-range mode, -EOPNOTSUPP when
 * the chip has no retry hook.
 */
static int nand_setup_read_retry(struct nand_chip *chip, int retry_mode)
{
	pr_debug("setting READ RETRY mode %d\n", retry_mode);

	/* Valid modes are numbered 0 .. read_retries - 1. */
	if (retry_mode >= chip->read_retries)
		return -EINVAL;

	if (!chip->ops.setup_read_retry)
		return -EOPNOTSUPP;

	return chip->ops.setup_read_retry(chip, retry_mode);
}
3253
3254static void nand_wait_readrdy(struct nand_chip *chip)
3255{
3256 const struct nand_sdr_timings *sdr;
3257
3258 if (!(chip->options & NAND_NEED_READRDY))
3259 return;
3260
3261 sdr = nand_get_sdr_timings(nand_get_interface_config(chip));
3262 WARN_ON(nand_wait_rdy_op(chip, PSEC_TO_MSEC(sdr->tR_max), 0));
3263}
3264
3265
3266
3267
3268
3269
3270
3271
3272
/**
 * nand_do_read_ops - [INTERN] Read data with ECC
 * @chip: NAND chip object
 * @from: offset to read from
 * @ops: oob ops structure
 *
 * Internal read path. Called with the device already locked; selects and
 * deselects the relevant target(s) itself. On success returns the maximum
 * number of bitflips seen in any ECC step, or a negative error code.
 */
static int nand_do_read_ops(struct nand_chip *chip, loff_t from,
			    struct mtd_oob_ops *ops)
{
	int chipnr, page, realpage, col, bytes, aligned, oob_required;
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret = 0;
	uint32_t readlen = ops->len;
	uint32_t oobreadlen = ops->ooblen;
	uint32_t max_oobsize = mtd_oobavail(mtd, ops);

	uint8_t *bufpoi, *oob, *buf;
	int use_bounce_buf;
	unsigned int max_bitflips = 0;
	int retry_mode = 0;
	bool ecc_fail = false;

	/* Select the die the start address falls into. */
	chipnr = (int)(from >> chip->chip_shift);
	nand_select_target(chip, chipnr);

	realpage = (int)(from >> chip->page_shift);
	page = realpage & chip->pagemask;

	/* Byte offset within the first page. */
	col = (int)(from & (mtd->writesize - 1));

	buf = ops->datbuf;
	oob = ops->oobbuf;
	oob_required = oob ? 1 : 0;

	while (1) {
		/* Snapshot ECC stats so per-page failures are detectable. */
		struct mtd_ecc_stats ecc_stats = mtd->ecc_stats;

		bytes = min(mtd->writesize - col, readlen);
		aligned = (bytes == mtd->writesize);

		/*
		 * Partial-page reads go through the internal bounce buffer;
		 * DMA-capable controllers also need a valid, aligned buffer.
		 */
		if (!aligned)
			use_bounce_buf = 1;
		else if (chip->options & NAND_USES_DMA)
			use_bounce_buf = !virt_addr_valid(buf) ||
					 !IS_ALIGNED((unsigned long)buf,
						     chip->buf_align);
		else
			use_bounce_buf = 0;

		/* Is the current page in the internal page cache? */
		if (realpage != chip->pagecache.page || oob) {
			bufpoi = use_bounce_buf ? chip->data_buf : buf;

			if (use_bounce_buf && aligned)
				pr_debug("%s: using read bounce buffer for buf@%p\n",
					 __func__, buf);

read_retry:
			/*
			 * Read the page into the buffer. On success the read
			 * methods return the max number of bitflips per ECC
			 * step.
			 */
			if (unlikely(ops->mode == MTD_OPS_RAW))
				ret = chip->ecc.read_page_raw(chip, bufpoi,
							      oob_required,
							      page);
			else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) &&
				 !oob)
				ret = chip->ecc.read_subpage(chip, col, bytes,
							     bufpoi, page);
			else
				ret = chip->ecc.read_page(chip, bufpoi,
							  oob_required, page);
			if (ret < 0) {
				if (use_bounce_buf)
					/* Invalidate page cache */
					chip->pagecache.page = -1;
				break;
			}

			/*
			 * Copy back from the bounce buffer. Cache the page
			 * only for full, non-raw reads that corrected cleanly.
			 */
			if (use_bounce_buf) {
				if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
				    !(mtd->ecc_stats.failed - ecc_stats.failed) &&
				    (ops->mode != MTD_OPS_RAW)) {
					chip->pagecache.page = realpage;
					chip->pagecache.bitflips = ret;
				} else {
					/* Invalidate page cache */
					chip->pagecache.page = -1;
				}
				memcpy(buf, bufpoi + col, bytes);
			}

			if (unlikely(oob)) {
				int toread = min(oobreadlen, max_oobsize);

				if (toread) {
					oob = nand_transfer_oob(chip, oob, ops,
								toread);
					oobreadlen -= toread;
				}
			}

			nand_wait_readrdy(chip);

			/*
			 * On an uncorrectable error, step through the chip's
			 * read-retry modes before declaring the page bad.
			 */
			if (mtd->ecc_stats.failed - ecc_stats.failed) {
				if (retry_mode + 1 < chip->read_retries) {
					retry_mode++;
					ret = nand_setup_read_retry(chip,
							retry_mode);
					if (ret < 0)
						break;

					/* Reset ECC stats and retry the page. */
					mtd->ecc_stats = ecc_stats;
					goto read_retry;
				} else {
					/* No more retry modes: real failure. */
					ecc_fail = true;
				}
			}

			buf += bytes;
			max_bitflips = max_t(unsigned int, max_bitflips, ret);
		} else {
			/* Serve the request straight from the page cache. */
			memcpy(buf, chip->data_buf + col, bytes);
			buf += bytes;
			max_bitflips = max_t(unsigned int, max_bitflips,
					     chip->pagecache.bitflips);
		}

		readlen -= bytes;

		/* Back to retry mode 0 before touching the next page. */
		if (retry_mode) {
			ret = nand_setup_read_retry(chip, 0);
			if (ret < 0)
				break;
			retry_mode = 0;
		}

		if (!readlen)
			break;

		/* Subsequent reads are page aligned. */
		col = 0;
		/* Increment page address */
		realpage++;

		page = realpage & chip->pagemask;
		/* Check, if we cross a chip boundary */
		if (!page) {
			chipnr++;
			nand_deselect_target(chip);
			nand_select_target(chip, chipnr);
		}
	}
	nand_deselect_target(chip);

	ops->retlen = ops->len - (size_t) readlen;
	if (oob)
		ops->oobretlen = ops->ooblen - oobreadlen;

	if (ret < 0)
		return ret;

	if (ecc_fail)
		return -EBADMSG;

	return max_bitflips;
}
3442
3443
3444
3445
3446
3447
/**
 * nand_read_oob_std - [REPLACEABLE] the most common OOB data read function
 * @chip: nand chip info structure
 * @page: page number to read
 *
 * Reads the whole OOB area of @page into chip->oob_poi.
 */
int nand_read_oob_std(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
}
EXPORT_SYMBOL(nand_read_oob_std);
3455
3456
3457
3458
3459
3460
3461
3462static int nand_read_oob_syndrome(struct nand_chip *chip, int page)
3463{
3464 struct mtd_info *mtd = nand_to_mtd(chip);
3465 int length = mtd->oobsize;
3466 int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
3467 int eccsize = chip->ecc.size;
3468 uint8_t *bufpoi = chip->oob_poi;
3469 int i, toread, sndrnd = 0, pos, ret;
3470
3471 ret = nand_read_page_op(chip, page, chip->ecc.size, NULL, 0);
3472 if (ret)
3473 return ret;
3474
3475 for (i = 0; i < chip->ecc.steps; i++) {
3476 if (sndrnd) {
3477 int ret;
3478
3479 pos = eccsize + i * (eccsize + chunk);
3480 if (mtd->writesize > 512)
3481 ret = nand_change_read_column_op(chip, pos,
3482 NULL, 0,
3483 false);
3484 else
3485 ret = nand_read_page_op(chip, page, pos, NULL,
3486 0);
3487
3488 if (ret)
3489 return ret;
3490 } else
3491 sndrnd = 1;
3492 toread = min_t(int, length, chunk);
3493
3494 ret = nand_read_data_op(chip, bufpoi, toread, false, false);
3495 if (ret)
3496 return ret;
3497
3498 bufpoi += toread;
3499 length -= toread;
3500 }
3501 if (length > 0) {
3502 ret = nand_read_data_op(chip, bufpoi, length, false, false);
3503 if (ret)
3504 return ret;
3505 }
3506
3507 return 0;
3508}
3509
3510
3511
3512
3513
3514
/**
 * nand_write_oob_std - [REPLACEABLE] the most common OOB data write function
 * @chip: nand chip info structure
 * @page: page number to write
 *
 * Programs the whole OOB area of @page from chip->oob_poi.
 */
int nand_write_oob_std(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
				 mtd->oobsize);
}
EXPORT_SYMBOL(nand_write_oob_std);
3523
3524
3525
3526
3527
3528
3529
/**
 * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC
 *			     with syndrome
 * @chip: nand chip info structure
 * @page: page number to write
 *
 * Writes the OOB chunks interleaved with the data areas, matching the
 * syndrome page layout used by nand_read_oob_syndrome().
 */
static int nand_write_oob_syndrome(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
	int eccsize = chip->ecc.size, length = mtd->oobsize;
	int ret, i, len, pos, sndcmd = 0, steps = chip->ecc.steps;
	const uint8_t *bufpoi = chip->oob_poi;

	/*
	 * Without pre/post padding all ECC+OOB bytes sit contiguously after
	 * the data, so a single write starting at that offset suffices
	 * (steps = 0 skips the per-chunk loop). With padding, start right
	 * after the first data area and walk the interleaved chunks.
	 */
	if (!chip->ecc.prepad && !chip->ecc.postpad) {
		pos = steps * (eccsize + chunk);
		steps = 0;
	} else
		pos = eccsize;

	ret = nand_prog_page_begin_op(chip, page, pos, NULL, 0);
	if (ret)
		return ret;

	for (i = 0; i < steps; i++) {
		if (sndcmd) {
			if (mtd->writesize <= 512) {
				/*
				 * Small-page chips cannot seek mid-program:
				 * pad the skipped data area with 0xff instead.
				 */
				uint32_t fill = 0xFFFFFFFF;

				len = eccsize;
				while (len > 0) {
					int num = min_t(int, len, 4);

					ret = nand_write_data_op(chip, &fill,
								 num, false);
					if (ret)
						return ret;

					len -= num;
				}
			} else {
				/* Seek past the next data area. */
				pos = eccsize + i * (eccsize + chunk);
				ret = nand_change_write_column_op(chip, pos,
								  NULL, 0,
								  false);
				if (ret)
					return ret;
			}
		} else
			sndcmd = 1;
		len = min_t(int, length, chunk);

		ret = nand_write_data_op(chip, bufpoi, len, false);
		if (ret)
			return ret;

		bufpoi += len;
		length -= len;
	}
	if (length > 0) {
		/* Flush any remaining OOB bytes past the last chunk. */
		ret = nand_write_data_op(chip, bufpoi, length, false);
		if (ret)
			return ret;
	}

	return nand_prog_page_end_op(chip);
}
3596
3597
3598
3599
3600
3601
3602
3603
3604
/**
 * nand_do_read_oob - [INTERN] NAND read out-of-band
 * @chip: NAND chip object
 * @from: offset to read from
 * @ops: oob operations description structure
 *
 * NAND read out-of-band data from the spare area. Called with the device
 * locked; handles chip selection and crossing chip boundaries itself.
 * Returns max bitflips seen, or a negative error code.
 */
static int nand_do_read_oob(struct nand_chip *chip, loff_t from,
			    struct mtd_oob_ops *ops)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int max_bitflips = 0;
	int page, realpage, chipnr;
	struct mtd_ecc_stats stats;
	int readlen = ops->ooblen;
	int len;
	uint8_t *buf = ops->oobbuf;
	int ret = 0;

	pr_debug("%s: from = 0x%08Lx, len = %i\n",
			__func__, (unsigned long long)from, readlen);

	/* Snapshot stats to detect new ECC failures at the end. */
	stats = mtd->ecc_stats;

	len = mtd_oobavail(mtd, ops);

	chipnr = (int)(from >> chip->chip_shift);
	nand_select_target(chip, chipnr);

	/* Shift to get page */
	realpage = (int)(from >> chip->page_shift);
	page = realpage & chip->pagemask;

	while (1) {
		if (ops->mode == MTD_OPS_RAW)
			ret = chip->ecc.read_oob_raw(chip, page);
		else
			ret = chip->ecc.read_oob(chip, page);

		if (ret < 0)
			break;

		len = min(len, readlen);
		buf = nand_transfer_oob(chip, buf, ops, len);

		nand_wait_readrdy(chip);

		max_bitflips = max_t(unsigned int, max_bitflips, ret);

		readlen -= len;
		if (!readlen)
			break;

		/* Increment page address */
		realpage++;

		page = realpage & chip->pagemask;
		/* Check, if we cross a chip boundary */
		if (!page) {
			chipnr++;
			nand_deselect_target(chip);
			nand_select_target(chip, chipnr);
		}
	}
	nand_deselect_target(chip);

	ops->oobretlen = ops->ooblen - readlen;

	if (ret < 0)
		return ret;

	/* Any new ECC failure means the whole request failed. */
	if (mtd->ecc_stats.failed - stats.failed)
		return -EBADMSG;

	return max_bitflips;
}
3674
3675
3676
3677
3678
3679
3680
3681
3682
3683static int nand_read_oob(struct mtd_info *mtd, loff_t from,
3684 struct mtd_oob_ops *ops)
3685{
3686 struct nand_chip *chip = mtd_to_nand(mtd);
3687 int ret;
3688
3689 ops->retlen = 0;
3690
3691 if (ops->mode != MTD_OPS_PLACE_OOB &&
3692 ops->mode != MTD_OPS_AUTO_OOB &&
3693 ops->mode != MTD_OPS_RAW)
3694 return -ENOTSUPP;
3695
3696 ret = nand_get_device(chip);
3697 if (ret)
3698 return ret;
3699
3700 if (!ops->datbuf)
3701 ret = nand_do_read_oob(chip, from, ops);
3702 else
3703 ret = nand_do_read_ops(chip, from, ops);
3704
3705 nand_release_device(chip);
3706 return ret;
3707}
3708
3709
3710
3711
3712
3713
3714
3715
3716
3717
/**
 * nand_write_page_raw_notsupp - dummy raw page write function
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * Placeholder for controllers that cannot do raw page accesses;
 * unconditionally returns -ENOTSUPP.
 */
int nand_write_page_raw_notsupp(struct nand_chip *chip, const u8 *buf,
				int oob_required, int page)
{
	return -ENOTSUPP;
}
3723
3724
3725
3726
3727
3728
3729
3730
3731
3732
/**
 * nand_write_page_raw - [INTERN] raw page write function
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * Not for syndrome calculating ECC controllers, which use a special OOB
 * layout: writes the data area, then optionally latches the OOB bytes,
 * then issues the program command.
 */
int nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
			int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	ret = nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
	if (ret)
		return ret;

	if (oob_required) {
		ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize,
					 false);
		if (ret)
			return ret;
	}

	return nand_prog_page_end_op(chip);
}
EXPORT_SYMBOL(nand_write_page_raw);
3753
3754
3755
3756
3757
3758
3759
3760
3761
3762
3763
3764
3765
3766
3767
3768
3769
/**
 * nand_monolithic_write_page_raw - monolithic page write in raw mode
 * @chip: NAND chip info structure
 * @buf: data buffer to write
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * Raw page write that sends all relevant data (main area, plus OOB when
 * requested) to the chip in a single program operation, instead of latching
 * the OOB separately as nand_write_page_raw() does.
 */
int nand_monolithic_write_page_raw(struct nand_chip *chip, const u8 *buf,
				   int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int size = mtd->writesize;
	u8 *write_buf = (u8 *)buf;

	if (oob_required) {
		size += mtd->oobsize;

		/*
		 * Data and OOB must be contiguous for a single transfer:
		 * stage the data in the internal buffer (which chip->oob_poi
		 * follows) unless @buf already is that buffer.
		 */
		if (buf != chip->data_buf) {
			write_buf = nand_get_data_buf(chip);
			memcpy(write_buf, buf, mtd->writesize);
		}
	}

	return nand_prog_page_op(chip, page, 0, write_buf, size);
}
EXPORT_SYMBOL(nand_monolithic_write_page_raw);
3789
3790
3791
3792
3793
3794
3795
3796
3797
3798
/**
 * nand_write_page_raw_syndrome - [INTERN] raw page write function
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * For hardware-ECC-with-syndrome page layouts: writes the page as
 * data / prepad / ecc / postpad per step, then any remaining OOB bytes.
 */
static int nand_write_page_raw_syndrome(struct nand_chip *chip,
					const uint8_t *buf, int oob_required,
					int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	uint8_t *oob = chip->oob_poi;
	int steps, size, ret;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	for (steps = chip->ecc.steps; steps > 0; steps--) {
		/* Data area of this ECC step. */
		ret = nand_write_data_op(chip, buf, eccsize, false);
		if (ret)
			return ret;

		buf += eccsize;

		if (chip->ecc.prepad) {
			ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
						 false);
			if (ret)
				return ret;

			oob += chip->ecc.prepad;
		}

		/* ECC bytes for this step come straight from oob_poi. */
		ret = nand_write_data_op(chip, oob, eccbytes, false);
		if (ret)
			return ret;

		oob += eccbytes;

		if (chip->ecc.postpad) {
			ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
						 false);
			if (ret)
				return ret;

			oob += chip->ecc.postpad;
		}
	}

	/* Whatever is left over in the OOB buffer. */
	size = mtd->oobsize - (oob - chip->oob_poi);
	if (size) {
		ret = nand_write_data_op(chip, oob, size, false);
		if (ret)
			return ret;
	}

	return nand_prog_page_end_op(chip);
}
3854
3855
3856
3857
3858
3859
3860
3861static int nand_write_page_swecc(struct nand_chip *chip, const uint8_t *buf,
3862 int oob_required, int page)
3863{
3864 struct mtd_info *mtd = nand_to_mtd(chip);
3865 int i, eccsize = chip->ecc.size, ret;
3866 int eccbytes = chip->ecc.bytes;
3867 int eccsteps = chip->ecc.steps;
3868 uint8_t *ecc_calc = chip->ecc.calc_buf;
3869 const uint8_t *p = buf;
3870
3871
3872 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
3873 chip->ecc.calculate(chip, p, &ecc_calc[i]);
3874
3875 ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
3876 chip->ecc.total);
3877 if (ret)
3878 return ret;
3879
3880 return chip->ecc.write_page_raw(chip, buf, 1, page);
3881}
3882
3883
3884
3885
3886
3887
3888
3889
/**
 * nand_write_page_hwecc - [REPLACEABLE] hardware ECC based page write function
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * Writes each ECC step's data with the hardware ECC engine armed, collects
 * the engine's ECC bytes into the OOB layout, then writes the OOB area.
 */
static int nand_write_page_hwecc(struct nand_chip *chip, const uint8_t *buf,
				 int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	const uint8_t *p = buf;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		/* Arm the ECC engine before streaming the step's data. */
		chip->ecc.hwctl(chip, NAND_ECC_WRITE);

		ret = nand_write_data_op(chip, p, eccsize, false);
		if (ret)
			return ret;

		/* Fetch the ECC bytes the engine computed for this step. */
		chip->ecc.calculate(chip, p, &ecc_calc[i]);
	}

	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
	if (ret)
		return ret;

	return nand_prog_page_end_op(chip);
}
3925
3926
3927
3928
3929
3930
3931
3932
3933
3934
3935
/**
 * nand_write_subpage_hwecc - [REPLACEABLE] hardware ECC based subpage write
 * @chip: nand chip info structure
 * @offset: column address of subpage within the page
 * @data_len: data length
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * Whole page is always streamed to the chip; ECC (and OOB) for steps
 * outside [offset, offset + data_len) is masked with 0xff so those areas
 * are left unprogrammed.
 */
static int nand_write_subpage_hwecc(struct nand_chip *chip, uint32_t offset,
				    uint32_t data_len, const uint8_t *buf,
				    int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	uint8_t *oob_buf = chip->oob_poi;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	int ecc_steps = chip->ecc.steps;
	uint32_t start_step = offset / ecc_size;
	uint32_t end_step = (offset + data_len - 1) / ecc_size;
	int oob_bytes = mtd->oobsize / ecc_steps;
	int step, ret;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	for (step = 0; step < ecc_steps; step++) {
		/* Configure controller for WRITE access. */
		chip->ecc.hwctl(chip, NAND_ECC_WRITE);

		/* Write data (untouched subpages already masked by 0xff). */
		ret = nand_write_data_op(chip, buf, ecc_size, false);
		if (ret)
			return ret;

		/* Mask ECC of un-touched subpages with 0xff. */
		if ((step < start_step) || (step > end_step))
			memset(ecc_calc, 0xff, ecc_bytes);
		else
			chip->ecc.calculate(chip, buf, ecc_calc);

		/*
		 * Mask OOB of un-touched subpages with 0xff (and all OOB
		 * when the caller did not provide OOB data).
		 */
		if (!oob_required || (step < start_step) || (step > end_step))
			memset(oob_buf, 0xff, oob_bytes);

		buf += ecc_size;
		ecc_calc += ecc_bytes;
		oob_buf += oob_bytes;
	}

	/* Place the (possibly masked) ECC bytes into the OOB layout. */
	ecc_calc = chip->ecc.calc_buf;
	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	/* Write OOB area. */
	ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
	if (ret)
		return ret;

	return nand_prog_page_end_op(chip);
}
3995
3996
3997
3998
3999
4000
4001
4002
4003
4004
4005
4006
/**
 * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page
 *			      write function
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * The hardware ECC generator is expected to be able to handle the
 * interleaved data / prepad / ecc / postpad layout.
 */
static int nand_write_page_syndrome(struct nand_chip *chip, const uint8_t *buf,
				    int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	const uint8_t *p = buf;
	uint8_t *oob = chip->oob_poi;
	int ret;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		/* Arm the ECC engine for this step. */
		chip->ecc.hwctl(chip, NAND_ECC_WRITE);

		ret = nand_write_data_op(chip, p, eccsize, false);
		if (ret)
			return ret;

		if (chip->ecc.prepad) {
			ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
						 false);
			if (ret)
				return ret;

			oob += chip->ecc.prepad;
		}

		/* ECC bytes go straight into the in-band OOB position. */
		chip->ecc.calculate(chip, p, oob);

		ret = nand_write_data_op(chip, oob, eccbytes, false);
		if (ret)
			return ret;

		oob += eccbytes;

		if (chip->ecc.postpad) {
			ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
						 false);
			if (ret)
				return ret;

			oob += chip->ecc.postpad;
		}
	}

	/* Calculate remaining oob bytes */
	i = mtd->oobsize - (oob - chip->oob_poi);
	if (i) {
		ret = nand_write_data_op(chip, oob, i, false);
		if (ret)
			return ret;
	}

	return nand_prog_page_end_op(chip);
}
4066
4067
4068
4069
4070
4071
4072
4073
4074
4075
4076
4077static int nand_write_page(struct nand_chip *chip, uint32_t offset,
4078 int data_len, const uint8_t *buf, int oob_required,
4079 int page, int raw)
4080{
4081 struct mtd_info *mtd = nand_to_mtd(chip);
4082 int status, subpage;
4083
4084 if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
4085 chip->ecc.write_subpage)
4086 subpage = offset || (data_len < mtd->writesize);
4087 else
4088 subpage = 0;
4089
4090 if (unlikely(raw))
4091 status = chip->ecc.write_page_raw(chip, buf, oob_required,
4092 page);
4093 else if (subpage)
4094 status = chip->ecc.write_subpage(chip, offset, data_len, buf,
4095 oob_required, page);
4096 else
4097 status = chip->ecc.write_page(chip, buf, oob_required, page);
4098
4099 if (status < 0)
4100 return status;
4101
4102 return 0;
4103}
4104
/* True when @x is not aligned to the chip's subpage size. */
#define NOTALIGNED(x)	((x & (chip->subpagesize - 1)) != 0)

/**
 * nand_do_write_ops - [INTERN] NAND write with ECC
 * @chip: NAND chip object
 * @to: offset to write to
 * @ops: oob operations description structure
 *
 * NAND write with ECC. Called with the device locked; handles chip
 * selection, bounce buffering and chip-boundary crossing itself.
 */
static int nand_do_write_ops(struct nand_chip *chip, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int chipnr, realpage, page, column;
	uint32_t writelen = ops->len;

	uint32_t oobwritelen = ops->ooblen;
	uint32_t oobmaxlen = mtd_oobavail(mtd, ops);

	uint8_t *oob = ops->oobbuf;
	uint8_t *buf = ops->datbuf;
	int ret;
	int oob_required = oob ? 1 : 0;

	ops->retlen = 0;
	if (!writelen)
		return 0;

	/* Reject writes, which are not page aligned */
	if (NOTALIGNED(to) || NOTALIGNED(ops->len)) {
		pr_notice("%s: attempt to write non page aligned data\n",
			   __func__);
		return -EINVAL;
	}

	column = to & (mtd->writesize - 1);

	chipnr = (int)(to >> chip->chip_shift);
	nand_select_target(chip, chipnr);

	/* Check, if it is write protected */
	if (nand_check_wp(chip)) {
		ret = -EIO;
		goto err_out;
	}

	realpage = (int)(to >> chip->page_shift);
	page = realpage & chip->pagemask;

	/* Invalidate the page cache, when we write to the cached page */
	if (to <= ((loff_t)chip->pagecache.page << chip->page_shift) &&
	    ((loff_t)chip->pagecache.page << chip->page_shift) < (to + ops->len))
		chip->pagecache.page = -1;

	/* Don't allow multipage oob writes with offset */
	if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) {
		ret = -EINVAL;
		goto err_out;
	}

	while (1) {
		int bytes = mtd->writesize;
		uint8_t *wbuf = buf;
		int use_bounce_buf;
		int part_pagewr = (column || writelen < mtd->writesize);

		/*
		 * Partial-page writes must be staged in the bounce buffer;
		 * DMA-capable controllers also need a valid, aligned buffer.
		 */
		if (part_pagewr)
			use_bounce_buf = 1;
		else if (chip->options & NAND_USES_DMA)
			use_bounce_buf = !virt_addr_valid(buf) ||
					 !IS_ALIGNED((unsigned long)buf,
						     chip->buf_align);
		else
			use_bounce_buf = 0;

		/*
		 * Copy the data from the initial buffer when doing partial
		 * page writes or when a bounce buffer is required.
		 */
		if (use_bounce_buf) {
			pr_debug("%s: using write bounce buffer for buf@%p\n",
					 __func__, buf);
			if (part_pagewr)
				bytes = min_t(int, bytes - column, writelen);
			wbuf = nand_get_data_buf(chip);
			/* Untouched areas are programmed as 0xff (no-op). */
			memset(wbuf, 0xff, mtd->writesize);
			memcpy(&wbuf[column], buf, bytes);
		}

		if (unlikely(oob)) {
			size_t len = min(oobwritelen, oobmaxlen);
			oob = nand_fill_oob(chip, oob, len, ops);
			oobwritelen -= len;
		} else {
			/* We still need to erase leftover OOB data */
			memset(chip->oob_poi, 0xff, mtd->oobsize);
		}

		ret = nand_write_page(chip, column, bytes, wbuf,
				      oob_required, page,
				      (ops->mode == MTD_OPS_RAW));
		if (ret)
			break;

		writelen -= bytes;
		if (!writelen)
			break;

		column = 0;
		buf += bytes;
		realpage++;

		page = realpage & chip->pagemask;
		/* Check, if we cross a chip boundary */
		if (!page) {
			chipnr++;
			nand_deselect_target(chip);
			nand_select_target(chip, chipnr);
		}
	}

	ops->retlen = ops->len - writelen;
	if (unlikely(oob))
		ops->oobretlen = ops->ooblen;

err_out:
	nand_deselect_target(chip);
	return ret;
}
4235
4236
4237
4238
4239
4240
4241
4242
4243
4244
4245
4246
4247static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
4248 size_t *retlen, const uint8_t *buf)
4249{
4250 struct nand_chip *chip = mtd_to_nand(mtd);
4251 int chipnr = (int)(to >> chip->chip_shift);
4252 struct mtd_oob_ops ops;
4253 int ret;
4254
4255 nand_select_target(chip, chipnr);
4256
4257
4258 panic_nand_wait(chip, 400);
4259
4260 memset(&ops, 0, sizeof(ops));
4261 ops.len = len;
4262 ops.datbuf = (uint8_t *)buf;
4263 ops.mode = MTD_OPS_PLACE_OOB;
4264
4265 ret = nand_do_write_ops(chip, to, &ops);
4266
4267 *retlen = ops.retlen;
4268 return ret;
4269}
4270
4271
4272
4273
4274
4275
4276
4277static int nand_write_oob(struct mtd_info *mtd, loff_t to,
4278 struct mtd_oob_ops *ops)
4279{
4280 struct nand_chip *chip = mtd_to_nand(mtd);
4281 int ret;
4282
4283 ops->retlen = 0;
4284
4285 ret = nand_get_device(chip);
4286 if (ret)
4287 return ret;
4288
4289 switch (ops->mode) {
4290 case MTD_OPS_PLACE_OOB:
4291 case MTD_OPS_AUTO_OOB:
4292 case MTD_OPS_RAW:
4293 break;
4294
4295 default:
4296 goto out;
4297 }
4298
4299 if (!ops->datbuf)
4300 ret = nand_do_write_oob(chip, to, ops);
4301 else
4302 ret = nand_do_write_ops(chip, to, ops);
4303
4304out:
4305 nand_release_device(chip);
4306 return ret;
4307}
4308
4309
4310
4311
4312
4313
4314
4315
/**
 * nand_erase - [MTD Interface] erase block(s)
 * @mtd: MTD device structure
 * @instr: erase instruction
 *
 * Erase one or more blocks; bad-block-table access is not allowed (allowbbt = 0).
 */
static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	return nand_erase_nand(mtd_to_nand(mtd), instr, 0);
}
4320
4321
4322
4323
4324
4325
4326
4327
4328
/**
 * nand_erase_nand - [INTERN] erase block(s)
 * @chip: NAND chip object
 * @instr: erase instruction
 * @allowbbt: allow erasing the bbt area
 *
 * Erase one or more blocks. Bad blocks are refused unless @allowbbt is set.
 */
int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr,
		    int allowbbt)
{
	int page, pages_per_block, ret, chipnr;
	loff_t len;

	pr_debug("%s: start = 0x%012llx, len = %llu\n",
			__func__, (unsigned long long)instr->addr,
			(unsigned long long)instr->len);

	if (check_offs_len(chip, instr->addr, instr->len))
		return -EINVAL;

	/* Grab the lock and see if the device is available */
	ret = nand_get_device(chip);
	if (ret)
		return ret;

	/* Shift to get first page */
	page = (int)(instr->addr >> chip->page_shift);
	chipnr = (int)(instr->addr >> chip->chip_shift);

	/* Calculate pages in each block */
	pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);

	/* Select the NAND device */
	nand_select_target(chip, chipnr);

	/* Check, if it is write protected */
	if (nand_check_wp(chip)) {
		pr_debug("%s: device is write protected!\n",
				__func__);
		ret = -EIO;
		goto erase_exit;
	}

	/* Loop through the blocks */
	len = instr->len;

	while (len) {
		/* Check if we have a bad block, we do not erase bad blocks! */
		if (nand_block_checkbad(chip, ((loff_t) page) <<
					chip->page_shift, allowbbt)) {
			pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
				    __func__, page);
			ret = -EIO;
			goto erase_exit;
		}

		/*
		 * Invalidate the page cache, if we erase the block which
		 * contains the current cached page.
		 */
		if (page <= chip->pagecache.page && chip->pagecache.page <
		    (page + pages_per_block))
			chip->pagecache.page = -1;

		ret = nand_erase_op(chip, (page & chip->pagemask) >>
				    (chip->phys_erase_shift - chip->page_shift));
		if (ret) {
			pr_debug("%s: failed erase, page 0x%08x\n",
					__func__, page);
			instr->fail_addr =
				((loff_t)page << chip->page_shift);
			goto erase_exit;
		}

		/* Increment page address and decrement length */
		len -= (1ULL << chip->phys_erase_shift);
		page += pages_per_block;

		/* Check, if we cross a chip boundary */
		if (len && !(page & chip->pagemask)) {
			chipnr++;
			nand_deselect_target(chip);
			nand_select_target(chip, chipnr);
		}
	}

	ret = 0;
erase_exit:

	/* Deselect and wake up anyone waiting on the device */
	nand_deselect_target(chip);
	nand_release_device(chip);

	/* Return more or less happy */
	return ret;
}
4418
4419
4420
4421
4422
4423
4424
/**
 * nand_sync - [MTD Interface] sync
 * @mtd: MTD device structure
 *
 * Sync is actually a wait for chip ready function: acquiring the device
 * lock guarantees any in-flight operation has completed.
 */
static void nand_sync(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	pr_debug("%s: called\n", __func__);

	/* Grab the lock and see if the device is available */
	WARN_ON(nand_get_device(chip));
	/* Release it and go back */
	nand_release_device(chip);
}
4436
4437
4438
4439
4440
4441
/**
 * nand_block_isbad - [MTD Interface] check if block at offset is bad
 * @mtd: MTD device structure
 * @offs: offset relative to mtd start
 *
 * Returns > 0 if the block is bad, 0 if it is good, or a negative error.
 */
static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int chipnr = (int)(offs >> chip->chip_shift);
	int ret;

	/* Select the NAND device */
	ret = nand_get_device(chip);
	if (ret)
		return ret;

	nand_select_target(chip, chipnr);

	ret = nand_block_checkbad(chip, offs, 0);

	nand_deselect_target(chip);
	nand_release_device(chip);

	return ret;
}
4462
4463
4464
4465
4466
4467
4468static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
4469{
4470 int ret;
4471
4472 ret = nand_block_isbad(mtd, ofs);
4473 if (ret) {
4474
4475 if (ret > 0)
4476 return 0;
4477 return ret;
4478 }
4479
4480 return nand_block_markbad_lowlevel(mtd_to_nand(mtd), ofs);
4481}
4482
4483
4484
4485
4486
4487
4488
/**
 * nand_suspend - [MTD Interface] Suspend the NAND flash
 * @mtd: MTD device structure
 *
 * Invokes the chip's optional suspend hook and marks the chip suspended
 * on success. Returns the hook's error code, or 0.
 */
static int nand_suspend(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int ret = 0;

	mutex_lock(&chip->lock);
	if (chip->ops.suspend)
		ret = chip->ops.suspend(chip);
	if (!ret)
		chip->suspended = 1;
	mutex_unlock(&chip->lock);

	return ret;
}
4503
4504
4505
4506
4507
/**
 * nand_resume - [MTD Interface] Resume the NAND flash
 * @mtd: MTD device structure
 *
 * Invokes the chip's optional resume hook and clears the suspended flag;
 * complains if the chip was not suspended.
 */
static void nand_resume(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	mutex_lock(&chip->lock);
	if (chip->suspended) {
		if (chip->ops.resume)
			chip->ops.resume(chip);
		chip->suspended = 0;
	} else {
		pr_err("%s called for a chip which is not in suspended state\n",
			__func__);
	}
	mutex_unlock(&chip->lock);
}
4523
4524
4525
4526
4527
4528
/**
 * nand_shutdown - [MTD Interface] Finish the current NAND operation and
 *                 prevent further operations
 * @mtd: MTD device structure
 *
 * Implemented by suspending the chip; the return value is ignored.
 */
static void nand_shutdown(struct mtd_info *mtd)
{
	nand_suspend(mtd);
}
4533
4534
4535
4536
4537
4538
4539
/**
 * nand_lock - [MTD Interface] Lock the NAND flash
 * @mtd: MTD device structure
 * @ofs: offset byte address
 * @len: number of bytes to lock (must be a multiple of block/page size)
 *
 * Delegates to the chip's optional lock_area hook.
 */
static int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (!chip->ops.lock_area)
		return -ENOTSUPP;

	return chip->ops.lock_area(chip, ofs, len);
}
4549
4550
4551
4552
4553
4554
4555
/**
 * nand_unlock - [MTD Interface] Unlock the NAND flash
 * @mtd: MTD device structure
 * @ofs: offset byte address
 * @len: number of bytes to unlock (must be a multiple of block/page size)
 *
 * Delegates to the chip's optional unlock_area hook.
 */
static int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (!chip->ops.unlock_area)
		return -ENOTSUPP;

	return chip->ops.unlock_area(chip, ofs, len);
}
4565
4566
/* Set default functions and fields for chips that did not provide them. */
static void nand_set_defaults(struct nand_chip *chip)
{
	/* If no controller is provided, use the dummy, chip-local one. */
	if (!chip->controller) {
		chip->controller = &chip->legacy.dummy_controller;
		nand_controller_init(chip->controller);
	}

	nand_legacy_set_defaults(chip);

	/* Default: no special buffer alignment requirement. */
	if (!chip->buf_align)
		chip->buf_align = 1;
}
4580
4581
4582void sanitize_string(uint8_t *s, size_t len)
4583{
4584 ssize_t i;
4585
4586
4587 s[len - 1] = 0;
4588
4589
4590 for (i = 0; i < len - 1; i++) {
4591 if (s[i] < ' ' || s[i] > 127)
4592 s[i] = '?';
4593 }
4594
4595
4596 strim(s);
4597}
4598
4599
4600
4601
4602
4603
4604
4605
4606
4607
4608
4609
4610static int nand_id_has_period(u8 *id_data, int arrlen, int period)
4611{
4612 int i, j;
4613 for (i = 0; i < period; i++)
4614 for (j = i + period; j < arrlen; j += period)
4615 if (id_data[i] != id_data[j])
4616 return 0;
4617 return 1;
4618}
4619
4620
4621
4622
4623
4624
4625
4626
4627
4628static int nand_id_len(u8 *id_data, int arrlen)
4629{
4630 int last_nonzero, period;
4631
4632
4633 for (last_nonzero = arrlen - 1; last_nonzero >= 0; last_nonzero--)
4634 if (id_data[last_nonzero])
4635 break;
4636
4637
4638 if (last_nonzero < 0)
4639 return 0;
4640
4641
4642 for (period = 1; period < arrlen; period++)
4643 if (nand_id_has_period(id_data, arrlen, period))
4644 break;
4645
4646
4647 if (period < arrlen)
4648 return period;
4649
4650
4651 if (last_nonzero < arrlen - 1)
4652 return last_nonzero + 1;
4653
4654
4655 return arrlen;
4656}
4657
4658
4659static int nand_get_bits_per_cell(u8 cellinfo)
4660{
4661 int bits;
4662
4663 bits = cellinfo & NAND_CI_CELLTYPE_MSK;
4664 bits >>= NAND_CI_CELLTYPE_SHIFT;
4665 return bits + 1;
4666}
4667
4668
4669
4670
4671
4672
/*
 * Many chips share similar device ID codes. Decode the memory organization
 * (page size, OOB size, erase-block size, bus width) from the generic
 * extended-ID byte (the 4th ID byte).
 */
void nand_decode_ext_id(struct nand_chip *chip)
{
	struct nand_memory_organization *memorg;
	struct mtd_info *mtd = nand_to_mtd(chip);
	int extid;
	u8 *id_data = chip->id.data;

	memorg = nanddev_get_memorg(&chip->base);

	/* The 3rd id byte holds MLC / multichip data */
	memorg->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
	/* The 4th id byte is the important one */
	extid = id_data[3];

	/* Calc pagesize */
	memorg->pagesize = 1024 << (extid & 0x03);
	mtd->writesize = memorg->pagesize;
	extid >>= 2;
	/* Calc oobsize */
	memorg->oobsize = (8 << (extid & 0x01)) * (mtd->writesize >> 9);
	mtd->oobsize = memorg->oobsize;
	extid >>= 2;
	/* Calc blocksize. Blocksize is multiples of 64KiB */
	memorg->pages_per_eraseblock = ((64 * 1024) << (extid & 0x03)) /
				       memorg->pagesize;
	mtd->erasesize = (64 * 1024) << (extid & 0x03);
	extid >>= 2;
	/* Get buswidth information */
	if (extid & 0x1)
		chip->options |= NAND_BUSWIDTH_16;
}
EXPORT_SYMBOL_GPL(nand_decode_ext_id);
4705
4706
4707
4708
4709
4710
/*
 * Old devices have the chip data hardcoded in the device ID table:
 * fill the memory organization from the static table entry.
 */
static void nand_decode_id(struct nand_chip *chip, struct nand_flash_dev *type)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_memory_organization *memorg;

	memorg = nanddev_get_memorg(&chip->base);

	memorg->pages_per_eraseblock = type->erasesize / type->pagesize;
	mtd->erasesize = type->erasesize;
	memorg->pagesize = type->pagesize;
	mtd->writesize = memorg->pagesize;
	/* OOB size is fixed at 1/32 of the page size for these devices. */
	memorg->oobsize = memorg->pagesize / 32;
	mtd->oobsize = memorg->oobsize;

	/* All legacy ID NAND are small-page, SLC */
	memorg->bits_per_cell = 1;
}
4728
4729
4730
4731
4732
4733
/*
 * Set the bad block marker position in the OOB area: large-page and
 * 16-bit chips use the "large" position, small-page 8-bit chips the
 * "small" one.
 */
static void nand_decode_bbm_options(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	/* Set the bad block position */
	if (mtd->writesize > 512 || (chip->options & NAND_BUSWIDTH_16))
		chip->badblockpos = NAND_BBM_POS_LARGE;
	else
		chip->badblockpos = NAND_BBM_POS_SMALL;
}
4744
/* A table entry with a non-zero id_len describes a full-ID NAND device. */
static inline bool is_full_id_nand(struct nand_flash_dev *type)
{
	return type->id_len;
}
4749
/*
 * Match the chip's full ID string against a table entry; on a match,
 * populate the memory organization, options and ECC requirements from
 * the entry. Returns true on a successful match (and a false return after
 * a match indicates the model-string allocation failed).
 */
static bool find_full_id_nand(struct nand_chip *chip,
			      struct nand_flash_dev *type)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_memory_organization *memorg;
	u8 *id_data = chip->id.data;

	memorg = nanddev_get_memorg(&chip->base);

	if (!strncmp(type->id, id_data, type->id_len)) {
		memorg->pagesize = type->pagesize;
		mtd->writesize = memorg->pagesize;
		memorg->pages_per_eraseblock = type->erasesize /
					       type->pagesize;
		mtd->erasesize = type->erasesize;
		memorg->oobsize = type->oobsize;
		mtd->oobsize = memorg->oobsize;

		memorg->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
		memorg->eraseblocks_per_lun =
			DIV_ROUND_DOWN_ULL((u64)type->chipsize << 20,
					   memorg->pagesize *
					   memorg->pages_per_eraseblock);
		chip->options |= type->options;
		chip->base.eccreq.strength = NAND_ECC_STRENGTH(type);
		chip->base.eccreq.step_size = NAND_ECC_STEP(type);

		chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
		if (!chip->parameters.model)
			return false;

		return true;
	}
	return false;
}
4785
4786
4787
4788
4789
4790
4791static void nand_manufacturer_detect(struct nand_chip *chip)
4792{
4793
4794
4795
4796
4797 if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
4798 chip->manufacturer.desc->ops->detect) {
4799 struct nand_memory_organization *memorg;
4800
4801 memorg = nanddev_get_memorg(&chip->base);
4802
4803
4804 memorg->bits_per_cell = nand_get_bits_per_cell(chip->id.data[2]);
4805 chip->manufacturer.desc->ops->detect(chip);
4806 } else {
4807 nand_decode_ext_id(chip);
4808 }
4809}
4810
4811
4812
4813
4814
4815
4816
4817static int nand_manufacturer_init(struct nand_chip *chip)
4818{
4819 if (!chip->manufacturer.desc || !chip->manufacturer.desc->ops ||
4820 !chip->manufacturer.desc->ops->init)
4821 return 0;
4822
4823 return chip->manufacturer.desc->ops->init(chip);
4824}
4825
4826
4827
4828
4829
4830
4831
4832static void nand_manufacturer_cleanup(struct nand_chip *chip)
4833{
4834
4835 if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
4836 chip->manufacturer.desc->ops->cleanup)
4837 chip->manufacturer.desc->ops->cleanup(chip);
4838}
4839
4840static const char *
4841nand_manufacturer_name(const struct nand_manufacturer_desc *manufacturer_desc)
4842{
4843 return manufacturer_desc ? manufacturer_desc->name : "Unknown";
4844}
4845
4846
4847
4848
/*
 * Get the flash and manufacturer id and look up if the type is supported.
 */
static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
{
	const struct nand_manufacturer_desc *manufacturer_desc;
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_memory_organization *memorg;
	int busw, ret;
	u8 *id_data = chip->id.data;
	u8 maf_id, dev_id;
	u64 targetsize;

	/*
	 * Start with single-LUN/single-plane defaults; the detection logic
	 * below may override them.
	 */
	memorg = nanddev_get_memorg(&chip->base);
	memorg->planes_per_lun = 1;
	memorg->luns_per_target = 1;

	/*
	 * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
	 * after power-up.
	 */
	ret = nand_reset(chip, 0);
	if (ret)
		return ret;

	/* Select the device */
	nand_select_target(chip, 0);

	/* Send the command for reading device ID */
	ret = nand_readid_op(chip, 0, id_data, 2);
	if (ret)
		return ret;

	/* Read manufacturer and device IDs */
	maf_id = id_data[0];
	dev_id = id_data[1];

	/*
	 * Try again to make sure, as some systems the bus-hold or other
	 * interface concerns can cause random data which looks like a
	 * possibly credible NAND flash to appear. If the two results do
	 * not match, ignore the device completely.
	 */

	/* Read the whole ID string this time. */
	ret = nand_readid_op(chip, 0, id_data, sizeof(chip->id.data));
	if (ret)
		return ret;

	if (id_data[0] != maf_id || id_data[1] != dev_id) {
		pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",
			maf_id, dev_id, id_data[0], id_data[1]);
		return -ENODEV;
	}

	chip->id.len = nand_id_len(id_data, ARRAY_SIZE(chip->id.data));

	/* Try to identify manufacturer */
	manufacturer_desc = nand_get_manufacturer_desc(maf_id);
	chip->manufacturer.desc = manufacturer_desc;

	if (!type)
		type = nand_flash_ids;

	/*
	 * Save the NAND_BUSWIDTH_16 flag before letting auto-detection logic
	 * override it.
	 * This is required to make sure initial NAND bus width set by the
	 * NAND controller driver is coherent with the real NAND bus width
	 * (extracted by auto-detection code).
	 */
	busw = chip->options & NAND_BUSWIDTH_16;

	/*
	 * The flag is only set (never cleared), reset it to its default value
	 * before starting auto-detection.
	 */
	chip->options &= ~NAND_BUSWIDTH_16;

	for (; type->name != NULL; type++) {
		if (is_full_id_nand(type)) {
			if (find_full_id_nand(chip, type))
				goto ident_done;
		} else if (dev_id == type->dev_id) {
			break;
		}
	}

	if (!type->name || !type->pagesize) {
		/* Check if the chip is ONFI compliant */
		ret = nand_onfi_detect(chip);
		if (ret < 0)
			return ret;
		else if (ret)
			goto ident_done;

		/* Check if the chip is JEDEC compliant */
		ret = nand_jedec_detect(chip);
		if (ret < 0)
			return ret;
		else if (ret)
			goto ident_done;
	}

	if (!type->name)
		return -ENODEV;

	chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
	if (!chip->parameters.model)
		return -ENOMEM;

	if (!type->pagesize)
		nand_manufacturer_detect(chip);
	else
		nand_decode_id(chip, type);

	/* Get chip options */
	chip->options |= type->options;

	memorg->eraseblocks_per_lun =
			DIV_ROUND_DOWN_ULL((u64)type->chipsize << 20,
					   memorg->pagesize *
					   memorg->pages_per_eraseblock);

ident_done:
	if (!mtd->name)
		mtd->name = chip->parameters.model;

	if (chip->options & NAND_BUSWIDTH_AUTO) {
		WARN_ON(busw & NAND_BUSWIDTH_16);
		nand_set_defaults(chip);
	} else if (busw != (chip->options & NAND_BUSWIDTH_16)) {
		/*
		 * Check, if buswidth is correct. Hardware drivers should set
		 * chip correct!
		 */
		pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
			maf_id, dev_id);
		pr_info("%s %s\n", nand_manufacturer_name(manufacturer_desc),
			mtd->name);
		pr_warn("bus width %d instead of %d bits\n", busw ? 16 : 8,
			(chip->options & NAND_BUSWIDTH_16) ? 16 : 8);
		ret = -EINVAL;

		goto free_detect_allocation;
	}

	nand_decode_bbm_options(chip);

	/* Calculate the address shift from the page size */
	chip->page_shift = ffs(mtd->writesize) - 1;
	/* Convert chipsize to number of pages per chip -1 */
	targetsize = nanddev_target_size(&chip->base);
	chip->pagemask = (targetsize >> chip->page_shift) - 1;

	chip->bbt_erase_shift = chip->phys_erase_shift =
		ffs(mtd->erasesize) - 1;
	if (targetsize & 0xffffffff)
		chip->chip_shift = ffs((unsigned)targetsize) - 1;
	else {
		chip->chip_shift = ffs((unsigned)(targetsize >> 32));
		chip->chip_shift += 32 - 1;
	}

	if (chip->chip_shift - chip->page_shift > 16)
		chip->options |= NAND_ROW_ADDR_3;

	chip->badblockbits = 8;

	nand_legacy_adjust_cmdfunc(chip);

	pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
		maf_id, dev_id);
	pr_info("%s %s\n", nand_manufacturer_name(manufacturer_desc),
		chip->parameters.model);
	pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n",
		(int)(targetsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
		mtd->erasesize >> 10, mtd->writesize, mtd->oobsize);
	return 0;

free_detect_allocation:
	kfree(chip->parameters.model);

	return ret;
}
5035
/* Strings matched against the "nand-ecc-mode" DT property, indexed by mode. */
static const char * const nand_ecc_modes[] = {
	[NAND_ECC_NONE] = "none",
	[NAND_ECC_SOFT] = "soft",
	[NAND_ECC_HW] = "hw",
	[NAND_ECC_HW_SYNDROME] = "hw_syndrome",
	[NAND_ECC_ON_DIE] = "on-die",
};
5043
5044static int of_get_nand_ecc_mode(struct device_node *np)
5045{
5046 const char *pm;
5047 int err, i;
5048
5049 err = of_property_read_string(np, "nand-ecc-mode", &pm);
5050 if (err < 0)
5051 return err;
5052
5053 for (i = NAND_ECC_NONE; i < ARRAY_SIZE(nand_ecc_modes); i++)
5054 if (!strcasecmp(pm, nand_ecc_modes[i]))
5055 return i;
5056
5057
5058
5059
5060
5061
5062 if (!strcasecmp(pm, "soft_bch"))
5063 return NAND_ECC_SOFT;
5064
5065 return -ENODEV;
5066}
5067
/* Strings matched against the "nand-ecc-algo" DT property, indexed by algo. */
static const char * const nand_ecc_algos[] = {
	[NAND_ECC_HAMMING] = "hamming",
	[NAND_ECC_BCH] = "bch",
	[NAND_ECC_RS] = "rs",
};
5073
5074static enum nand_ecc_algo of_get_nand_ecc_algo(struct device_node *np)
5075{
5076 enum nand_ecc_algo ecc_algo;
5077 const char *pm;
5078 int err;
5079
5080 err = of_property_read_string(np, "nand-ecc-algo", &pm);
5081 if (!err) {
5082 for (ecc_algo = NAND_ECC_HAMMING;
5083 ecc_algo < ARRAY_SIZE(nand_ecc_algos);
5084 ecc_algo++) {
5085 if (!strcasecmp(pm, nand_ecc_algos[ecc_algo]))
5086 return ecc_algo;
5087 }
5088 }
5089
5090
5091
5092
5093
5094 err = of_property_read_string(np, "nand-ecc-mode", &pm);
5095 if (!err) {
5096 if (!strcasecmp(pm, "soft"))
5097 return NAND_ECC_HAMMING;
5098 else if (!strcasecmp(pm, "soft_bch"))
5099 return NAND_ECC_BCH;
5100 }
5101
5102 return NAND_ECC_UNKNOWN;
5103}
5104
5105static int of_get_nand_ecc_step_size(struct device_node *np)
5106{
5107 int ret;
5108 u32 val;
5109
5110 ret = of_property_read_u32(np, "nand-ecc-step-size", &val);
5111 return ret ? ret : val;
5112}
5113
5114static int of_get_nand_ecc_strength(struct device_node *np)
5115{
5116 int ret;
5117 u32 val;
5118
5119 ret = of_property_read_u32(np, "nand-ecc-strength", &val);
5120 return ret ? ret : val;
5121}
5122
5123static int of_get_nand_bus_width(struct device_node *np)
5124{
5125 u32 val;
5126
5127 if (of_property_read_u32(np, "nand-bus-width", &val))
5128 return 8;
5129
5130 switch (val) {
5131 case 8:
5132 case 16:
5133 return val;
5134 default:
5135 return -EIO;
5136 }
5137}
5138
/* True when the DT node requests a flash-based bad block table. */
static bool of_get_nand_on_flash_bbt(struct device_node *np)
{
	return of_property_read_bool(np, "nand-on-flash-bbt");
}
5143
5144static int nand_dt_init(struct nand_chip *chip)
5145{
5146 struct device_node *dn = nand_get_flash_node(chip);
5147 enum nand_ecc_algo ecc_algo;
5148 int ecc_mode, ecc_strength, ecc_step;
5149
5150 if (!dn)
5151 return 0;
5152
5153 if (of_get_nand_bus_width(dn) == 16)
5154 chip->options |= NAND_BUSWIDTH_16;
5155
5156 if (of_property_read_bool(dn, "nand-is-boot-medium"))
5157 chip->options |= NAND_IS_BOOT_MEDIUM;
5158
5159 if (of_get_nand_on_flash_bbt(dn))
5160 chip->bbt_options |= NAND_BBT_USE_FLASH;
5161
5162 ecc_mode = of_get_nand_ecc_mode(dn);
5163 ecc_algo = of_get_nand_ecc_algo(dn);
5164 ecc_strength = of_get_nand_ecc_strength(dn);
5165 ecc_step = of_get_nand_ecc_step_size(dn);
5166
5167 if (ecc_mode >= 0)
5168 chip->ecc.mode = ecc_mode;
5169
5170 if (ecc_algo != NAND_ECC_UNKNOWN)
5171 chip->ecc.algo = ecc_algo;
5172
5173 if (ecc_strength >= 0)
5174 chip->ecc.strength = ecc_strength;
5175
5176 if (ecc_step > 0)
5177 chip->ecc.size = ecc_step;
5178
5179 if (of_property_read_bool(dn, "nand-ecc-maximize"))
5180 chip->ecc.options |= NAND_ECC_MAXIMIZE;
5181
5182 return 0;
5183}
5184
5185
5186
5187
5188
5189
5190
5191
5192
5193
5194
5195
5196
5197
5198
/**
 * nand_scan_ident - Scan for the NAND device
 * @chip: NAND chip object
 * @maxchips: number of chips to scan for
 * @table: alternative NAND ID table
 *
 * This is the first phase of the normal nand_scan() function. It reads the
 * flash ID and sets up MTD fields accordingly.
 */
static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips,
			   struct nand_flash_dev *table)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_memory_organization *memorg;
	int nand_maf_id, nand_dev_id;
	unsigned int i;
	int ret;

	memorg = nanddev_get_memorg(&chip->base);

	/* Assume all dies are deselected when we enter nand_scan_ident(). */
	chip->cur_cs = -1;

	mutex_init(&chip->lock);

	/* Enforce the right timings for reset/detection. */
	chip->current_interface_config = nand_get_reset_interface_config();

	ret = nand_dt_init(chip);
	if (ret)
		return ret;

	if (!mtd->name && mtd->dev.parent)
		mtd->name = dev_name(mtd->dev.parent);

	/* Set the default functions. */
	nand_set_defaults(chip);

	ret = nand_legacy_check_hooks(chip);
	if (ret)
		return ret;

	memorg->ntargets = maxchips;

	/* Read the flash type. */
	ret = nand_detect(chip, table);
	if (ret) {
		if (!(chip->options & NAND_SCAN_SILENT_NODEV))
			pr_warn("No NAND device found\n");
		nand_deselect_target(chip);
		return ret;
	}

	nand_maf_id = chip->id.data[0];
	nand_dev_id = chip->id.data[1];

	nand_deselect_target(chip);

	/* Check for a chip array: stop at the first die with a different ID. */
	for (i = 1; i < maxchips; i++) {
		u8 id[2];

		/* See the reset comment in nand_detect(). */
		ret = nand_reset(chip, i);
		if (ret)
			break;

		nand_select_target(chip, i);
		/* Send the command for reading device ID. */
		ret = nand_readid_op(chip, 0, id, sizeof(id));
		if (ret)
			break;
		/* Read manufacturer and device IDs. */
		if (nand_maf_id != id[0] || nand_dev_id != id[1]) {
			nand_deselect_target(chip);
			break;
		}
		nand_deselect_target(chip);
	}
	if (i > 1)
		pr_info("%d chips detected\n", i);

	/* Store the number of chips and calc total size for mtd. */
	memorg->ntargets = i;
	mtd->size = i * nanddev_target_size(&chip->base);

	return 0;
}
5278
5279static void nand_scan_ident_cleanup(struct nand_chip *chip)
5280{
5281 kfree(chip->parameters.model);
5282 kfree(chip->parameters.onfi);
5283}
5284
/*
 * Fill in the software ECC helpers (Hamming or BCH) and compute the default
 * ECC geometry when the board driver did not supply one.
 */
static int nand_set_ecc_soft_ops(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	if (WARN_ON(ecc->mode != NAND_ECC_SOFT))
		return -EINVAL;

	switch (ecc->algo) {
	case NAND_ECC_HAMMING:
		ecc->calculate = nand_calculate_ecc;
		ecc->correct = nand_correct_data;
		ecc->read_page = nand_read_page_swecc;
		ecc->read_subpage = nand_read_subpage;
		ecc->write_page = nand_write_page_swecc;
		if (!ecc->read_page_raw)
			ecc->read_page_raw = nand_read_page_raw;
		if (!ecc->write_page_raw)
			ecc->write_page_raw = nand_write_page_raw;
		ecc->read_oob = nand_read_oob_std;
		ecc->write_oob = nand_write_oob_std;
		if (!ecc->size)
			ecc->size = 256;
		ecc->bytes = 3;
		ecc->strength = 1;

		if (IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC))
			ecc->options |= NAND_ECC_SOFT_HAMMING_SM_ORDER;

		return 0;
	case NAND_ECC_BCH:
		if (!mtd_nand_has_bch()) {
			WARN(1, "CONFIG_MTD_NAND_ECC_SW_BCH not enabled\n");
			return -EINVAL;
		}
		ecc->calculate = nand_bch_calculate_ecc;
		ecc->correct = nand_bch_correct_data;
		ecc->read_page = nand_read_page_swecc;
		ecc->read_subpage = nand_read_subpage;
		ecc->write_page = nand_write_page_swecc;
		if (!ecc->read_page_raw)
			ecc->read_page_raw = nand_read_page_raw;
		if (!ecc->write_page_raw)
			ecc->write_page_raw = nand_write_page_raw;
		ecc->read_oob = nand_read_oob_std;
		ecc->write_oob = nand_write_oob_std;

		/*
		 * Board driver should supply ecc.size and ecc.strength
		 * values to select how many bits are correctable.
		 * Otherwise, default to 4 bits for large page devices.
		 */
		if (!ecc->size && (mtd->oobsize >= 64)) {
			ecc->size = 512;
			ecc->strength = 4;
		}

		/*
		 * If no ecc placement scheme was provided pickup the default
		 * large page one.
		 */
		if (!mtd->ooblayout) {
			/* handle large page devices only */
			if (mtd->oobsize < 64) {
				WARN(1, "OOB layout is required when using software BCH on small pages\n");
				return -EINVAL;
			}

			mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);

		}

		/*
		 * We can only maximize ECC config when the default layout is
		 * used, otherwise we don't know how many bytes can really be
		 * used.
		 */
		if (mtd->ooblayout == &nand_ooblayout_lp_ops &&
		    ecc->options & NAND_ECC_MAXIMIZE) {
			int steps, bytes;

			/* Always prefer 1k blocks over 512bytes ones. */
			ecc->size = 1024;
			steps = mtd->writesize / ecc->size;

			/* Reserve 2 bytes for the BBM. */
			bytes = (mtd->oobsize - 2) / steps;
			ecc->strength = bytes * 8 / fls(8 * ecc->size);
		}

		/* See nand_bch_init() for details. */
		ecc->bytes = 0;
		ecc->priv = nand_bch_init(mtd);
		if (!ecc->priv) {
			WARN(1, "BCH ECC initialization failed!\n");
			return -EINVAL;
		}
		return 0;
	default:
		WARN(1, "Unsupported ECC algorithm!\n");
		return -EINVAL;
	}
}
5388
5389
5390
5391
5392
5393
5394
5395
5396
5397
5398
5399static int
5400nand_check_ecc_caps(struct nand_chip *chip,
5401 const struct nand_ecc_caps *caps, int oobavail)
5402{
5403 struct mtd_info *mtd = nand_to_mtd(chip);
5404 const struct nand_ecc_step_info *stepinfo;
5405 int preset_step = chip->ecc.size;
5406 int preset_strength = chip->ecc.strength;
5407 int ecc_bytes, nsteps = mtd->writesize / preset_step;
5408 int i, j;
5409
5410 for (i = 0; i < caps->nstepinfos; i++) {
5411 stepinfo = &caps->stepinfos[i];
5412
5413 if (stepinfo->stepsize != preset_step)
5414 continue;
5415
5416 for (j = 0; j < stepinfo->nstrengths; j++) {
5417 if (stepinfo->strengths[j] != preset_strength)
5418 continue;
5419
5420 ecc_bytes = caps->calc_ecc_bytes(preset_step,
5421 preset_strength);
5422 if (WARN_ON_ONCE(ecc_bytes < 0))
5423 return ecc_bytes;
5424
5425 if (ecc_bytes * nsteps > oobavail) {
5426 pr_err("ECC (step, strength) = (%d, %d) does not fit in OOB",
5427 preset_step, preset_strength);
5428 return -ENOSPC;
5429 }
5430
5431 chip->ecc.bytes = ecc_bytes;
5432
5433 return 0;
5434 }
5435 }
5436
5437 pr_err("ECC (step, strength) = (%d, %d) not supported on this controller",
5438 preset_step, preset_strength);
5439
5440 return -ENOTSUPP;
5441}
5442
5443
5444
5445
5446
5447
5448
5449
5450
5451
5452
/**
 * nand_match_ecc_req - meet the chip's requirement with least ECC bytes
 * @chip: nand chip info structure
 * @caps: ECC engine caps info structure
 * @oobavail: OOB size that the ECC engine can use
 *
 * If a chip's ECC requirement is provided, try to meet it with the least
 * number of ECC bytes (i.e. with the largest number of OOB-free bytes).
 * On success, the chosen ECC settings are set.
 */
static int
nand_match_ecc_req(struct nand_chip *chip,
		   const struct nand_ecc_caps *caps, int oobavail)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	const struct nand_ecc_step_info *stepinfo;
	int req_step = chip->base.eccreq.step_size;
	int req_strength = chip->base.eccreq.strength;
	int req_corr, step_size, strength, nsteps, ecc_bytes, ecc_bytes_total;
	/* best_* are only read when best_ecc_bytes_total was updated below. */
	int best_step, best_strength, best_ecc_bytes;
	int best_ecc_bytes_total = INT_MAX;
	int i, j;

	/* No information provided by the NAND chip */
	if (!req_step || !req_strength)
		return -ENOTSUPP;

	/* Number of correctable bits the chip requires in a page */
	req_corr = mtd->writesize / req_step * req_strength;

	for (i = 0; i < caps->nstepinfos; i++) {
		stepinfo = &caps->stepinfos[i];
		step_size = stepinfo->stepsize;

		for (j = 0; j < stepinfo->nstrengths; j++) {
			strength = stepinfo->strengths[j];

			/*
			 * If both step size and strength are smaller than the
			 * chip's requirement, there is no need to check the
			 * whole page correctability; skip this combination.
			 */
			if (step_size < req_step && strength < req_strength)
				continue;

			if (mtd->writesize % step_size)
				continue;

			nsteps = mtd->writesize / step_size;

			ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
			if (WARN_ON_ONCE(ecc_bytes < 0))
				continue;
			ecc_bytes_total = ecc_bytes * nsteps;

			if (ecc_bytes_total > oobavail ||
			    strength * nsteps < req_corr)
				continue;

			/*
			 * We assume the best is to meet the chip's requirement
			 * with the least number of ECC bytes.
			 */
			if (ecc_bytes_total < best_ecc_bytes_total) {
				best_ecc_bytes_total = ecc_bytes_total;
				best_step = step_size;
				best_strength = strength;
				best_ecc_bytes = ecc_bytes;
			}
		}
	}

	if (best_ecc_bytes_total == INT_MAX)
		return -ENOTSUPP;

	chip->ecc.size = best_step;
	chip->ecc.strength = best_strength;
	chip->ecc.bytes = best_ecc_bytes;

	return 0;
}
5524
5525
5526
5527
5528
5529
5530
5531
5532
5533
/**
 * nand_maximize_ecc - choose the max ECC strength available
 * @chip: nand chip info structure
 * @caps: ECC engine caps info structure
 * @oobavail: OOB size that the ECC engine can use
 *
 * Choose the max ECC strength that is supported on the controller, and can
 * fit within the chip's OOB. On success, the chosen ECC settings are set.
 */
static int
nand_maximize_ecc(struct nand_chip *chip,
		  const struct nand_ecc_caps *caps, int oobavail)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	const struct nand_ecc_step_info *stepinfo;
	int step_size, strength, nsteps, ecc_bytes, corr;
	int best_corr = 0;
	int best_step = 0;
	/* Only read when best_corr was updated below. */
	int best_strength, best_ecc_bytes;
	int i, j;

	for (i = 0; i < caps->nstepinfos; i++) {
		stepinfo = &caps->stepinfos[i];
		step_size = stepinfo->stepsize;

		/* If chip->ecc.size is already set, respect it */
		if (chip->ecc.size && step_size != chip->ecc.size)
			continue;

		for (j = 0; j < stepinfo->nstrengths; j++) {
			strength = stepinfo->strengths[j];

			if (mtd->writesize % step_size)
				continue;

			nsteps = mtd->writesize / step_size;

			ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
			if (WARN_ON_ONCE(ecc_bytes < 0))
				continue;

			if (ecc_bytes * nsteps > oobavail)
				continue;

			corr = strength * nsteps;

			/*
			 * If the number of correctable bits is the same,
			 * bigger step_size has more reliability.
			 */
			if (corr > best_corr ||
			    (corr == best_corr && step_size > best_step)) {
				best_corr = corr;
				best_step = step_size;
				best_strength = strength;
				best_ecc_bytes = ecc_bytes;
			}
		}
	}

	if (!best_corr)
		return -ENOTSUPP;

	chip->ecc.size = best_step;
	chip->ecc.strength = best_strength;
	chip->ecc.bytes = best_ecc_bytes;

	return 0;
}
5594
5595
5596
5597
5598
5599
5600
5601
5602
5603
5604
5605
5606
5607
5608
5609
5610
5611
5612int nand_ecc_choose_conf(struct nand_chip *chip,
5613 const struct nand_ecc_caps *caps, int oobavail)
5614{
5615 struct mtd_info *mtd = nand_to_mtd(chip);
5616
5617 if (WARN_ON(oobavail < 0 || oobavail > mtd->oobsize))
5618 return -EINVAL;
5619
5620 if (chip->ecc.size && chip->ecc.strength)
5621 return nand_check_ecc_caps(chip, caps, oobavail);
5622
5623 if (chip->ecc.options & NAND_ECC_MAXIMIZE)
5624 return nand_maximize_ecc(chip, caps, oobavail);
5625
5626 if (!nand_match_ecc_req(chip, caps, oobavail))
5627 return 0;
5628
5629 return nand_maximize_ecc(chip, caps, oobavail);
5630}
5631EXPORT_SYMBOL_GPL(nand_ecc_choose_conf);
5632
5633
5634
5635
5636
5637
5638
5639
5640
5641
5642
5643
5644
5645
5646
5647static bool nand_ecc_strength_good(struct nand_chip *chip)
5648{
5649 struct mtd_info *mtd = nand_to_mtd(chip);
5650 struct nand_ecc_ctrl *ecc = &chip->ecc;
5651 int corr, ds_corr;
5652
5653 if (ecc->size == 0 || chip->base.eccreq.step_size == 0)
5654
5655 return true;
5656
5657
5658
5659
5660
5661 corr = (mtd->writesize * ecc->strength) / ecc->size;
5662 ds_corr = (mtd->writesize * chip->base.eccreq.strength) /
5663 chip->base.eccreq.step_size;
5664
5665 return corr >= ds_corr && ecc->strength >= chip->base.eccreq.strength;
5666}
5667
5668static int rawnand_erase(struct nand_device *nand, const struct nand_pos *pos)
5669{
5670 struct nand_chip *chip = container_of(nand, struct nand_chip,
5671 base);
5672 unsigned int eb = nanddev_pos_to_row(nand, pos);
5673 int ret;
5674
5675 eb >>= nand->rowconv.eraseblock_addr_shift;
5676
5677 nand_select_target(chip, pos->target);
5678 ret = nand_erase_op(chip, eb);
5679 nand_deselect_target(chip);
5680
5681 return ret;
5682}
5683
5684static int rawnand_markbad(struct nand_device *nand,
5685 const struct nand_pos *pos)
5686{
5687 struct nand_chip *chip = container_of(nand, struct nand_chip,
5688 base);
5689
5690 return nand_markbad_bbm(chip, nanddev_pos_to_offs(nand, pos));
5691}
5692
5693static bool rawnand_isbad(struct nand_device *nand, const struct nand_pos *pos)
5694{
5695 struct nand_chip *chip = container_of(nand, struct nand_chip,
5696 base);
5697 int ret;
5698
5699 nand_select_target(chip, pos->target);
5700 ret = nand_isbad_bbm(chip, nanddev_pos_to_offs(nand, pos));
5701 nand_deselect_target(chip);
5702
5703 return ret;
5704}
5705
/* Generic NAND framework hooks backing the raw NAND implementation. */
static const struct nand_ops rawnand_ops = {
	.erase = rawnand_erase,
	.markbad = rawnand_markbad,
	.isbad = rawnand_isbad,
};
5711
5712
5713
5714
5715
5716
5717
5718
5719
/**
 * nand_scan_tail - Scan for the NAND device
 * @chip: NAND chip object
 *
 * This is the second phase of the normal nand_scan() function. It fills out
 * all the uninitialized function pointers with the defaults and scans for a
 * bad block table if appropriate.
 */
static int nand_scan_tail(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int ret, i;

	/* New bad blocks should be marked in OOB, flash-based BBT, or both */
	if (WARN_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
		    !(chip->bbt_options & NAND_BBT_USE_FLASH))) {
		return -EINVAL;
	}

	chip->data_buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
	if (!chip->data_buf)
		return -ENOMEM;

	/*
	 * NOTE(review): some manufacturer ->init() implementations appear to
	 * expect the first die to be selected, hence the select/deselect
	 * around the call — confirm before reordering.
	 */
	nand_select_target(chip, 0);
	ret = nand_manufacturer_init(chip);
	nand_deselect_target(chip);
	if (ret)
		goto err_free_buf;

	/* Set the internal oob buffer location, just after the page data */
	chip->oob_poi = chip->data_buf + mtd->writesize;

	/*
	 * If no default placement scheme is given, select an appropriate one.
	 */
	if (!mtd->ooblayout &&
	    !(ecc->mode == NAND_ECC_SOFT && ecc->algo == NAND_ECC_BCH)) {
		switch (mtd->oobsize) {
		case 8:
		case 16:
			mtd_set_ooblayout(mtd, &nand_ooblayout_sp_ops);
			break;
		case 64:
		case 128:
			mtd_set_ooblayout(mtd, &nand_ooblayout_lp_hamming_ops);
			break;
		default:
			/*
			 * Expose the whole OOB area to users if ECC_NONE
			 * is passed. We could do that for all kind of
			 * ->oobsize, but we must keep the old large/small
			 * page with ECC layout when ->oobsize <= 128 for
			 * compatibility reasons.
			 */
			if (ecc->mode == NAND_ECC_NONE) {
				mtd_set_ooblayout(mtd,
						  &nand_ooblayout_lp_ops);
				break;
			}

			WARN(1, "No oob scheme defined for oobsize %d\n",
			     mtd->oobsize);
			ret = -EINVAL;
			goto err_nand_manuf_cleanup;
		}
	}

	/*
	 * Check ECC mode, default to software if 3byte/512byte hardware ECC is
	 * selected and we have 256 byte pagesize fallback to software ECC
	 */
	switch (ecc->mode) {
	case NAND_ECC_HW:
		/* Use standard hwecc read page function? */
		if (!ecc->read_page)
			ecc->read_page = nand_read_page_hwecc;
		if (!ecc->write_page)
			ecc->write_page = nand_write_page_hwecc;
		if (!ecc->read_page_raw)
			ecc->read_page_raw = nand_read_page_raw;
		if (!ecc->write_page_raw)
			ecc->write_page_raw = nand_write_page_raw;
		if (!ecc->read_oob)
			ecc->read_oob = nand_read_oob_std;
		if (!ecc->write_oob)
			ecc->write_oob = nand_write_oob_std;
		if (!ecc->read_subpage)
			ecc->read_subpage = nand_read_subpage;
		if (!ecc->write_subpage && ecc->hwctl && ecc->calculate)
			ecc->write_subpage = nand_write_subpage_hwecc;
		fallthrough;
	case NAND_ECC_HW_SYNDROME:
		if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) &&
		    (!ecc->read_page ||
		     ecc->read_page == nand_read_page_hwecc ||
		     !ecc->write_page ||
		     ecc->write_page == nand_write_page_hwecc)) {
			WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
			ret = -EINVAL;
			goto err_nand_manuf_cleanup;
		}
		/* Use standard syndrome read/write page function? */
		if (!ecc->read_page)
			ecc->read_page = nand_read_page_syndrome;
		if (!ecc->write_page)
			ecc->write_page = nand_write_page_syndrome;
		if (!ecc->read_page_raw)
			ecc->read_page_raw = nand_read_page_raw_syndrome;
		if (!ecc->write_page_raw)
			ecc->write_page_raw = nand_write_page_raw_syndrome;
		if (!ecc->read_oob)
			ecc->read_oob = nand_read_oob_syndrome;
		if (!ecc->write_oob)
			ecc->write_oob = nand_write_oob_syndrome;

		if (mtd->writesize >= ecc->size) {
			if (!ecc->strength) {
				WARN(1, "Driver must set ecc.strength when using hardware ECC\n");
				ret = -EINVAL;
				goto err_nand_manuf_cleanup;
			}
			break;
		}
		pr_warn("%d byte HW ECC not possible on %d byte page size, fallback to SW ECC\n",
			ecc->size, mtd->writesize);
		ecc->mode = NAND_ECC_SOFT;
		ecc->algo = NAND_ECC_HAMMING;
		fallthrough;
	case NAND_ECC_SOFT:
		ret = nand_set_ecc_soft_ops(chip);
		if (ret) {
			ret = -EINVAL;
			goto err_nand_manuf_cleanup;
		}
		break;

	case NAND_ECC_ON_DIE:
		if (!ecc->read_page || !ecc->write_page) {
			WARN(1, "No ECC functions supplied; on-die ECC not possible\n");
			ret = -EINVAL;
			goto err_nand_manuf_cleanup;
		}
		if (!ecc->read_oob)
			ecc->read_oob = nand_read_oob_std;
		if (!ecc->write_oob)
			ecc->write_oob = nand_write_oob_std;
		break;

	case NAND_ECC_NONE:
		pr_warn("NAND_ECC_NONE selected by board driver. This is not recommended!\n");
		ecc->read_page = nand_read_page_raw;
		ecc->write_page = nand_write_page_raw;
		ecc->read_oob = nand_read_oob_std;
		ecc->read_page_raw = nand_read_page_raw;
		ecc->write_page_raw = nand_write_page_raw;
		ecc->write_oob = nand_write_oob_std;
		ecc->size = mtd->writesize;
		ecc->bytes = 0;
		ecc->strength = 0;
		break;

	default:
		WARN(1, "Invalid NAND_ECC_MODE %d\n", ecc->mode);
		ret = -EINVAL;
		goto err_nand_manuf_cleanup;
	}

	if (ecc->correct || ecc->calculate) {
		ecc->calc_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
		ecc->code_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
		if (!ecc->calc_buf || !ecc->code_buf) {
			ret = -ENOMEM;
			goto err_nand_manuf_cleanup;
		}
	}

	/* For many systems, the standard OOB write also works for raw */
	if (!ecc->read_oob_raw)
		ecc->read_oob_raw = ecc->read_oob;
	if (!ecc->write_oob_raw)
		ecc->write_oob_raw = ecc->write_oob;

	/* Propagate ECC info to mtd_info */
	mtd->ecc_strength = ecc->strength;
	mtd->ecc_step_size = ecc->size;

	/*
	 * Set the number of read / write steps for one page depending on ECC
	 * mode.
	 */
	ecc->steps = mtd->writesize / ecc->size;
	if (ecc->steps * ecc->size != mtd->writesize) {
		WARN(1, "Invalid ECC parameters\n");
		ret = -EINVAL;
		goto err_nand_manuf_cleanup;
	}
	ecc->total = ecc->steps * ecc->bytes;
	if (ecc->total > mtd->oobsize) {
		WARN(1, "Total number of ECC bytes exceeded oobsize\n");
		ret = -EINVAL;
		goto err_nand_manuf_cleanup;
	}

	/*
	 * The number of bytes available for a client to place data into
	 * the out of band area.
	 */
	ret = mtd_ooblayout_count_freebytes(mtd);
	if (ret < 0)
		ret = 0;

	mtd->oobavail = ret;

	/* ECC sanity check: warn if it's too weak */
	if (!nand_ecc_strength_good(chip))
		pr_warn("WARNING: %s: the ECC used on your system (%db/%dB) is too weak compared to the one required by the NAND chip (%db/%dB)\n",
			mtd->name, chip->ecc.strength, chip->ecc.size,
			chip->base.eccreq.strength,
			chip->base.eccreq.step_size);

	/* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
	if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) {
		switch (ecc->steps) {
		case 2:
			mtd->subpage_sft = 1;
			break;
		case 4:
		case 8:
		case 16:
			mtd->subpage_sft = 2;
			break;
		}
	}
	chip->subpagesize = mtd->writesize >> mtd->subpage_sft;

	/* Invalidate the pagebuffer reference */
	chip->pagecache.page = -1;

	/* Large page NAND with SOFT_ECC should support subpage reads */
	switch (ecc->mode) {
	case NAND_ECC_SOFT:
		if (chip->page_shift > 9)
			chip->options |= NAND_SUBPAGE_READ;
		break;

	default:
		break;
	}

	ret = nanddev_init(&chip->base, &rawnand_ops, mtd->owner);
	if (ret)
		goto err_nand_manuf_cleanup;

	/* Adjust the MTD_CAP_ flags when NAND_ROM is set. */
	if (chip->options & NAND_ROM)
		mtd->flags = MTD_CAP_ROM;

	/* Fill in remaining MTD driver data */
	mtd->_erase = nand_erase;
	mtd->_point = NULL;
	mtd->_unpoint = NULL;
	mtd->_panic_write = panic_nand_write;
	mtd->_read_oob = nand_read_oob;
	mtd->_write_oob = nand_write_oob;
	mtd->_sync = nand_sync;
	mtd->_lock = nand_lock;
	mtd->_unlock = nand_unlock;
	mtd->_suspend = nand_suspend;
	mtd->_resume = nand_resume;
	mtd->_reboot = nand_shutdown;
	mtd->_block_isreserved = nand_block_isreserved;
	mtd->_block_isbad = nand_block_isbad;
	mtd->_block_markbad = nand_block_markbad;
	mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;

	/*
	 * Initialize bitflip_threshold to its default prior scan_bbt() call.
	 * scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be
	 * properly set.
	 */
	if (!mtd->bitflip_threshold)
		mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);

	/* Find the fastest data interface for this chip */
	ret = nand_choose_interface_config(chip);
	if (ret)
		goto err_nanddev_cleanup;

	/* Enter fastest possible mode on all dies. */
	for (i = 0; i < nanddev_ntargets(&chip->base); i++) {
		ret = nand_setup_interface(chip, i);
		if (ret)
			goto err_free_interface_config;
	}

	/* Check, if we should skip the bad block table scan */
	if (chip->options & NAND_SKIP_BBTSCAN)
		return 0;

	/* Build bad block table */
	ret = nand_create_bbt(chip);
	if (ret)
		goto err_free_interface_config;

	return 0;

err_free_interface_config:
	kfree(chip->best_interface_config);

err_nanddev_cleanup:
	nanddev_cleanup(&chip->base);

err_nand_manuf_cleanup:
	nand_manufacturer_cleanup(chip);

err_free_buf:
	kfree(chip->data_buf);
	kfree(ecc->code_buf);
	kfree(ecc->calc_buf);

	return ret;
}
6042
6043static int nand_attach(struct nand_chip *chip)
6044{
6045 if (chip->controller->ops && chip->controller->ops->attach_chip)
6046 return chip->controller->ops->attach_chip(chip);
6047
6048 return 0;
6049}
6050
6051static void nand_detach(struct nand_chip *chip)
6052{
6053 if (chip->controller->ops && chip->controller->ops->detach_chip)
6054 chip->controller->ops->detach_chip(chip);
6055}
6056
6057
6058
6059
6060
6061
6062
6063
6064
6065
6066
6067int nand_scan_with_ids(struct nand_chip *chip, unsigned int maxchips,
6068 struct nand_flash_dev *ids)
6069{
6070 int ret;
6071
6072 if (!maxchips)
6073 return -EINVAL;
6074
6075 ret = nand_scan_ident(chip, maxchips, ids);
6076 if (ret)
6077 return ret;
6078
6079 ret = nand_attach(chip);
6080 if (ret)
6081 goto cleanup_ident;
6082
6083 ret = nand_scan_tail(chip);
6084 if (ret)
6085 goto detach_chip;
6086
6087 return 0;
6088
6089detach_chip:
6090 nand_detach(chip);
6091cleanup_ident:
6092 nand_scan_ident_cleanup(chip);
6093
6094 return ret;
6095}
6096EXPORT_SYMBOL(nand_scan_with_ids);
6097
6098
6099
6100
6101
/**
 * nand_cleanup - [NAND Interface] Free resources held by the NAND device
 * @chip: NAND chip object
 */
void nand_cleanup(struct nand_chip *chip)
{
	/* Free the software BCH control structure, when one was allocated */
	if (chip->ecc.mode == NAND_ECC_SOFT &&
	    chip->ecc.algo == NAND_ECC_BCH)
		nand_bch_free((struct nand_bch_control *)chip->ecc.priv);

	nanddev_cleanup(&chip->base);

	/* Free bad block table memory and the page/ECC buffers */
	kfree(chip->bbt);
	kfree(chip->data_buf);
	kfree(chip->ecc.code_buf);
	kfree(chip->ecc.calc_buf);

	/* Free bad block descriptor memory, only when dynamically allocated */
	if (chip->badblock_pattern && chip->badblock_pattern->options
			& NAND_BBT_DYNAMICSTRUCT)
		kfree(chip->badblock_pattern);

	/* Free the data interface */
	kfree(chip->best_interface_config);

	/* Free manufacturer priv data. */
	nand_manufacturer_cleanup(chip);

	/* Free controller specific allocations after chip identification */
	nand_detach(chip);

	/* Free identification phase allocations */
	nand_scan_ident_cleanup(chip);
}
6133
6134EXPORT_SYMBOL_GPL(nand_cleanup);
6135
6136MODULE_LICENSE("GPL");
6137MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>");
6138MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
6139MODULE_DESCRIPTION("Generic NAND flash driver code");
6140