1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
27
28#include <linux/module.h>
29#include <linux/delay.h>
30#include <linux/errno.h>
31#include <linux/err.h>
32#include <linux/sched.h>
33#include <linux/slab.h>
34#include <linux/mm.h>
35#include <linux/types.h>
36#include <linux/mtd/mtd.h>
37#include <linux/mtd/nand.h>
38#include <linux/mtd/nand_ecc.h>
39#include <linux/mtd/nand_bch.h>
40#include <linux/interrupt.h>
41#include <linux/bitops.h>
42#include <linux/io.h>
43#include <linux/mtd/partitions.h>
44#include <linux/of.h>
45#include <linux/gpio/consumer.h>
46
47#include "internals.h"
48
/*
 * Distance-3 pairing scheme: map an absolute page number inside an erase
 * block to its (pair, group) coordinates. Page 0 and odd pages fall in
 * group 0; the remaining even pages fall in group 1. Paired pages are 3
 * pages apart, except at the very end of the block where the distance
 * shrinks to 2.
 */
static int nand_pairing_dist3_get_info(struct mtd_info *mtd, int page,
				       struct mtd_pairing_info *info)
{
	int lastpage = (mtd->erasesize / mtd->writesize) - 1;
	int dist = 3;

	/* The last page of the block pairs at distance 2, not 3. */
	if (page == lastpage)
		dist = 2;

	if (!page || (page & 1)) {
		/* Page 0 and odd pages: group 0. */
		info->group = 0;
		info->pair = (page + 1) / 2;
	} else {
		/* Even pages other than 0: group 1. */
		info->group = 1;
		info->pair = (page + 1 - dist) / 2;
	}

	return 0;
}
68
/*
 * Inverse of nand_pairing_dist3_get_info(): turn a (pair, group) pair back
 * into an absolute page number inside the erase block. Returns the page
 * number, or -EINVAL if the coordinates fall outside the block.
 */
static int nand_pairing_dist3_get_wunit(struct mtd_info *mtd,
					const struct mtd_pairing_info *info)
{
	int lastpair = ((mtd->erasesize / mtd->writesize) - 1) / 2;
	int page = info->pair * 2;
	int dist = 3;

	/* (pair 0, group 0) is always page 0. */
	if (!info->group && !info->pair)
		return 0;

	/* The last pair's group-1 page sits at distance 2, not 3. */
	if (info->pair == lastpair && info->group)
		dist = 2;

	if (!info->group)
		page--;
	else if (info->pair)
		page += dist - 1;

	/* Reject coordinates that map past the end of the block. */
	if (page >= mtd->erasesize / mtd->writesize)
		return -EINVAL;

	return page;
}
92
/* Pairing scheme with two groups and a page distance of 3 (see above). */
const struct mtd_pairing_scheme dist3_pairing_scheme = {
	.ngroups = 2,
	.get_info = nand_pairing_dist3_get_info,
	.get_wunit = nand_pairing_dist3_get_wunit,
};
98
99static int check_offs_len(struct nand_chip *chip, loff_t ofs, uint64_t len)
100{
101 int ret = 0;
102
103
104 if (ofs & ((1ULL << chip->phys_erase_shift) - 1)) {
105 pr_debug("%s: unaligned address\n", __func__);
106 ret = -EINVAL;
107 }
108
109
110 if (len & ((1ULL << chip->phys_erase_shift) - 1)) {
111 pr_debug("%s: length not block aligned\n", __func__);
112 ret = -EINVAL;
113 }
114
115 return ret;
116}
117
118
119
120
121
122
123
124
125
126
127
/**
 * nand_extract_bits - Copy an arbitrary bit field between two buffers
 * @dst: destination buffer
 * @dst_off: bit offset at which writing starts within @dst
 * @src: source buffer
 * @src_off: bit offset at which reading starts within @src
 * @nbits: number of bits to copy
 *
 * Copies @nbits bits from @src into @dst, neither offset needing to be
 * byte-aligned. Bits of @dst outside the destination field are preserved.
 * The copy proceeds in chunks bounded by the next byte boundary of either
 * buffer.
 */
void nand_extract_bits(u8 *dst, unsigned int dst_off, const u8 *src,
		       unsigned int src_off, unsigned int nbits)
{
	unsigned int tmp, n;

	/* Normalize both offsets to a byte pointer + sub-byte offset. */
	dst += dst_off / 8;
	dst_off %= 8;
	src += src_off / 8;
	src_off %= 8;

	while (nbits) {
		/* Largest chunk that crosses no byte boundary on either side. */
		n = min3(8 - dst_off, 8 - src_off, nbits);

		tmp = (*src >> src_off) & GENMASK(n - 1, 0);
		/* Clear the destination field, then merge the new bits in. */
		*dst &= ~GENMASK(n - 1 + dst_off, dst_off);
		*dst |= tmp << dst_off;

		dst_off += n;
		if (dst_off >= 8) {
			dst++;
			dst_off -= 8;
		}

		src_off += n;
		if (src_off >= 8) {
			src++;
			src_off -= 8;
		}

		nbits -= n;
	}
}
EXPORT_SYMBOL_GPL(nand_extract_bits);
161
162
163
164
165
166
167
168
169
170
/**
 * nand_select_target() - Select a NAND target (A.K.A. die)
 * @chip: NAND chip object
 * @cs: the CS line to select
 *
 * Records the selected CS line in @chip and forwards the selection to the
 * legacy ->select_chip() hook when one is provided.
 */
void nand_select_target(struct nand_chip *chip, unsigned int cs)
{
	/*
	 * NOTE(review): this guard uses '>', so cs == nanddev_ntargets()
	 * (one past the last valid index) is not caught. Looks like it
	 * should be '>=' — confirm against callers before changing.
	 */
	if (WARN_ON(cs > nanddev_ntargets(&chip->base)))
		return;

	chip->cur_cs = cs;

	if (chip->legacy.select_chip)
		chip->legacy.select_chip(chip, cs);
}
EXPORT_SYMBOL_GPL(nand_select_target);
186
187
188
189
190
191
192
193
/**
 * nand_deselect_target() - Deselect the currently selected target
 * @chip: NAND chip object
 *
 * Deasserts the CS line via the legacy ->select_chip() hook (passing -1)
 * and marks no target as selected.
 */
void nand_deselect_target(struct nand_chip *chip)
{
	if (chip->legacy.select_chip)
		chip->legacy.select_chip(chip, -1);

	chip->cur_cs = -1;
}
EXPORT_SYMBOL_GPL(nand_deselect_target);
202
203
204
205
206
207
208
/*
 * Release the controller and the chip for other operations. Unlock order
 * is the reverse of the acquisition order in nand_get_device().
 */
static void nand_release_device(struct nand_chip *chip)
{
	mutex_unlock(&chip->controller->lock);
	mutex_unlock(&chip->lock);
}
215
216
217
218
219
220
221
222
223
224
/**
 * nand_bbm_get_next_page - Get the next page holding a bad-block marker
 * @chip: The NAND chip
 * @page: First page to start checking from (inclusive)
 *
 * Returns the first page (>= @page) of the block that may carry a
 * bad-block marker according to the chip's NAND_BBM_* options, or
 * -EINVAL when there is no further BBM page to check.
 */
int nand_bbm_get_next_page(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int last_page = ((mtd->erasesize - mtd->writesize) >>
			 chip->page_shift) & chip->pagemask;
	unsigned int bbm_flags = NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE
		| NAND_BBM_LASTPAGE;

	/* With no BBM flags set, fall back to checking page 0 only. */
	if (page == 0 && !(chip->options & bbm_flags))
		return 0;
	if (page == 0 && chip->options & NAND_BBM_FIRSTPAGE)
		return 0;
	if (page <= 1 && chip->options & NAND_BBM_SECONDPAGE)
		return 1;
	if (page <= last_page && chip->options & NAND_BBM_LASTPAGE)
		return last_page;

	return -EINVAL;
}
244
245
246
247
248
249
250
251
/*
 * Check whether the block containing @ofs is bad by reading the bad-block
 * marker byte(s) from the OOB area of every BBM page of the block.
 * Returns > 0 if the block is bad, 0 if it is good, or a negative error
 * code if the OOB read fails.
 */
static int nand_block_bad(struct nand_chip *chip, loff_t ofs)
{
	int first_page, page_offset;
	int res;
	u8 bad;

	first_page = (int)(ofs >> chip->page_shift) & chip->pagemask;
	page_offset = nand_bbm_get_next_page(chip, 0);

	while (page_offset >= 0) {
		res = chip->ecc.read_oob(chip, first_page + page_offset);
		if (res < 0)
			return res;

		bad = chip->oob_poi[chip->badblockpos];

		/*
		 * With the default threshold any value other than 0xFF
		 * marks the block bad; otherwise count the set bits.
		 */
		if (likely(chip->badblockbits == 8))
			res = bad != 0xFF;
		else
			res = hweight8(bad) < chip->badblockbits;
		if (res)
			return res;

		page_offset = nand_bbm_get_next_page(chip, page_offset + 1);
	}

	return 0;
}
280
281static int nand_isbad_bbm(struct nand_chip *chip, loff_t ofs)
282{
283 if (chip->options & NAND_NO_BBM_QUIRK)
284 return 0;
285
286 if (chip->legacy.block_bad)
287 return chip->legacy.block_bad(chip, ofs);
288
289 return nand_block_bad(chip, ofs);
290}
291
292
293
294
295
296
297
298
299
/*
 * Grab the chip lock, refuse to proceed if the chip is suspended, then
 * grab the controller lock. Paired with nand_release_device(), which
 * unlocks in the reverse order. Returns 0 on success, -EBUSY if the
 * device is suspended.
 */
static int nand_get_device(struct nand_chip *chip)
{
	mutex_lock(&chip->lock);
	if (chip->suspended) {
		mutex_unlock(&chip->lock);
		return -EBUSY;
	}
	mutex_lock(&chip->controller->lock);

	return 0;
}
311
312
313
314
315
316
317
318
319static int nand_check_wp(struct nand_chip *chip)
320{
321 u8 status;
322 int ret;
323
324
325 if (chip->options & NAND_BROKEN_XD)
326 return 0;
327
328
329 ret = nand_status_op(chip, &status);
330 if (ret)
331 return ret;
332
333 return status & NAND_STATUS_WP ? 0 : 1;
334}
335
336
337
338
339
340
341
342
/*
 * Transfer client OOB data into chip->oob_poi according to the requested
 * OOB mode. The whole OOB buffer is pre-filled with 0xff so that bytes
 * not covered by the transfer are left unprogrammed on flash. Returns a
 * pointer just past the consumed client data.
 */
static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, size_t len,
			      struct mtd_oob_ops *ops)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	/*
	 * Initialise to all 0xFF, to avoid the possibility of left over
	 * OOB data from a previous OOB read.
	 */
	memset(chip->oob_poi, 0xff, mtd->oobsize);

	switch (ops->mode) {

	case MTD_OPS_PLACE_OOB:
	case MTD_OPS_RAW:
		/* Raw/place modes: copy at the caller-supplied offset. */
		memcpy(chip->oob_poi + ops->ooboffs, oob, len);
		return oob + len;

	case MTD_OPS_AUTO_OOB:
		/* Auto mode: scatter data into the free OOB regions. */
		ret = mtd_ooblayout_set_databytes(mtd, oob, chip->oob_poi,
						  ops->ooboffs, len);
		BUG_ON(ret);
		return oob + len;

	default:
		BUG();
	}
	return NULL;
}
373
374
375
376
377
378
379
380
381
/**
 * nand_do_write_oob - NAND write out-of-band
 * @chip: NAND chip object
 * @to: offset to write to
 * @ops: oob operation description structure
 *
 * Write the OOB area described by @ops to the page containing @to.
 * Returns 0 on success or a negative error code (bad parameters, write
 * protection, or a failure reported by the ECC write hook).
 */
static int nand_do_write_oob(struct nand_chip *chip, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int chipnr, page, status, len, ret;

	pr_debug("%s: to = 0x%08x, len = %i\n",
		 __func__, (unsigned int)to, (int)ops->ooblen);

	len = mtd_oobavail(mtd, ops);

	/* Do not allow write past end of page. */
	if ((ops->ooboffs + ops->ooblen) > len) {
		pr_debug("%s: attempt to write past end of page\n",
			 __func__);
		return -EINVAL;
	}

	chipnr = (int)(to >> chip->chip_shift);

	/*
	 * Reset the chip before each OOB write. Presumably this guards
	 * against devices left in a bad state by a previous operation —
	 * TODO(review): confirm the original rationale.
	 */
	ret = nand_reset(chip, chipnr);
	if (ret)
		return ret;

	nand_select_target(chip, chipnr);

	/* Shift to get page. */
	page = (int)(to >> chip->page_shift);

	/* Check, if it is write protected. */
	if (nand_check_wp(chip)) {
		nand_deselect_target(chip);
		return -EROFS;
	}

	/* Invalidate the page cache, if we write to the cached page. */
	if (page == chip->pagecache.page)
		chip->pagecache.page = -1;

	nand_fill_oob(chip, ops->oobbuf, ops->ooblen, ops);

	if (ops->mode == MTD_OPS_RAW)
		status = chip->ecc.write_oob_raw(chip, page & chip->pagemask);
	else
		status = chip->ecc.write_oob(chip, page & chip->pagemask);

	nand_deselect_target(chip);

	if (status)
		return status;

	ops->oobretlen = ops->ooblen;

	return 0;
}
443
444
445
446
447
448
449
450
451
452
/**
 * nand_default_block_markbad - Default bad-block marking implementation
 * @chip: NAND chip object
 * @ofs: offset within the block to mark bad
 *
 * Writes a zeroed bad-block marker byte (two bytes on 16-bit bus width,
 * at an even offset) into the OOB area of every BBM page of the block.
 * The first error encountered is returned, but all BBM pages are still
 * attempted.
 */
static int nand_default_block_markbad(struct nand_chip *chip, loff_t ofs)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct mtd_oob_ops ops;
	uint8_t buf[2] = { 0, 0 };
	int ret = 0, res, page_offset;

	memset(&ops, 0, sizeof(ops));
	ops.oobbuf = buf;
	ops.ooboffs = chip->badblockpos;
	if (chip->options & NAND_BUSWIDTH_16) {
		/* 16-bit devices: marker must start on an even column. */
		ops.ooboffs &= ~0x01;
		ops.len = ops.ooblen = 2;
	} else {
		ops.len = ops.ooblen = 1;
	}
	ops.mode = MTD_OPS_PLACE_OOB;

	/* Write to every page that may hold a bad-block marker. */
	page_offset = nand_bbm_get_next_page(chip, 0);

	while (page_offset >= 0) {
		res = nand_do_write_oob(chip,
					ofs + (page_offset * mtd->writesize),
					&ops);

		/* Keep the first error, but keep marking the other pages. */
		if (!ret)
			ret = res;

		page_offset = nand_bbm_get_next_page(chip, page_offset + 1);
	}

	return ret;
}
486
487
488
489
490
491
492int nand_markbad_bbm(struct nand_chip *chip, loff_t ofs)
493{
494 if (chip->legacy.block_markbad)
495 return chip->legacy.block_markbad(chip, ofs);
496
497 return nand_default_block_markbad(chip, ofs);
498}
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
/**
 * nand_block_markbad_lowlevel - mark a block bad
 * @chip: NAND chip object
 * @ofs: offset of the block to mark bad
 *
 * Marks the block bad both in the on-flash OOB marker (unless
 * NAND_BBT_NO_OOB_BBM is set) and in the in-RAM bad block table when one
 * exists. The block is erased first so the marker can be programmed; the
 * erase result is deliberately ignored — the block is being retired
 * anyway. On full success the MTD bad-block statistics are bumped.
 */
static int nand_block_markbad_lowlevel(struct nand_chip *chip, loff_t ofs)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int res, ret = 0;

	if (!(chip->bbt_options & NAND_BBT_NO_OOB_BBM)) {
		struct erase_info einfo;

		/* Attempt erase before marking OOB. */
		memset(&einfo, 0, sizeof(einfo));
		einfo.addr = ofs;
		einfo.len = 1ULL << chip->phys_erase_shift;
		nand_erase_nand(chip, &einfo, 0);

		/* Write bad block marker to OOB. */
		ret = nand_get_device(chip);
		if (ret)
			return ret;

		ret = nand_markbad_bbm(chip, ofs);
		nand_release_device(chip);
	}

	/* Mark block bad in BBT. */
	if (chip->bbt) {
		res = nand_markbad_bbt(chip, ofs);
		/* Preserve the OOB-marking error, if any. */
		if (!ret)
			ret = res;
	}

	if (!ret)
		mtd->ecc_stats.badblocks++;

	return ret;
}
554
555
556
557
558
559
560
561
562static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs)
563{
564 struct nand_chip *chip = mtd_to_nand(mtd);
565
566 if (!chip->bbt)
567 return 0;
568
569 return nand_isreserved_bbt(chip, ofs);
570}
571
572
573
574
575
576
577
578
579
580
581static int nand_block_checkbad(struct nand_chip *chip, loff_t ofs, int allowbbt)
582{
583
584 if (chip->bbt)
585 return nand_isbad_bbt(chip, ofs, allowbbt);
586
587 return nand_isbad_bbm(chip, ofs);
588}
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
/**
 * nand_soft_waitrdy - Poll the STATUS register until the chip is ready
 * @chip: NAND chip structure
 * @timeout_ms: Timeout in ms
 *
 * Software-only ready/busy polling for exec_op-based controllers without
 * a hardware R/B line: issues a STATUS command and reads the status byte
 * until the READY bit is set or @timeout_ms elapses. A final READ0 is
 * issued before returning so that subsequent data cycles are not
 * interpreted as status reads.
 *
 * Returns 0 when ready, -ETIMEDOUT on timeout, or another negative error
 * code on failure.
 */
int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
{
	const struct nand_sdr_timings *timings;
	u8 status = 0;
	int ret;

	if (!nand_has_exec_op(chip))
		return -ENOTSUPP;

	/* Wait tWB before polling the STATUS reg. */
	timings = nand_get_sdr_timings(nand_get_interface_config(chip));
	ndelay(PSEC_TO_NSEC(timings->tWB_max));

	ret = nand_status_op(chip, NULL);
	if (ret)
		return ret;

	/*
	 * +1 below is necessary because if we are now in the last fraction
	 * of jiffy and msecs_to_jiffies is 1 then we will wait only that
	 * small jiffy fraction - possibly leading to false timeout.
	 */
	timeout_ms = jiffies + msecs_to_jiffies(timeout_ms) + 1;
	do {
		ret = nand_read_data_op(chip, &status, sizeof(status), true,
					false);
		if (ret)
			break;

		if (status & NAND_STATUS_READY)
			break;

		/*
		 * Typical lowest execution time for a tR on most NANDs is
		 * 10-20us, sleeping 10us should give enough granularity
		 * without hammering the bus.
		 */
		udelay(10);
	} while (time_before(jiffies, timeout_ms));

	/*
	 * Leave status-read mode so the next data cycles are regular data
	 * reads, regardless of how the loop exited.
	 */
	nand_exit_status_op(chip);

	if (ret)
		return ret;

	return status & NAND_STATUS_READY ? 0 : -ETIMEDOUT;
};
EXPORT_SYMBOL_GPL(nand_soft_waitrdy);
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod,
676 unsigned long timeout_ms)
677{
678
679
680
681
682
683
684
685 timeout_ms = jiffies + msecs_to_jiffies(timeout_ms) + 1;
686 do {
687 if (gpiod_get_value_cansleep(gpiod))
688 return 0;
689
690 cond_resched();
691 } while (time_before(jiffies, timeout_ms));
692
693 return gpiod_get_value_cansleep(gpiod) ? 0 : -ETIMEDOUT;
694};
695EXPORT_SYMBOL_GPL(nand_gpio_waitrdy);
696
697
698
699
700
701
702
703
704
705
/**
 * panic_nand_wait - [GENERIC] wait until the command is done
 * @chip: NAND chip structure
 * @timeo: timeout in ms (iterations of a 1 ms busy-wait)
 *
 * Wait for command done. Used in panic context where sleeping and taking
 * locks are not allowed, hence the mdelay()-based busy loop. Prefers the
 * legacy ->dev_ready() hook, otherwise reads the status byte directly.
 */
void panic_nand_wait(struct nand_chip *chip, unsigned long timeo)
{
	int i;
	for (i = 0; i < timeo; i++) {
		if (chip->legacy.dev_ready) {
			if (chip->legacy.dev_ready(chip))
				break;
		} else {
			int ret;
			u8 status;

			ret = nand_read_data_op(chip, &status, sizeof(status),
						true, false);
			if (ret)
				return;

			if (status & NAND_STATUS_READY)
				break;
		}
		mdelay(1);
	}
}
728
729static bool nand_supports_get_features(struct nand_chip *chip, int addr)
730{
731 return (chip->parameters.supports_set_get_features &&
732 test_bit(addr, chip->parameters.get_feature_list));
733}
734
735static bool nand_supports_set_features(struct nand_chip *chip, int addr)
736{
737 return (chip->parameters.supports_set_get_features &&
738 test_bit(addr, chip->parameters.set_feature_list));
739}
740
741
742
743
744
745
746
747
748
749
/**
 * nand_reset_interface - Reset data interface and timings
 * @chip: The NAND chip
 * @chipnr: Internal die id
 *
 * Reverts the controller/chip interface to the reset (safe, lowest
 * common denominator) configuration so that a subsequent chip reset is
 * issued with timings both sides are guaranteed to understand.
 *
 * Returns 0 for success or negative error code otherwise.
 */
static int nand_reset_interface(struct nand_chip *chip, int chipnr)
{
	const struct nand_controller_ops *ops = chip->controller->ops;
	int ret;

	if (!nand_controller_can_setup_interface(chip))
		return 0;

	/*
	 * Record the reset config as the current one before programming
	 * the controller, so chip->current_interface_config never claims
	 * a faster mode than what the hardware is actually using.
	 */
	chip->current_interface_config = nand_get_reset_interface_config();
	ret = ops->setup_interface(chip, chipnr,
				   chip->current_interface_config);
	if (ret)
		pr_err("Failed to configure data interface to SDR timing mode 0\n");

	return ret;
}
780
781
782
783
784
785
786
787
788
789
790
/**
 * nand_setup_interface - Setup the best data interface and timings
 * @chip: The NAND chip
 * @chipnr: Internal die id
 *
 * Programs the previously chosen best interface configuration: first
 * asks the chip to switch timing mode via SET_FEATURES (when supported),
 * then reconfigures the controller, and finally reads the mode back via
 * GET_FEATURES (when supported) to verify the chip acknowledged it. On
 * verification failure the interface is reset and the chip is given a
 * RESET so both ends fall back to a known-good state.
 *
 * Returns 0 for success or negative error code otherwise.
 */
static int nand_setup_interface(struct nand_chip *chip, int chipnr)
{
	const struct nand_controller_ops *ops = chip->controller->ops;
	u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = { };
	int ret;

	if (!nand_controller_can_setup_interface(chip))
		return 0;

	/* Nothing to do when no best config was ever selected. */
	if (!chip->best_interface_config)
		return 0;

	tmode_param[0] = chip->best_interface_config->timings.mode;

	/* Change the mode on the chip side (if supported by the NAND chip). */
	if (nand_supports_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE)) {
		nand_select_target(chip, chipnr);
		ret = nand_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
					tmode_param);
		nand_deselect_target(chip);
		if (ret)
			return ret;
	}

	/* Change the mode on the controller side. */
	ret = ops->setup_interface(chip, chipnr, chip->best_interface_config);
	if (ret)
		return ret;

	/* Check the mode has been accepted by the chip, if supported. */
	if (!nand_supports_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE))
		goto update_interface_config;

	memset(tmode_param, 0, ONFI_SUBFEATURE_PARAM_LEN);
	nand_select_target(chip, chipnr);
	ret = nand_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
				tmode_param);
	nand_deselect_target(chip);
	if (ret)
		goto err_reset_chip;

	if (tmode_param[0] != chip->best_interface_config->timings.mode) {
		pr_warn("timing mode %d not acknowledged by the NAND chip\n",
			chip->best_interface_config->timings.mode);
		goto err_reset_chip;
	}

update_interface_config:
	chip->current_interface_config = chip->best_interface_config;

	return 0;

err_reset_chip:
	/*
	 * Fallback: revert the controller to the reset interface, then
	 * reset the chip so both ends are back in a consistent state.
	 */
	nand_reset_interface(chip, chipnr);
	nand_select_target(chip, chipnr);
	nand_reset_op(chip);
	nand_deselect_target(chip);

	return ret;
}
862
863
864
865
866
867
868
869
870
871
872
/**
 * nand_choose_best_sdr_timings - Pick up the best SDR timings supported
 * @chip: The NAND chip
 * @iface: The interface configuration to fill (and store as best config)
 * @spec_timings: Chip-specific timings, or NULL to use ONFI-advertised modes
 *
 * Tries @spec_timings first (when given) against the controller in
 * check-only mode; if the controller rejects them, falls back to the
 * highest ONFI SDR timing mode the controller accepts, counting down
 * from the best candidate. The chosen config is stored in
 * chip->best_interface_config.
 *
 * Returns 0 (a usable mode is always recorded).
 */
int nand_choose_best_sdr_timings(struct nand_chip *chip,
				 struct nand_interface_config *iface,
				 struct nand_sdr_timings *spec_timings)
{
	const struct nand_controller_ops *ops = chip->controller->ops;
	int best_mode = 0, mode, ret;

	iface->type = NAND_SDR_IFACE;

	if (spec_timings) {
		iface->timings.sdr = *spec_timings;
		iface->timings.mode = onfi_find_closest_sdr_mode(spec_timings);

		/* Verify the controller supports the requested interface. */
		ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
					   iface);
		if (!ret) {
			chip->best_interface_config = iface;
			return ret;
		}

		/* Fall back to slower modes. */
		best_mode = iface->timings.mode;
	} else if (chip->parameters.onfi) {
		best_mode = fls(chip->parameters.onfi->async_timing_mode) - 1;
	}

	/* Count down from the best candidate until one is accepted. */
	for (mode = best_mode; mode >= 0; mode--) {
		onfi_fill_interface_config(chip, iface, NAND_SDR_IFACE, mode);

		ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
					   iface);
		if (!ret)
			break;
	}

	chip->best_interface_config = iface;

	return 0;
}
913
914
915
916
917
918
919
920
921
922
923
924
925
926
/**
 * nand_choose_interface_config - find the best data interface and timings
 * @chip: The NAND chip
 *
 * Allocates an interface configuration and lets either the chip-specific
 * ->choose_interface_config() hook or the generic SDR selection fill it
 * in. On success the allocation is retained via
 * chip->best_interface_config (set by the callees); on failure it is
 * freed here.
 *
 * Returns 0 for success or negative error code otherwise.
 */
static int nand_choose_interface_config(struct nand_chip *chip)
{
	struct nand_interface_config *iface;
	int ret;

	if (!nand_controller_can_setup_interface(chip))
		return 0;

	iface = kzalloc(sizeof(*iface), GFP_KERNEL);
	if (!iface)
		return -ENOMEM;

	if (chip->ops.choose_interface_config)
		ret = chip->ops.choose_interface_config(chip, iface);
	else
		ret = nand_choose_best_sdr_timings(chip, iface, NULL);

	if (ret)
		kfree(iface);

	return ret;
}
949
950
951
952
953
954
955
956
957
958
959
960
961
/**
 * nand_fill_column_cycles - fill the column cycles of an address
 * @chip: The NAND chip
 * @addrs: Array of address cycles to fill
 * @offset_in_page: The offset in the page
 *
 * Fills the first or the first two bytes of the @addrs field depending
 * on the NAND bus width and the page size.
 *
 * Returns the number of cycles needed to encode the address, or a
 * negative error code in case of failure.
 */
static int nand_fill_column_cycles(struct nand_chip *chip, u8 *addrs,
				   unsigned int offset_in_page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	/* Make sure the offset is less than the actual page size. */
	if (offset_in_page > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	/*
	 * On small page NANDs, there's a dedicated command to access the OOB
	 * area, and the column address is relative to the start of the OOB
	 * area, not the start of the page. Asjust the address accordingly.
	 */
	if (mtd->writesize <= 512 && offset_in_page >= mtd->writesize)
		offset_in_page -= mtd->writesize;

	/*
	 * The offset in page is expressed in bytes, if the NAND bus is 16-bit
	 * wide, then it must be divided by 2.
	 */
	if (chip->options & NAND_BUSWIDTH_16) {
		if (WARN_ON(offset_in_page % 2))
			return -EINVAL;

		offset_in_page /= 2;
	}

	addrs[0] = offset_in_page;

	/*
	 * Small page NANDs use 1 cycle for the columns, while large page NANDs
	 * need 2.
	 */
	if (mtd->writesize <= 512)
		return 1;

	addrs[1] = offset_in_page >> 8;

	return 2;
}
1003
/*
 * Small-page (writesize <= 512) READ PAGE via exec_op: one of the READ0 /
 * READ1 / READOOB commands (picked from the column offset), 3-4 address
 * cycles, a wait for ready, then an optional data-in phase.
 */
static int nand_sp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
				     unsigned int offset_in_page, void *buf,
				     unsigned int len)
{
	const struct nand_sdr_timings *sdr =
		nand_get_sdr_timings(nand_get_interface_config(chip));
	struct mtd_info *mtd = nand_to_mtd(chip);
	u8 addrs[4];
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_ADDR(3, addrs, PSEC_TO_NSEC(sdr->tWB_max)),
		NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
				 PSEC_TO_NSEC(sdr->tRR_min)),
		NAND_OP_DATA_IN(len, buf, 0),
	};
	struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
	int ret;

	/* Drop the DATA_IN instruction if len is set to 0. */
	if (!len)
		op.ninstrs--;

	/* Pick the command matching the targeted region of the page. */
	if (offset_in_page >= mtd->writesize)
		instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
	else if (offset_in_page >= 256 &&
		 !(chip->options & NAND_BUSWIDTH_16))
		instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;

	ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
	if (ret < 0)
		return ret;

	addrs[1] = page;
	addrs[2] = page >> 8;

	/* Chips with more than 65536 pages need an extra row cycle. */
	if (chip->options & NAND_ROW_ADDR_3) {
		addrs[3] = page >> 16;
		instrs[1].ctx.addr.naddrs++;
	}

	return nand_exec_op(chip, &op);
}
1046
/*
 * Large-page (writesize > 512) READ PAGE via exec_op: READ0, 4-5 address
 * cycles (2 column + 2-3 row), READSTART, a wait for ready, then an
 * optional data-in phase.
 */
static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
				     unsigned int offset_in_page, void *buf,
				     unsigned int len)
{
	const struct nand_sdr_timings *sdr =
		nand_get_sdr_timings(nand_get_interface_config(chip));
	u8 addrs[5];
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_ADDR(4, addrs, 0),
		NAND_OP_CMD(NAND_CMD_READSTART, PSEC_TO_NSEC(sdr->tWB_max)),
		NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
				 PSEC_TO_NSEC(sdr->tRR_min)),
		NAND_OP_DATA_IN(len, buf, 0),
	};
	struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
	int ret;

	/* Drop the DATA_IN instruction if len is set to 0. */
	if (!len)
		op.ninstrs--;

	ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
	if (ret < 0)
		return ret;

	addrs[2] = page;
	addrs[3] = page >> 8;

	/* Chips with more than 65536 pages need an extra row cycle. */
	if (chip->options & NAND_ROW_ADDR_3) {
		addrs[4] = page >> 16;
		instrs[1].ctx.addr.naddrs++;
	}

	return nand_exec_op(chip, &op);
}
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
/**
 * nand_read_page_op - Do a READ PAGE operation
 * @chip: The NAND chip
 * @page: page to read
 * @offset_in_page: offset within the page
 * @buf: buffer used to store the data
 * @len: length of the buffer
 *
 * This function issues a READ PAGE operation. This function does not
 * select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_read_page_op(struct nand_chip *chip, unsigned int page,
		      unsigned int offset_in_page, void *buf, unsigned int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (len && !buf)
		return -EINVAL;

	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	if (nand_has_exec_op(chip)) {
		/* Large and small page chips need different sequences. */
		if (mtd->writesize > 512)
			return nand_lp_exec_read_page_op(chip, page,
							 offset_in_page, buf,
							 len);

		return nand_sp_exec_read_page_op(chip, page, offset_in_page,
						 buf, len);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_READ0, offset_in_page, page);
	if (len)
		chip->legacy.read_buf(chip, buf, len);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_read_page_op);
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
/**
 * nand_read_param_page_op - Do a READ PARAMETER PAGE operation
 * @chip: The NAND chip
 * @page: parameter page to read
 * @buf: buffer used to store the data
 * @len: length of the buffer
 *
 * This function issues a READ PARAMETER PAGE operation. This function
 * does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf,
			    unsigned int len)
{
	unsigned int i;
	u8 *p = buf;

	if (len && !buf)
		return -EINVAL;

	if (nand_has_exec_op(chip)) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(nand_get_interface_config(chip));
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_PARAM, 0),
			NAND_OP_ADDR(1, &page, PSEC_TO_NSEC(sdr->tWB_max)),
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
					 PSEC_TO_NSEC(sdr->tRR_min)),
			/* Parameter pages are always read 8 bits at a time. */
			NAND_OP_8BIT_DATA_IN(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		/* Drop the DATA_IN instruction if len is set to 0. */
		if (!len)
			op.ninstrs--;

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_PARAM, page, -1);
	for (i = 0; i < len; i++)
		p[i] = chip->legacy.read_byte(chip);

	return 0;
}
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
/**
 * nand_change_read_column_op - Do a CHANGE READ COLUMN operation
 * @chip: The NAND chip
 * @offset_in_page: offset within the page
 * @buf: buffer used to store the data
 * @len: length of the buffer
 * @force_8bit: force 8-bit bus access
 *
 * This function issues a CHANGE READ COLUMN operation. This function
 * does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_change_read_column_op(struct nand_chip *chip,
			       unsigned int offset_in_page, void *buf,
			       unsigned int len, bool force_8bit)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (len && !buf)
		return -EINVAL;

	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	/* Small page NANDs do not support column change. */
	if (mtd->writesize <= 512)
		return -ENOTSUPP;

	if (nand_has_exec_op(chip)) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(nand_get_interface_config(chip));
		u8 addrs[2] = {};
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_RNDOUT, 0),
			NAND_OP_ADDR(2, addrs, 0),
			NAND_OP_CMD(NAND_CMD_RNDOUTSTART,
				    PSEC_TO_NSEC(sdr->tCCS_min)),
			NAND_OP_DATA_IN(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
		int ret;

		ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
		if (ret < 0)
			return ret;

		/* Drop the DATA_IN instruction if len is set to 0. */
		if (!len)
			op.ninstrs--;

		instrs[3].ctx.data.force_8bit = force_8bit;

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_RNDOUT, offset_in_page, -1);
	if (len)
		chip->legacy.read_buf(chip, buf, len);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_change_read_column_op);
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
/**
 * nand_read_oob_op - Do a READ OOB operation
 * @chip: The NAND chip
 * @page: page to read
 * @offset_in_oob: offset within the OOB area
 * @buf: buffer used to store the data
 * @len: length of the buffer
 *
 * This function issues a READ OOB operation. This function does not
 * select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_read_oob_op(struct nand_chip *chip, unsigned int page,
		     unsigned int offset_in_oob, void *buf, unsigned int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (len && !buf)
		return -EINVAL;

	if (offset_in_oob + len > mtd->oobsize)
		return -EINVAL;

	/* exec_op path: OOB is just a column offset past the page data. */
	if (nand_has_exec_op(chip))
		return nand_read_page_op(chip, page,
					 mtd->writesize + offset_in_oob,
					 buf, len);

	chip->legacy.cmdfunc(chip, NAND_CMD_READOOB, offset_in_oob, page);
	if (len)
		chip->legacy.read_buf(chip, buf, len);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_read_oob_op);
1273
/*
 * Core of the PROG PAGE sequence via exec_op: SEQIN + addresses + data,
 * optionally followed by PAGEPROG + wait (when @prog is true). On small
 * page devices a pointer command (READ0/READ1/READOOB) must precede
 * SEQIN to select the page region; on large page devices that leading
 * command is stripped from the instruction array. When @prog is true the
 * chip status is returned (>= 0), otherwise 0/negative error.
 */
static int nand_exec_prog_page_op(struct nand_chip *chip, unsigned int page,
				  unsigned int offset_in_page, const void *buf,
				  unsigned int len, bool prog)
{
	const struct nand_sdr_timings *sdr =
		nand_get_sdr_timings(nand_get_interface_config(chip));
	struct mtd_info *mtd = nand_to_mtd(chip);
	u8 addrs[5] = {};
	struct nand_op_instr instrs[] = {
		/*
		 * The first instruction will be dropped if we're dealing
		 * with a large page NAND and adjusted if we're dealing
		 * with a small page NAND and the page offset is > 255.
		 */
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_CMD(NAND_CMD_SEQIN, 0),
		NAND_OP_ADDR(0, addrs, PSEC_TO_NSEC(sdr->tADL_min)),
		NAND_OP_DATA_OUT(len, buf, 0),
		NAND_OP_CMD(NAND_CMD_PAGEPROG, PSEC_TO_NSEC(sdr->tWB_max)),
		NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tPROG_max), 0),
	};
	struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
	int naddrs = nand_fill_column_cycles(chip, addrs, offset_in_page);
	int ret;
	u8 status;

	if (naddrs < 0)
		return naddrs;

	/* Append the row address cycles after the column cycles. */
	addrs[naddrs++] = page;
	addrs[naddrs++] = page >> 8;
	if (chip->options & NAND_ROW_ADDR_3)
		addrs[naddrs++] = page >> 16;

	instrs[2].ctx.addr.naddrs = naddrs;

	/* Drop the last two instructions if we're not programming the page. */
	if (!prog) {
		op.ninstrs -= 2;

		/* Also drop the DATA_OUT instruction if empty. */
		if (!len)
			op.ninstrs--;
	}

	if (mtd->writesize <= 512) {
		/*
		 * Small pages need some more tweaking: we have to adjust the
		 * first instruction depending on the page offset we're trying
		 * to access.
		 */
		if (offset_in_page >= mtd->writesize)
			instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
		else if (offset_in_page >= 256 &&
			 !(chip->options & NAND_BUSWIDTH_16))
			instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;
	} else {
		/*
		 * Drop the first command if we're dealing with a large page
		 * NAND.
		 */
		op.instrs++;
		op.ninstrs--;
	}

	ret = nand_exec_op(chip, &op);
	if (!prog || ret)
		return ret;

	ret = nand_status_op(chip, &status);
	if (ret)
		return ret;

	return status;
}
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
/**
 * nand_prog_page_begin_op - starts a PROG PAGE operation
 * @chip: The NAND chip
 * @page: page to write
 * @offset_in_page: offset within the page
 * @buf: buffer containing the data to write to the page
 * @len: length of the buffer
 *
 * This function issues the first half of a PROG PAGE operation (SEQIN +
 * data, without PAGEPROG). This function does not select/unselect the CS
 * line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page,
			    unsigned int offset_in_page, const void *buf,
			    unsigned int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (len && !buf)
		return -EINVAL;

	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	if (nand_has_exec_op(chip))
		return nand_exec_prog_page_op(chip, page, offset_in_page, buf,
					      len, false);

	chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page, page);

	if (buf)
		chip->legacy.write_buf(chip, buf, len);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_prog_page_begin_op);
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
/**
 * nand_prog_page_end_op - ends a PROG PAGE operation
 * @chip: The NAND chip
 *
 * This function issues the second half of a PROG PAGE operation
 * (PAGEPROG + wait for ready + status check). This function does not
 * select/unselect the CS line.
 *
 * Returns 0 on success, -EIO if the chip reports a program failure, or
 * another negative error code otherwise.
 */
int nand_prog_page_end_op(struct nand_chip *chip)
{
	int ret;
	u8 status;

	if (nand_has_exec_op(chip)) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(nand_get_interface_config(chip));
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_PAGEPROG,
				    PSEC_TO_NSEC(sdr->tWB_max)),
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tPROG_max), 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		ret = nand_exec_op(chip, &op);
		if (ret)
			return ret;

		ret = nand_status_op(chip, &status);
		if (ret)
			return ret;
	} else {
		chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1);
		ret = chip->legacy.waitfunc(chip);
		if (ret < 0)
			return ret;

		status = ret;
	}

	if (status & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(nand_prog_page_end_op);
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
/**
 * nand_prog_page_op - Do a full PROG PAGE operation
 * @chip: The NAND chip
 * @page: page to write
 * @offset_in_page: offset within the page
 * @buf: buffer containing the data to write to the page
 * @len: length of the buffer
 *
 * This function issues a full PROG PAGE operation (SEQIN + data +
 * PAGEPROG + status check). This function does not select/unselect the
 * CS line.
 *
 * Returns 0 on success, -EIO on program failure, or another negative
 * error code otherwise.
 */
int nand_prog_page_op(struct nand_chip *chip, unsigned int page,
		      unsigned int offset_in_page, const void *buf,
		      unsigned int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int status;

	if (!len || !buf)
		return -EINVAL;

	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	if (nand_has_exec_op(chip)) {
		/* Returns the chip status (or a negative error). */
		status = nand_exec_prog_page_op(chip, page, offset_in_page, buf,
						len, true);
	} else {
		chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page,
				     page);
		chip->legacy.write_buf(chip, buf, len);
		chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1);
		status = chip->legacy.waitfunc(chip);
	}

	if (status & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(nand_prog_page_op);
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
/**
 * nand_change_write_column_op - Do a CHANGE WRITE COLUMN operation
 * @chip: The NAND chip
 * @offset_in_page: offset within the page
 * @buf: buffer containing the data to send to the NAND
 * @len: length of the buffer
 * @force_8bit: force 8-bit bus access
 *
 * This function issues a CHANGE WRITE COLUMN operation. This function
 * does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_change_write_column_op(struct nand_chip *chip,
				unsigned int offset_in_page,
				const void *buf, unsigned int len,
				bool force_8bit)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (len && !buf)
		return -EINVAL;

	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	/* Small page NANDs do not support column change. */
	if (mtd->writesize <= 512)
		return -ENOTSUPP;

	if (nand_has_exec_op(chip)) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(nand_get_interface_config(chip));
		u8 addrs[2];
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_RNDIN, 0),
			NAND_OP_ADDR(2, addrs, PSEC_TO_NSEC(sdr->tCCS_min)),
			NAND_OP_DATA_OUT(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
		int ret;

		ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
		if (ret < 0)
			return ret;

		instrs[2].ctx.data.force_8bit = force_8bit;

		/* Drop the DATA_OUT instruction if len is set to 0. */
		if (!len)
			op.ninstrs--;

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_RNDIN, offset_in_page, -1);
	if (len)
		chip->legacy.write_buf(chip, buf, len);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_change_write_column_op);
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
/**
 * nand_readid_op - Do a READID operation
 * @chip: The NAND chip
 * @addr: address cycle to pass after the READID command
 * @buf: buffer used to store the ID
 * @len: length of the buffer
 *
 * This function sends a READID command and reads back the ID returned by
 * the NAND. This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
		   unsigned int len)
{
	unsigned int i;
	u8 *id = buf;

	if (len && !buf)
		return -EINVAL;

	if (nand_has_exec_op(chip)) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(nand_get_interface_config(chip));
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_READID, 0),
			NAND_OP_ADDR(1, &addr, PSEC_TO_NSEC(sdr->tADL_min)),
			/* The ID is always transferred 8 bits at a time. */
			NAND_OP_8BIT_DATA_IN(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		/* Drop the DATA_IN instruction if len is set to 0. */
		if (!len)
			op.ninstrs--;

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_READID, addr, -1);

	for (i = 0; i < len; i++)
		id[i] = chip->legacy.read_byte(chip);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_readid_op);
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
/**
 * nand_status_op - Do a STATUS operation
 * @chip: The NAND chip
 * @status: out variable to store the NAND status, may be NULL if the
 *	    caller only wants to put the chip in status-read mode
 *
 * This function sends a STATUS command and reads back the status
 * returned by the NAND. This function does not select/unselect the CS
 * line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_status_op(struct nand_chip *chip, u8 *status)
{
	if (nand_has_exec_op(chip)) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(nand_get_interface_config(chip));
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_STATUS,
				    PSEC_TO_NSEC(sdr->tADL_min)),
			NAND_OP_8BIT_DATA_IN(1, status, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		/* Drop the DATA_IN instruction when no status is wanted. */
		if (!status)
			op.ninstrs--;

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_STATUS, -1, -1);
	if (status)
		*status = chip->legacy.read_byte(chip);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_status_op);
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637int nand_exit_status_op(struct nand_chip *chip)
1638{
1639 if (nand_has_exec_op(chip)) {
1640 struct nand_op_instr instrs[] = {
1641 NAND_OP_CMD(NAND_CMD_READ0, 0),
1642 };
1643 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1644
1645 return nand_exec_op(chip, &op);
1646 }
1647
1648 chip->legacy.cmdfunc(chip, NAND_CMD_READ0, -1, -1);
1649
1650 return 0;
1651}
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
/**
 * nand_erase_op - Do an erase operation
 * @chip: The NAND chip
 * @eraseblock: block to erase
 *
 * Send an ERASE1/ERASE2 command pair for @eraseblock, wait for the chip to
 * become ready again, then check the status byte; -EIO is returned if the
 * erase failed (NAND_STATUS_FAIL set).
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock)
{
	/* Convert the eraseblock index into a (row) page address. */
	unsigned int page = eraseblock <<
			    (chip->phys_erase_shift - chip->page_shift);
	int ret;
	u8 status;

	if (nand_has_exec_op(chip)) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(nand_get_interface_config(chip));
		/* Room for a third row cycle (NAND_ROW_ADDR_3 case below). */
		u8 addrs[3] = { page, page >> 8, page >> 16 };
		/*
		 * NOTE(review): the ERASE2 delay slot is in ns but tWB_max is
		 * converted with PSEC_TO_MSEC here — verify against the
		 * NAND_OP_CMD() contract.
		 */
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_ERASE1, 0),
			NAND_OP_ADDR(2, addrs, 0),
			NAND_OP_CMD(NAND_CMD_ERASE2,
				    PSEC_TO_MSEC(sdr->tWB_max)),
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tBERS_max), 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		/* Chips needing a third row address cycle send addrs[2] too. */
		if (chip->options & NAND_ROW_ADDR_3)
			instrs[1].ctx.addr.naddrs++;

		ret = nand_exec_op(chip, &op);
		if (ret)
			return ret;

		ret = nand_status_op(chip, &status);
		if (ret)
			return ret;
	} else {
		chip->legacy.cmdfunc(chip, NAND_CMD_ERASE1, -1, page);
		chip->legacy.cmdfunc(chip, NAND_CMD_ERASE2, -1, -1);

		/* waitfunc() returns the status byte, or < 0 on error. */
		ret = chip->legacy.waitfunc(chip);
		if (ret < 0)
			return ret;

		status = ret;
	}

	if (status & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(nand_erase_op);
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723
/**
 * nand_set_features_op - Do a SET FEATURES operation
 * @chip: The NAND chip
 * @feature: feature id
 * @data: ONFI_SUBFEATURE_PARAM_LEN bytes of parameter data
 *
 * Send the SET FEATURES command plus parameter bytes and wait for the chip
 * to be ready again.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
static int nand_set_features_op(struct nand_chip *chip, u8 feature,
				const void *data)
{
	const u8 *params = data;
	int i, ret;

	if (nand_has_exec_op(chip)) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(nand_get_interface_config(chip));
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_SET_FEATURES, 0),
			NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tADL_min)),
			NAND_OP_8BIT_DATA_OUT(ONFI_SUBFEATURE_PARAM_LEN, data,
					      PSEC_TO_NSEC(sdr->tWB_max)),
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tFEAT_max), 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_SET_FEATURES, feature, -1);
	for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
		chip->legacy.write_byte(chip, params[i]);

	/* waitfunc() returns the status byte, or < 0 on error. */
	ret = chip->legacy.waitfunc(chip);
	if (ret < 0)
		return ret;

	if (ret & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
/**
 * nand_get_features_op - Do a GET FEATURES operation
 * @chip: The NAND chip
 * @feature: feature id
 * @data: output buffer, ONFI_SUBFEATURE_PARAM_LEN bytes
 *
 * Send the GET FEATURES command, wait for the chip to be ready, then read
 * the parameter bytes back into @data.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
static int nand_get_features_op(struct nand_chip *chip, u8 feature,
				void *data)
{
	u8 *params = data;
	int i;

	if (nand_has_exec_op(chip)) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(nand_get_interface_config(chip));
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_GET_FEATURES, 0),
			NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tWB_max)),
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tFEAT_max),
					 PSEC_TO_NSEC(sdr->tRR_min)),
			NAND_OP_8BIT_DATA_IN(ONFI_SUBFEATURE_PARAM_LEN,
					     data, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_GET_FEATURES, feature, -1);
	for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
		params[i] = chip->legacy.read_byte(chip);

	return 0;
}
1799
1800static int nand_wait_rdy_op(struct nand_chip *chip, unsigned int timeout_ms,
1801 unsigned int delay_ns)
1802{
1803 if (nand_has_exec_op(chip)) {
1804 struct nand_op_instr instrs[] = {
1805 NAND_OP_WAIT_RDY(PSEC_TO_MSEC(timeout_ms),
1806 PSEC_TO_NSEC(delay_ns)),
1807 };
1808 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1809
1810 return nand_exec_op(chip, &op);
1811 }
1812
1813
1814 if (!chip->legacy.dev_ready)
1815 udelay(chip->legacy.chip_delay);
1816 else
1817 nand_wait_ready(chip);
1818
1819 return 0;
1820}
1821
1822
1823
1824
1825
1826
1827
1828
1829
1830
1831
/**
 * nand_reset_op - Do a reset operation
 * @chip: The NAND chip
 *
 * Send a RESET command and (on the exec_op path) wait for the NAND to be
 * ready before returning.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_reset_op(struct nand_chip *chip)
{
	if (nand_has_exec_op(chip)) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(nand_get_interface_config(chip));
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_RESET, PSEC_TO_NSEC(sdr->tWB_max)),
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tRST_max), 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_RESET, -1, -1);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_reset_op);
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
1868 bool force_8bit, bool check_only)
1869{
1870 if (!len || !buf)
1871 return -EINVAL;
1872
1873 if (nand_has_exec_op(chip)) {
1874 struct nand_op_instr instrs[] = {
1875 NAND_OP_DATA_IN(len, buf, 0),
1876 };
1877 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1878
1879 instrs[0].ctx.data.force_8bit = force_8bit;
1880
1881 if (check_only)
1882 return nand_check_op(chip, &op);
1883
1884 return nand_exec_op(chip, &op);
1885 }
1886
1887 if (check_only)
1888 return 0;
1889
1890 if (force_8bit) {
1891 u8 *p = buf;
1892 unsigned int i;
1893
1894 for (i = 0; i < len; i++)
1895 p[i] = chip->legacy.read_byte(chip);
1896 } else {
1897 chip->legacy.read_buf(chip, buf, len);
1898 }
1899
1900 return 0;
1901}
1902EXPORT_SYMBOL_GPL(nand_read_data_op);
1903
1904
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914
1915
1916
1917int nand_write_data_op(struct nand_chip *chip, const void *buf,
1918 unsigned int len, bool force_8bit)
1919{
1920 if (!len || !buf)
1921 return -EINVAL;
1922
1923 if (nand_has_exec_op(chip)) {
1924 struct nand_op_instr instrs[] = {
1925 NAND_OP_DATA_OUT(len, buf, 0),
1926 };
1927 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1928
1929 instrs[0].ctx.data.force_8bit = force_8bit;
1930
1931 return nand_exec_op(chip, &op);
1932 }
1933
1934 if (force_8bit) {
1935 const u8 *p = buf;
1936 unsigned int i;
1937
1938 for (i = 0; i < len; i++)
1939 chip->legacy.write_byte(chip, p[i]);
1940 } else {
1941 chip->legacy.write_buf(chip, buf, len);
1942 }
1943
1944 return 0;
1945}
1946EXPORT_SYMBOL_GPL(nand_write_data_op);
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956
/**
 * struct nand_op_parser_ctx - Context used by the instruction parser
 * @instrs: array of all the instructions that must be addressed
 * @ninstrs: length of the @instrs array
 * @subop: sub-operation being carved out of @instrs and handed to the
 *	   controller driver
 */
struct nand_op_parser_ctx {
	const struct nand_op_instr *instrs;
	unsigned int ninstrs;
	struct nand_subop subop;
};
1962
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982
/**
 * nand_op_parser_must_split_instr - Check if an instruction must be split
 * @pat: the parser pattern element that matches @instr
 * @instr: the instruction to check
 * @start_offset: in/out parameter. On input, offset at which the instruction
 *		  starts. On output (when true is returned), offset at which
 *		  the next chunk of the instruction must start.
 *
 * Some NAND controllers cannot issue an arbitrary number of address cycles
 * or transfer an arbitrary number of data bytes in one go. The pattern
 * element expresses those limits in ->ctx.addr.maxcycles /
 * ->ctx.data.maxlen (0 means "no limit").
 *
 * Returns true if the instruction must be split, false otherwise.
 */
static bool
nand_op_parser_must_split_instr(const struct nand_op_parser_pattern_elem *pat,
				const struct nand_op_instr *instr,
				unsigned int *start_offset)
{
	switch (pat->type) {
	case NAND_OP_ADDR_INSTR:
		/* maxcycles == 0: no limit on address cycles. */
		if (!pat->ctx.addr.maxcycles)
			break;

		if (instr->ctx.addr.naddrs - *start_offset >
		    pat->ctx.addr.maxcycles) {
			*start_offset += pat->ctx.addr.maxcycles;
			return true;
		}
		break;

	case NAND_OP_DATA_IN_INSTR:
	case NAND_OP_DATA_OUT_INSTR:
		/* maxlen == 0: no limit on the data transfer length. */
		if (!pat->ctx.data.maxlen)
			break;

		if (instr->ctx.data.len - *start_offset >
		    pat->ctx.data.maxlen) {
			*start_offset += pat->ctx.data.maxlen;
			return true;
		}
		break;

	default:
		break;
	}

	return false;
}
2018
2019
2020
2021
2022
2023
2024
2025
2026
2027
2028
2029
/**
 * nand_op_parser_match_pat - Check if a pattern matches the remaining
 *			      instructions
 * @pat: the pattern to test
 * @ctx: the parser context to match against @pat
 *
 * Check if @pat matches the set (or a subset) of instructions remaining in
 * @ctx. On success, @ctx->subop is updated with the number of instructions
 * to be passed to the controller driver and the end offset of a possibly
 * split last instruction.
 *
 * Returns true on a match, false otherwise.
 */
static bool
nand_op_parser_match_pat(const struct nand_op_parser_pattern *pat,
			 struct nand_op_parser_ctx *ctx)
{
	unsigned int instr_offset = ctx->subop.first_instr_start_off;
	const struct nand_op_instr *end = ctx->instrs + ctx->ninstrs;
	const struct nand_op_instr *instr = ctx->subop.instrs;
	unsigned int i, ninstrs;

	for (i = 0, ninstrs = 0; i < pat->nelems && instr < end; i++) {
		/*
		 * The pattern element does not match the current instruction.
		 * If the element is optional, skip it and retry the same
		 * instruction against the next element; otherwise the whole
		 * pattern fails to match.
		 */
		if (instr->type != pat->elems[i].type) {
			if (!pat->elems[i].optional)
				return false;

			continue;
		}

		/*
		 * The instruction exceeds the pattern element's constraints
		 * (max address cycles / max data length): count the partial
		 * instruction in and stop here — the remainder will be
		 * handled by a subsequent sub-operation.
		 */
		if (nand_op_parser_must_split_instr(&pat->elems[i], instr,
						    &instr_offset)) {
			ninstrs++;
			i++;
			break;
		}

		instr++;
		ninstrs++;
		instr_offset = 0;
	}

	/*
	 * This can happen if all pattern elements are optional: if the
	 * pattern did not consume at least one instruction, it is not a
	 * match.
	 */
	if (!ninstrs)
		return false;

	/*
	 * The pattern head matched, but the pattern may be longer than the
	 * instructions left to execute: every remaining element must be
	 * optional.
	 */
	for (; i < pat->nelems; i++) {
		if (!pat->elems[i].optional)
			return false;
	}

	/*
	 * We have a match: record the instruction count and the end offset
	 * of the (possibly split) last instruction in the subop.
	 */
	ctx->subop.ninstrs = ninstrs;
	ctx->subop.last_instr_end_off = instr_offset;

	return true;
}
2102
#if IS_ENABLED(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG)
/*
 * Debug helper: dump all instructions of the operation, marking with "->"
 * the ones belonging to the sub-operation about to be executed.
 */
static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
{
	const struct nand_op_instr *instr;
	char *prefix = " ";
	unsigned int i;

	pr_debug("executing subop (CS%d):\n", ctx->subop.cs);

	for (i = 0; i < ctx->ninstrs; i++) {
		instr = &ctx->instrs[i];

		/* Switch to the marker prefix while inside the subop span. */
		if (instr == &ctx->subop.instrs[0])
			prefix = " ->";

		nand_op_trace(prefix, instr);

		if (instr == &ctx->subop.instrs[ctx->subop.ninstrs - 1])
			prefix = " ";
	}
}
#else
/* No-op stub when debug tracing is compiled out. */
static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
{

}
#endif
2130
2131static int nand_op_parser_cmp_ctx(const struct nand_op_parser_ctx *a,
2132 const struct nand_op_parser_ctx *b)
2133{
2134 if (a->subop.ninstrs < b->subop.ninstrs)
2135 return -1;
2136 else if (a->subop.ninstrs > b->subop.ninstrs)
2137 return 1;
2138
2139 if (a->subop.last_instr_end_off < b->subop.last_instr_end_off)
2140 return -1;
2141 else if (a->subop.last_instr_end_off > b->subop.last_instr_end_off)
2142 return 1;
2143
2144 return 0;
2145}
2146
2147
2148
2149
2150
2151
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
/**
 * nand_op_parser_exec_op - exec_op parser
 * @chip: the NAND chip
 * @parser: patterns description provided by the controller driver
 * @op: the NAND operation to address
 * @check_only: when true, only check that @op can be handled; do not execute
 *		anything
 *
 * Helper for controller drivers that only support a limited set of
 * instruction sequences. The operation is split into sub-operations, each
 * matching one of @parser's patterns; the best (longest) match is chosen at
 * every step and its ->exec() hook invoked (unless @check_only).
 *
 * Returns 0 on success, -ENOTSUPP if no pattern matches at some point, or
 * the error returned by a pattern's ->exec() hook.
 */
int nand_op_parser_exec_op(struct nand_chip *chip,
			   const struct nand_op_parser *parser,
			   const struct nand_operation *op, bool check_only)
{
	struct nand_op_parser_ctx ctx = {
		.subop.cs = op->cs,
		.subop.instrs = op->instrs,
		.instrs = op->instrs,
		.ninstrs = op->ninstrs,
	};
	unsigned int i;

	while (ctx.subop.instrs < op->instrs + op->ninstrs) {
		const struct nand_op_parser_pattern *pattern;
		struct nand_op_parser_ctx best_ctx;
		int ret, best_pattern = -1;

		for (i = 0; i < parser->npatterns; i++) {
			struct nand_op_parser_ctx test_ctx = ctx;

			pattern = &parser->patterns[i];
			if (!nand_op_parser_match_pat(pattern, &test_ctx))
				continue;

			/* Keep the pattern consuming the most instructions. */
			if (best_pattern >= 0 &&
			    nand_op_parser_cmp_ctx(&test_ctx, &best_ctx) <= 0)
				continue;

			best_pattern = i;
			best_ctx = test_ctx;
		}

		if (best_pattern < 0) {
			pr_debug("->exec_op() parser: pattern not found!\n");
			return -ENOTSUPP;
		}

		ctx = best_ctx;
		nand_op_parser_trace(&ctx);

		if (!check_only) {
			pattern = &parser->patterns[best_pattern];
			ret = pattern->exec(chip, &ctx.subop);
			if (ret)
				return ret;
		}

		/*
		 * Advance past the executed instructions. If the last
		 * instruction was split (last_instr_end_off != 0), revisit it
		 * so the next sub-operation executes its remainder.
		 */
		ctx.subop.instrs = ctx.subop.instrs + ctx.subop.ninstrs;
		if (ctx.subop.last_instr_end_off)
			ctx.subop.instrs -= 1;

		ctx.subop.first_instr_start_off = ctx.subop.last_instr_end_off;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nand_op_parser_exec_op);
2230
2231static bool nand_instr_is_data(const struct nand_op_instr *instr)
2232{
2233 return instr && (instr->type == NAND_OP_DATA_IN_INSTR ||
2234 instr->type == NAND_OP_DATA_OUT_INSTR);
2235}
2236
2237static bool nand_subop_instr_is_valid(const struct nand_subop *subop,
2238 unsigned int instr_idx)
2239{
2240 return subop && instr_idx < subop->ninstrs;
2241}
2242
2243static unsigned int nand_subop_get_start_off(const struct nand_subop *subop,
2244 unsigned int instr_idx)
2245{
2246 if (instr_idx)
2247 return 0;
2248
2249 return subop->first_instr_start_off;
2250}
2251
2252
2253
2254
2255
2256
2257
2258
2259
2260
2261
2262
2263unsigned int nand_subop_get_addr_start_off(const struct nand_subop *subop,
2264 unsigned int instr_idx)
2265{
2266 if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2267 subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR))
2268 return 0;
2269
2270 return nand_subop_get_start_off(subop, instr_idx);
2271}
2272EXPORT_SYMBOL_GPL(nand_subop_get_addr_start_off);
2273
2274
2275
2276
2277
2278
2279
2280
2281
2282
2283
2284
/**
 * nand_subop_get_num_addr_cyc - Get the number of address cycles to issue
 * @subop: The entire sub-operation
 * @instr_idx: Index of the instruction inside the sub-operation
 *
 * Do not use ->ctx.addr.naddrs directly in drivers: the instruction may have
 * been split by the parser, in which case only a slice of the cycles belongs
 * to this sub-operation.
 *
 * Returns the number of address cycles to issue, or 0 (with a WARN) on
 * invalid arguments.
 */
unsigned int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
					 unsigned int instr_idx)
{
	int start_off, end_off;

	if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
		    subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR))
		return 0;

	start_off = nand_subop_get_addr_start_off(subop, instr_idx);

	/*
	 * Only the last instruction can end early (last_instr_end_off != 0);
	 * otherwise the instruction runs through all its naddrs cycles.
	 */
	if (instr_idx == subop->ninstrs - 1 &&
	    subop->last_instr_end_off)
		end_off = subop->last_instr_end_off;
	else
		end_off = subop->instrs[instr_idx].ctx.addr.naddrs;

	return end_off - start_off;
}
EXPORT_SYMBOL_GPL(nand_subop_get_num_addr_cyc);
2305
2306
2307
2308
2309
2310
2311
2312
2313
2314
2315
2316
2317unsigned int nand_subop_get_data_start_off(const struct nand_subop *subop,
2318 unsigned int instr_idx)
2319{
2320 if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2321 !nand_instr_is_data(&subop->instrs[instr_idx])))
2322 return 0;
2323
2324 return nand_subop_get_start_off(subop, instr_idx);
2325}
2326EXPORT_SYMBOL_GPL(nand_subop_get_data_start_off);
2327
2328
2329
2330
2331
2332
2333
2334
2335
2336
2337
2338
/**
 * nand_subop_get_data_len - Get the number of bytes to transfer
 * @subop: The entire sub-operation
 * @instr_idx: Index of the instruction inside the sub-operation
 *
 * Do not use ->ctx.data.len directly in drivers: the instruction may have
 * been split by the parser, in which case only a slice of the transfer
 * belongs to this sub-operation.
 *
 * Returns the number of bytes to transfer, or 0 (with a WARN) on invalid
 * arguments.
 */
unsigned int nand_subop_get_data_len(const struct nand_subop *subop,
				     unsigned int instr_idx)
{
	int start_off = 0, end_off;

	if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
		    !nand_instr_is_data(&subop->instrs[instr_idx])))
		return 0;

	start_off = nand_subop_get_data_start_off(subop, instr_idx);

	/* Only the last instruction of a sub-operation can end early. */
	if (instr_idx == subop->ninstrs - 1 &&
	    subop->last_instr_end_off)
		end_off = subop->last_instr_end_off;
	else
		end_off = subop->instrs[instr_idx].ctx.data.len;

	return end_off - start_off;
}
EXPORT_SYMBOL_GPL(nand_subop_get_data_len);
2359
2360
2361
2362
2363
2364
2365
2366
2367
2368
2369
2370
/**
 * nand_reset - Reset and (re)configure a NAND target
 * @chip: The NAND chip
 * @chipnr: Internal die id
 *
 * Reset the interface configuration (nand_reset_interface()), issue the
 * RESET command on the selected target, then re-apply the negotiated
 * interface configuration (nand_setup_interface()).
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_reset(struct nand_chip *chip, int chipnr)
{
	int ret;

	ret = nand_reset_interface(chip, chipnr);
	if (ret)
		return ret;

	/*
	 * The CS line has to be released before we can apply the new NAND
	 * interface settings, hence this select/deselect dance around the
	 * reset operation only.
	 */
	nand_select_target(chip, chipnr);
	ret = nand_reset_op(chip);
	nand_deselect_target(chip);
	if (ret)
		return ret;

	ret = nand_setup_interface(chip, chipnr);
	if (ret)
		return ret;

	return 0;
}
EXPORT_SYMBOL_GPL(nand_reset);
2397
2398
2399
2400
2401
2402
2403
2404
2405
2406
2407int nand_get_features(struct nand_chip *chip, int addr,
2408 u8 *subfeature_param)
2409{
2410 if (!nand_supports_get_features(chip, addr))
2411 return -ENOTSUPP;
2412
2413 if (chip->legacy.get_features)
2414 return chip->legacy.get_features(chip, addr, subfeature_param);
2415
2416 return nand_get_features_op(chip, addr, subfeature_param);
2417}
2418
2419
2420
2421
2422
2423
2424
2425
2426
2427
2428int nand_set_features(struct nand_chip *chip, int addr,
2429 u8 *subfeature_param)
2430{
2431 if (!nand_supports_set_features(chip, addr))
2432 return -ENOTSUPP;
2433
2434 if (chip->legacy.set_features)
2435 return chip->legacy.set_features(chip, addr, subfeature_param);
2436
2437 return nand_set_features_op(chip, addr, subfeature_param);
2438}
2439
2440
2441
2442
2443
2444
2445
2446
2447
2448
2449
2450
2451
2452
2453
2454
2455
2456
2457
2458
2459static int nand_check_erased_buf(void *buf, int len, int bitflips_threshold)
2460{
2461 const unsigned char *bitmap = buf;
2462 int bitflips = 0;
2463 int weight;
2464
2465 for (; len && ((uintptr_t)bitmap) % sizeof(long);
2466 len--, bitmap++) {
2467 weight = hweight8(*bitmap);
2468 bitflips += BITS_PER_BYTE - weight;
2469 if (unlikely(bitflips > bitflips_threshold))
2470 return -EBADMSG;
2471 }
2472
2473 for (; len >= sizeof(long);
2474 len -= sizeof(long), bitmap += sizeof(long)) {
2475 unsigned long d = *((unsigned long *)bitmap);
2476 if (d == ~0UL)
2477 continue;
2478 weight = hweight_long(d);
2479 bitflips += BITS_PER_LONG - weight;
2480 if (unlikely(bitflips > bitflips_threshold))
2481 return -EBADMSG;
2482 }
2483
2484 for (; len > 0; len--, bitmap++) {
2485 weight = hweight8(*bitmap);
2486 bitflips += BITS_PER_BYTE - weight;
2487 if (unlikely(bitflips > bitflips_threshold))
2488 return -EBADMSG;
2489 }
2490
2491 return bitflips;
2492}
2493
2494
2495
2496
2497
2498
2499
2500
2501
2502
2503
2504
2505
2506
2507
2508
2509
2510
2511
2512
2513
2514
2515
2516
2517
2518
2519
2520
2521
2522
2523
2524
2525
2526
2527
2528
2529
2530
2531
2532
/**
 * nand_check_erased_ecc_chunk - check if an ECC chunk contains (almost) only
 *				 0xff data
 * @data: data buffer to test
 * @datalen: data length
 * @ecc: ECC buffer
 * @ecclen: ECC length
 * @extraoob: extra OOB buffer
 * @extraooblen: extra OOB length
 * @bitflips_threshold: maximum number of bitflips
 *
 * Check whether the chunk (data + ECC + extra OOB) looks like an erased
 * region with at most @bitflips_threshold bitflips in total. The threshold
 * is a budget shared across the three sections. If the chunk qualifies, the
 * buffers are forced back to 0xff so callers see a clean erased chunk.
 *
 * Returns the total number of bitflips (<= @bitflips_threshold), or a
 * negative error code if the threshold was exceeded.
 */
int nand_check_erased_ecc_chunk(void *data, int datalen,
				void *ecc, int ecclen,
				void *extraoob, int extraooblen,
				int bitflips_threshold)
{
	int data_bitflips = 0, ecc_bitflips = 0, extraoob_bitflips = 0;

	data_bitflips = nand_check_erased_buf(data, datalen,
					      bitflips_threshold);
	if (data_bitflips < 0)
		return data_bitflips;

	/* Shrink the remaining budget so the chunk-wide bound holds. */
	bitflips_threshold -= data_bitflips;

	ecc_bitflips = nand_check_erased_buf(ecc, ecclen, bitflips_threshold);
	if (ecc_bitflips < 0)
		return ecc_bitflips;

	bitflips_threshold -= ecc_bitflips;

	extraoob_bitflips = nand_check_erased_buf(extraoob, extraooblen,
						  bitflips_threshold);
	if (extraoob_bitflips < 0)
		return extraoob_bitflips;

	/* Scrub the bitflips away: present a fully-erased chunk. */
	if (data_bitflips)
		memset(data, 0xff, datalen);

	if (ecc_bitflips)
		memset(ecc, 0xff, ecclen);

	if (extraoob_bitflips)
		memset(extraoob, 0xff, extraooblen);

	return data_bitflips + ecc_bitflips + extraoob_bitflips;
}
EXPORT_SYMBOL(nand_check_erased_ecc_chunk);
2570
2571
2572
2573
2574
2575
2576
2577
2578
2579
/**
 * nand_read_page_raw_notsupp - dummy read raw page function
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * Placeholder for chips that do not support raw page accesses.
 * Returns -ENOTSUPP unconditionally.
 */
int nand_read_page_raw_notsupp(struct nand_chip *chip, u8 *buf,
			       int oob_required, int page)
{
	return -ENOTSUPP;
}
2585
2586
2587
2588
2589
2590
2591
2592
2593
2594
2595int nand_read_page_raw(struct nand_chip *chip, uint8_t *buf, int oob_required,
2596 int page)
2597{
2598 struct mtd_info *mtd = nand_to_mtd(chip);
2599 int ret;
2600
2601 ret = nand_read_page_op(chip, page, 0, buf, mtd->writesize);
2602 if (ret)
2603 return ret;
2604
2605 if (oob_required) {
2606 ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize,
2607 false, false);
2608 if (ret)
2609 return ret;
2610 }
2611
2612 return 0;
2613}
2614EXPORT_SYMBOL(nand_read_page_raw);
2615
2616
2617
2618
2619
2620
2621
2622
2623
2624
2625
2626
2627
2628
2629
2630
2631int nand_monolithic_read_page_raw(struct nand_chip *chip, u8 *buf,
2632 int oob_required, int page)
2633{
2634 struct mtd_info *mtd = nand_to_mtd(chip);
2635 unsigned int size = mtd->writesize;
2636 u8 *read_buf = buf;
2637 int ret;
2638
2639 if (oob_required) {
2640 size += mtd->oobsize;
2641
2642 if (buf != chip->data_buf)
2643 read_buf = nand_get_data_buf(chip);
2644 }
2645
2646 ret = nand_read_page_op(chip, page, 0, read_buf, size);
2647 if (ret)
2648 return ret;
2649
2650 if (buf != chip->data_buf)
2651 memcpy(buf, read_buf, mtd->writesize);
2652
2653 return 0;
2654}
2655EXPORT_SYMBOL(nand_monolithic_read_page_raw);
2656
2657
2658
2659
2660
2661
2662
2663
2664
2665
/**
 * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * We need a special oob layout and handling even when OOB isn't used.
 * Reads each ECC step's data followed by its interleaved prepad/ECC/postpad
 * OOB bytes, then whatever OOB remains at the end of the page.
 */
static int nand_read_page_raw_syndrome(struct nand_chip *chip, uint8_t *buf,
				       int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	uint8_t *oob = chip->oob_poi;
	int steps, size, ret;

	ret = nand_read_page_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	for (steps = chip->ecc.steps; steps > 0; steps--) {
		/* Data payload of this ECC step. */
		ret = nand_read_data_op(chip, buf, eccsize, false, false);
		if (ret)
			return ret;

		buf += eccsize;

		/* Optional prepad bytes preceding the ECC bytes. */
		if (chip->ecc.prepad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
						false, false);
			if (ret)
				return ret;

			oob += chip->ecc.prepad;
		}

		/* The ECC bytes themselves. */
		ret = nand_read_data_op(chip, oob, eccbytes, false, false);
		if (ret)
			return ret;

		oob += eccbytes;

		/* Optional postpad bytes following the ECC bytes. */
		if (chip->ecc.postpad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
						false, false);
			if (ret)
				return ret;

			oob += chip->ecc.postpad;
		}
	}

	/* Read whatever OOB bytes remain after the interleaved layout. */
	size = mtd->oobsize - (oob - chip->oob_poi);
	if (size) {
		ret = nand_read_data_op(chip, oob, size, false, false);
		if (ret)
			return ret;
	}

	return 0;
}
2720
2721
2722
2723
2724
2725
2726
2727
/**
 * nand_read_page_swecc - [REPLACEABLE] software ECC based page read function
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * Raw-read the whole page (including OOB), compute the ECC in software for
 * each step, compare against the ECC stored in the OOB area, and correct
 * the data. ECC statistics are updated in mtd->ecc_stats.
 *
 * Returns the maximum number of bitflips seen in any ECC step.
 */
static int nand_read_page_swecc(struct nand_chip *chip, uint8_t *buf,
				int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *p = buf;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	uint8_t *ecc_code = chip->ecc.code_buf;
	unsigned int max_bitflips = 0;

	/* Raw read always pulls the OOB in (oob_required forced to 1). */
	chip->ecc.read_page_raw(chip, buf, 1, page);

	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
		chip->ecc.calculate(chip, p, &ecc_calc[i]);

	/* Extract the on-flash ECC bytes from the OOB area. */
	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	eccsteps = chip->ecc.steps;
	p = buf;

	for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int stat;

		stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}
	return max_bitflips;
}
2766
2767
2768
2769
2770
2771
2772
2773
2774
2775static int nand_read_subpage(struct nand_chip *chip, uint32_t data_offs,
2776 uint32_t readlen, uint8_t *bufpoi, int page)
2777{
2778 struct mtd_info *mtd = nand_to_mtd(chip);
2779 int start_step, end_step, num_steps, ret;
2780 uint8_t *p;
2781 int data_col_addr, i, gaps = 0;
2782 int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
2783 int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
2784 int index, section = 0;
2785 unsigned int max_bitflips = 0;
2786 struct mtd_oob_region oobregion = { };
2787
2788
2789 start_step = data_offs / chip->ecc.size;
2790 end_step = (data_offs + readlen - 1) / chip->ecc.size;
2791 num_steps = end_step - start_step + 1;
2792 index = start_step * chip->ecc.bytes;
2793
2794
2795 datafrag_len = num_steps * chip->ecc.size;
2796 eccfrag_len = num_steps * chip->ecc.bytes;
2797
2798 data_col_addr = start_step * chip->ecc.size;
2799
2800 p = bufpoi + data_col_addr;
2801 ret = nand_read_page_op(chip, page, data_col_addr, p, datafrag_len);
2802 if (ret)
2803 return ret;
2804
2805
2806 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
2807 chip->ecc.calculate(chip, p, &chip->ecc.calc_buf[i]);
2808
2809
2810
2811
2812
2813 ret = mtd_ooblayout_find_eccregion(mtd, index, §ion, &oobregion);
2814 if (ret)
2815 return ret;
2816
2817 if (oobregion.length < eccfrag_len)
2818 gaps = 1;
2819
2820 if (gaps) {
2821 ret = nand_change_read_column_op(chip, mtd->writesize,
2822 chip->oob_poi, mtd->oobsize,
2823 false);
2824 if (ret)
2825 return ret;
2826 } else {
2827
2828
2829
2830
2831 aligned_pos = oobregion.offset & ~(busw - 1);
2832 aligned_len = eccfrag_len;
2833 if (oobregion.offset & (busw - 1))
2834 aligned_len++;
2835 if ((oobregion.offset + (num_steps * chip->ecc.bytes)) &
2836 (busw - 1))
2837 aligned_len++;
2838
2839 ret = nand_change_read_column_op(chip,
2840 mtd->writesize + aligned_pos,
2841 &chip->oob_poi[aligned_pos],
2842 aligned_len, false);
2843 if (ret)
2844 return ret;
2845 }
2846
2847 ret = mtd_ooblayout_get_eccbytes(mtd, chip->ecc.code_buf,
2848 chip->oob_poi, index, eccfrag_len);
2849 if (ret)
2850 return ret;
2851
2852 p = bufpoi + data_col_addr;
2853 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
2854 int stat;
2855
2856 stat = chip->ecc.correct(chip, p, &chip->ecc.code_buf[i],
2857 &chip->ecc.calc_buf[i]);
2858 if (stat == -EBADMSG &&
2859 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
2860
2861 stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
2862 &chip->ecc.code_buf[i],
2863 chip->ecc.bytes,
2864 NULL, 0,
2865 chip->ecc.strength);
2866 }
2867
2868 if (stat < 0) {
2869 mtd->ecc_stats.failed++;
2870 } else {
2871 mtd->ecc_stats.corrected += stat;
2872 max_bitflips = max_t(unsigned int, max_bitflips, stat);
2873 }
2874 }
2875 return max_bitflips;
2876}
2877
2878
2879
2880
2881
2882
2883
2884
2885
2886
/**
 * nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * Not for syndrome calculating ECC controllers which need a special OOB
 * layout. The hardware generates an ECC per step (hwctl/calculate); the
 * on-flash ECC is then extracted from the OOB and used for correction.
 *
 * Returns the maximum number of bitflips seen in any ECC step, or a
 * negative error code.
 */
static int nand_read_page_hwecc(struct nand_chip *chip, uint8_t *buf,
				int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *p = buf;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	uint8_t *ecc_code = chip->ecc.code_buf;
	unsigned int max_bitflips = 0;

	ret = nand_read_page_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		/* Arm the hardware ECC engine before reading each step. */
		chip->ecc.hwctl(chip, NAND_ECC_READ);

		ret = nand_read_data_op(chip, p, eccsize, false, false);
		if (ret)
			return ret;

		chip->ecc.calculate(chip, p, &ecc_calc[i]);
	}

	ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false,
				false);
	if (ret)
		return ret;

	/* Extract the on-flash ECC bytes from the OOB area. */
	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	eccsteps = chip->ecc.steps;
	p = buf;

	for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int stat;

		stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
		if (stat == -EBADMSG &&
		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
			/* check for empty pages with bitflips */
			stat = nand_check_erased_ecc_chunk(p, eccsize,
							   &ecc_code[i], eccbytes,
							   NULL, 0,
							   chip->ecc.strength);
		}

		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}
	return max_bitflips;
}
2948
2949
2950
2951
2952
2953
2954
2955
2956
2957
2958
/**
 * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page
 *			     read
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * The hardware ECC generator is able to overwrite the syndrome bytes: the
 * data/prepad/ECC/postpad groups are interleaved on flash and correction is
 * run per step using the in-band syndrome bytes.
 *
 * Returns the maximum number of bitflips seen in any ECC step, or a
 * negative error code.
 */
static int nand_read_page_syndrome(struct nand_chip *chip, uint8_t *buf,
				   int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret, i, eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	int eccpadbytes = eccbytes + chip->ecc.prepad + chip->ecc.postpad;
	uint8_t *p = buf;
	uint8_t *oob = chip->oob_poi;
	unsigned int max_bitflips = 0;

	ret = nand_read_page_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int stat;

		/* Arm the ECC engine, then read the step's data. */
		chip->ecc.hwctl(chip, NAND_ECC_READ);

		ret = nand_read_data_op(chip, p, eccsize, false, false);
		if (ret)
			return ret;

		if (chip->ecc.prepad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
						false, false);
			if (ret)
				return ret;

			oob += chip->ecc.prepad;
		}

		/* Switch the engine to syndrome-read mode for the ECC bytes. */
		chip->ecc.hwctl(chip, NAND_ECC_READSYN);

		ret = nand_read_data_op(chip, oob, eccbytes, false, false);
		if (ret)
			return ret;

		stat = chip->ecc.correct(chip, p, oob, NULL);

		oob += eccbytes;

		if (chip->ecc.postpad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
						false, false);
			if (ret)
				return ret;

			oob += chip->ecc.postpad;
		}

		if (stat == -EBADMSG &&
		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
			/* check for empty pages with bitflips */
			stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
							   oob - eccpadbytes,
							   eccpadbytes,
							   NULL, 0,
							   chip->ecc.strength);
		}

		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}

	/* Calculate remaining oob bytes */
	i = mtd->oobsize - (oob - chip->oob_poi);
	if (i) {
		ret = nand_read_data_op(chip, oob, i, false, false);
		if (ret)
			return ret;
	}

	return max_bitflips;
}
3040
3041
3042
3043
3044
3045
3046
3047
/**
 * nand_transfer_oob - [INTERN] Transfer oob to client buffer
 * @chip: NAND chip object
 * @oob: oob destination address
 * @ops: oob ops structure
 * @len: size of oob to transfer
 *
 * Copies @len OOB bytes from chip->oob_poi to @oob, honoring the transfer
 * mode in @ops->mode. Returns the advanced destination pointer (@oob + @len).
 * BUGs on unexpected modes / extraction failures (callers are expected to
 * have validated the mode and length).
 */
static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
				  struct mtd_oob_ops *ops, size_t len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	switch (ops->mode) {

	case MTD_OPS_PLACE_OOB:
	case MTD_OPS_RAW:
		/* Raw/place modes: straight copy from the requested offset. */
		memcpy(oob, chip->oob_poi + ops->ooboffs, len);
		return oob + len;

	case MTD_OPS_AUTO_OOB:
		/* Auto mode: extract only the free (non-ECC) OOB bytes. */
		ret = mtd_ooblayout_get_databytes(mtd, oob, chip->oob_poi,
						  ops->ooboffs, len);
		BUG_ON(ret);
		return oob + len;

	default:
		BUG();
	}
	return NULL;
}
3072
3073
3074
3075
3076
3077
3078
3079
3080
3081
3082static int nand_setup_read_retry(struct nand_chip *chip, int retry_mode)
3083{
3084 pr_debug("setting READ RETRY mode %d\n", retry_mode);
3085
3086 if (retry_mode >= chip->read_retries)
3087 return -EINVAL;
3088
3089 if (!chip->ops.setup_read_retry)
3090 return -EOPNOTSUPP;
3091
3092 return chip->ops.setup_read_retry(chip, retry_mode);
3093}
3094
/*
 * Wait until the chip is ready again after a read, but only for chips that
 * declare NAND_NEED_READRDY. The timeout is derived from tR_max (converted
 * from picoseconds to milliseconds); a failure only triggers a WARN.
 */
static void nand_wait_readrdy(struct nand_chip *chip)
{
	const struct nand_sdr_timings *sdr;

	if (!(chip->options & NAND_NEED_READRDY))
		return;

	sdr = nand_get_sdr_timings(nand_get_interface_config(chip));
	WARN_ON(nand_wait_rdy_op(chip, PSEC_TO_MSEC(sdr->tR_max), 0));
}
3105
3106
3107
3108
3109
3110
3111
3112
3113
/**
 * nand_do_read_ops - [INTERN] Read data with ECC
 * @chip: NAND chip object
 * @from: offset to read from
 * @ops: oob ops structure
 *
 * Internal read loop: iterates page by page, choosing raw/subpage/full-page
 * read paths, serving hits from the page cache, bouncing through the
 * internal buffer when needed, transferring OOB data, and driving the
 * vendor read-retry mechanism on ECC failures.
 *
 * Returns the maximum number of bitflips, -EBADMSG on uncorrectable errors,
 * or another negative error code.
 */
static int nand_do_read_ops(struct nand_chip *chip, loff_t from,
			    struct mtd_oob_ops *ops)
{
	int chipnr, page, realpage, col, bytes, aligned, oob_required;
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret = 0;
	uint32_t readlen = ops->len;
	uint32_t oobreadlen = ops->ooblen;
	uint32_t max_oobsize = mtd_oobavail(mtd, ops);

	uint8_t *bufpoi, *oob, *buf;
	int use_bounce_buf;
	unsigned int max_bitflips = 0;
	int retry_mode = 0;
	bool ecc_fail = false;

	/* Select the die containing @from. */
	chipnr = (int)(from >> chip->chip_shift);
	nand_select_target(chip, chipnr);

	realpage = (int)(from >> chip->page_shift);
	page = realpage & chip->pagemask;

	col = (int)(from & (mtd->writesize - 1));

	buf = ops->datbuf;
	oob = ops->oobbuf;
	oob_required = oob ? 1 : 0;

	while (1) {
		/* Snapshot stats to detect new ECC failures on this page. */
		struct mtd_ecc_stats ecc_stats = mtd->ecc_stats;

		bytes = min(mtd->writesize - col, readlen);
		aligned = (bytes == mtd->writesize);

		/*
		 * Bounce through the internal buffer for partial pages, or
		 * when DMA cannot address/align the caller's buffer.
		 */
		if (!aligned)
			use_bounce_buf = 1;
		else if (chip->options & NAND_USES_DMA)
			use_bounce_buf = !virt_addr_valid(buf) ||
					 !IS_ALIGNED((unsigned long)buf,
						     chip->buf_align);
		else
			use_bounce_buf = 0;

		/* Is the current page in the buffer? */
		if (realpage != chip->pagecache.page || oob) {
			bufpoi = use_bounce_buf ? chip->data_buf : buf;

			if (use_bounce_buf && aligned)
				pr_debug("%s: using read bounce buffer for buf@%p\n",
					 __func__, buf);

read_retry:
			/*
			 * Now read the page into the buffer. Absent
			 * subpage-read support, unaligned reads go through
			 * the full-page path.
			 */
			if (unlikely(ops->mode == MTD_OPS_RAW))
				ret = chip->ecc.read_page_raw(chip, bufpoi,
							      oob_required,
							      page);
			else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) &&
				 !oob)
				ret = chip->ecc.read_subpage(chip, col, bytes,
							     bufpoi, page);
			else
				ret = chip->ecc.read_page(chip, bufpoi,
							  oob_required, page);
			if (ret < 0) {
				if (use_bounce_buf)
					/* Invalidate page cache */
					chip->pagecache.page = -1;
				break;
			}

			/*
			 * Copy data out of the bounce buffer and decide
			 * whether the page is worth caching (whole page, no
			 * OOB, no failures, non-raw).
			 */
			if (use_bounce_buf) {
				if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
				    !(mtd->ecc_stats.failed - ecc_stats.failed) &&
				    (ops->mode != MTD_OPS_RAW)) {
					chip->pagecache.page = realpage;
					chip->pagecache.bitflips = ret;
				} else {
					/* Invalidate page cache */
					chip->pagecache.page = -1;
				}
				memcpy(buf, bufpoi + col, bytes);
			}

			if (unlikely(oob)) {
				int toread = min(oobreadlen, max_oobsize);

				if (toread) {
					oob = nand_transfer_oob(chip, oob, ops,
								toread);
					oobreadlen -= toread;
				}
			}

			nand_wait_readrdy(chip);

			if (mtd->ecc_stats.failed - ecc_stats.failed) {
				if (retry_mode + 1 < chip->read_retries) {
					retry_mode++;
					ret = nand_setup_read_retry(chip,
								    retry_mode);
					if (ret < 0)
						break;

					/* Reset stats and re-read the page. */
					mtd->ecc_stats = ecc_stats;
					goto read_retry;
				} else {
					/* No more retry modes; real failure */
					ecc_fail = true;
				}
			}

			buf += bytes;
			max_bitflips = max_t(unsigned int, max_bitflips, ret);
		} else {
			/* Page-cache hit: serve straight from data_buf. */
			memcpy(buf, chip->data_buf + col, bytes);
			buf += bytes;
			max_bitflips = max_t(unsigned int, max_bitflips,
					     chip->pagecache.bitflips);
		}

		readlen -= bytes;

		/* Reset to retry mode 0 before moving to the next page. */
		if (retry_mode) {
			ret = nand_setup_read_retry(chip, 0);
			if (ret < 0)
				break;
			retry_mode = 0;
		}

		if (!readlen)
			break;

		/* For subsequent reads align to page boundary */
		col = 0;
		/* Increment page address */
		realpage++;

		page = realpage & chip->pagemask;
		/* Check, if we cross a chip boundary */
		if (!page) {
			chipnr++;
			nand_deselect_target(chip);
			nand_select_target(chip, chipnr);
		}
	}
	nand_deselect_target(chip);

	ops->retlen = ops->len - (size_t) readlen;
	if (oob)
		ops->oobretlen = ops->ooblen - oobreadlen;

	if (ret < 0)
		return ret;

	if (ecc_fail)
		return -EBADMSG;

	return max_bitflips;
}
3283
3284
3285
3286
3287
3288
/**
 * nand_read_oob_std - Default OOB data read function
 * @chip: NAND chip object
 * @page: page number to read
 *
 * Reads the whole spare area of @page into chip->oob_poi.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_read_oob_std(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
}
EXPORT_SYMBOL(nand_read_oob_std);
3296
3297
3298
3299
3300
3301
3302
3303static int nand_read_oob_syndrome(struct nand_chip *chip, int page)
3304{
3305 struct mtd_info *mtd = nand_to_mtd(chip);
3306 int length = mtd->oobsize;
3307 int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
3308 int eccsize = chip->ecc.size;
3309 uint8_t *bufpoi = chip->oob_poi;
3310 int i, toread, sndrnd = 0, pos, ret;
3311
3312 ret = nand_read_page_op(chip, page, chip->ecc.size, NULL, 0);
3313 if (ret)
3314 return ret;
3315
3316 for (i = 0; i < chip->ecc.steps; i++) {
3317 if (sndrnd) {
3318 int ret;
3319
3320 pos = eccsize + i * (eccsize + chunk);
3321 if (mtd->writesize > 512)
3322 ret = nand_change_read_column_op(chip, pos,
3323 NULL, 0,
3324 false);
3325 else
3326 ret = nand_read_page_op(chip, page, pos, NULL,
3327 0);
3328
3329 if (ret)
3330 return ret;
3331 } else
3332 sndrnd = 1;
3333 toread = min_t(int, length, chunk);
3334
3335 ret = nand_read_data_op(chip, bufpoi, toread, false, false);
3336 if (ret)
3337 return ret;
3338
3339 bufpoi += toread;
3340 length -= toread;
3341 }
3342 if (length > 0) {
3343 ret = nand_read_data_op(chip, bufpoi, length, false, false);
3344 if (ret)
3345 return ret;
3346 }
3347
3348 return 0;
3349}
3350
3351
3352
3353
3354
3355
/**
 * nand_write_oob_std - Default OOB data write function
 * @chip: NAND chip object
 * @page: page number to write
 *
 * Programs the whole spare area of @page from chip->oob_poi (the write
 * starts at column mtd->writesize, i.e. right after the data area).
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_write_oob_std(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
				 mtd->oobsize);
}
EXPORT_SYMBOL(nand_write_oob_std);
3364
3365
3366
3367
3368
3369
3370
/**
 * nand_write_oob_syndrome - OOB write for hardware ECC with syndrome placement
 * @chip: NAND chip object
 * @page: page number to write
 *
 * Writes the OOB chunk by chunk into the interleaved layout, skipping over
 * the data chunks either by changing the write column (large-page devices)
 * or by padding with 0xff (small-page devices, which cannot reposition).
 *
 * Returns 0 on success, a negative error code otherwise.
 */
static int nand_write_oob_syndrome(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
	int eccsize = chip->ecc.size, length = mtd->oobsize;
	int ret, i, len, pos, sndcmd = 0, steps = chip->ecc.steps;
	const uint8_t *bufpoi = chip->oob_poi;

	/*
	 * Layout is either:
	 *   data-ecc-data-ecc ... ecc-oob
	 * or (with pads):
	 *   data-pad-ecc-pad-data-pad ... ecc-pad-oob
	 * Without pads the whole OOB sits after the last chunk, so a single
	 * linear write starting there suffices (steps forced to 0).
	 */
	if (!chip->ecc.prepad && !chip->ecc.postpad) {
		pos = steps * (eccsize + chunk);
		steps = 0;
	} else
		pos = eccsize;

	ret = nand_prog_page_begin_op(chip, page, pos, NULL, 0);
	if (ret)
		return ret;

	for (i = 0; i < steps; i++) {
		if (sndcmd) {
			if (mtd->writesize <= 512) {
				/* Pad over the data chunk with 0xff. */
				uint32_t fill = 0xFFFFFFFF;

				len = eccsize;
				while (len > 0) {
					int num = min_t(int, len, 4);

					ret = nand_write_data_op(chip, &fill,
								 num, false);
					if (ret)
						return ret;

					len -= num;
				}
			} else {
				/* Jump the write column past the data chunk. */
				pos = eccsize + i * (eccsize + chunk);
				ret = nand_change_write_column_op(chip, pos,
								  NULL, 0,
								  false);
				if (ret)
					return ret;
			}
		} else
			sndcmd = 1;
		len = min_t(int, length, chunk);

		ret = nand_write_data_op(chip, bufpoi, len, false);
		if (ret)
			return ret;

		bufpoi += len;
		length -= len;
	}
	if (length > 0) {
		/* Remaining OOB bytes after the last interleaved chunk. */
		ret = nand_write_data_op(chip, bufpoi, length, false);
		if (ret)
			return ret;
	}

	return nand_prog_page_end_op(chip);
}
3437
3438
3439
3440
3441
3442
3443
3444
3445
/**
 * nand_do_read_oob - [INTERN] Read out-of-band data
 * @chip: NAND chip object
 * @from: offset to read from
 * @ops: oob operations description structure
 *
 * Reads OOB data from one or more pages, crossing chip boundaries as
 * needed. Returns the maximum number of bitflips seen in any page, a
 * negative error code, or -EBADMSG if new uncorrectable ECC errors were
 * recorded during the read.
 */
static int nand_do_read_oob(struct nand_chip *chip, loff_t from,
			    struct mtd_oob_ops *ops)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int max_bitflips = 0;
	int page, realpage, chipnr;
	struct mtd_ecc_stats stats;
	int readlen = ops->ooblen;
	int len;
	uint8_t *buf = ops->oobbuf;
	int ret = 0;

	pr_debug("%s: from = 0x%08Lx, len = %i\n",
			__func__, (unsigned long long)from, readlen);

	/* Snapshot ECC stats so new failures can be detected at the end. */
	stats = mtd->ecc_stats;

	len = mtd_oobavail(mtd, ops);

	chipnr = (int)(from >> chip->chip_shift);
	nand_select_target(chip, chipnr);

	/* Shift to get the first page */
	realpage = (int)(from >> chip->page_shift);
	page = realpage & chip->pagemask;

	while (1) {
		if (ops->mode == MTD_OPS_RAW)
			ret = chip->ecc.read_oob_raw(chip, page);
		else
			ret = chip->ecc.read_oob(chip, page);

		if (ret < 0)
			break;

		len = min(len, readlen);
		buf = nand_transfer_oob(chip, buf, ops, len);

		nand_wait_readrdy(chip);

		/* read_oob returns the bitflip count on success. */
		max_bitflips = max_t(unsigned int, max_bitflips, ret);

		readlen -= len;
		if (!readlen)
			break;

		/* Increment page address */
		realpage++;

		page = realpage & chip->pagemask;
		/* Check, if we cross a chip boundary */
		if (!page) {
			chipnr++;
			nand_deselect_target(chip);
			nand_select_target(chip, chipnr);
		}
	}
	nand_deselect_target(chip);

	ops->oobretlen = ops->ooblen - readlen;

	if (ret < 0)
		return ret;

	/* Any new uncorrectable error during this read => -EBADMSG. */
	if (mtd->ecc_stats.failed - stats.failed)
		return -EBADMSG;

	return max_bitflips;
}
3515
3516
3517
3518
3519
3520
3521
3522
3523
3524static int nand_read_oob(struct mtd_info *mtd, loff_t from,
3525 struct mtd_oob_ops *ops)
3526{
3527 struct nand_chip *chip = mtd_to_nand(mtd);
3528 int ret;
3529
3530 ops->retlen = 0;
3531
3532 if (ops->mode != MTD_OPS_PLACE_OOB &&
3533 ops->mode != MTD_OPS_AUTO_OOB &&
3534 ops->mode != MTD_OPS_RAW)
3535 return -ENOTSUPP;
3536
3537 ret = nand_get_device(chip);
3538 if (ret)
3539 return ret;
3540
3541 if (!ops->datbuf)
3542 ret = nand_do_read_oob(chip, from, ops);
3543 else
3544 ret = nand_do_read_ops(chip, from, ops);
3545
3546 nand_release_device(chip);
3547 return ret;
3548}
3549
3550
3551
3552
3553
3554
3555
3556
3557
3558
/**
 * nand_write_page_raw_notsupp - dummy raw page write function
 * @chip: NAND chip object
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * Placeholder for controllers that cannot perform raw page accesses.
 * Always returns -ENOTSUPP.
 */
int nand_write_page_raw_notsupp(struct nand_chip *chip, const u8 *buf,
				int oob_required, int page)
{
	return -ENOTSUPP;
}
3564
3565
3566
3567
3568
3569
3570
3571
3572
3573
/**
 * nand_write_page_raw - Default raw page write function
 * @chip: NAND chip object
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * Programs the data area (and optionally the OOB area from chip->oob_poi)
 * without any ECC processing.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
			int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	ret = nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
	if (ret)
		return ret;

	if (oob_required) {
		/* Append the OOB bytes right after the data area. */
		ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize,
					 false);
		if (ret)
			return ret;
	}

	return nand_prog_page_end_op(chip);
}
EXPORT_SYMBOL(nand_write_page_raw);
3594
3595
3596
3597
3598
3599
3600
3601
3602
3603
3604
3605
3606
3607
3608
3609
3610
/**
 * nand_monolithic_write_page_raw - Monolithic page write in raw mode
 * @chip: NAND chip object
 * @buf: data buffer to write
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * Issues a single program operation covering data (and OOB when
 * @oob_required). When the caller's buffer is not the internal one, data
 * is first copied into chip->data_buf.
 *
 * NOTE(review): the single @size-byte program covering data + OOB assumes
 * chip->oob_poi is laid out contiguously after the data in chip->data_buf —
 * TODO confirm against the buffer allocation in this file.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_monolithic_write_page_raw(struct nand_chip *chip, const u8 *buf,
				   int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int size = mtd->writesize;
	u8 *write_buf = (u8 *)buf;

	if (oob_required) {
		size += mtd->oobsize;

		if (buf != chip->data_buf) {
			write_buf = nand_get_data_buf(chip);
			memcpy(write_buf, buf, mtd->writesize);
		}
	}

	return nand_prog_page_op(chip, page, 0, write_buf, size);
}
EXPORT_SYMBOL(nand_monolithic_write_page_raw);
3630
3631
3632
3633
3634
3635
3636
3637
3638
3639
/**
 * nand_write_page_raw_syndrome - Raw page write for syndrome layouts
 * @chip: NAND chip object
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB (unused here; OOB bytes
 *		  are always taken from chip->oob_poi by the layout)
 * @page: page number to write
 *
 * Writes data and OOB interleaved per ECC step
 * (data | prepad | ecc | postpad per step, remaining OOB at the end),
 * with no ECC computation — the OOB content comes verbatim from
 * chip->oob_poi.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
static int nand_write_page_raw_syndrome(struct nand_chip *chip,
					const uint8_t *buf, int oob_required,
					int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	uint8_t *oob = chip->oob_poi;
	int steps, size, ret;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	for (steps = chip->ecc.steps; steps > 0; steps--) {
		/* Data chunk of this ECC step. */
		ret = nand_write_data_op(chip, buf, eccsize, false);
		if (ret)
			return ret;

		buf += eccsize;

		if (chip->ecc.prepad) {
			ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
						 false);
			if (ret)
				return ret;

			oob += chip->ecc.prepad;
		}

		/* ECC bytes come straight from oob_poi (raw mode). */
		ret = nand_write_data_op(chip, oob, eccbytes, false);
		if (ret)
			return ret;

		oob += eccbytes;

		if (chip->ecc.postpad) {
			ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
						 false);
			if (ret)
				return ret;

			oob += chip->ecc.postpad;
		}
	}

	/* Whatever OOB is left after the last interleaved chunk. */
	size = mtd->oobsize - (oob - chip->oob_poi);
	if (size) {
		ret = nand_write_data_op(chip, oob, size, false);
		if (ret)
			return ret;
	}

	return nand_prog_page_end_op(chip);
}
3695
3696
3697
3698
3699
3700
3701
/**
 * nand_write_page_swecc - Software ECC based page write function
 * @chip: NAND chip object
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * Computes the ECC in software for every step, places the ECC bytes into
 * chip->oob_poi according to the OOB layout, then delegates the actual
 * programming to the raw page write path (OOB always included).
 *
 * Returns 0 on success, a negative error code otherwise.
 */
static int nand_write_page_swecc(struct nand_chip *chip, const uint8_t *buf,
				 int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	const uint8_t *p = buf;

	/* Software ECC calculation, one step at a time */
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
		chip->ecc.calculate(chip, p, &ecc_calc[i]);

	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	return chip->ecc.write_page_raw(chip, buf, 1, page);
}
3723
3724
3725
3726
3727
3728
3729
3730
/**
 * nand_write_page_hwecc - Hardware ECC based page write function
 * @chip: NAND chip object
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * Streams the data one ECC step at a time, letting the controller compute
 * the ECC (hwctl/calculate), places the resulting ECC bytes into
 * chip->oob_poi per the OOB layout, writes the OOB, and finishes the
 * program operation.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
static int nand_write_page_hwecc(struct nand_chip *chip, const uint8_t *buf,
				 int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	const uint8_t *p = buf;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		/* Arm the ECC engine, write the step, collect the ECC. */
		chip->ecc.hwctl(chip, NAND_ECC_WRITE);

		ret = nand_write_data_op(chip, p, eccsize, false);
		if (ret)
			return ret;

		chip->ecc.calculate(chip, p, &ecc_calc[i]);
	}

	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
	if (ret)
		return ret;

	return nand_prog_page_end_op(chip);
}
3766
3767
3768
3769
3770
3771
3772
3773
3774
3775
3776
/**
 * nand_write_subpage_hwecc - Hardware ECC based subpage write
 * @chip: NAND chip object
 * @offset: column address of subpage within the page
 * @data_len: data length
 * @buf: data buffer (full page; only [offset, offset+data_len) is "real")
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * Writes the full page but only computes real ECC for the steps covered by
 * the subpage; steps outside it get 0xff ECC (and 0xff OOB), so they stay
 * unprogrammed on flash.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
static int nand_write_subpage_hwecc(struct nand_chip *chip, uint32_t offset,
				    uint32_t data_len, const uint8_t *buf,
				    int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	uint8_t *oob_buf = chip->oob_poi;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	int ecc_steps = chip->ecc.steps;
	uint32_t start_step = offset / ecc_size;
	uint32_t end_step = (offset + data_len - 1) / ecc_size;
	int oob_bytes = mtd->oobsize / ecc_steps;
	int step, ret;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	for (step = 0; step < ecc_steps; step++) {
		/* configure controller for WRITE access */
		chip->ecc.hwctl(chip, NAND_ECC_WRITE);

		/* write data (untouched subpages already masked by 0xff) */
		ret = nand_write_data_op(chip, buf, ecc_size, false);
		if (ret)
			return ret;

		/* mask ECC of un-touched subpages with 0xff */
		if ((step < start_step) || (step > end_step))
			memset(ecc_calc, 0xff, ecc_bytes);
		else
			chip->ecc.calculate(chip, buf, ecc_calc);

		/* mask OOB of un-touched subpages with 0xff */
		if (!oob_required || (step < start_step) || (step > end_step))
			memset(oob_buf, 0xff, oob_bytes);

		buf += ecc_size;
		ecc_calc += ecc_bytes;
		oob_buf += oob_bytes;
	}

	/* copy calculated ECC for whole page to chip->oob_poi per layout */
	ecc_calc = chip->ecc.calc_buf;
	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	/* write OOB buffer to NAND device */
	ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
	if (ret)
		return ret;

	return nand_prog_page_end_op(chip);
}
3836
3837
3838
3839
3840
3841
3842
3843
3844
3845
3846
3847
/**
 * nand_write_page_syndrome - Hardware ECC syndrome based page write
 * @chip: NAND chip object
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB (OOB is written anyway
 *		  by the interleaved layout)
 * @page: page number to write
 *
 * The hardware ECC generator expects the ECC right after the data per
 * step: data | prepad | ecc | postpad per step, remaining free OOB at
 * the end.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
static int nand_write_page_syndrome(struct nand_chip *chip, const uint8_t *buf,
				    int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	const uint8_t *p = buf;
	uint8_t *oob = chip->oob_poi;
	int ret;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		chip->ecc.hwctl(chip, NAND_ECC_WRITE);

		ret = nand_write_data_op(chip, p, eccsize, false);
		if (ret)
			return ret;

		if (chip->ecc.prepad) {
			ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
						 false);
			if (ret)
				return ret;

			oob += chip->ecc.prepad;
		}

		/* ECC computed by the engine goes directly after the data. */
		chip->ecc.calculate(chip, p, oob);

		ret = nand_write_data_op(chip, oob, eccbytes, false);
		if (ret)
			return ret;

		oob += eccbytes;

		if (chip->ecc.postpad) {
			ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
						 false);
			if (ret)
				return ret;

			oob += chip->ecc.postpad;
		}
	}

	/* Calculate remaining oob bytes */
	i = mtd->oobsize - (oob - chip->oob_poi);
	if (i) {
		ret = nand_write_data_op(chip, oob, i, false);
		if (ret)
			return ret;
	}

	return nand_prog_page_end_op(chip);
}
3907
3908
3909
3910
3911
3912
3913
3914
3915
3916
3917
3918static int nand_write_page(struct nand_chip *chip, uint32_t offset,
3919 int data_len, const uint8_t *buf, int oob_required,
3920 int page, int raw)
3921{
3922 struct mtd_info *mtd = nand_to_mtd(chip);
3923 int status, subpage;
3924
3925 if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
3926 chip->ecc.write_subpage)
3927 subpage = offset || (data_len < mtd->writesize);
3928 else
3929 subpage = 0;
3930
3931 if (unlikely(raw))
3932 status = chip->ecc.write_page_raw(chip, buf, oob_required,
3933 page);
3934 else if (subpage)
3935 status = chip->ecc.write_subpage(chip, offset, data_len, buf,
3936 oob_required, page);
3937 else
3938 status = chip->ecc.write_page(chip, buf, oob_required, page);
3939
3940 if (status < 0)
3941 return status;
3942
3943 return 0;
3944}
3945
3946#define NOTALIGNED(x) ((x & (chip->subpagesize - 1)) != 0)
3947
3948
3949
3950
3951
3952
3953
3954
3955
/**
 * nand_do_write_ops - [INTERN] NAND write with ECC
 * @chip: NAND chip object
 * @to: offset to write to
 * @ops: oob operations description structure
 *
 * NAND write with ECC. Writes page by page, bouncing partial or unaligned
 * data through chip->data_buf, filling OOB from ops->oobbuf when supplied,
 * and crossing chip boundaries as needed.
 *
 * Returns 0 on success, a negative error code otherwise; ops->retlen is
 * updated with the number of data bytes actually written.
 */
static int nand_do_write_ops(struct nand_chip *chip, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int chipnr, realpage, page, column;
	uint32_t writelen = ops->len;

	uint32_t oobwritelen = ops->ooblen;
	uint32_t oobmaxlen = mtd_oobavail(mtd, ops);

	uint8_t *oob = ops->oobbuf;
	uint8_t *buf = ops->datbuf;
	int ret;
	int oob_required = oob ? 1 : 0;

	ops->retlen = 0;
	if (!writelen)
		return 0;

	/* Reject writes, which are not subpage aligned */
	if (NOTALIGNED(to) || NOTALIGNED(ops->len)) {
		pr_notice("%s: attempt to write non page aligned data\n",
			   __func__);
		return -EINVAL;
	}

	column = to & (mtd->writesize - 1);

	chipnr = (int)(to >> chip->chip_shift);
	nand_select_target(chip, chipnr);

	/* Check, if it is write protected */
	if (nand_check_wp(chip)) {
		ret = -EIO;
		goto err_out;
	}

	realpage = (int)(to >> chip->page_shift);
	page = realpage & chip->pagemask;

	/* Invalidate the page cache, when we write to the cached page */
	if (to <= ((loff_t)chip->pagecache.page << chip->page_shift) &&
	    ((loff_t)chip->pagecache.page << chip->page_shift) < (to + ops->len))
		chip->pagecache.page = -1;

	/* Don't allow multipage oob writes with offset */
	if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) {
		ret = -EINVAL;
		goto err_out;
	}

	while (1) {
		int bytes = mtd->writesize;
		uint8_t *wbuf = buf;
		int use_bounce_buf;
		int part_pagewr = (column || writelen < mtd->writesize);

		if (part_pagewr)
			use_bounce_buf = 1;
		else if (chip->options & NAND_USES_DMA)
			use_bounce_buf = !virt_addr_valid(buf) ||
					 !IS_ALIGNED((unsigned long)buf,
						     chip->buf_align);
		else
			use_bounce_buf = 0;

		/*
		 * Copy the data from the initial buffer when doing partial
		 * page writes or when a bounce buffer is required (DMA on a
		 * vmalloc'ed or misaligned buffer).
		 */
		if (use_bounce_buf) {
			pr_debug("%s: using write bounce buffer for buf@%p\n",
					 __func__, buf);
			if (part_pagewr)
				bytes = min_t(int, bytes - column, writelen);
			wbuf = nand_get_data_buf(chip);
			/* Untouched bytes stay 0xff (no programming). */
			memset(wbuf, 0xff, mtd->writesize);
			memcpy(&wbuf[column], buf, bytes);
		}

		if (unlikely(oob)) {
			size_t len = min(oobwritelen, oobmaxlen);
			oob = nand_fill_oob(chip, oob, len, ops);
			oobwritelen -= len;
		} else {
			/* We still need to erase leftover OOB data */
			memset(chip->oob_poi, 0xff, mtd->oobsize);
		}

		ret = nand_write_page(chip, column, bytes, wbuf,
				      oob_required, page,
				      (ops->mode == MTD_OPS_RAW));
		if (ret)
			break;

		writelen -= bytes;
		if (!writelen)
			break;

		column = 0;
		buf += bytes;
		realpage++;

		page = realpage & chip->pagemask;
		/* Check, if we cross a chip boundary */
		if (!page) {
			chipnr++;
			nand_deselect_target(chip);
			nand_select_target(chip, chipnr);
		}
	}

	ops->retlen = ops->len - writelen;
	if (unlikely(oob))
		ops->oobretlen = ops->ooblen;

err_out:
	nand_deselect_target(chip);
	return ret;
}
4076
4077
4078
4079
4080
4081
4082
4083
4084
4085
4086
4087
/**
 * panic_nand_write - [MTD Interface] NAND write with ECC, panic-safe
 * @mtd: MTD device structure
 * @to: offset to write to
 * @len: number of bytes to write
 * @retlen: pointer to variable to store the number of written bytes
 * @buf: the data to write
 *
 * NAND write used in panic context (e.g. mtdoops). No locking is taken;
 * the chip is instead polled until ready before the write is issued.
 */
static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
			    size_t *retlen, const uint8_t *buf)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int chipnr = (int)(to >> chip->chip_shift);
	struct mtd_oob_ops ops;
	int ret;

	nand_select_target(chip, chipnr);

	/* Wait for the device to get ready (cannot sleep in panic context) */
	panic_nand_wait(chip, 400);

	memset(&ops, 0, sizeof(ops));
	ops.len = len;
	ops.datbuf = (uint8_t *)buf;
	ops.mode = MTD_OPS_PLACE_OOB;

	ret = nand_do_write_ops(chip, to, &ops);

	*retlen = ops.retlen;
	return ret;
}
4111
4112
4113
4114
4115
4116
4117
4118static int nand_write_oob(struct mtd_info *mtd, loff_t to,
4119 struct mtd_oob_ops *ops)
4120{
4121 struct nand_chip *chip = mtd_to_nand(mtd);
4122 int ret;
4123
4124 ops->retlen = 0;
4125
4126 ret = nand_get_device(chip);
4127 if (ret)
4128 return ret;
4129
4130 switch (ops->mode) {
4131 case MTD_OPS_PLACE_OOB:
4132 case MTD_OPS_AUTO_OOB:
4133 case MTD_OPS_RAW:
4134 break;
4135
4136 default:
4137 goto out;
4138 }
4139
4140 if (!ops->datbuf)
4141 ret = nand_do_write_oob(chip, to, ops);
4142 else
4143 ret = nand_do_write_ops(chip, to, ops);
4144
4145out:
4146 nand_release_device(chip);
4147 return ret;
4148}
4149
4150
4151
4152
4153
4154
4155
4156
/**
 * nand_erase - [MTD Interface] erase block(s)
 * @mtd: MTD device structure
 * @instr: erase instruction
 *
 * Erase one ore more blocks, disallowing erases on the bad block table
 * area (allowbbt = 0).
 */
static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	return nand_erase_nand(mtd_to_nand(mtd), instr, 0);
}
4161
4162
4163
4164
4165
4166
4167
4168
4169
/**
 * nand_erase_nand - [INTERN] erase block(s)
 * @chip: NAND chip object
 * @instr: erase instruction
 * @allowbbt: allow erasing the bbt area
 *
 * Erase one or more blocks. Refuses to erase bad blocks (unless @allowbbt),
 * invalidates the page cache when it points into an erased block, and
 * crosses chip boundaries as needed.
 *
 * Returns 0 on success, a negative error code otherwise; on failure
 * instr->fail_addr records the first failing block.
 */
int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr,
		    int allowbbt)
{
	int page, pages_per_block, ret, chipnr;
	loff_t len;

	pr_debug("%s: start = 0x%012llx, len = %llu\n",
			__func__, (unsigned long long)instr->addr,
			(unsigned long long)instr->len);

	if (check_offs_len(chip, instr->addr, instr->len))
		return -EINVAL;

	/* Grab the lock and see if the device is available */
	ret = nand_get_device(chip);
	if (ret)
		return ret;

	/* Shift to get first page */
	page = (int)(instr->addr >> chip->page_shift);
	chipnr = (int)(instr->addr >> chip->chip_shift);

	/* Calculate pages in each block */
	pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);

	/* Select the NAND device */
	nand_select_target(chip, chipnr);

	/* Check, if it is write protected */
	if (nand_check_wp(chip)) {
		pr_debug("%s: device is write protected!\n",
				__func__);
		ret = -EIO;
		goto erase_exit;
	}

	/* Loop through the blocks */
	len = instr->len;

	while (len) {
		/* Check if we have a bad block, we do not erase bad blocks! */
		if (nand_block_checkbad(chip, ((loff_t) page) <<
					chip->page_shift, allowbbt)) {
			pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
				    __func__, page);
			ret = -EIO;
			goto erase_exit;
		}

		/*
		 * Invalidate the page cache, if we erase the block which
		 * contains the current cached page.
		 */
		if (page <= chip->pagecache.page && chip->pagecache.page <
		    (page + pages_per_block))
			chip->pagecache.page = -1;

		ret = nand_erase_op(chip, (page & chip->pagemask) >>
				    (chip->phys_erase_shift - chip->page_shift));
		if (ret) {
			pr_debug("%s: failed erase, page 0x%08x\n",
					__func__, page);
			instr->fail_addr =
				((loff_t)page << chip->page_shift);
			goto erase_exit;
		}

		/* Increment page address and decrement length */
		len -= (1ULL << chip->phys_erase_shift);
		page += pages_per_block;

		/* Check, if we cross a chip boundary */
		if (len && !(page & chip->pagemask)) {
			chipnr++;
			nand_deselect_target(chip);
			nand_select_target(chip, chipnr);
		}
	}

	ret = 0;
erase_exit:

	/* Deselect and wake up anyone waiting on the device */
	nand_deselect_target(chip);
	nand_release_device(chip);

	/* Return more or less happy */
	return ret;
}
4259
4260
4261
4262
4263
4264
4265
/**
 * nand_sync - [MTD Interface] sync
 * @mtd: MTD device structure
 *
 * Sync is actually a wait for chip ready function: grabbing the device
 * lock blocks until all pending operations finished, then it is released.
 */
static void nand_sync(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	pr_debug("%s: called\n", __func__);

	/* Grab the lock and see if the device is available */
	WARN_ON(nand_get_device(chip));
	/* Release it and go back */
	nand_release_device(chip);
}
4277
4278
4279
4280
4281
4282
/**
 * nand_block_isbad - [MTD Interface] check if block at offset is bad
 * @mtd: MTD device structure
 * @offs: offset relative to mtd start
 *
 * Returns a positive value if the block is bad, 0 if it is good, or a
 * negative error code if the device could not be acquired.
 */
static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int chipnr = (int)(offs >> chip->chip_shift);
	int ret;

	/* Select the NAND device */
	ret = nand_get_device(chip);
	if (ret)
		return ret;

	nand_select_target(chip, chipnr);

	ret = nand_block_checkbad(chip, offs, 0);

	nand_deselect_target(chip);
	nand_release_device(chip);

	return ret;
}
4303
4304
4305
4306
4307
4308
4309static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
4310{
4311 int ret;
4312
4313 ret = nand_block_isbad(mtd, ofs);
4314 if (ret) {
4315
4316 if (ret > 0)
4317 return 0;
4318 return ret;
4319 }
4320
4321 return nand_block_markbad_lowlevel(mtd_to_nand(mtd), ofs);
4322}
4323
4324
4325
4326
4327
4328
4329
/**
 * nand_suspend - [MTD Interface] Suspend the NAND flash
 * @mtd: MTD device structure
 *
 * Invokes the (optional) driver suspend hook under chip->lock and marks
 * the chip suspended on success.
 *
 * Returns 0 on success, the hook's error code otherwise.
 */
static int nand_suspend(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int ret = 0;

	mutex_lock(&chip->lock);
	if (chip->ops.suspend)
		ret = chip->ops.suspend(chip);
	if (!ret)
		chip->suspended = 1;
	mutex_unlock(&chip->lock);

	return ret;
}
4344
4345
4346
4347
4348
/**
 * nand_resume - [MTD Interface] Resume the NAND flash
 * @mtd: MTD device structure
 *
 * Invokes the (optional) driver resume hook under chip->lock and clears
 * the suspended flag. Complains if the chip was not suspended.
 */
static void nand_resume(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	mutex_lock(&chip->lock);
	if (chip->suspended) {
		if (chip->ops.resume)
			chip->ops.resume(chip);
		chip->suspended = 0;
	} else {
		pr_err("%s called for a chip which is not in suspended state\n",
			__func__);
	}
	mutex_unlock(&chip->lock);
}
4364
4365
4366
4367
4368
4369
/**
 * nand_shutdown - [MTD Interface] Finish the current NAND operation and
 *                 prevent further operations
 * @mtd: MTD device structure
 *
 * Shutdown is implemented as a suspend: pending work completes and the
 * chip is left in the suspended state.
 */
static void nand_shutdown(struct mtd_info *mtd)
{
	nand_suspend(mtd);
}
4374
4375
4376
4377
4378
4379
4380
4381static int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
4382{
4383 struct nand_chip *chip = mtd_to_nand(mtd);
4384
4385 if (!chip->ops.lock_area)
4386 return -ENOTSUPP;
4387
4388 return chip->ops.lock_area(chip, ofs, len);
4389}
4390
4391
4392
4393
4394
4395
4396
4397static int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
4398{
4399 struct nand_chip *chip = mtd_to_nand(mtd);
4400
4401 if (!chip->ops.unlock_area)
4402 return -ENOTSUPP;
4403
4404 return chip->ops.unlock_area(chip, ofs, len);
4405}
4406
4407
/* Set default functions */
static void nand_set_defaults(struct nand_chip *chip)
{
	/* If no controller is provided, use the dummy, host-managed one. */
	if (!chip->controller) {
		chip->controller = &chip->legacy.dummy_controller;
		nand_controller_init(chip->controller);
	}

	nand_legacy_set_defaults(chip);

	/* Default buffer alignment: none required. */
	if (!chip->buf_align)
		chip->buf_align = 1;
}
4421
4422
4423void sanitize_string(uint8_t *s, size_t len)
4424{
4425 ssize_t i;
4426
4427
4428 s[len - 1] = 0;
4429
4430
4431 for (i = 0; i < len - 1; i++) {
4432 if (s[i] < ' ' || s[i] > 127)
4433 s[i] = '?';
4434 }
4435
4436
4437 strim(s);
4438}
4439
4440
4441
4442
4443
4444
4445
4446
4447
4448
4449
4450
4451static int nand_id_has_period(u8 *id_data, int arrlen, int period)
4452{
4453 int i, j;
4454 for (i = 0; i < period; i++)
4455 for (j = i + period; j < arrlen; j += period)
4456 if (id_data[i] != id_data[j])
4457 return 0;
4458 return 1;
4459}
4460
4461
4462
4463
4464
4465
4466
4467
4468
4469static int nand_id_len(u8 *id_data, int arrlen)
4470{
4471 int last_nonzero, period;
4472
4473
4474 for (last_nonzero = arrlen - 1; last_nonzero >= 0; last_nonzero--)
4475 if (id_data[last_nonzero])
4476 break;
4477
4478
4479 if (last_nonzero < 0)
4480 return 0;
4481
4482
4483 for (period = 1; period < arrlen; period++)
4484 if (nand_id_has_period(id_data, arrlen, period))
4485 break;
4486
4487
4488 if (period < arrlen)
4489 return period;
4490
4491
4492 if (last_nonzero < arrlen - 1)
4493 return last_nonzero + 1;
4494
4495
4496 return arrlen;
4497}
4498
4499
4500static int nand_get_bits_per_cell(u8 cellinfo)
4501{
4502 int bits;
4503
4504 bits = cellinfo & NAND_CI_CELLTYPE_MSK;
4505 bits >>= NAND_CI_CELLTYPE_SHIFT;
4506 return bits + 1;
4507}
4508
4509
4510
4511
4512
4513
/*
 * Many new NAND share similar device ID codes, which represent the size of
 * the chip. The rest of the parameters must be decoded from the extended ID
 * bytes.
 */
void nand_decode_ext_id(struct nand_chip *chip)
{
	struct nand_memory_organization *memorg;
	struct mtd_info *mtd = nand_to_mtd(chip);
	int extid;
	u8 *id_data = chip->id.data;

	memorg = nanddev_get_memorg(&chip->base);

	/* The 3rd id byte holds MLC / multichip data */
	memorg->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
	/* The 4th id byte is the important one */
	extid = id_data[3];

	/* Calc pagesize: 1K << (extid[1:0]) */
	memorg->pagesize = 1024 << (extid & 0x03);
	mtd->writesize = memorg->pagesize;
	extid >>= 2;
	/* Calc oobsize: (8 or 16) bytes per 512 bytes of page */
	memorg->oobsize = (8 << (extid & 0x01)) * (mtd->writesize >> 9);
	mtd->oobsize = memorg->oobsize;
	extid >>= 2;
	/* Calc blocksize: 64KiB << (extid[1:0]) */
	memorg->pages_per_eraseblock = ((64 * 1024) << (extid & 0x03)) /
				       memorg->pagesize;
	mtd->erasesize = (64 * 1024) << (extid & 0x03);
	extid >>= 2;
	/* Get buswidth information */
	if (extid & 0x1)
		chip->options |= NAND_BUSWIDTH_16;
}
EXPORT_SYMBOL_GPL(nand_decode_ext_id);
4546
4547
4548
4549
4550
4551
/*
 * Old devices have chip data hardcoded in the device ID table. nand_decode_id
 * decodes a matching id table entry and assigns the MTD size parameters for
 * the chip.
 */
static void nand_decode_id(struct nand_chip *chip, struct nand_flash_dev *type)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_memory_organization *memorg;

	memorg = nanddev_get_memorg(&chip->base);

	memorg->pages_per_eraseblock = type->erasesize / type->pagesize;
	mtd->erasesize = type->erasesize;
	memorg->pagesize = type->pagesize;
	mtd->writesize = memorg->pagesize;
	/* OOB size: table entries carry none; 1/32 of the page is the norm. */
	memorg->oobsize = memorg->pagesize / 32;
	mtd->oobsize = memorg->oobsize;

	/* All legacy ID NAND are small-page, SLC */
	memorg->bits_per_cell = 1;
}
4569
4570
4571
4572
4573
4574
4575static void nand_decode_bbm_options(struct nand_chip *chip)
4576{
4577 struct mtd_info *mtd = nand_to_mtd(chip);
4578
4579
4580 if (mtd->writesize > 512 || (chip->options & NAND_BUSWIDTH_16))
4581 chip->badblockpos = NAND_BBM_POS_LARGE;
4582 else
4583 chip->badblockpos = NAND_BBM_POS_SMALL;
4584}
4585
4586static inline bool is_full_id_nand(struct nand_flash_dev *type)
4587{
4588 return type->id_len;
4589}
4590
4591static bool find_full_id_nand(struct nand_chip *chip,
4592 struct nand_flash_dev *type)
4593{
4594 struct nand_device *base = &chip->base;
4595 struct nand_ecc_props requirements;
4596 struct mtd_info *mtd = nand_to_mtd(chip);
4597 struct nand_memory_organization *memorg;
4598 u8 *id_data = chip->id.data;
4599
4600 memorg = nanddev_get_memorg(&chip->base);
4601
4602 if (!strncmp(type->id, id_data, type->id_len)) {
4603 memorg->pagesize = type->pagesize;
4604 mtd->writesize = memorg->pagesize;
4605 memorg->pages_per_eraseblock = type->erasesize /
4606 type->pagesize;
4607 mtd->erasesize = type->erasesize;
4608 memorg->oobsize = type->oobsize;
4609 mtd->oobsize = memorg->oobsize;
4610
4611 memorg->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
4612 memorg->eraseblocks_per_lun =
4613 DIV_ROUND_DOWN_ULL((u64)type->chipsize << 20,
4614 memorg->pagesize *
4615 memorg->pages_per_eraseblock);
4616 chip->options |= type->options;
4617 requirements.strength = NAND_ECC_STRENGTH(type);
4618 requirements.step_size = NAND_ECC_STEP(type);
4619 nanddev_set_ecc_requirements(base, &requirements);
4620
4621 chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
4622 if (!chip->parameters.model)
4623 return false;
4624
4625 return true;
4626 }
4627 return false;
4628}
4629
4630
4631
4632
4633
4634
/*
 * Manufacturer detection. Only used when the NAND is not ONFI or JEDEC
 * compliant and does not have a full-id or legacy-id entry in the
 * nand_ids table.
 */
static void nand_manufacturer_detect(struct nand_chip *chip)
{
	/*
	 * Try manufacturer detection if available and use
	 * nand_decode_ext_id() otherwise.
	 */
	if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
	    chip->manufacturer.desc->ops->detect) {
		struct nand_memory_organization *memorg;

		memorg = nanddev_get_memorg(&chip->base);

		/* The 3rd id byte holds MLC / multichip data */
		memorg->bits_per_cell = nand_get_bits_per_cell(chip->id.data[2]);
		chip->manufacturer.desc->ops->detect(chip);
	} else {
		nand_decode_ext_id(chip);
	}
}
4654
4655
4656
4657
4658
4659
4660
4661static int nand_manufacturer_init(struct nand_chip *chip)
4662{
4663 if (!chip->manufacturer.desc || !chip->manufacturer.desc->ops ||
4664 !chip->manufacturer.desc->ops->init)
4665 return 0;
4666
4667 return chip->manufacturer.desc->ops->init(chip);
4668}
4669
4670
4671
4672
4673
4674
4675
4676static void nand_manufacturer_cleanup(struct nand_chip *chip)
4677{
4678
4679 if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
4680 chip->manufacturer.desc->ops->cleanup)
4681 chip->manufacturer.desc->ops->cleanup(chip);
4682}
4683
4684static const char *
4685nand_manufacturer_name(const struct nand_manufacturer_desc *manufacturer_desc)
4686{
4687 return manufacturer_desc ? manufacturer_desc->name : "Unknown";
4688}
4689
4690
4691
4692
/*
 * nand_detect - probe and identify the NAND chip
 * @chip: NAND chip object
 * @type: optional device-id table to match against (falls back to the
 *	  built-in nand_flash_ids table when NULL)
 *
 * Reads the ID bytes, matches them against the full-id table, the
 * ONFI/JEDEC parameter pages or the legacy device-id table, then derives
 * the memory organization and the shift/mask helpers used by the core.
 * Returns 0 on success, -ENODEV when no supported device is found, or a
 * negative error code on I/O/allocation failure.
 */
static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
{
	const struct nand_manufacturer_desc *manufacturer_desc;
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_memory_organization *memorg;
	int busw, ret;
	u8 *id_data = chip->id.data;
	u8 maf_id, dev_id;
	u64 targetsize;

	/*
	 * Start from a single-plane, single-LUN layout; the detection code
	 * below may override these fields.
	 */
	memorg = nanddev_get_memorg(&chip->base);
	memorg->planes_per_lun = 1;
	memorg->luns_per_target = 1;

	/* Reset the first die before issuing READID. */
	ret = nand_reset(chip, 0);
	if (ret)
		return ret;

	/* Select the device */
	nand_select_target(chip, 0);

	/* Send the command for reading device ID */
	ret = nand_readid_op(chip, 0, id_data, 2);
	if (ret)
		return ret;

	/* Read manufacturer and device IDs */
	maf_id = id_data[0];
	dev_id = id_data[1];

	/*
	 * Read the whole ID string again and make sure the first two bytes
	 * still match: a mismatch indicates an unreliable bus/chip and we
	 * bail out with -ENODEV.
	 */
	ret = nand_readid_op(chip, 0, id_data, sizeof(chip->id.data));
	if (ret)
		return ret;

	if (id_data[0] != maf_id || id_data[1] != dev_id) {
		pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",
			maf_id, dev_id, id_data[0], id_data[1]);
		return -ENODEV;
	}

	chip->id.len = nand_id_len(id_data, ARRAY_SIZE(chip->id.data));

	/* Try to identify the manufacturer from the first ID byte. */
	manufacturer_desc = nand_get_manufacturer_desc(maf_id);
	chip->manufacturer.desc = manufacturer_desc;

	if (!type)
		type = nand_flash_ids;

	/*
	 * Save the driver-requested bus width: it is compared against the
	 * detected one after identification (see the busw check below).
	 */
	busw = chip->options & NAND_BUSWIDTH_16;

	/*
	 * Temporarily drop the 16-bit flag: identification is performed with
	 * 8-bit accesses; detection code may set it again.
	 */
	chip->options &= ~NAND_BUSWIDTH_16;

	for (; type->name != NULL; type++) {
		if (is_full_id_nand(type)) {
			if (find_full_id_nand(chip, type))
				goto ident_done;
		} else if (dev_id == type->dev_id) {
			break;
		}
	}

	if (!type->name || !type->pagesize) {
		/* Check if the chip is ONFI compliant */
		ret = nand_onfi_detect(chip);
		if (ret < 0)
			return ret;
		else if (ret)
			goto ident_done;

		/* Check if the chip is JEDEC compliant */
		ret = nand_jedec_detect(chip);
		if (ret < 0)
			return ret;
		else if (ret)
			goto ident_done;
	}

	if (!type->name)
		return -ENODEV;

	chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
	if (!chip->parameters.model)
		return -ENOMEM;

	/*
	 * Old-style entries have no pagesize: let the manufacturer driver
	 * decode the extended ID; otherwise decode from the table entry.
	 */
	if (!type->pagesize)
		nand_manufacturer_detect(chip);
	else
		nand_decode_id(chip, type);

	/* Get chip options */
	chip->options |= type->options;

	memorg->eraseblocks_per_lun =
			DIV_ROUND_DOWN_ULL((u64)type->chipsize << 20,
					   memorg->pagesize *
					   memorg->pages_per_eraseblock);

ident_done:
	if (!mtd->name)
		mtd->name = chip->parameters.model;

	if (chip->options & NAND_BUSWIDTH_AUTO) {
		WARN_ON(busw & NAND_BUSWIDTH_16);
		nand_set_defaults(chip);
	} else if (busw != (chip->options & NAND_BUSWIDTH_16)) {
		/*
		 * The driver-configured bus width disagrees with the detected
		 * one: report it and fail, the hardware driver must be fixed.
		 */
		pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
			maf_id, dev_id);
		pr_info("%s %s\n", nand_manufacturer_name(manufacturer_desc),
			mtd->name);
		pr_warn("bus width %d instead of %d bits\n", busw ? 16 : 8,
			(chip->options & NAND_BUSWIDTH_16) ? 16 : 8);
		ret = -EINVAL;

		goto free_detect_allocation;
	}

	nand_decode_bbm_options(chip);

	/* Calculate the address shift from the page size */
	chip->page_shift = ffs(mtd->writesize) - 1;

	/* Convert chipsize to number of pages per chip -1 */
	targetsize = nanddev_target_size(&chip->base);
	chip->pagemask = (targetsize >> chip->page_shift) - 1;

	chip->bbt_erase_shift = chip->phys_erase_shift =
		ffs(mtd->erasesize) - 1;
	/* ffs() on the low 32 bits, or on the high half for >=4GiB targets. */
	if (targetsize & 0xffffffff)
		chip->chip_shift = ffs((unsigned)targetsize) - 1;
	else {
		chip->chip_shift = ffs((unsigned)(targetsize >> 32));
		chip->chip_shift += 32 - 1;
	}

	/* More than 16 row-address bits need a third row address cycle. */
	if (chip->chip_shift - chip->page_shift > 16)
		chip->options |= NAND_ROW_ADDR_3;

	chip->badblockbits = 8;

	nand_legacy_adjust_cmdfunc(chip);

	pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
		maf_id, dev_id);
	pr_info("%s %s\n", nand_manufacturer_name(manufacturer_desc),
		chip->parameters.model);
	pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n",
		(int)(targetsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
		mtd->erasesize >> 10, mtd->writesize, mtd->oobsize);
	return 0;

free_detect_allocation:
	kfree(chip->parameters.model);

	return ret;
}
4879
4880static enum nand_ecc_engine_type
4881of_get_rawnand_ecc_engine_type_legacy(struct device_node *np)
4882{
4883 enum nand_ecc_legacy_mode {
4884 NAND_ECC_INVALID,
4885 NAND_ECC_NONE,
4886 NAND_ECC_SOFT,
4887 NAND_ECC_SOFT_BCH,
4888 NAND_ECC_HW,
4889 NAND_ECC_HW_SYNDROME,
4890 NAND_ECC_ON_DIE,
4891 };
4892 const char * const nand_ecc_legacy_modes[] = {
4893 [NAND_ECC_NONE] = "none",
4894 [NAND_ECC_SOFT] = "soft",
4895 [NAND_ECC_SOFT_BCH] = "soft_bch",
4896 [NAND_ECC_HW] = "hw",
4897 [NAND_ECC_HW_SYNDROME] = "hw_syndrome",
4898 [NAND_ECC_ON_DIE] = "on-die",
4899 };
4900 enum nand_ecc_legacy_mode eng_type;
4901 const char *pm;
4902 int err;
4903
4904 err = of_property_read_string(np, "nand-ecc-mode", &pm);
4905 if (err)
4906 return NAND_ECC_ENGINE_TYPE_INVALID;
4907
4908 for (eng_type = NAND_ECC_NONE;
4909 eng_type < ARRAY_SIZE(nand_ecc_legacy_modes); eng_type++) {
4910 if (!strcasecmp(pm, nand_ecc_legacy_modes[eng_type])) {
4911 switch (eng_type) {
4912 case NAND_ECC_NONE:
4913 return NAND_ECC_ENGINE_TYPE_NONE;
4914 case NAND_ECC_SOFT:
4915 case NAND_ECC_SOFT_BCH:
4916 return NAND_ECC_ENGINE_TYPE_SOFT;
4917 case NAND_ECC_HW:
4918 case NAND_ECC_HW_SYNDROME:
4919 return NAND_ECC_ENGINE_TYPE_ON_HOST;
4920 case NAND_ECC_ON_DIE:
4921 return NAND_ECC_ENGINE_TYPE_ON_DIE;
4922 default:
4923 break;
4924 }
4925 }
4926 }
4927
4928 return NAND_ECC_ENGINE_TYPE_INVALID;
4929}
4930
4931static enum nand_ecc_placement
4932of_get_rawnand_ecc_placement_legacy(struct device_node *np)
4933{
4934 const char *pm;
4935 int err;
4936
4937 err = of_property_read_string(np, "nand-ecc-mode", &pm);
4938 if (!err) {
4939 if (!strcasecmp(pm, "hw_syndrome"))
4940 return NAND_ECC_PLACEMENT_INTERLEAVED;
4941 }
4942
4943 return NAND_ECC_PLACEMENT_UNKNOWN;
4944}
4945
4946static enum nand_ecc_algo of_get_rawnand_ecc_algo_legacy(struct device_node *np)
4947{
4948 const char *pm;
4949 int err;
4950
4951 err = of_property_read_string(np, "nand-ecc-mode", &pm);
4952 if (!err) {
4953 if (!strcasecmp(pm, "soft"))
4954 return NAND_ECC_ALGO_HAMMING;
4955 else if (!strcasecmp(pm, "soft_bch"))
4956 return NAND_ECC_ALGO_BCH;
4957 }
4958
4959 return NAND_ECC_ALGO_UNKNOWN;
4960}
4961
4962static void of_get_nand_ecc_legacy_user_config(struct nand_chip *chip)
4963{
4964 struct device_node *dn = nand_get_flash_node(chip);
4965 struct nand_ecc_props *user_conf = &chip->base.ecc.user_conf;
4966
4967 if (user_conf->engine_type == NAND_ECC_ENGINE_TYPE_INVALID)
4968 user_conf->engine_type = of_get_rawnand_ecc_engine_type_legacy(dn);
4969
4970 if (user_conf->algo == NAND_ECC_ALGO_UNKNOWN)
4971 user_conf->algo = of_get_rawnand_ecc_algo_legacy(dn);
4972
4973 if (user_conf->placement == NAND_ECC_PLACEMENT_UNKNOWN)
4974 user_conf->placement = of_get_rawnand_ecc_placement_legacy(dn);
4975}
4976
4977static int of_get_nand_bus_width(struct device_node *np)
4978{
4979 u32 val;
4980
4981 if (of_property_read_u32(np, "nand-bus-width", &val))
4982 return 8;
4983
4984 switch (val) {
4985 case 8:
4986 case 16:
4987 return val;
4988 default:
4989 return -EIO;
4990 }
4991}
4992
4993static bool of_get_nand_on_flash_bbt(struct device_node *np)
4994{
4995 return of_property_read_bool(np, "nand-on-flash-bbt");
4996}
4997
/*
 * rawnand_dt_init - parse generic NAND DT properties
 * @chip: NAND chip object
 *
 * Reads bus width, boot-medium and BBT options from the chip's DT node
 * and seeds the raw NAND ECC configuration from the user-provided (DT)
 * values, falling back to the on-host default engine. Returns 0 (also
 * when there is no DT node attached).
 */
static int rawnand_dt_init(struct nand_chip *chip)
{
	struct nand_device *nand = mtd_to_nanddev(nand_to_mtd(chip));
	struct device_node *dn = nand_get_flash_node(chip);

	/* Nothing to parse without a DT node. */
	if (!dn)
		return 0;

	if (of_get_nand_bus_width(dn) == 16)
		chip->options |= NAND_BUSWIDTH_16;

	if (of_property_read_bool(dn, "nand-is-boot-medium"))
		chip->options |= NAND_IS_BOOT_MEDIUM;

	if (of_get_nand_on_flash_bbt(dn))
		chip->bbt_options |= NAND_BBT_USE_FLASH;

	/* Generic parsing first, then the legacy "nand-ecc-mode" fallback. */
	of_get_nand_ecc_user_config(nand);
	of_get_nand_ecc_legacy_user_config(chip);

	/*
	 * If neither the user nor the controller provides an engine type,
	 * default to the on-host (controller-side) engine.
	 */
	nand->ecc.defaults.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;

	/*
	 * User config takes precedence; the default is only applied when the
	 * engine type is still invalid after that.
	 */
	if (nand->ecc.user_conf.engine_type != NAND_ECC_ENGINE_TYPE_INVALID)
		chip->ecc.engine_type = nand->ecc.user_conf.engine_type;
	if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_INVALID)
		chip->ecc.engine_type = nand->ecc.defaults.engine_type;

	/* Propagate the remaining user ECC parameters to the raw NAND layer. */
	chip->ecc.placement = nand->ecc.user_conf.placement;
	chip->ecc.algo = nand->ecc.user_conf.algo;
	chip->ecc.strength = nand->ecc.user_conf.strength;
	chip->ecc.size = nand->ecc.user_conf.step_size;

	return 0;
}
5041
5042
5043
5044
5045
5046
5047
5048
5049
5050
5051
5052
5053
5054
5055
/*
 * nand_scan_ident - scan for the NAND device(s)
 * @chip: NAND chip object
 * @maxchips: number of chip selects to scan
 * @table: optional alternative NAND ID table
 *
 * Detects the first chip, then probes the remaining chip selects for
 * identical devices. On success, memorg->ntargets and mtd->size reflect
 * the number of identical chips found. Returns 0 on success or a
 * negative error code.
 */
static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips,
			   struct nand_flash_dev *table)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_memory_organization *memorg;
	int nand_maf_id, nand_dev_id;
	unsigned int i;
	int ret;

	memorg = nanddev_get_memorg(&chip->base);

	/* Mark all dies as deselected on entry. */
	chip->cur_cs = -1;

	mutex_init(&chip->lock);

	/* Start with the safe reset timings for detection. */
	chip->current_interface_config = nand_get_reset_interface_config();

	ret = rawnand_dt_init(chip);
	if (ret)
		return ret;

	if (!mtd->name && mtd->dev.parent)
		mtd->name = dev_name(mtd->dev.parent);

	/* Set the default functions */
	nand_set_defaults(chip);

	ret = nand_legacy_check_hooks(chip);
	if (ret)
		return ret;

	memorg->ntargets = maxchips;

	/* Read the flash type */
	ret = nand_detect(chip, table);
	if (ret) {
		if (!(chip->options & NAND_SCAN_SILENT_NODEV))
			pr_warn("No NAND device found\n");
		nand_deselect_target(chip);
		return ret;
	}

	nand_maf_id = chip->id.data[0];
	nand_dev_id = chip->id.data[1];

	nand_deselect_target(chip);

	/* Check for an array of identical chips on the other chip selects. */
	for (i = 1; i < maxchips; i++) {
		u8 id[2];

		/* Reset the die before trying to identify it. */
		ret = nand_reset(chip, i);
		if (ret)
			break;

		nand_select_target(chip, i);
		/* Send the command for reading device ID */
		ret = nand_readid_op(chip, 0, id, sizeof(id));
		if (ret)
			break;
		/* Stop at the first die whose ID bytes differ. */
		if (nand_maf_id != id[0] || nand_dev_id != id[1]) {
			nand_deselect_target(chip);
			break;
		}
		nand_deselect_target(chip);
	}
	if (i > 1)
		pr_info("%d chips detected\n", i);

	/* Store the number of chips and calculate the total size for mtd. */
	memorg->ntargets = i;
	mtd->size = i * nanddev_target_size(&chip->base);

	return 0;
}
5135
5136static void nand_scan_ident_cleanup(struct nand_chip *chip)
5137{
5138 kfree(chip->parameters.model);
5139 kfree(chip->parameters.onfi);
5140}
5141
/*
 * Fill in the default page/OOB accessors for a controller-side (on-host)
 * ECC engine, depending on where the ECC bytes are placed. Returns 0 on
 * success, -EINVAL when the placement is invalid or mandatory hooks are
 * missing.
 */
static int nand_set_ecc_on_host_ops(struct nand_chip *chip)
{
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	switch (ecc->placement) {
	case NAND_ECC_PLACEMENT_UNKNOWN:
	case NAND_ECC_PLACEMENT_OOB:
		/* Use the standard hwecc helpers for anything not provided. */
		if (!ecc->read_page)
			ecc->read_page = nand_read_page_hwecc;
		if (!ecc->write_page)
			ecc->write_page = nand_write_page_hwecc;
		if (!ecc->read_page_raw)
			ecc->read_page_raw = nand_read_page_raw;
		if (!ecc->write_page_raw)
			ecc->write_page_raw = nand_write_page_raw;
		if (!ecc->read_oob)
			ecc->read_oob = nand_read_oob_std;
		if (!ecc->write_oob)
			ecc->write_oob = nand_write_oob_std;
		if (!ecc->read_subpage)
			ecc->read_subpage = nand_read_subpage;
		/* Subpage write needs hwctl + calculate to be usable. */
		if (!ecc->write_subpage && ecc->hwctl && ecc->calculate)
			ecc->write_subpage = nand_write_subpage_hwecc;
		fallthrough;

	case NAND_ECC_PLACEMENT_INTERLEAVED:
		/*
		 * The generic hwecc page helpers need hwctl/calculate/correct;
		 * a driver relying on them without supplying those hooks (and
		 * no custom read/write_page of its own) cannot work.
		 */
		if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) &&
		    (!ecc->read_page ||
		     ecc->read_page == nand_read_page_hwecc ||
		     !ecc->write_page ||
		     ecc->write_page == nand_write_page_hwecc)) {
			WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
			return -EINVAL;
		}
		/* Use the standard syndrome helpers for anything not provided. */
		if (!ecc->read_page)
			ecc->read_page = nand_read_page_syndrome;
		if (!ecc->write_page)
			ecc->write_page = nand_write_page_syndrome;
		if (!ecc->read_page_raw)
			ecc->read_page_raw = nand_read_page_raw_syndrome;
		if (!ecc->write_page_raw)
			ecc->write_page_raw = nand_write_page_raw_syndrome;
		if (!ecc->read_oob)
			ecc->read_oob = nand_read_oob_syndrome;
		if (!ecc->write_oob)
			ecc->write_oob = nand_write_oob_syndrome;
		break;

	default:
		pr_warn("Invalid NAND_ECC_PLACEMENT %d\n",
			ecc->placement);
		return -EINVAL;
	}

	return 0;
}
5200
/*
 * Configure the software ECC helpers and parameters (Hamming or BCH).
 * Must only be called when the engine type is NAND_ECC_ENGINE_TYPE_SOFT.
 * Returns 0 on success or -EINVAL on unsupported/incoherent configuration.
 */
static int nand_set_ecc_soft_ops(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_device *nanddev = mtd_to_nanddev(mtd);
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	if (WARN_ON(ecc->engine_type != NAND_ECC_ENGINE_TYPE_SOFT))
		return -EINVAL;

	switch (ecc->algo) {
	case NAND_ECC_ALGO_HAMMING:
		ecc->calculate = nand_calculate_ecc;
		ecc->correct = nand_correct_data;
		ecc->read_page = nand_read_page_swecc;
		ecc->read_subpage = nand_read_subpage;
		ecc->write_page = nand_write_page_swecc;
		if (!ecc->read_page_raw)
			ecc->read_page_raw = nand_read_page_raw;
		if (!ecc->write_page_raw)
			ecc->write_page_raw = nand_write_page_raw;
		ecc->read_oob = nand_read_oob_std;
		ecc->write_oob = nand_write_oob_std;
		/* Hamming: 3 ECC bytes per 256-byte step, 1-bit correction. */
		if (!ecc->size)
			ecc->size = 256;
		ecc->bytes = 3;
		ecc->strength = 1;

		if (IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC))
			ecc->options |= NAND_ECC_SOFT_HAMMING_SM_ORDER;

		return 0;
	case NAND_ECC_ALGO_BCH:
		if (!mtd_nand_has_bch()) {
			WARN(1, "CONFIG_MTD_NAND_ECC_SW_BCH not enabled\n");
			return -EINVAL;
		}
		ecc->calculate = nand_bch_calculate_ecc;
		ecc->correct = nand_bch_correct_data;
		ecc->read_page = nand_read_page_swecc;
		ecc->read_subpage = nand_read_subpage;
		ecc->write_page = nand_write_page_swecc;
		if (!ecc->read_page_raw)
			ecc->read_page_raw = nand_read_page_raw;
		if (!ecc->write_page_raw)
			ecc->write_page_raw = nand_write_page_raw;
		ecc->read_oob = nand_read_oob_std;
		ecc->write_oob = nand_write_oob_std;

		/*
		 * If the driver did not pick a step size, default to 512-byte
		 * steps with strength 4 on large-OOB (>= 64 bytes) devices.
		 */
		if (!ecc->size && (mtd->oobsize >= 64)) {
			ecc->size = 512;
			ecc->strength = 4;
		}

		/*
		 * If no OOB layout was provided, fall back to the default
		 * large-page one (small pages must supply their own layout).
		 */
		if (!mtd->ooblayout) {
			/* handle large page devices only */
			if (mtd->oobsize < 64) {
				WARN(1, "OOB layout is required when using software BCH on small pages\n");
				return -EINVAL;
			}

			mtd_set_ooblayout(mtd, nand_get_large_page_ooblayout());

		}

		/*
		 * Strength can only be maximized when the default large-page
		 * layout is used, since only then do we know how many OOB
		 * bytes are really available for ECC.
		 */
		if (mtd->ooblayout == nand_get_large_page_ooblayout() &&
		    nanddev->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH) {
			int steps, bytes;

			/* Always prefer 1k steps over 512-byte ones. */
			ecc->size = 1024;
			steps = mtd->writesize / ecc->size;

			/* Reserve 2 bytes for the bad block marker. */
			bytes = (mtd->oobsize - 2) / steps;
			ecc->strength = bytes * 8 / fls(8 * ecc->size);
		}

		/* ecc->bytes is computed by nand_bch_init() from size/strength. */
		ecc->bytes = 0;
		ecc->priv = nand_bch_init(mtd);
		if (!ecc->priv) {
			WARN(1, "BCH ECC initialization failed!\n");
			return -EINVAL;
		}
		return 0;
	default:
		WARN(1, "Unsupported ECC algorithm!\n");
		return -EINVAL;
	}
}
5305
5306
5307
5308
5309
5310
5311
5312
5313
5314
5315
/*
 * nand_check_ecc_caps - check the sanity of preset ECC settings
 * @chip: nand chip info structure
 * @caps: ECC engine caps info structure
 * @oobavail: OOB size that the ECC engine can use
 *
 * When both ECC step size and strength are already set, verify that the
 * combination is supported by the controller and that the resulting ECC
 * bytes fit in @oobavail. On success chip->ecc.bytes is filled in.
 * Returns 0 on success, -ENOSPC when the ECC bytes do not fit, or
 * -ENOTSUPP when the combination is not supported.
 */
static int
nand_check_ecc_caps(struct nand_chip *chip,
		    const struct nand_ecc_caps *caps, int oobavail)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	const struct nand_ecc_step_info *stepinfo;
	int preset_step = chip->ecc.size;
	int preset_strength = chip->ecc.strength;
	int ecc_bytes, nsteps = mtd->writesize / preset_step;
	int i, j;

	for (i = 0; i < caps->nstepinfos; i++) {
		stepinfo = &caps->stepinfos[i];

		if (stepinfo->stepsize != preset_step)
			continue;

		for (j = 0; j < stepinfo->nstrengths; j++) {
			if (stepinfo->strengths[j] != preset_strength)
				continue;

			ecc_bytes = caps->calc_ecc_bytes(preset_step,
							 preset_strength);
			if (WARN_ON_ONCE(ecc_bytes < 0))
				return ecc_bytes;

			if (ecc_bytes * nsteps > oobavail) {
				pr_err("ECC (step, strength) = (%d, %d) does not fit in OOB",
				       preset_step, preset_strength);
				return -ENOSPC;
			}

			chip->ecc.bytes = ecc_bytes;

			return 0;
		}
	}

	pr_err("ECC (step, strength) = (%d, %d) not supported on this controller",
	       preset_step, preset_strength);

	return -ENOTSUPP;
}
5359
5360
5361
5362
5363
5364
5365
5366
5367
5368
5369
5370static int
5371nand_match_ecc_req(struct nand_chip *chip,
5372 const struct nand_ecc_caps *caps, int oobavail)
5373{
5374 const struct nand_ecc_props *requirements =
5375 nanddev_get_ecc_requirements(&chip->base);
5376 struct mtd_info *mtd = nand_to_mtd(chip);
5377 const struct nand_ecc_step_info *stepinfo;
5378 int req_step = requirements->step_size;
5379 int req_strength = requirements->strength;
5380 int req_corr, step_size, strength, nsteps, ecc_bytes, ecc_bytes_total;
5381 int best_step, best_strength, best_ecc_bytes;
5382 int best_ecc_bytes_total = INT_MAX;
5383 int i, j;
5384
5385
5386 if (!req_step || !req_strength)
5387 return -ENOTSUPP;
5388
5389
5390 req_corr = mtd->writesize / req_step * req_strength;
5391
5392 for (i = 0; i < caps->nstepinfos; i++) {
5393 stepinfo = &caps->stepinfos[i];
5394 step_size = stepinfo->stepsize;
5395
5396 for (j = 0; j < stepinfo->nstrengths; j++) {
5397 strength = stepinfo->strengths[j];
5398
5399
5400
5401
5402
5403
5404 if (step_size < req_step && strength < req_strength)
5405 continue;
5406
5407 if (mtd->writesize % step_size)
5408 continue;
5409
5410 nsteps = mtd->writesize / step_size;
5411
5412 ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
5413 if (WARN_ON_ONCE(ecc_bytes < 0))
5414 continue;
5415 ecc_bytes_total = ecc_bytes * nsteps;
5416
5417 if (ecc_bytes_total > oobavail ||
5418 strength * nsteps < req_corr)
5419 continue;
5420
5421
5422
5423
5424
5425 if (ecc_bytes_total < best_ecc_bytes_total) {
5426 best_ecc_bytes_total = ecc_bytes_total;
5427 best_step = step_size;
5428 best_strength = strength;
5429 best_ecc_bytes = ecc_bytes;
5430 }
5431 }
5432 }
5433
5434 if (best_ecc_bytes_total == INT_MAX)
5435 return -ENOTSUPP;
5436
5437 chip->ecc.size = best_step;
5438 chip->ecc.strength = best_strength;
5439 chip->ecc.bytes = best_ecc_bytes;
5440
5441 return 0;
5442}
5443
5444
5445
5446
5447
5448
5449
5450
5451
5452
/*
 * nand_maximize_ecc - choose the maximum ECC strength available
 * @chip: nand chip info structure
 * @caps: ECC engine caps info structure
 * @oobavail: OOB size that the ECC engine can use
 *
 * Choose the ECC setting with the highest per-page correction capability
 * that fits in @oobavail (honoring a preset chip->ecc.size if any). On
 * success chip->ecc.{size,strength,bytes} are set. Returns 0 on success
 * or -ENOTSUPP when no setting fits.
 */
static int
nand_maximize_ecc(struct nand_chip *chip,
		  const struct nand_ecc_caps *caps, int oobavail)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	const struct nand_ecc_step_info *stepinfo;
	int step_size, strength, nsteps, ecc_bytes, corr;
	int best_corr = 0;
	int best_step = 0;
	int best_strength, best_ecc_bytes;
	int i, j;

	for (i = 0; i < caps->nstepinfos; i++) {
		stepinfo = &caps->stepinfos[i];
		step_size = stepinfo->stepsize;

		/* If chip->ecc.size is already set, respect it. */
		if (chip->ecc.size && step_size != chip->ecc.size)
			continue;

		for (j = 0; j < stepinfo->nstrengths; j++) {
			strength = stepinfo->strengths[j];

			/* The page must split into a whole number of steps. */
			if (mtd->writesize % step_size)
				continue;

			nsteps = mtd->writesize / step_size;

			ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
			if (WARN_ON_ONCE(ecc_bytes < 0))
				continue;

			/* Skip settings whose ECC bytes do not fit in OOB. */
			if (ecc_bytes * nsteps > oobavail)
				continue;

			/* Total correctable bits per page. */
			corr = strength * nsteps;

			/*
			 * If the number of correctable bits is the same,
			 * bigger step_size has more reliability.
			 */
			if (corr > best_corr ||
			    (corr == best_corr && step_size > best_step)) {
				best_corr = corr;
				best_step = step_size;
				best_strength = strength;
				best_ecc_bytes = ecc_bytes;
			}
		}
	}

	if (!best_corr)
		return -ENOTSUPP;

	chip->ecc.size = best_step;
	chip->ecc.strength = best_strength;
	chip->ecc.bytes = best_ecc_bytes;

	return 0;
}
5513
5514
5515
5516
5517
5518
5519
5520
5521
5522
5523
5524
5525
5526
5527
5528
5529
5530
5531
5532int nand_ecc_choose_conf(struct nand_chip *chip,
5533 const struct nand_ecc_caps *caps, int oobavail)
5534{
5535 struct mtd_info *mtd = nand_to_mtd(chip);
5536 struct nand_device *nanddev = mtd_to_nanddev(mtd);
5537
5538 if (WARN_ON(oobavail < 0 || oobavail > mtd->oobsize))
5539 return -EINVAL;
5540
5541 if (chip->ecc.size && chip->ecc.strength)
5542 return nand_check_ecc_caps(chip, caps, oobavail);
5543
5544 if (nanddev->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH)
5545 return nand_maximize_ecc(chip, caps, oobavail);
5546
5547 if (!nand_match_ecc_req(chip, caps, oobavail))
5548 return 0;
5549
5550 return nand_maximize_ecc(chip, caps, oobavail);
5551}
5552EXPORT_SYMBOL_GPL(nand_ecc_choose_conf);
5553
5554static int rawnand_erase(struct nand_device *nand, const struct nand_pos *pos)
5555{
5556 struct nand_chip *chip = container_of(nand, struct nand_chip,
5557 base);
5558 unsigned int eb = nanddev_pos_to_row(nand, pos);
5559 int ret;
5560
5561 eb >>= nand->rowconv.eraseblock_addr_shift;
5562
5563 nand_select_target(chip, pos->target);
5564 ret = nand_erase_op(chip, eb);
5565 nand_deselect_target(chip);
5566
5567 return ret;
5568}
5569
5570static int rawnand_markbad(struct nand_device *nand,
5571 const struct nand_pos *pos)
5572{
5573 struct nand_chip *chip = container_of(nand, struct nand_chip,
5574 base);
5575
5576 return nand_markbad_bbm(chip, nanddev_pos_to_offs(nand, pos));
5577}
5578
5579static bool rawnand_isbad(struct nand_device *nand, const struct nand_pos *pos)
5580{
5581 struct nand_chip *chip = container_of(nand, struct nand_chip,
5582 base);
5583 int ret;
5584
5585 nand_select_target(chip, pos->target);
5586 ret = nand_isbad_bbm(chip, nanddev_pos_to_offs(nand, pos));
5587 nand_deselect_target(chip);
5588
5589 return ret;
5590}
5591
/* Generic raw NAND implementation of the nand_ops interface. */
static const struct nand_ops rawnand_ops = {
	.erase = rawnand_erase,
	.markbad = rawnand_markbad,
	.isbad = rawnand_isbad,
};
5597
5598
5599
5600
5601
5602
5603
5604
5605
/*
 * nand_scan_tail - second phase of the NAND scan
 * @chip: NAND chip object
 *
 * Fills in the remaining uninitialized function pointers with defaults,
 * configures the ECC engine, registers the MTD operations and, unless
 * NAND_SKIP_BBTSCAN is set, builds the bad block table. Returns 0 on
 * success or a negative error code; all partial allocations are released
 * on failure.
 */
static int nand_scan_tail(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int ret, i;

	/* New bad blocks must go to OOB, a flash-based BBT, or both. */
	if (WARN_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
		    !(chip->bbt_options & NAND_BBT_USE_FLASH))) {
		return -EINVAL;
	}

	/* One bounce buffer covering a full page plus its OOB area. */
	chip->data_buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
	if (!chip->data_buf)
		return -ENOMEM;

	/*
	 * Run the manufacturer init hook with die 0 selected, as the hook may
	 * need to access the chip.
	 */
	nand_select_target(chip, 0);
	ret = nand_manufacturer_init(chip);
	nand_deselect_target(chip);
	if (ret)
		goto err_free_buf;

	/* The OOB part of the buffer sits right after the page data. */
	chip->oob_poi = chip->data_buf + mtd->writesize;

	/*
	 * If no OOB layout was provided, pick a default one based on the OOB
	 * size (software BCH installs its own layout later).
	 */
	if (!mtd->ooblayout &&
	    !(ecc->engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
	      ecc->algo == NAND_ECC_ALGO_BCH)) {
		switch (mtd->oobsize) {
		case 8:
		case 16:
			mtd_set_ooblayout(mtd, nand_get_small_page_ooblayout());
			break;
		case 64:
		case 128:
			mtd_set_ooblayout(mtd,
					  nand_get_large_page_hamming_ooblayout());
			break;
		default:
			/*
			 * Expose the whole OOB area to users if ECC_NONE
			 * is passed. We could do that for all kind of
			 * ->oobsize, but we must keep the old large/small
			 * page with ECC layout for compatibility reasons.
			 */
			if (ecc->engine_type == NAND_ECC_ENGINE_TYPE_NONE) {
				mtd_set_ooblayout(mtd,
						  nand_get_large_page_ooblayout());
				break;
			}

			WARN(1, "No oob scheme defined for oobsize %d\n",
			     mtd->oobsize);
			ret = -EINVAL;
			goto err_nand_manuf_cleanup;
		}
	}

	/*
	 * Configure the ECC engine. An on-host engine whose step size is
	 * larger than the page size falls back to software Hamming.
	 */
	switch (ecc->engine_type) {
	case NAND_ECC_ENGINE_TYPE_ON_HOST:
		ret = nand_set_ecc_on_host_ops(chip);
		if (ret)
			goto err_nand_manuf_cleanup;

		if (mtd->writesize >= ecc->size) {
			if (!ecc->strength) {
				WARN(1, "Driver must set ecc.strength when using hardware ECC\n");
				ret = -EINVAL;
				goto err_nand_manuf_cleanup;
			}
			break;
		}
		pr_warn("%d byte HW ECC not possible on %d byte page size, fallback to SW ECC\n",
			ecc->size, mtd->writesize);
		ecc->engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
		ecc->algo = NAND_ECC_ALGO_HAMMING;
		fallthrough;

	case NAND_ECC_ENGINE_TYPE_SOFT:
		ret = nand_set_ecc_soft_ops(chip);
		if (ret)
			goto err_nand_manuf_cleanup;
		break;

	case NAND_ECC_ENGINE_TYPE_ON_DIE:
		/* On-die ECC requires driver-provided page accessors. */
		if (!ecc->read_page || !ecc->write_page) {
			WARN(1, "No ECC functions supplied; on-die ECC not possible\n");
			ret = -EINVAL;
			goto err_nand_manuf_cleanup;
		}
		if (!ecc->read_oob)
			ecc->read_oob = nand_read_oob_std;
		if (!ecc->write_oob)
			ecc->write_oob = nand_write_oob_std;
		break;

	case NAND_ECC_ENGINE_TYPE_NONE:
		pr_warn("NAND_ECC_ENGINE_TYPE_NONE selected by board driver. This is not recommended!\n");
		ecc->read_page = nand_read_page_raw;
		ecc->write_page = nand_write_page_raw;
		ecc->read_oob = nand_read_oob_std;
		ecc->read_page_raw = nand_read_page_raw;
		ecc->write_page_raw = nand_write_page_raw;
		ecc->write_oob = nand_write_oob_std;
		ecc->size = mtd->writesize;
		ecc->bytes = 0;
		ecc->strength = 0;
		break;

	default:
		WARN(1, "Invalid NAND_ECC_MODE %d\n", ecc->engine_type);
		ret = -EINVAL;
		goto err_nand_manuf_cleanup;
	}

	/* Scratch buffers used by the generic correct/calculate helpers. */
	if (ecc->correct || ecc->calculate) {
		ecc->calc_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
		ecc->code_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
		if (!ecc->calc_buf || !ecc->code_buf) {
			ret = -ENOMEM;
			goto err_nand_manuf_cleanup;
		}
	}

	/* By default, the raw OOB accessors mirror the regular ones. */
	if (!ecc->read_oob_raw)
		ecc->read_oob_raw = ecc->read_oob;
	if (!ecc->write_oob_raw)
		ecc->write_oob_raw = ecc->write_oob;

	/* Propagate the ECC info to mtd_info. */
	mtd->ecc_strength = ecc->strength;
	mtd->ecc_step_size = ecc->size;

	/* The page must split into a whole number of ECC steps. */
	ecc->steps = mtd->writesize / ecc->size;
	if (ecc->steps * ecc->size != mtd->writesize) {
		WARN(1, "Invalid ECC parameters\n");
		ret = -EINVAL;
		goto err_nand_manuf_cleanup;
	}

	ecc->total = ecc->steps * ecc->bytes;
	chip->base.ecc.ctx.total = ecc->total;

	if (ecc->total > mtd->oobsize) {
		WARN(1, "Total number of ECC bytes exceeded oobsize\n");
		ret = -EINVAL;
		goto err_nand_manuf_cleanup;
	}

	/*
	 * Number of OOB bytes available for a client to place data into,
	 * i.e. the free bytes of the OOB layout.
	 */
	ret = mtd_ooblayout_count_freebytes(mtd);
	if (ret < 0)
		ret = 0;

	mtd->oobavail = ret;

	/* Warn when the configured ECC is weaker than the chip requires. */
	if (!nand_ecc_is_strong_enough(&chip->base))
		pr_warn("WARNING: %s: the ECC used on your system (%db/%dB) is too weak compared to the one required by the NAND chip (%db/%dB)\n",
			mtd->name, chip->ecc.strength, chip->ecc.size,
			nanddev_get_ecc_requirements(&chip->base)->strength,
			nanddev_get_ecc_requirements(&chip->base)->step_size);

	/* Allow subpage writes on SLC chips that did not opt out. */
	if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) {
		switch (ecc->steps) {
		case 2:
			mtd->subpage_sft = 1;
			break;
		case 4:
		case 8:
		case 16:
			mtd->subpage_sft = 2;
			break;
		}
	}
	chip->subpagesize = mtd->writesize >> mtd->subpage_sft;

	/* Invalidate the page cache: no page is buffered yet. */
	chip->pagecache.page = -1;

	/* Large page (>512 bytes) NAND with soft ECC supports subpage reads. */
	switch (ecc->engine_type) {
	case NAND_ECC_ENGINE_TYPE_SOFT:
		if (chip->page_shift > 9)
			chip->options |= NAND_SUBPAGE_READ;
		break;

	default:
		break;
	}

	ret = nanddev_init(&chip->base, &rawnand_ops, mtd->owner);
	if (ret)
		goto err_nand_manuf_cleanup;

	/* Adjust the MTD capability flags when NAND_ROM is set. */
	if (chip->options & NAND_ROM)
		mtd->flags = MTD_CAP_ROM;

	/* Fill in the remaining MTD driver callbacks. */
	mtd->_erase = nand_erase;
	mtd->_point = NULL;
	mtd->_unpoint = NULL;
	mtd->_panic_write = panic_nand_write;
	mtd->_read_oob = nand_read_oob;
	mtd->_write_oob = nand_write_oob;
	mtd->_sync = nand_sync;
	mtd->_lock = nand_lock;
	mtd->_unlock = nand_unlock;
	mtd->_suspend = nand_suspend;
	mtd->_resume = nand_resume;
	mtd->_reboot = nand_shutdown;
	mtd->_block_isreserved = nand_block_isreserved;
	mtd->_block_isbad = nand_block_isbad;
	mtd->_block_markbad = nand_block_markbad;
	mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;

	/*
	 * Give bitflip_threshold a sane default (3/4 of the strength) before
	 * the BBT scan below, which may trigger reads.
	 */
	if (!mtd->bitflip_threshold)
		mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);

	/* Find the best data interface configuration for this chip. */
	ret = nand_choose_interface_config(chip);
	if (ret)
		goto err_nanddev_cleanup;

	/* Apply the chosen interface configuration on every die. */
	for (i = 0; i < nanddev_ntargets(&chip->base); i++) {
		ret = nand_setup_interface(chip, i);
		if (ret)
			goto err_free_interface_config;
	}

	/* Check whether the bad block table scan should be skipped. */
	if (chip->options & NAND_SKIP_BBTSCAN)
		return 0;

	/* Build the bad block table. */
	ret = nand_create_bbt(chip);
	if (ret)
		goto err_free_interface_config;

	return 0;

err_free_interface_config:
	kfree(chip->best_interface_config);

err_nanddev_cleanup:
	nanddev_cleanup(&chip->base);

err_nand_manuf_cleanup:
	nand_manufacturer_cleanup(chip);

err_free_buf:
	kfree(chip->data_buf);
	kfree(ecc->code_buf);
	kfree(ecc->calc_buf);

	return ret;
}
5894
5895static int nand_attach(struct nand_chip *chip)
5896{
5897 if (chip->controller->ops && chip->controller->ops->attach_chip)
5898 return chip->controller->ops->attach_chip(chip);
5899
5900 return 0;
5901}
5902
5903static void nand_detach(struct nand_chip *chip)
5904{
5905 if (chip->controller->ops && chip->controller->ops->detach_chip)
5906 chip->controller->ops->detach_chip(chip);
5907}
5908
5909
5910
5911
5912
5913
5914
5915
5916
5917
5918
5919int nand_scan_with_ids(struct nand_chip *chip, unsigned int maxchips,
5920 struct nand_flash_dev *ids)
5921{
5922 int ret;
5923
5924 if (!maxchips)
5925 return -EINVAL;
5926
5927 ret = nand_scan_ident(chip, maxchips, ids);
5928 if (ret)
5929 return ret;
5930
5931 ret = nand_attach(chip);
5932 if (ret)
5933 goto cleanup_ident;
5934
5935 ret = nand_scan_tail(chip);
5936 if (ret)
5937 goto detach_chip;
5938
5939 return 0;
5940
5941detach_chip:
5942 nand_detach(chip);
5943cleanup_ident:
5944 nand_scan_ident_cleanup(chip);
5945
5946 return ret;
5947}
5948EXPORT_SYMBOL(nand_scan_with_ids);
5949
5950
5951
5952
5953
/*
 * nand_cleanup - free all resources held by the NAND device
 * @chip: NAND chip object
 *
 * Reverse of the scan phases: releases ECC/BBT/buffer allocations, the
 * manufacturer private data, the controller attachment and finally the
 * identification data.
 */
void nand_cleanup(struct nand_chip *chip)
{
	/* Free the software BCH control structure if one was allocated. */
	if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
	    chip->ecc.algo == NAND_ECC_ALGO_BCH)
		nand_bch_free((struct nand_bch_control *)chip->ecc.priv);

	nanddev_cleanup(&chip->base);

	/* Free bad block table and buffer memory. */
	kfree(chip->bbt);
	kfree(chip->data_buf);
	kfree(chip->ecc.code_buf);
	kfree(chip->ecc.calc_buf);

	/* Free the bad block descriptor only if it was allocated dynamically. */
	if (chip->badblock_pattern && chip->badblock_pattern->options
			& NAND_BBT_DYNAMICSTRUCT)
		kfree(chip->badblock_pattern);

	/* Free the chosen data interface configuration. */
	kfree(chip->best_interface_config);

	/* Free manufacturer private data. */
	nand_manufacturer_cleanup(chip);

	/* Undo the controller attachment. */
	nand_detach(chip);

	/* Free the identification-phase allocations. */
	nand_scan_ident_cleanup(chip);
}
5985
5986EXPORT_SYMBOL_GPL(nand_cleanup);
5987
5988MODULE_LICENSE("GPL");
5989MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>");
5990MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
5991MODULE_DESCRIPTION("Generic NAND flash driver code");
5992