1
2
3
4
5
6
7
8
9
10#ifndef __LINUX_MTD_NAND_H
11#define __LINUX_MTD_NAND_H
12
13#include <linux/mtd/mtd.h>
14
15struct nand_device;
16
17
18
19
20
21
22
23
24
25
26
27
28
/**
 * struct nand_memory_organization - Memory organization structure
 * @bits_per_cell: number of bits per NAND cell (1 = SLC, 2 = MLC, ...)
 * @pagesize: page size, in bytes
 * @oobsize: OOB (out-of-band/spare) area size per page, in bytes
 * @pages_per_eraseblock: number of pages per eraseblock
 * @eraseblocks_per_lun: number of eraseblocks per LUN (Logical Unit Number)
 * @max_bad_eraseblocks_per_lun: maximum number of bad eraseblocks per LUN
 * @planes_per_lun: number of planes per LUN
 * @luns_per_target: number of LUNs per target
 * @ntargets: total number of targets exposed by the NAND device
 */
struct nand_memory_organization {
	unsigned int bits_per_cell;
	unsigned int pagesize;
	unsigned int oobsize;
	unsigned int pages_per_eraseblock;
	unsigned int eraseblocks_per_lun;
	unsigned int max_bad_eraseblocks_per_lun;
	unsigned int planes_per_lun;
	unsigned int luns_per_target;
	unsigned int ntargets;
};
40
/*
 * NAND_MEMORG() - Shorthand initializer for a struct nand_memory_organization.
 * Arguments map one-to-one onto the struct fields, in declaration order.
 */
#define NAND_MEMORG(bpc, ps, os, ppe, epl, mbb, ppl, lpt, nt)	\
	{							\
		.bits_per_cell = (bpc),				\
		.pagesize = (ps),				\
		.oobsize = (os),				\
		.pages_per_eraseblock = (ppe),			\
		.eraseblocks_per_lun = (epl),			\
		.max_bad_eraseblocks_per_lun = (mbb),		\
		.planes_per_lun = (ppl),			\
		.luns_per_target = (lpt),			\
		.ntargets = (nt),				\
	}
53
54
55
56
57
58
59
60
/**
 * struct nand_row_converter - Information to convert an absolute offset into
 *			       a row address
 * @lun_addr_shift: position of the LUN identifier in the row address
 * @eraseblock_addr_shift: position of the eraseblock identifier in the row
 *			   address
 */
struct nand_row_converter {
	unsigned int lun_addr_shift;
	unsigned int eraseblock_addr_shift;
};
65
66
67
68
69
70
71
72
73
74
75
76
/**
 * struct nand_pos - NAND position object
 * @target: the NAND target/die
 * @lun: the LUN identifier
 * @plane: the plane within the LUN
 * @eraseblock: the eraseblock within the LUN
 * @page: the page within the LUN
 *
 * These are the coordinates of a NAND page; stored as distinct fields so that
 * callers do not have to decode an offset or row address themselves.
 */
struct nand_pos {
	unsigned int target;
	unsigned int lun;
	unsigned int plane;
	unsigned int eraseblock;
	unsigned int page;
};
84
85
86
87
88
89
/**
 * enum nand_page_io_req_type - Direction of an I/O request
 * @NAND_PAGE_READ: from the chip, to the controller
 * @NAND_PAGE_WRITE: from the controller, to the chip
 */
enum nand_page_io_req_type {
	NAND_PAGE_READ = 0,
	NAND_PAGE_WRITE,
};
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
/**
 * struct nand_page_io_req - NAND page I/O request object
 * @type: the request direction (read or write)
 * @pos: the position this I/O request is targeting
 * @dataoffs: the offset within the page
 * @datalen: number of data bytes to read from/write to this page
 * @databuf: buffer to store data in or get data from (@out for writes,
 *	     @in for reads)
 * @ooboffs: the OOB offset within the page
 * @ooblen: the number of OOB bytes to read from/write to this page
 * @oobbuf: buffer to store OOB data in or get OOB data from
 * @mode: MTD operation mode, copied from struct mtd_oob_ops (see
 *	  nanddev_io_iter_init())
 *
 * This object is used to pass per-page I/O requests to NAND sub-layers; the
 * unions let the same request describe either direction without casts.
 */
struct nand_page_io_req {
	enum nand_page_io_req_type type;
	struct nand_pos pos;
	unsigned int dataoffs;
	unsigned int datalen;
	union {
		const void *out;
		void *in;
	} databuf;
	unsigned int ooboffs;
	unsigned int ooblen;
	union {
		const void *out;
		void *in;
	} oobbuf;
	int mode;
};
129
130const struct mtd_ooblayout_ops *nand_get_small_page_ooblayout(void);
131const struct mtd_ooblayout_ops *nand_get_large_page_ooblayout(void);
132const struct mtd_ooblayout_ops *nand_get_large_page_hamming_ooblayout(void);
133
134
135
136
137
138
139
140
141
/**
 * enum nand_ecc_engine_type - NAND ECC engine type
 * @NAND_ECC_ENGINE_TYPE_INVALID: invalid value
 * @NAND_ECC_ENGINE_TYPE_NONE: no ECC correction
 * @NAND_ECC_ENGINE_TYPE_SOFT: software ECC correction
 * @NAND_ECC_ENGINE_TYPE_ON_HOST: host-side (controller) ECC correction
 * @NAND_ECC_ENGINE_TYPE_ON_DIE: on-chip ECC correction
 */
enum nand_ecc_engine_type {
	NAND_ECC_ENGINE_TYPE_INVALID,
	NAND_ECC_ENGINE_TYPE_NONE,
	NAND_ECC_ENGINE_TYPE_SOFT,
	NAND_ECC_ENGINE_TYPE_ON_HOST,
	NAND_ECC_ENGINE_TYPE_ON_DIE,
};
149
150
151
152
153
154
155
156
157
/**
 * enum nand_ecc_placement - NAND ECC bytes placement
 * @NAND_ECC_PLACEMENT_UNKNOWN: the actual placement is unknown
 * @NAND_ECC_PLACEMENT_OOB: the ECC bytes are located in the OOB area
 * @NAND_ECC_PLACEMENT_INTERLEAVED: ECC bytes are interleaved with the
 *				    in-band data
 */
enum nand_ecc_placement {
	NAND_ECC_PLACEMENT_UNKNOWN,
	NAND_ECC_PLACEMENT_OOB,
	NAND_ECC_PLACEMENT_INTERLEAVED,
};
163
164
165
166
167
168
169
170
/**
 * enum nand_ecc_algo - NAND ECC algorithm
 * @NAND_ECC_ALGO_UNKNOWN: unknown ECC algorithm
 * @NAND_ECC_ALGO_HAMMING: Hamming algorithm
 * @NAND_ECC_ALGO_BCH: Bose-Chaudhuri-Hocquenghem algorithm
 * @NAND_ECC_ALGO_RS: Reed-Solomon algorithm
 */
enum nand_ecc_algo {
	NAND_ECC_ALGO_UNKNOWN,
	NAND_ECC_ALGO_HAMMING,
	NAND_ECC_ALGO_BCH,
	NAND_ECC_ALGO_RS,
};
177
178
179
180
181
182
183
184
185
186
/**
 * struct nand_ecc_props - NAND ECC properties
 * @engine_type: ECC engine type
 * @placement: OOB placement (if relevant)
 * @algo: ECC algorithm (if relevant)
 * @strength: ECC strength (number of correctable bits per step)
 * @step_size: number of data bytes covered by a single ECC step
 * @flags: ECC flags (e.g. %NAND_ECC_MAXIMIZE_STRENGTH)
 */
struct nand_ecc_props {
	enum nand_ecc_engine_type engine_type;
	enum nand_ecc_placement placement;
	enum nand_ecc_algo algo;
	unsigned int strength;
	unsigned int step_size;
	unsigned int flags;
};
195
/* Shorthand initializer for the strength/step-size pair of nand_ecc_props. */
#define NAND_ECCREQ(str, stp) { .strength = (str), .step_size = (stp) }

/* NAND ECC flags: request the maximum possible ECC strength. */
#define NAND_ECC_MAXIMIZE_STRENGTH BIT(0)
200
201
202
203
204
/**
 * struct nand_bbt - bad block table object
 * @cache: in-memory BBT cache; NULL until nanddev_bbt_init() allocates it
 *	   (see nanddev_bbt_is_initialized())
 */
struct nand_bbt {
	unsigned long *cache;
};
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
/**
 * struct nand_ops - NAND operations
 * @erase: erase a specific block. No need to check if the block is bad before
 *	   erasing, this has been taken care of by the generic NAND layer
 * @markbad: mark a specific block bad
 * @isbad: check whether a specific block is bad
 *
 * These are all low-level operations that should be implemented by specialized
 * NAND layers (SPI NAND, raw NAND, ...).
 */
struct nand_ops {
	int (*erase)(struct nand_device *nand, const struct nand_pos *pos);
	int (*markbad)(struct nand_device *nand, const struct nand_pos *pos);
	bool (*isbad)(struct nand_device *nand, const struct nand_pos *pos);
};
230
231
232
233
234
235
236
237
238
/**
 * struct nand_ecc_context - Context for the ECC engine
 * @conf: basic ECC engine parameters actually in use
 * @nsteps: number of ECC steps per page
 * @total: total number of ECC bytes per page
 * @priv: ECC engine driver private data
 */
struct nand_ecc_context {
	struct nand_ecc_props conf;
	unsigned int nsteps;
	unsigned int total;
	void *priv;
};
245
246
247
248
249
250
251
252
253
254
255
256
/**
 * struct nand_ecc_engine_ops - ECC engine operations
 * @init_ctx: given a desired user configuration, requests the ECC engine to
 *	      setup a configuration and return it
 * @cleanup_ctx: clean the context initialized by @init_ctx
 * @prepare_io_req: is called before reading/writing a page to prepare the I/O
 *		    request to be performed with ECC correction
 * @finish_io_req: is called after reading/writing a page to terminate the I/O
 *		   request and ensure proper ECC correction
 */
struct nand_ecc_engine_ops {
	int (*init_ctx)(struct nand_device *nand);
	void (*cleanup_ctx)(struct nand_device *nand);
	int (*prepare_io_req)(struct nand_device *nand,
			      struct nand_page_io_req *req);
	int (*finish_io_req)(struct nand_device *nand,
			     struct nand_page_io_req *req);
};
265
266
267
268
269
/**
 * struct nand_ecc_engine - ECC engine abstraction for NAND devices
 * @ops: ECC engine operations
 */
struct nand_ecc_engine {
	struct nand_ecc_engine_ops *ops;
};
273
274void of_get_nand_ecc_user_config(struct nand_device *nand);
275int nand_ecc_init_ctx(struct nand_device *nand);
276void nand_ecc_cleanup_ctx(struct nand_device *nand);
277int nand_ecc_prepare_io_req(struct nand_device *nand,
278 struct nand_page_io_req *req);
279int nand_ecc_finish_io_req(struct nand_device *nand,
280 struct nand_page_io_req *req);
281bool nand_ecc_is_strong_enough(struct nand_device *nand);
282struct nand_ecc_engine *nand_ecc_get_sw_engine(struct nand_device *nand);
283struct nand_ecc_engine *nand_ecc_get_on_die_hw_engine(struct nand_device *nand);
284
#if IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING)
struct nand_ecc_engine *nand_ecc_sw_hamming_get_engine(void);
#else
/* Stub when software Hamming ECC support is compiled out. */
static inline struct nand_ecc_engine *nand_ecc_sw_hamming_get_engine(void)
{
	return NULL;
}
#endif
293
#if IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_BCH)
struct nand_ecc_engine *nand_ecc_sw_bch_get_engine(void);
#else
/* Stub when software BCH ECC support is compiled out. */
static inline struct nand_ecc_engine *nand_ecc_sw_bch_get_engine(void)
{
	return NULL;
}
#endif
302
303
304
305
306
307
308
309
310
311
312
313
314
315
/**
 * struct nand_ecc_req_tweak_ctx - Help for automatically tweaking requests
 * @orig_req: original and unmodified I/O request, restored by
 *	      nand_ecc_restore_req()
 * @nand: the NAND device this context is bound to
 * @page_buffer_size: size of the data bounce buffer
 * @oob_buffer_size: size of the OOB bounce buffer
 * @spare_databuf: data bounce buffer
 * @spare_oobbuf: OOB bounce buffer
 * @bounce_data: when true, the data buffer is bounced
 * @bounce_oob: when true, the OOB buffer is bounced
 */
struct nand_ecc_req_tweak_ctx {
	struct nand_page_io_req orig_req;
	struct nand_device *nand;
	unsigned int page_buffer_size;
	unsigned int oob_buffer_size;
	void *spare_databuf;
	void *spare_oobbuf;
	bool bounce_data;
	bool bounce_oob;
};
326
327int nand_ecc_init_req_tweaking(struct nand_ecc_req_tweak_ctx *ctx,
328 struct nand_device *nand);
329void nand_ecc_cleanup_req_tweaking(struct nand_ecc_req_tweak_ctx *ctx);
330void nand_ecc_tweak_req(struct nand_ecc_req_tweak_ctx *ctx,
331 struct nand_page_io_req *req);
332void nand_ecc_restore_req(struct nand_ecc_req_tweak_ctx *ctx,
333 struct nand_page_io_req *req);
334
335
336
337
338
339
340
341
342
343
344
/**
 * struct nand_ecc - Information relative to the ECC
 * @defaults: default values, depend on the underlying subsystem
 * @requirements: ECC requirements from the NAND chip perspective
 * @user_conf: user-requested ECC properties (e.g. parsed from DT by
 *	       of_get_nand_ecc_user_config())
 * @ctx: ECC context for the engine actually in use
 * @ondie_engine: on-die ECC engine reference, if any
 * @engine: ECC engine actually bound to the device
 */
struct nand_ecc {
	struct nand_ecc_props defaults;
	struct nand_ecc_props requirements;
	struct nand_ecc_props user_conf;
	struct nand_ecc_context ctx;
	struct nand_ecc_engine *ondie_engine;
	struct nand_ecc_engine *engine;
};
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
/**
 * struct nand_device - NAND device
 * @mtd: MTD instance attached to the NAND device (must be first so that
 *	 mtd_to_nanddev()'s container_of() works)
 * @memorg: memory layout of the NAND device
 * @ecc: NAND ECC information
 * @rowconv: position to row address converter
 * @bbt: bad block table info
 * @ops: NAND operations attached to the NAND device
 *
 * Generic NAND object; specialized NAND layers (raw NAND, SPI NAND, ...)
 * embed it and fill @memorg and @ops at initialization time
 * (see nanddev_init()).
 */
struct nand_device {
	struct mtd_info mtd;
	struct nand_memory_organization memorg;
	struct nand_ecc ecc;
	struct nand_row_converter rowconv;
	struct nand_bbt bbt;
	const struct nand_ops *ops;
};
381
382
383
384
385
386
387
388
389
390
391
392
/**
 * struct nand_io_iter - NAND I/O iterator
 * @req: current I/O request, updated page by page
 * @oobbytes_per_page: OOB bytes available per page in the current mode
 * @dataleft: remaining number of data bytes to read/write
 * @oobleft: remaining number of OOB bytes to read/write
 *
 * Used by nanddev_io_for_each_page() to walk a multi-page request.
 */
struct nand_io_iter {
	struct nand_page_io_req req;
	unsigned int oobbytes_per_page;
	unsigned int dataleft;
	unsigned int oobleft;
};
399
400
401
402
403
404
405
/**
 * mtd_to_nanddev() - Get the NAND device attached to an MTD instance
 * @mtd: MTD instance
 *
 * Return: the NAND device embedding @mtd.
 */
static inline struct nand_device *mtd_to_nanddev(struct mtd_info *mtd)
{
	return container_of(mtd, struct nand_device, mtd);
}
410
411
412
413
414
415
416
/**
 * nanddev_to_mtd() - Get the MTD device embedded in a NAND device
 * @nand: NAND device
 *
 * Return: the MTD device embedded in @nand.
 */
static inline struct mtd_info *nanddev_to_mtd(struct nand_device *nand)
{
	return &nand->mtd;
}
421
422
423
424
425
426
427
/**
 * nanddev_bits_per_cell() - Get the number of bits per cell
 * @nand: NAND device
 *
 * Return: the number of bits per cell.
 */
static inline unsigned int nanddev_bits_per_cell(const struct nand_device *nand)
{
	return nand->memorg.bits_per_cell;
}
432
433
434
435
436
437
438
/**
 * nanddev_page_size() - Get NAND page size
 * @nand: NAND device
 *
 * Return: the page size, in bytes.
 */
static inline size_t nanddev_page_size(const struct nand_device *nand)
{
	return nand->memorg.pagesize;
}
443
444
445
446
447
448
449
/**
 * nanddev_per_page_oobsize() - Get NAND OOB size
 * @nand: NAND device
 *
 * Return: the OOB size, in bytes.
 */
static inline unsigned int
nanddev_per_page_oobsize(const struct nand_device *nand)
{
	return nand->memorg.oobsize;
}
455
456
457
458
459
460
461
/**
 * nanddev_pages_per_eraseblock() - Get the number of pages per eraseblock
 * @nand: NAND device
 *
 * Return: the number of pages per eraseblock.
 */
static inline unsigned int
nanddev_pages_per_eraseblock(const struct nand_device *nand)
{
	return nand->memorg.pages_per_eraseblock;
}
467
468
469
470
471
472
473
/**
 * nanddev_pages_per_target() - Get the number of pages per target
 * @nand: NAND device
 *
 * Return: the number of pages per target.
 */
static inline unsigned int
nanddev_pages_per_target(const struct nand_device *nand)
{
	return nand->memorg.pages_per_eraseblock *
	       nand->memorg.eraseblocks_per_lun *
	       nand->memorg.luns_per_target;
}
481
482
483
484
485
486
487
/**
 * nanddev_eraseblock_size() - Get the eraseblock size
 * @nand: NAND device
 *
 * Return: the eraseblock size, in bytes.
 */
static inline size_t nanddev_eraseblock_size(const struct nand_device *nand)
{
	return nand->memorg.pagesize * nand->memorg.pages_per_eraseblock;
}
492
493
494
495
496
497
498
/**
 * nanddev_eraseblocks_per_lun() - Get the number of eraseblocks per LUN
 * @nand: NAND device
 *
 * Return: the number of eraseblocks per LUN.
 */
static inline unsigned int
nanddev_eraseblocks_per_lun(const struct nand_device *nand)
{
	return nand->memorg.eraseblocks_per_lun;
}
504
505
506
507
508
509
510
/**
 * nanddev_eraseblocks_per_target() - Get the number of eraseblocks per target
 * @nand: NAND device
 *
 * Return: the number of eraseblocks per target.
 */
static inline unsigned int
nanddev_eraseblocks_per_target(const struct nand_device *nand)
{
	return nand->memorg.eraseblocks_per_lun * nand->memorg.luns_per_target;
}
516
517
518
519
520
521
522
/**
 * nanddev_target_size() - Get the total size provided by a single target/die
 * @nand: NAND device
 *
 * The multiplication is done in u64 to avoid overflowing unsigned int.
 *
 * Return: the total size exposed by a single target/die, in bytes.
 */
static inline u64 nanddev_target_size(const struct nand_device *nand)
{
	return (u64)nand->memorg.luns_per_target *
	       nand->memorg.eraseblocks_per_lun *
	       nand->memorg.pages_per_eraseblock *
	       nand->memorg.pagesize;
}
530
531
532
533
534
535
536
/**
 * nanddev_ntargets() - Get the total of targets
 * @nand: NAND device
 *
 * Return: the number of targets/dies exposed by @nand.
 */
static inline unsigned int nanddev_ntargets(const struct nand_device *nand)
{
	return nand->memorg.ntargets;
}
541
542
543
544
545
546
547
/**
 * nanddev_neraseblocks() - Get the total number of eraseblocks
 * @nand: NAND device
 *
 * Return: the total number of eraseblocks exposed by @nand (all targets).
 */
static inline unsigned int nanddev_neraseblocks(const struct nand_device *nand)
{
	return nand->memorg.ntargets * nand->memorg.luns_per_target *
	       nand->memorg.eraseblocks_per_lun;
}
553
554
555
556
557
558
559
/**
 * nanddev_size() - Get NAND size
 * @nand: NAND device
 *
 * Return: the total size (in bytes) exposed by @nand.
 */
static inline u64 nanddev_size(const struct nand_device *nand)
{
	return nanddev_target_size(nand) * nanddev_ntargets(nand);
}
564
565
566
567
568
569
570
571
572
573
/**
 * nanddev_get_memorg() - Extract memory organization info from a NAND device
 * @nand: NAND device
 *
 * Can be used by the upper layer to fill the memorg info before calling
 * nanddev_init().
 *
 * Return: the memorg object embedded in the NAND device.
 */
static inline struct nand_memory_organization *
nanddev_get_memorg(struct nand_device *nand)
{
	return &nand->memorg;
}
579
580
581
582
583
/**
 * nanddev_get_ecc_conf() - Extract the ECC configuration from a NAND device
 * @nand: NAND device
 *
 * Return: the ECC configuration actually in use (read-only).
 */
static inline const struct nand_ecc_props *
nanddev_get_ecc_conf(struct nand_device *nand)
{
	return &nand->ecc.ctx.conf;
}
589
590
591
592
593
/**
 * nanddev_get_ecc_nsteps() - Extract the number of ECC steps
 * @nand: NAND device
 *
 * Return: the number of ECC steps per page.
 */
static inline unsigned int
nanddev_get_ecc_nsteps(struct nand_device *nand)
{
	return nand->ecc.ctx.nsteps;
}
599
600
601
602
603
/**
 * nanddev_get_ecc_bytes_per_step() - Extract the number of ECC bytes per step
 * @nand: NAND device
 *
 * Only meaningful once the ECC context has been initialized (nsteps != 0).
 *
 * Return: the number of ECC bytes per step.
 */
static inline unsigned int
nanddev_get_ecc_bytes_per_step(struct nand_device *nand)
{
	return nand->ecc.ctx.total / nand->ecc.ctx.nsteps;
}
609
610
611
612
613
614
/**
 * nanddev_get_ecc_requirements() - Extract the ECC requirements from a NAND
 *				    device
 * @nand: NAND device
 *
 * Return: the chip's ECC requirements (read-only).
 */
static inline const struct nand_ecc_props *
nanddev_get_ecc_requirements(struct nand_device *nand)
{
	return &nand->ecc.requirements;
}
620
621
622
623
624
625
626
/**
 * nanddev_set_ecc_requirements() - Assign the ECC requirements of a NAND
 *				    device
 * @nand: NAND device
 * @reqs: requirements to copy into @nand (copied by value, the pointer is not
 *	  retained)
 */
static inline void
nanddev_set_ecc_requirements(struct nand_device *nand,
			     const struct nand_ecc_props *reqs)
{
	nand->ecc.requirements = *reqs;
}
633
634int nanddev_init(struct nand_device *nand, const struct nand_ops *ops,
635 struct module *owner);
636void nanddev_cleanup(struct nand_device *nand);
637
638
639
640
641
642
643
644
645
646
647
/**
 * nanddev_register() - Register a NAND device
 * @nand: NAND device
 *
 * Registers the embedded MTD device with the MTD layer (no partitions).
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
static inline int nanddev_register(struct nand_device *nand)
{
	return mtd_device_register(&nand->mtd, NULL, 0);
}
652
653
654
655
656
657
658
659
660
661
662
/**
 * nanddev_unregister() - Unregister a NAND device
 * @nand: NAND device
 *
 * Unregisters the embedded MTD device from the MTD layer.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
static inline int nanddev_unregister(struct nand_device *nand)
{
	return mtd_device_unregister(&nand->mtd);
}
667
668
669
670
671
672
673
674
/**
 * nanddev_set_of_node() - Attach a DT node to a NAND device
 * @nand: NAND device
 * @np: DT node
 *
 * Attaches a DT node to the underlying MTD device.
 */
static inline void nanddev_set_of_node(struct nand_device *nand,
				       struct device_node *np)
{
	mtd_set_of_node(&nand->mtd, np);
}
680
681
682
683
684
685
686
/**
 * nanddev_get_of_node() - Retrieve the DT node attached to a NAND device
 * @nand: NAND device
 *
 * Return: the DT node attached to the underlying MTD device.
 */
static inline struct device_node *nanddev_get_of_node(struct nand_device *nand)
{
	return mtd_get_of_node(&nand->mtd);
}
691
692
693
694
695
696
697
698
699
700
701
/**
 * nanddev_offs_to_pos() - Convert an absolute NAND offset into a NAND position
 * @nand: NAND device
 * @offs: absolute NAND offset (usually passed by the MTD layer)
 * @pos: a NAND position object to fill in
 *
 * Converts @offs into a nand_pos representation. Each do_div() stores the
 * quotient back into @tmp and returns the remainder, so the coordinates are
 * peeled off from least to most significant.
 *
 * Return: the offset within the NAND page pointed to by @pos.
 */
static inline unsigned int nanddev_offs_to_pos(struct nand_device *nand,
					       loff_t offs,
					       struct nand_pos *pos)
{
	unsigned int pageoffs;
	u64 tmp = offs;

	pageoffs = do_div(tmp, nand->memorg.pagesize);
	pos->page = do_div(tmp, nand->memorg.pages_per_eraseblock);
	pos->eraseblock = do_div(tmp, nand->memorg.eraseblocks_per_lun);
	/* the plane is derived from the eraseblock index, not stored in offs */
	pos->plane = pos->eraseblock % nand->memorg.planes_per_lun;
	pos->lun = do_div(tmp, nand->memorg.luns_per_target);
	pos->target = tmp;

	return pageoffs;
}
718
719
720
721
722
723
724
725
726
727
728static inline int nanddev_pos_cmp(const struct nand_pos *a,
729 const struct nand_pos *b)
730{
731 if (a->target != b->target)
732 return a->target < b->target ? -1 : 1;
733
734 if (a->lun != b->lun)
735 return a->lun < b->lun ? -1 : 1;
736
737 if (a->eraseblock != b->eraseblock)
738 return a->eraseblock < b->eraseblock ? -1 : 1;
739
740 if (a->page != b->page)
741 return a->page < b->page ? -1 : 1;
742
743 return 0;
744}
745
746
747
748
749
750
751
752
753
754
755
756
/**
 * nanddev_pos_to_offs() - Convert a NAND position into an absolute offset
 * @nand: NAND device
 * @pos: the NAND position to convert
 *
 * Converts @pos NAND position into an absolute offset (the inverse of
 * nanddev_offs_to_pos(), except the intra-page offset is lost).
 *
 * Return: the absolute offset. Note that @pos points to the beginning of a
 *	   page, if one wants to point to a specific offset within this page
 *	   the returned offset has to be adjusted manually.
 */
static inline loff_t nanddev_pos_to_offs(struct nand_device *nand,
					 const struct nand_pos *pos)
{
	unsigned int npages;

	npages = pos->page +
		 ((pos->eraseblock +
		   (pos->lun +
		    (pos->target * nand->memorg.luns_per_target)) *
		   nand->memorg.eraseblocks_per_lun) *
		  nand->memorg.pages_per_eraseblock);

	/* cast before multiplying so the byte offset doesn't overflow */
	return (loff_t)npages * nand->memorg.pagesize;
}
771
772
773
774
775
776
777
778
779
780
781
/**
 * nanddev_pos_to_row() - Extract a row address from a NAND position
 * @nand: NAND device
 * @pos: the position to convert
 *
 * Builds a row address from @pos by placing the LUN and eraseblock
 * identifiers at the bit positions described by @nand->rowconv, with the
 * page index in the low bits.
 *
 * Return: the row address extracted from @pos.
 */
static inline unsigned int nanddev_pos_to_row(struct nand_device *nand,
					      const struct nand_pos *pos)
{
	return (pos->lun << nand->rowconv.lun_addr_shift) |
	       (pos->eraseblock << nand->rowconv.eraseblock_addr_shift) |
	       pos->page;
}
789
790
791
792
793
794
795
796
797
/**
 * nanddev_pos_next_target() - Move a position to the next target/die
 * @nand: NAND device
 * @pos: the position to update
 *
 * Updates @pos to point to the start of the next target/die. Useful when you
 * want to iterate over all targets/dies of a NAND device.
 */
static inline void nanddev_pos_next_target(struct nand_device *nand,
					   struct nand_pos *pos)
{
	pos->page = 0;
	pos->plane = 0;
	pos->eraseblock = 0;
	pos->lun = 0;
	pos->target++;
}
807
808
809
810
811
812
813
814
815
/**
 * nanddev_pos_next_lun() - Move a position to the next LUN
 * @nand: NAND device
 * @pos: the position to update
 *
 * Updates @pos to point to the start of the next LUN, rolling over to the
 * next target when the last LUN of the current target is reached. Useful
 * when you want to iterate over all LUNs of a NAND device.
 */
static inline void nanddev_pos_next_lun(struct nand_device *nand,
					struct nand_pos *pos)
{
	if (pos->lun >= nand->memorg.luns_per_target - 1)
		return nanddev_pos_next_target(nand, pos);

	pos->lun++;
	pos->page = 0;
	pos->plane = 0;
	pos->eraseblock = 0;
}
827
828
829
830
831
832
833
834
835
/**
 * nanddev_pos_next_eraseblock() - Move a position to the next eraseblock
 * @nand: NAND device
 * @pos: the position to update
 *
 * Updates @pos to point to the start of the next eraseblock, rolling over to
 * the next LUN when the last eraseblock of the current LUN is reached. The
 * plane is re-derived from the new eraseblock index. Useful when you want to
 * iterate over all eraseblocks of a NAND device.
 */
static inline void nanddev_pos_next_eraseblock(struct nand_device *nand,
					       struct nand_pos *pos)
{
	if (pos->eraseblock >= nand->memorg.eraseblocks_per_lun - 1)
		return nanddev_pos_next_lun(nand, pos);

	pos->eraseblock++;
	pos->page = 0;
	pos->plane = pos->eraseblock % nand->memorg.planes_per_lun;
}
846
847
848
849
850
851
852
853
854
/**
 * nanddev_pos_next_page() - Move a position to the next page
 * @nand: NAND device
 * @pos: the position to update
 *
 * Updates @pos to point to the next page, rolling over to the next eraseblock
 * when the last page of the current eraseblock is reached. Useful when you
 * want to iterate over all pages of a NAND device.
 */
static inline void nanddev_pos_next_page(struct nand_device *nand,
					 struct nand_pos *pos)
{
	if (pos->page >= nand->memorg.pages_per_eraseblock - 1)
		return nanddev_pos_next_eraseblock(nand, pos);

	pos->page++;
}
863
864
865
866
867
868
869
870
871
872
873
/**
 * nanddev_io_iter_init() - Initialize a NAND I/O iterator
 * @nand: NAND device
 * @reqtype: the request direction (read or write)
 * @offs: absolute offset
 * @req: MTD request
 * @iter: NAND I/O iterator
 *
 * Initializes a NAND iterator based on the information passed by the MTD
 * layer. The first page request is clamped so it does not cross a page
 * boundary for data nor exceed the per-page available OOB bytes.
 */
static inline void nanddev_io_iter_init(struct nand_device *nand,
					enum nand_page_io_req_type reqtype,
					loff_t offs, struct mtd_oob_ops *req,
					struct nand_io_iter *iter)
{
	struct mtd_info *mtd = nanddev_to_mtd(nand);

	iter->req.type = reqtype;
	iter->req.mode = req->mode;
	/* nanddev_offs_to_pos() returns the intra-page offset */
	iter->req.dataoffs = nanddev_offs_to_pos(nand, offs, &iter->req.pos);
	iter->req.ooboffs = req->ooboffs;
	iter->oobbytes_per_page = mtd_oobavail(mtd, req);
	iter->dataleft = req->len;
	iter->oobleft = req->ooblen;
	/* assigning .in also covers writes: both union members alias */
	iter->req.databuf.in = req->datbuf;
	iter->req.datalen = min_t(unsigned int,
				  nand->memorg.pagesize - iter->req.dataoffs,
				  iter->dataleft);
	iter->req.oobbuf.in = req->oobbuf;
	iter->req.ooblen = min_t(unsigned int,
				 iter->oobbytes_per_page - iter->req.ooboffs,
				 iter->oobleft);
}
897
898
899
900
901
902
903
904
/**
 * nanddev_io_iter_next_page() - Move to the next page
 * @nand: NAND device
 * @iter: NAND I/O iterator
 *
 * Updates the @iter to point to the next page: consumes the bytes handled in
 * the previous step, advances the position and buffers, and recomputes the
 * data/OOB lengths for the new page (offsets restart at 0 after the first
 * page).
 */
static inline void nanddev_io_iter_next_page(struct nand_device *nand,
					     struct nand_io_iter *iter)
{
	nanddev_pos_next_page(nand, &iter->req.pos);
	iter->dataleft -= iter->req.datalen;
	iter->req.databuf.in += iter->req.datalen;
	iter->oobleft -= iter->req.ooblen;
	iter->req.oobbuf.in += iter->req.ooblen;
	iter->req.dataoffs = 0;
	iter->req.ooboffs = 0;
	iter->req.datalen = min_t(unsigned int, nand->memorg.pagesize,
				  iter->dataleft);
	iter->req.ooblen = min_t(unsigned int, iter->oobbytes_per_page,
				 iter->oobleft);
}
920
921
922
923
924
925
926
927
928
929
930
931
932static inline bool nanddev_io_iter_end(struct nand_device *nand,
933 const struct nand_io_iter *iter)
934{
935 if (iter->dataleft || iter->oobleft)
936 return false;
937
938 return true;
939}
940
941
942
943
944
945
946
947
948
949
950
/**
 * nanddev_io_for_each_page - Iterate over all NAND pages contained in an MTD
 *			      I/O request
 * @nand: NAND device
 * @type: the request direction (%NAND_PAGE_READ or %NAND_PAGE_WRITE)
 * @start: start address to read/write from
 * @req: MTD I/O request
 * @iter: NAND I/O iterator
 *
 * Should be used for iterating over pages that are contained in an MTD
 * request.
 */
#define nanddev_io_for_each_page(nand, type, start, req, iter)		\
	for (nanddev_io_iter_init(nand, type, start, req, iter);	\
	     !nanddev_io_iter_end(nand, iter);				\
	     nanddev_io_iter_next_page(nand, iter))
955
956bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos);
957bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos);
958int nanddev_erase(struct nand_device *nand, const struct nand_pos *pos);
959int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos);
960
961
962int nanddev_ecc_engine_init(struct nand_device *nand);
963void nanddev_ecc_engine_cleanup(struct nand_device *nand);
964
965
/**
 * enum nand_bbt_block_status - NAND block status stored in the BBT
 * @NAND_BBT_BLOCK_STATUS_UNKNOWN: block status is unknown
 * @NAND_BBT_BLOCK_GOOD: block is good
 * @NAND_BBT_BLOCK_WORN: block is worn out
 * @NAND_BBT_BLOCK_RESERVED: block is reserved
 * @NAND_BBT_BLOCK_FACTORY_BAD: block is factory bad
 * @NAND_BBT_BLOCK_NUM_STATUS: number of defined statuses (keep last)
 */
enum nand_bbt_block_status {
	NAND_BBT_BLOCK_STATUS_UNKNOWN,
	NAND_BBT_BLOCK_GOOD,
	NAND_BBT_BLOCK_WORN,
	NAND_BBT_BLOCK_RESERVED,
	NAND_BBT_BLOCK_FACTORY_BAD,
	NAND_BBT_BLOCK_NUM_STATUS,
};
974
975int nanddev_bbt_init(struct nand_device *nand);
976void nanddev_bbt_cleanup(struct nand_device *nand);
977int nanddev_bbt_update(struct nand_device *nand);
978int nanddev_bbt_get_block_status(const struct nand_device *nand,
979 unsigned int entry);
980int nanddev_bbt_set_block_status(struct nand_device *nand, unsigned int entry,
981 enum nand_bbt_block_status status);
982int nanddev_bbt_markbad(struct nand_device *nand, unsigned int block);
983
984
985
986
987
988
989
990
991
992
993
/**
 * nanddev_bbt_pos_to_entry() - Convert a NAND position into a BBT entry
 * @nand: NAND device
 * @pos: the NAND position to convert
 *
 * Converts a NAND position into a flat BBT entry index: eraseblocks are
 * numbered sequentially across LUNs and targets.
 *
 * Return: the BBT entry storing information about the eraseblock pointed to
 *	   by @pos.
 */
static inline unsigned int nanddev_bbt_pos_to_entry(struct nand_device *nand,
						    const struct nand_pos *pos)
{
	return pos->eraseblock +
	       ((pos->lun + (pos->target * nand->memorg.luns_per_target)) *
		nand->memorg.eraseblocks_per_lun);
}
1001
1002
1003
1004
1005
1006
1007
1008static inline bool nanddev_bbt_is_initialized(struct nand_device *nand)
1009{
1010 return !!nand->bbt.cache;
1011}
1012
1013
1014int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo);
1015int nanddev_mtd_max_bad_blocks(struct mtd_info *mtd, loff_t offs, size_t len);
1016
1017#endif
1018