#ifndef __LINUX_MTD_NAND_H
#define __LINUX_MTD_NAND_H

#include <linux/mtd/mtd.h>
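/**
 * struct nand_memory_organization - Memory organization structure
 * @bits_per_cell: number of bits per NAND cell
 * @pagesize: page size
 * @oobsize: OOB area size
 * @pages_per_eraseblock: number of pages per eraseblock
 * @eraseblocks_per_lun: number of eraseblocks per LUN (Logical Unit Number)
 * @planes_per_lun: number of planes per LUN
 * @luns_per_target: number of LUNs per target (target is a synonym for die)
 * @ntargets: total number of targets exposed by the NAND device
 *
 * As an illustration (not a real part description), a hypothetical SLC die
 * with 2KiB pages, 64 OOB bytes, 64 pages per eraseblock, 1024 eraseblocks,
 * one plane and one LUN could be described with the NAND_MEMORG() helper
 * defined below as NAND_MEMORG(1, 2048, 64, 64, 1024, 1, 1, 1).
 */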
struct nand_memory_organization {
	unsigned int bits_per_cell;
	unsigned int pagesize;
	unsigned int oobsize;
	unsigned int pages_per_eraseblock;
	unsigned int eraseblocks_per_lun;
	unsigned int planes_per_lun;
	unsigned int luns_per_target;
	unsigned int ntargets;
};

#define NAND_MEMORG(bpc, ps, os, ppe, epl, ppl, lpt, nt)	\
	{							\
		.bits_per_cell = (bpc),				\
		.pagesize = (ps),				\
		.oobsize = (os),				\
		.pages_per_eraseblock = (ppe),			\
		.eraseblocks_per_lun = (epl),			\
		.planes_per_lun = (ppl),			\
		.luns_per_target = (lpt),			\
		.ntargets = (nt),				\
	}
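/**
 * struct nand_row_converter - Information to convert an absolute offset into
 *			       a row address
 * @lun_addr_shift: position of the LUN identifier in the row address
 * @eraseblock_addr_shift: position of the eraseblock identifier in the row
 *			   address
 */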
struct nand_row_converter {
	unsigned int lun_addr_shift;
	unsigned int eraseblock_addr_shift;
};
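/**
 * struct nand_pos - NAND position object
 * @target: the NAND target/die
 * @lun: the LUN identifier
 * @plane: the plane within the LUN
 * @eraseblock: the eraseblock within the LUN
 * @page: the page within the LUN
 *
 * This information is usually used by specific sub-layers to select the
 * appropriate target/die and generate a row address to pass to the device.
 */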
struct nand_pos {
	unsigned int target;
	unsigned int lun;
	unsigned int plane;
	unsigned int eraseblock;
	unsigned int page;
};
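/**
 * struct nand_page_io_req - NAND I/O request object
 * @pos: the position this I/O request is targeting
 * @dataoffs: the offset within the page
 * @datalen: number of data bytes to read from/write to this page
 * @databuf: buffer to store data in or get data from
 * @ooboffs: the OOB offset within the page
 * @ooblen: the number of OOB bytes to read from/write to this page
 * @oobbuf: buffer to store OOB data in or get OOB data from
 * @mode: one of the %MTD_OPS_XXX modes
 *
 * This object is used to pass per-page I/O requests to NAND sub-layers: all
 * useful information is already formatted so that specific NAND layers can
 * focus on translating it into device commands/operations.
 */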
struct nand_page_io_req {
	struct nand_pos pos;
	unsigned int dataoffs;
	unsigned int datalen;
	union {
		const void *out;
		void *in;
	} databuf;
	unsigned int ooboffs;
	unsigned int ooblen;
	union {
		const void *out;
		void *in;
	} oobbuf;
	int mode;
};
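/**
 * struct nand_ecc_req - NAND ECC requirements
 * @strength: ECC strength
 * @step_size: ECC step/block size
 */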
struct nand_ecc_req {
	unsigned int strength;
	unsigned int step_size;
};

#define NAND_ECCREQ(str, stp) { .strength = (str), .step_size = (stp) }
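/**
 * struct nand_bbt - bad block table object
 * @cache: in memory BBT cache
 */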
struct nand_bbt {
	unsigned long *cache;
};

struct nand_device;
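/**
 * struct nand_ops - NAND operations
 * @erase: erase a specific block. No need to check if the block is bad before
 *	   erasing, this has been taken care of by the generic NAND layer
 * @markbad: mark a specific block bad. No need to check if the block is
 *	     already marked bad, this has been taken care of by the generic
 *	     NAND layer. This method should just write the BBM (Bad Block
 *	     Marker) so that future calls to ->isbad() return true
 * @isbad: check whether a block is bad
 *
 * These are all low level functions that should be implemented by specialized
 * NAND layers (SPI NAND, raw NAND, ...).
 */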
struct nand_ops {
	int (*erase)(struct nand_device *nand, const struct nand_pos *pos);
	int (*markbad)(struct nand_device *nand, const struct nand_pos *pos);
	bool (*isbad)(struct nand_device *nand, const struct nand_pos *pos);
};
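/**
 * struct nand_device - NAND device
 * @mtd: MTD instance attached to the NAND device
 * @memorg: memory layout
 * @eccreq: ECC requirements
 * @rowconv: position to row address converter
 * @bbt: bad block table info
 * @ops: NAND operations attached to the NAND device
 *
 * Generic NAND object. Specialized NAND layers (raw NAND, SPI NAND, ...)
 * should declare their own NAND object embedding a nand_device struct (that's
 * how inheritance is done). @memorg and @eccreq should be filled at device
 * detection time to reflect the NAND device capabilities/requirements. Once
 * this is done, nanddev_init() can be called: it takes care of converting
 * NAND information into MTD ones, which means specialized NAND layers should
 * never manually tweak @mtd. An embedding layer might look like the following
 * illustrative sketch (struct my_spi_nand is a hypothetical name):
 *
 *	struct my_spi_nand {
 *		struct nand_device base;
 *		// controller/bus specific fields go here
 *	};
 */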
struct nand_device {
	struct mtd_info mtd;
	struct nand_memory_organization memorg;
	struct nand_ecc_req eccreq;
	struct nand_row_converter rowconv;
	struct nand_bbt bbt;
	const struct nand_ops *ops;
};
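/**
 * struct nand_io_iter - NAND I/O iterator
 * @req: current I/O request
 * @oobbytes_per_page: maximum number of OOB bytes per page
 * @dataleft: remaining number of data bytes to read/write
 * @oobleft: remaining number of OOB bytes to read/write
 *
 * Can be used by specialized NAND layers to iterate over all pages covered
 * by an MTD I/O request, which should greatly simplify the boilerplate code
 * needed to read/write data from/to a NAND device.
 */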
struct nand_io_iter {
	struct nand_page_io_req req;
	unsigned int oobbytes_per_page;
	unsigned int dataleft;
	unsigned int oobleft;
};
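/**
 * mtd_to_nanddev() - Get the NAND device attached to the MTD instance
 * @mtd: MTD instance
 *
 * Return: the NAND device embedding @mtd.
 */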
static inline struct nand_device *mtd_to_nanddev(struct mtd_info *mtd)
{
	return container_of(mtd, struct nand_device, mtd);
}
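/**
 * nanddev_to_mtd() - Get the MTD device embedded in a NAND device
 * @nand: NAND device
 *
 * Return: the MTD device embedded in @nand.
 */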
static inline struct mtd_info *nanddev_to_mtd(struct nand_device *nand)
{
	return &nand->mtd;
}
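/**
 * nanddev_bits_per_cell() - Get the number of bits per cell
 * @nand: NAND device
 *
 * Return: the number of bits per cell exposed by the NAND device.
 */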
static inline unsigned int nanddev_bits_per_cell(const struct nand_device *nand)
{
	return nand->memorg.bits_per_cell;
}
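/**
 * nanddev_page_size() - Get NAND page size
 * @nand: NAND device
 *
 * Return: the page size.
 */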
static inline size_t nanddev_page_size(const struct nand_device *nand)
{
	return nand->memorg.pagesize;
}
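/**
 * nanddev_per_page_oobsize() - Get NAND OOB size
 * @nand: NAND device
 *
 * Return: the OOB size per page.
 */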
static inline unsigned int
nanddev_per_page_oobsize(const struct nand_device *nand)
{
	return nand->memorg.oobsize;
}
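/**
 * nanddev_pages_per_eraseblock() - Get the number of pages per eraseblock
 * @nand: NAND device
 *
 * Return: the number of pages per eraseblock.
 */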
static inline unsigned int
nanddev_pages_per_eraseblock(const struct nand_device *nand)
{
	return nand->memorg.pages_per_eraseblock;
}
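/**
 * nanddev_eraseblock_size() - Get the eraseblock size
 * @nand: NAND device
 *
 * Return: the eraseblock size in bytes.
 */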
static inline size_t nanddev_eraseblock_size(const struct nand_device *nand)
{
	return nand->memorg.pagesize * nand->memorg.pages_per_eraseblock;
}
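/**
 * nanddev_eraseblocks_per_lun() - Get the number of eraseblocks per LUN
 * @nand: NAND device
 *
 * Return: the number of eraseblocks per LUN.
 */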
static inline unsigned int
nanddev_eraseblocks_per_lun(const struct nand_device *nand)
{
	return nand->memorg.eraseblocks_per_lun;
}
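/**
 * nanddev_target_size() - Get the total size provided by a single target/die
 * @nand: NAND device
 *
 * Return: the total size exposed by a single target/die in bytes.
 */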
static inline u64 nanddev_target_size(const struct nand_device *nand)
{
	return (u64)nand->memorg.luns_per_target *
	       nand->memorg.eraseblocks_per_lun *
	       nand->memorg.pages_per_eraseblock *
	       nand->memorg.pagesize;
}
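/**
 * nanddev_ntargets() - Get the number of targets/dies
 * @nand: NAND device
 *
 * Return: the number of targets/dies exposed by @nand.
 */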
static inline unsigned int nanddev_ntargets(const struct nand_device *nand)
{
	return nand->memorg.ntargets;
}
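/**
 * nanddev_neraseblocks() - Get the total number of eraseblocks
 * @nand: NAND device
 *
 * Return: the total number of eraseblocks exposed by @nand.
 */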
static inline unsigned int nanddev_neraseblocks(const struct nand_device *nand)
{
	return nand->memorg.ntargets * nand->memorg.luns_per_target *
	       nand->memorg.eraseblocks_per_lun;
}
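/**
 * nanddev_size() - Get NAND size
 * @nand: NAND device
 *
 * Return: the total size (in bytes) exposed by @nand.
 */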
static inline u64 nanddev_size(const struct nand_device *nand)
{
	return nanddev_target_size(nand) * nanddev_ntargets(nand);
}
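/**
 * nanddev_get_memorg() - Extract memory organization info from a NAND device
 * @nand: NAND device
 *
 * This can be used by the upper layer to fill the memorg info before calling
 * nanddev_init().
 *
 * Return: the memorg object embedded in the NAND device.
 */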
static inline struct nand_memory_organization *
nanddev_get_memorg(struct nand_device *nand)
{
	return &nand->memorg;
}

int nanddev_init(struct nand_device *nand, const struct nand_ops *ops,
		 struct module *owner);
void nanddev_cleanup(struct nand_device *nand);
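/**
 * nanddev_register() - Register a NAND device
 * @nand: NAND device
 *
 * This function is just a wrapper around mtd_device_register() that registers
 * the MTD device embedded in a NAND object.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */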
static inline int nanddev_register(struct nand_device *nand)
{
	return mtd_device_register(&nand->mtd, NULL, 0);
}
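/**
 * nanddev_unregister() - Unregister a NAND device
 * @nand: NAND device
 *
 * This function is just a wrapper around mtd_device_unregister() that
 * unregisters the MTD device embedded in a NAND object.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */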
static inline int nanddev_unregister(struct nand_device *nand)
{
	return mtd_device_unregister(&nand->mtd);
}
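/**
 * nanddev_set_of_node() - Attach a DT node to a NAND device
 * @nand: NAND device
 * @np: DT node
 *
 * Attach a DT node to a NAND device.
 */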
static inline void nanddev_set_of_node(struct nand_device *nand,
				       struct device_node *np)
{
	mtd_set_of_node(&nand->mtd, np);
}
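/**
 * nanddev_get_of_node() - Retrieve the DT node attached to a NAND device
 * @nand: NAND device
 *
 * Return: the DT node attached to @nand.
 */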
static inline struct device_node *nanddev_get_of_node(struct nand_device *nand)
{
	return mtd_get_of_node(&nand->mtd);
}
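/**
 * nanddev_offs_to_pos() - Convert an absolute NAND offset into a NAND position
 * @nand: NAND device
 * @offs: absolute NAND offset (usually passed by the MTD layer)
 * @pos: a NAND position object to fill in
 *
 * Converts @offs into a nand_pos representation.
 *
 * Return: the offset within the NAND page pointed to by @pos.
 */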
static inline unsigned int nanddev_offs_to_pos(struct nand_device *nand,
					       loff_t offs,
					       struct nand_pos *pos)
{
	unsigned int pageoffs;
	u64 tmp = offs;

	pageoffs = do_div(tmp, nand->memorg.pagesize);
	pos->page = do_div(tmp, nand->memorg.pages_per_eraseblock);
	pos->eraseblock = do_div(tmp, nand->memorg.eraseblocks_per_lun);
	pos->plane = pos->eraseblock % nand->memorg.planes_per_lun;
	pos->lun = do_div(tmp, nand->memorg.luns_per_target);
	pos->target = tmp;

	return pageoffs;
}
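/**
 * nanddev_pos_cmp() - Compare two NAND positions
 * @a: First NAND position
 * @b: Second NAND position
 *
 * Compares two NAND positions.
 *
 * Return: -1 if @a < @b, 0 if @a == @b and 1 if @a > @b.
 */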
static inline int nanddev_pos_cmp(const struct nand_pos *a,
				  const struct nand_pos *b)
{
	if (a->target != b->target)
		return a->target < b->target ? -1 : 1;

	if (a->lun != b->lun)
		return a->lun < b->lun ? -1 : 1;

	if (a->eraseblock != b->eraseblock)
		return a->eraseblock < b->eraseblock ? -1 : 1;

	if (a->page != b->page)
		return a->page < b->page ? -1 : 1;

	return 0;
}
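/**
 * nanddev_pos_to_offs() - Convert a NAND position into an absolute offset
 * @nand: NAND device
 * @pos: the NAND position to convert
 *
 * Converts @pos NAND position into an absolute offset.
 *
 * Return: the absolute offset. Note that @pos points to the beginning of a
 *	   page; if one wants to point to a specific offset within this page,
 *	   the returned offset has to be adjusted manually.
 */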
static inline loff_t nanddev_pos_to_offs(struct nand_device *nand,
					 const struct nand_pos *pos)
{
	unsigned int npages;

	npages = pos->page +
		 ((pos->eraseblock +
		   (pos->lun +
		    (pos->target * nand->memorg.luns_per_target)) *
		   nand->memorg.eraseblocks_per_lun) *
		  nand->memorg.pages_per_eraseblock);

	return (loff_t)npages * nand->memorg.pagesize;
}
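/**
 * nanddev_pos_to_row() - Extract a row address from a NAND position
 * @nand: NAND device
 * @pos: the position to convert
 *
 * Converts a NAND position into a row address that can then be passed to the
 * device.
 *
 * Return: the row address extracted from @pos.
 */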
static inline unsigned int nanddev_pos_to_row(struct nand_device *nand,
					      const struct nand_pos *pos)
{
	return (pos->lun << nand->rowconv.lun_addr_shift) |
	       (pos->eraseblock << nand->rowconv.eraseblock_addr_shift) |
	       pos->page;
}
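/**
 * nanddev_pos_next_target() - Move a position to the next target/die
 * @nand: NAND device
 * @pos: the position to update
 *
 * Updates @pos to point to the start of the next target/die. Useful when you
 * want to iterate over all targets/dies of a NAND device.
 */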
static inline void nanddev_pos_next_target(struct nand_device *nand,
					   struct nand_pos *pos)
{
	pos->page = 0;
	pos->plane = 0;
	pos->eraseblock = 0;
	pos->lun = 0;
	pos->target++;
}
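/**
 * nanddev_pos_next_lun() - Move a position to the next LUN
 * @nand: NAND device
 * @pos: the position to update
 *
 * Updates @pos to point to the start of the next LUN. Useful when you want to
 * iterate over all LUNs of a NAND device.
 */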
static inline void nanddev_pos_next_lun(struct nand_device *nand,
					struct nand_pos *pos)
{
	if (pos->lun >= nand->memorg.luns_per_target - 1)
		return nanddev_pos_next_target(nand, pos);

	pos->lun++;
	pos->page = 0;
	pos->plane = 0;
	pos->eraseblock = 0;
}
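/**
 * nanddev_pos_next_eraseblock() - Move a position to the next eraseblock
 * @nand: NAND device
 * @pos: the position to update
 *
 * Updates @pos to point to the start of the next eraseblock. Useful when you
 * want to iterate over all eraseblocks of a NAND device.
 */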
static inline void nanddev_pos_next_eraseblock(struct nand_device *nand,
					       struct nand_pos *pos)
{
	if (pos->eraseblock >= nand->memorg.eraseblocks_per_lun - 1)
		return nanddev_pos_next_lun(nand, pos);

	pos->eraseblock++;
	pos->page = 0;
	pos->plane = pos->eraseblock % nand->memorg.planes_per_lun;
}
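/**
 * nanddev_pos_next_page() - Move a position to the next page
 * @nand: NAND device
 * @pos: the position to update
 *
 * Updates @pos to point to the next page. Useful when you want to iterate
 * over all pages of a NAND device.
 */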
static inline void nanddev_pos_next_page(struct nand_device *nand,
					 struct nand_pos *pos)
{
	if (pos->page >= nand->memorg.pages_per_eraseblock - 1)
		return nanddev_pos_next_eraseblock(nand, pos);

	pos->page++;
}
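/**
 * nanddev_io_iter_init() - Initialize a NAND I/O iterator
 * @nand: NAND device
 * @offs: absolute offset
 * @req: MTD request
 * @iter: NAND I/O iterator
 *
 * Initializes a NAND iterator based on the information passed by the MTD
 * layer.
 */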
static inline void nanddev_io_iter_init(struct nand_device *nand,
					loff_t offs, struct mtd_oob_ops *req,
					struct nand_io_iter *iter)
{
	struct mtd_info *mtd = nanddev_to_mtd(nand);

	iter->req.mode = req->mode;
	iter->req.dataoffs = nanddev_offs_to_pos(nand, offs, &iter->req.pos);
	iter->req.ooboffs = req->ooboffs;
	iter->oobbytes_per_page = mtd_oobavail(mtd, req);
	iter->dataleft = req->len;
	iter->oobleft = req->ooblen;
	iter->req.databuf.in = req->datbuf;
	iter->req.datalen = min_t(unsigned int,
				  nand->memorg.pagesize - iter->req.dataoffs,
				  iter->dataleft);
	iter->req.oobbuf.in = req->oobbuf;
	iter->req.ooblen = min_t(unsigned int,
				 iter->oobbytes_per_page - iter->req.ooboffs,
				 iter->oobleft);
}
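/**
 * nanddev_io_iter_next_page() - Move to the next page
 * @nand: NAND device
 * @iter: NAND I/O iterator
 *
 * Updates @iter to point to the next page.
 */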
static inline void nanddev_io_iter_next_page(struct nand_device *nand,
					     struct nand_io_iter *iter)
{
	nanddev_pos_next_page(nand, &iter->req.pos);
	iter->dataleft -= iter->req.datalen;
	iter->req.databuf.in += iter->req.datalen;
	iter->oobleft -= iter->req.ooblen;
	iter->req.oobbuf.in += iter->req.ooblen;
	iter->req.dataoffs = 0;
	iter->req.ooboffs = 0;
	iter->req.datalen = min_t(unsigned int, nand->memorg.pagesize,
				  iter->dataleft);
	iter->req.ooblen = min_t(unsigned int, iter->oobbytes_per_page,
				 iter->oobleft);
}
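/**
 * nanddev_io_iter_end() - Check whether the iteration should end
 * @nand: NAND device
 * @iter: NAND I/O iterator
 *
 * Check whether @iter has reached the end of the NAND portion it was asked to
 * iterate on or not.
 *
 * Return: true if @iter has reached the end of the iteration request, false
 *	   otherwise.
 */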
static inline bool nanddev_io_iter_end(struct nand_device *nand,
				       const struct nand_io_iter *iter)
{
	if (iter->dataleft || iter->oobleft)
		return false;

	return true;
}
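/**
 * nanddev_io_for_each_page() - Iterate over all NAND pages contained in an
 *				MTD I/O request
 * @nand: NAND device
 * @start: start address to read/write from
 * @req: MTD I/O request
 * @iter: NAND I/O iterator
 *
 * Should be used for iterating over pages that are contained in an MTD
 * request.
 *
 * A typical read path built on this iterator might look like the following
 * sketch, where read_page() stands for a hypothetical driver-specific
 * per-page helper:
 *
 *	struct nand_io_iter iter;
 *	int ret = 0;
 *
 *	nanddev_io_for_each_page(nand, from, ops, &iter) {
 *		ret = read_page(nand, &iter.req);
 *		if (ret)
 *			break;
 *	}
 */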
#define nanddev_io_for_each_page(nand, start, req, iter)	\
	for (nanddev_io_iter_init(nand, start, req, iter);	\
	     !nanddev_io_iter_end(nand, iter);			\
	     nanddev_io_iter_next_page(nand, iter))

bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos);
bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos);
int nanddev_erase(struct nand_device *nand, const struct nand_pos *pos);
int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos);
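/* BBT related functions */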
enum nand_bbt_block_status {
	NAND_BBT_BLOCK_STATUS_UNKNOWN,
	NAND_BBT_BLOCK_GOOD,
	NAND_BBT_BLOCK_WORN,
	NAND_BBT_BLOCK_RESERVED,
	NAND_BBT_BLOCK_FACTORY_BAD,
	NAND_BBT_BLOCK_NUM_STATUS,
};

int nanddev_bbt_init(struct nand_device *nand);
void nanddev_bbt_cleanup(struct nand_device *nand);
int nanddev_bbt_update(struct nand_device *nand);
int nanddev_bbt_get_block_status(const struct nand_device *nand,
				 unsigned int entry);
int nanddev_bbt_set_block_status(struct nand_device *nand, unsigned int entry,
				 enum nand_bbt_block_status status);
int nanddev_bbt_markbad(struct nand_device *nand, unsigned int block);
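/**
 * nanddev_bbt_pos_to_entry() - Convert a NAND position into a BBT entry
 * @nand: NAND device
 * @pos: the NAND position we want to get the BBT entry for
 *
 * Return: the BBT entry storing information about the eraseblock pointed to
 *	   by @pos.
 */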
static inline unsigned int nanddev_bbt_pos_to_entry(struct nand_device *nand,
						    const struct nand_pos *pos)
{
	return pos->eraseblock +
	       ((pos->lun + (pos->target * nand->memorg.luns_per_target)) *
		nand->memorg.eraseblocks_per_lun);
}
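/**
 * nanddev_bbt_is_initialized() - Check if the BBT has been initialized
 * @nand: NAND device
 *
 * Return: true if the BBT has been initialized, false otherwise.
 */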
static inline bool nanddev_bbt_is_initialized(struct nand_device *nand)
{
	return !!nand->bbt.cache;
}
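/* MTD -> NAND helper functions. */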
int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo);

#endif /* __LINUX_MTD_NAND_H */