/*
 * Core MTD registration and access routines.
 */

#ifndef __UBOOT__
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/major.h>
#include <linux/fs.h>
#include <linux/err.h>
#include <linux/ioctl.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/idr.h>
#include <linux/backing-dev.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#else
#include <linux/err.h>
#include <ubi_uboot.h>
#endif

#include <linux/log2.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

#include "mtdcore.h"

#ifndef __UBOOT__
/* Backing device capabilities for non-mappable devices (e.g. NAND flash). */
static struct backing_dev_info mtd_bdi_unmappable = {
	.capabilities = BDI_CAP_MAP_COPY,
};

/* Backing device capabilities for read-only mappable devices. */
static struct backing_dev_info mtd_bdi_ro_mappable = {
	.capabilities = (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT |
			 BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP),
};

/* Backing device capabilities for writable mappable devices. */
static struct backing_dev_info mtd_bdi_rw_mappable = {
	.capabilities = (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT |
			 BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP |
			 BDI_CAP_WRITE_MAP),
};

static int mtd_cls_suspend(struct device *dev, pm_message_t state);
static int mtd_cls_resume(struct device *dev);

static struct class mtd_class = {
	.name = "mtd",
	.owner = THIS_MODULE,
	.suspend = mtd_cls_suspend,
	.resume = mtd_cls_resume,
};
#else
struct mtd_info *mtd_table[MAX_MTD_DEVICES];

#define MAX_IDR_ID	64

struct idr_layer {
	int	used;
	void	*ptr;
};

struct idr {
	struct idr_layer id[MAX_IDR_ID];
};

#define DEFINE_IDR(name)	struct idr name;

void idr_remove(struct idr *idp, int id)
{
	if (idp->id[id].used)
		idp->id[id].used = 0;

	return;
}

void *idr_find(struct idr *idp, int id)
{
	if (idp->id[id].used)
		return idp->id[id].ptr;

	return NULL;
}

void *idr_get_next(struct idr *idp, int *next)
{
	void *ret;
	int id = *next;

	ret = idr_find(idp, id);
	if (ret) {
		id++;
		/* Don't read past the end of the fixed-size id table */
		if (id >= MAX_IDR_ID || !idp->id[id].used)
			id = 0;
		*next = id;
	} else {
		*next = 0;
	}

	return ret;
}

int idr_alloc(struct idr *idp, void *ptr, int start, int end, gfp_t gfp_mask)
{
	struct idr_layer *idl;
	int i = 0;

	/*
	 * Simplified allocator for U-Boot: the start, end and gfp_mask
	 * arguments are ignored and the first free slot in the fixed-size
	 * table is handed out.
	 */
	while (i < MAX_IDR_ID) {
		idl = &idp->id[i];
		if (idl->used == 0) {
			idl->used = 1;
			idl->ptr = ptr;
			return i;
		}
		i++;
	}
	return -ENOSPC;
}
#endif

static DEFINE_IDR(mtd_idr);

/* Serializes access to the MTD device table (mtd_idr). */
DEFINE_MUTEX(mtd_table_mutex);
EXPORT_SYMBOL_GPL(mtd_table_mutex);

struct mtd_info *__mtd_next_device(int i)
{
	return idr_get_next(&mtd_idr, &i);
}
EXPORT_SYMBOL_GPL(__mtd_next_device);

#ifndef __UBOOT__
static LIST_HEAD(mtd_notifiers);

#define MTD_DEVT(index) MKDEV(MTD_CHAR_MAJOR, (index)*2)

static void mtd_release(struct device *dev)
{
	struct mtd_info __maybe_unused *mtd = dev_get_drvdata(dev);
	dev_t index = MTD_DEVT(mtd->index);

	/* remove /dev/mtdXro node if needed */
	if (index)
		device_destroy(&mtd_class, index + 1);
}

static int mtd_cls_suspend(struct device *dev, pm_message_t state)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return mtd ? mtd_suspend(mtd) : 0;
}

static int mtd_cls_resume(struct device *dev)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	if (mtd)
		mtd_resume(mtd);
	return 0;
}

static ssize_t mtd_type_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	char *type;

	switch (mtd->type) {
	case MTD_ABSENT:
		type = "absent";
		break;
	case MTD_RAM:
		type = "ram";
		break;
	case MTD_ROM:
		type = "rom";
		break;
	case MTD_NORFLASH:
		type = "nor";
		break;
	case MTD_NANDFLASH:
		type = "nand";
		break;
	case MTD_DATAFLASH:
		type = "dataflash";
		break;
	case MTD_UBIVOLUME:
		type = "ubi";
		break;
	case MTD_MLCNANDFLASH:
		type = "mlc-nand";
		break;
	default:
		type = "unknown";
	}

	return snprintf(buf, PAGE_SIZE, "%s\n", type);
}
static DEVICE_ATTR(type, S_IRUGO, mtd_type_show, NULL);

static ssize_t mtd_flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "0x%lx\n", (unsigned long)mtd->flags);
}
static DEVICE_ATTR(flags, S_IRUGO, mtd_flags_show, NULL);

static ssize_t mtd_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%llu\n",
			(unsigned long long)mtd->size);
}
static DEVICE_ATTR(size, S_IRUGO, mtd_size_show, NULL);

static ssize_t mtd_erasesize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->erasesize);
}
static DEVICE_ATTR(erasesize, S_IRUGO, mtd_erasesize_show, NULL);

static ssize_t mtd_writesize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->writesize);
}
static DEVICE_ATTR(writesize, S_IRUGO, mtd_writesize_show, NULL);

static ssize_t mtd_subpagesize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft;

	return snprintf(buf, PAGE_SIZE, "%u\n", subpagesize);
}
static DEVICE_ATTR(subpagesize, S_IRUGO, mtd_subpagesize_show, NULL);

static ssize_t mtd_oobsize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->oobsize);
}
static DEVICE_ATTR(oobsize, S_IRUGO, mtd_oobsize_show, NULL);

static ssize_t mtd_numeraseregions_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n", mtd->numeraseregions);
}
static DEVICE_ATTR(numeraseregions, S_IRUGO, mtd_numeraseregions_show,
	NULL);

static ssize_t mtd_name_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%s\n", mtd->name);
}
static DEVICE_ATTR(name, S_IRUGO, mtd_name_show, NULL);

static ssize_t mtd_ecc_strength_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_strength);
}
static DEVICE_ATTR(ecc_strength, S_IRUGO, mtd_ecc_strength_show, NULL);

static ssize_t mtd_bitflip_threshold_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n", mtd->bitflip_threshold);
}

static ssize_t mtd_bitflip_threshold_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	unsigned int bitflip_threshold;
	int retval;

	retval = kstrtouint(buf, 0, &bitflip_threshold);
	if (retval)
		return retval;

	mtd->bitflip_threshold = bitflip_threshold;
	return count;
}
static DEVICE_ATTR(bitflip_threshold, S_IRUGO | S_IWUSR,
		   mtd_bitflip_threshold_show,
		   mtd_bitflip_threshold_store);

static ssize_t mtd_ecc_step_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_step_size);
}
static DEVICE_ATTR(ecc_step_size, S_IRUGO, mtd_ecc_step_size_show, NULL);

static struct attribute *mtd_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_flags.attr,
	&dev_attr_size.attr,
	&dev_attr_erasesize.attr,
	&dev_attr_writesize.attr,
	&dev_attr_subpagesize.attr,
	&dev_attr_oobsize.attr,
	&dev_attr_numeraseregions.attr,
	&dev_attr_name.attr,
	&dev_attr_ecc_strength.attr,
	&dev_attr_ecc_step_size.attr,
	&dev_attr_bitflip_threshold.attr,
	NULL,
};
ATTRIBUTE_GROUPS(mtd);

static struct device_type mtd_devtype = {
	.name		= "mtd",
	.groups		= mtd_groups,
	.release	= mtd_release,
};
#endif

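/**
 *	add_mtd_device - register an MTD device
 *	@mtd: pointer to new MTD device info structure
 *
 *	Add a device to the list of MTD devices present in the system, and
 *	notify each currently active MTD 'user' of its arrival. Returns
 *	zero on success or 1 on failure.
 */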
int add_mtd_device(struct mtd_info *mtd)
{
#ifndef __UBOOT__
	struct mtd_notifier *not;
#endif
	int i, error;

#ifndef __UBOOT__
	if (!mtd->backing_dev_info) {
		switch (mtd->type) {
		case MTD_RAM:
			mtd->backing_dev_info = &mtd_bdi_rw_mappable;
			break;
		case MTD_ROM:
			mtd->backing_dev_info = &mtd_bdi_ro_mappable;
			break;
		default:
			mtd->backing_dev_info = &mtd_bdi_unmappable;
			break;
		}
	}
#endif

	BUG_ON(mtd->writesize == 0);
	mutex_lock(&mtd_table_mutex);

	i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
	if (i < 0)
		goto fail_locked;

	mtd->index = i;
	mtd->usecount = 0;

	/* default value if not set by driver */
	if (mtd->bitflip_threshold == 0)
		mtd->bitflip_threshold = mtd->ecc_strength;

	if (is_power_of_2(mtd->erasesize))
		mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
	else
		mtd->erasesize_shift = 0;

	if (is_power_of_2(mtd->writesize))
		mtd->writesize_shift = ffs(mtd->writesize) - 1;
	else
		mtd->writesize_shift = 0;

	mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
	mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;

	/* Some chips always power up locked. Unlock them now */
	if ((mtd->flags & MTD_WRITEABLE) && (mtd->flags & MTD_POWERUP_LOCK)) {
		error = mtd_unlock(mtd, 0, mtd->size);
		if (error && error != -EOPNOTSUPP)
			printk(KERN_WARNING
			       "%s: unlock failed, writes may not work\n",
			       mtd->name);
	}

#ifndef __UBOOT__
	/*
	 * Caller should have set dev.parent to match the
	 * physical device, if appropriate.
	 */
	mtd->dev.type = &mtd_devtype;
	mtd->dev.class = &mtd_class;
	mtd->dev.devt = MTD_DEVT(i);
	dev_set_name(&mtd->dev, "mtd%d", i);
	dev_set_drvdata(&mtd->dev, mtd);
	if (device_register(&mtd->dev) != 0)
		goto fail_added;

	if (MTD_DEVT(i))
		device_create(&mtd_class, mtd->dev.parent,
			      MTD_DEVT(i) + 1,
			      NULL, "mtd%dro", i);

	pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name);
	/* No need to get a refcount on the module containing
	   the notifier, since we hold the mtd_table_mutex */
	list_for_each_entry(not, &mtd_notifiers, list)
		not->add(mtd);
#else
	pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name);
#endif

	mutex_unlock(&mtd_table_mutex);

	__module_get(THIS_MODULE);
	return 0;

#ifndef __UBOOT__
fail_added:
	idr_remove(&mtd_idr, i);
#endif
fail_locked:
	mutex_unlock(&mtd_table_mutex);
	return 1;
}
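/**
 *	del_mtd_device - unregister an MTD device
 *	@mtd: pointer to MTD device info structure
 *
 *	Remove a device from the list of MTD devices present in the system,
 *	and notify each currently active MTD 'user' of its departure.
 *	Returns zero on success, -ENODEV if the device could not be found,
 *	or -EBUSY if it is still in use.
 */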
int del_mtd_device(struct mtd_info *mtd)
{
	int ret;
#ifndef __UBOOT__
	struct mtd_notifier *not;
#endif

	mutex_lock(&mtd_table_mutex);

	if (idr_find(&mtd_idr, mtd->index) != mtd) {
		ret = -ENODEV;
		goto out_error;
	}

#ifndef __UBOOT__
	/* No need to get a refcount on the module containing
	   the notifier, since we hold the mtd_table_mutex */
	list_for_each_entry(not, &mtd_notifiers, list)
		not->remove(mtd);
#endif

	if (mtd->usecount) {
		printk(KERN_NOTICE "Removing MTD device #%d (%s) with use count %d\n",
		       mtd->index, mtd->name, mtd->usecount);
		ret = -EBUSY;
	} else {
#ifndef __UBOOT__
		device_unregister(&mtd->dev);
#endif

		idr_remove(&mtd_idr, mtd->index);

		module_put(THIS_MODULE);
		ret = 0;
	}

out_error:
	mutex_unlock(&mtd_table_mutex);
	return ret;
}

#ifndef __UBOOT__
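/**
 * mtd_device_parse_register - parse partitions and register an MTD device.
 *
 * @mtd: the MTD device to register
 * @types: the list of MTD partition probes to try, see
 *         'parse_mtd_partitions()' for more information
 * @parser_data: MTD partition parser-specific data
 * @parts: fallback partition information to register, if parsing fails;
 *         only valid if %nr_parts > %zero
 * @nr_parts: the number of partitions in parts, if zero then the full
 *            MTD device is registered if no partition info is found
 *
 * This function aggregates MTD partition parsing (done by
 * 'parse_mtd_partitions()') with MTD device and partition registration. If
 * no partition information is found, the MTD device @mtd is registered as a
 * whole.
 *
 * Returns zero in case of success and a negative error code in case of
 * failure.
 */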
int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
			      struct mtd_part_parser_data *parser_data,
			      const struct mtd_partition *parts,
			      int nr_parts)
{
	int err;
	struct mtd_partition *real_parts;

	err = parse_mtd_partitions(mtd, types, &real_parts, parser_data);
	if (err <= 0 && nr_parts && parts) {
		real_parts = kmemdup(parts, sizeof(*parts) * nr_parts,
				     GFP_KERNEL);
		if (!real_parts)
			err = -ENOMEM;
		else
			err = nr_parts;
	}

	if (err > 0) {
		err = add_mtd_partitions(mtd, real_parts, err);
		kfree(real_parts);
	} else if (err == 0) {
		err = add_mtd_device(mtd);
		if (err == 1)
			err = -ENODEV;
	}

	return err;
}
EXPORT_SYMBOL_GPL(mtd_device_parse_register);
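
/*
 * Typical driver usage (illustrative sketch only; the parser name and
 * variables below are examples, not requirements of this API):
 *
 *	static const char * const part_probes[] = { "cmdlinepart", NULL };
 *
 *	ret = mtd_device_parse_register(mtd, part_probes, NULL, NULL, 0);
 *	if (ret)
 *		pr_err("failed to register MTD device: %d\n", ret);
 */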
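/**
 * mtd_device_unregister - unregister an existing MTD device.
 *
 * @master: the MTD device to unregister.  This will unregister both the
 *          master and any partitions if registered.
 */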
int mtd_device_unregister(struct mtd_info *master)
{
	int err;

	err = del_mtd_partitions(master);
	if (err)
		return err;

	if (!device_is_registered(&master->dev))
		return 0;

	return del_mtd_device(master);
}
EXPORT_SYMBOL_GPL(mtd_device_unregister);

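/**
 *	register_mtd_user - register a 'user' of MTD devices.
 *	@new: pointer to notifier info structure
 *
 *	Registers a pair of callbacks to be called upon addition or removal
 *	of MTD devices. Causes the 'add' callback to be immediately invoked
 *	for each MTD device currently present in the system.
 */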
void register_mtd_user(struct mtd_notifier *new)
{
	struct mtd_info *mtd;

	mutex_lock(&mtd_table_mutex);

	list_add(&new->list, &mtd_notifiers);

	__module_get(THIS_MODULE);

	mtd_for_each_device(mtd)
		new->add(mtd);

	mutex_unlock(&mtd_table_mutex);
}
EXPORT_SYMBOL_GPL(register_mtd_user);

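/**
 *	unregister_mtd_user - unregister a 'user' of MTD devices.
 *	@old: pointer to notifier info structure
 *
 *	Removes a callback pair from the list of 'users' to be notified upon
 *	addition or removal of MTD devices. Causes the 'remove' callback to
 *	be immediately invoked for each MTD device currently present in the
 *	system.
 */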
int unregister_mtd_user(struct mtd_notifier *old)
{
	struct mtd_info *mtd;

	mutex_lock(&mtd_table_mutex);

	module_put(THIS_MODULE);

	mtd_for_each_device(mtd)
		old->remove(mtd);

	list_del(&old->list);
	mutex_unlock(&mtd_table_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(unregister_mtd_user);
#endif

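/**
 *	get_mtd_device - obtain a validated handle for an MTD device
 *	@mtd: last known address of the required MTD device
 *	@num: internal device number of the required MTD device
 *
 *	Given a number and NULL address, return the num'th entry in the
 *	device table, if any. Given an address and num == -1, search the
 *	device table for a device with that address and return it if it's
 *	still present. Given both, return the num'th entry only if its
 *	address matches. Return an ERR_PTR() otherwise.
 */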
struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
{
	struct mtd_info *ret = NULL, *other;
	int err = -ENODEV;

	mutex_lock(&mtd_table_mutex);

	if (num == -1) {
		mtd_for_each_device(other) {
			if (other == mtd) {
				ret = mtd;
				break;
			}
		}
	} else if (num >= 0) {
		ret = idr_find(&mtd_idr, num);
		if (mtd && mtd != ret)
			ret = NULL;
	}

	if (!ret) {
		ret = ERR_PTR(err);
		goto out;
	}

	err = __get_mtd_device(ret);
	if (err)
		ret = ERR_PTR(err);
out:
	mutex_unlock(&mtd_table_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(get_mtd_device);

int __get_mtd_device(struct mtd_info *mtd)
{
	int err;

	if (!try_module_get(mtd->owner))
		return -ENODEV;

	if (mtd->_get_device) {
		err = mtd->_get_device(mtd);

		if (err) {
			module_put(mtd->owner);
			return err;
		}
	}
	mtd->usecount++;
	return 0;
}
EXPORT_SYMBOL_GPL(__get_mtd_device);
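/**
 *	get_mtd_device_nm - obtain a validated handle for an MTD device by
 *	device name
 *	@name: MTD device name to open
 *
 *	This function returns an MTD device description structure in case of
 *	success and an error code in case of failure.
 */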
struct mtd_info *get_mtd_device_nm(const char *name)
{
	int err = -ENODEV;
	struct mtd_info *mtd = NULL, *other;

	mutex_lock(&mtd_table_mutex);

	mtd_for_each_device(other) {
		if (!strcmp(name, other->name)) {
			mtd = other;
			break;
		}
	}

	if (!mtd)
		goto out_unlock;

	err = __get_mtd_device(mtd);
	if (err)
		goto out_unlock;

	mutex_unlock(&mtd_table_mutex);
	return mtd;

out_unlock:
	mutex_unlock(&mtd_table_mutex);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(get_mtd_device_nm);

#if defined(CONFIG_CMD_MTDPARTS_SPREAD)
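/**
 * mtd_get_len_incl_bad
 *
 * Check if the length, including bad blocks, fits into the device.
 *
 * @param mtd an MTD device
 * @param offset offset in flash
 * @param length image length
 * @return image length including bad blocks in *len_incl_bad and whether
 *	   or not the length returned was truncated in *truncated
 */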
void mtd_get_len_incl_bad(struct mtd_info *mtd, uint64_t offset,
			  const uint64_t length, uint64_t *len_incl_bad,
			  int *truncated)
{
	*truncated = 0;
	*len_incl_bad = 0;

	if (!mtd->_block_isbad) {
		*len_incl_bad = length;
		return;
	}

	uint64_t len_excl_bad = 0;
	uint64_t block_len;

	while (len_excl_bad < length) {
		if (offset >= mtd->size) {
			*truncated = 1;
			return;
		}

		block_len = mtd->erasesize - (offset & (mtd->erasesize - 1));

		if (!mtd->_block_isbad(mtd, offset & ~(mtd->erasesize - 1)))
			len_excl_bad += block_len;

		*len_incl_bad += block_len;
		offset += block_len;
	}
}
#endif

void put_mtd_device(struct mtd_info *mtd)
{
	mutex_lock(&mtd_table_mutex);
	__put_mtd_device(mtd);
	mutex_unlock(&mtd_table_mutex);
}
EXPORT_SYMBOL_GPL(put_mtd_device);

void __put_mtd_device(struct mtd_info *mtd)
{
	--mtd->usecount;
	BUG_ON(mtd->usecount < 0);

	if (mtd->_put_device)
		mtd->_put_device(mtd);

	module_put(mtd->owner);
}
EXPORT_SYMBOL_GPL(__put_mtd_device);

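/*
 * Erase is an asynchronous operation.  Device drivers are supposed
 * to call instr->callback() whenever the operation completes, even
 * if it completes with a failure.
 * Callers are supposed to pass a callback function and wait for it
 * to be called before writing to the block.
 */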
int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	if (instr->addr > mtd->size || instr->len > mtd->size - instr->addr)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
	if (!instr->len) {
		instr->state = MTD_ERASE_DONE;
		mtd_erase_callback(instr);
		return 0;
	}
	return mtd->_erase(mtd, instr);
}
EXPORT_SYMBOL_GPL(mtd_erase);

#ifndef __UBOOT__
/*
 * This stuff for eXecute-In-Place. phys is optional and may be set to NULL.
 */
int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
	      void **virt, resource_size_t *phys)
{
	*retlen = 0;
	*virt = NULL;
	if (phys)
		*phys = 0;
	if (!mtd->_point)
		return -EOPNOTSUPP;
	if (from < 0 || from > mtd->size || len > mtd->size - from)
		return -EINVAL;
	if (!len)
		return 0;
	return mtd->_point(mtd, from, len, retlen, virt, phys);
}
EXPORT_SYMBOL_GPL(mtd_point);

int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
	if (!mtd->_point)
		return -EOPNOTSUPP;
	if (from < 0 || from > mtd->size || len > mtd->size - from)
		return -EINVAL;
	if (!len)
		return 0;
	return mtd->_unpoint(mtd, from, len);
}
EXPORT_SYMBOL_GPL(mtd_unpoint);
#endif

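/*
 * Allow NOMMU mmap() to directly map the device, when supported by the
 * driver.  Returns the address to which @offset maps, or a negative error
 * code if the mapping is refused.
 */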
unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
				    unsigned long offset, unsigned long flags)
{
	if (!mtd->_get_unmapped_area)
		return -EOPNOTSUPP;
	if (offset > mtd->size || len > mtd->size - offset)
		return -EINVAL;
	return mtd->_get_unmapped_area(mtd, len, offset, flags);
}
EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);

int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
	     u_char *buf)
{
	int ret_code;
	*retlen = 0;
	if (from < 0 || from > mtd->size || len > mtd->size - from)
		return -EINVAL;
	if (!len)
		return 0;

	/*
	 * In the absence of an error, drivers return a non-negative integer
	 * representing the maximum number of bitflips that were corrected on
	 * any one ecc region (if applicable; zero otherwise).
	 */
	ret_code = mtd->_read(mtd, from, len, retlen, buf);
	if (unlikely(ret_code < 0))
		return ret_code;
	if (mtd->ecc_strength == 0)
		return 0;	/* device lacks ecc */
	return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
}
EXPORT_SYMBOL_GPL(mtd_read);

int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
	      const u_char *buf)
{
	*retlen = 0;
	if (to < 0 || to > mtd->size || len > mtd->size - to)
		return -EINVAL;
	if (!mtd->_write || !(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (!len)
		return 0;
	return mtd->_write(mtd, to, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_write);

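/*
 * In blackbox flight recorder like scenarios we want to make successful
 * writes in interrupt context. panic_write() is only intended to be called
 * when it is known the kernel is about to panic and we need the write to
 * succeed. Since the kernel is not going to be running for much longer,
 * this function can break locks and delay to ensure the write succeeds
 * (but not sleep).
 */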
int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
		    const u_char *buf)
{
	*retlen = 0;
	if (!mtd->_panic_write)
		return -EOPNOTSUPP;
	if (to < 0 || to > mtd->size || len > mtd->size - to)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (!len)
		return 0;
	return mtd->_panic_write(mtd, to, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_panic_write);

int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
	int ret_code;
	ops->retlen = ops->oobretlen = 0;
	if (!mtd->_read_oob)
		return -EOPNOTSUPP;
	/*
	 * In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
	 * similar to mtd->_read(), returning a non-negative integer
	 * representing max bitflips. In other cases, mtd->_read_oob() may
	 * return -EUCLEAN. In all cases, perform similar logic to mtd_read().
	 */
	ret_code = mtd->_read_oob(mtd, from, ops);
	if (unlikely(ret_code < 0))
		return ret_code;
	if (mtd->ecc_strength == 0)
		return 0;	/* device lacks ecc */
	return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
}
EXPORT_SYMBOL_GPL(mtd_read_oob);

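/**
 * mtd_ooblayout_ecc - Get the OOB region definition of a specific ECC section
 * @mtd: MTD device structure
 * @section: ECC section. Depending on the layout you may have all the ECC
 *	     bytes stored in a single region or one ECC section per page
 * @oobecc: OOB region struct filled with the appropriate ECC position
 *	    information
 *
 * This function returns ECC section information in @oobecc. Returns zero if
 * @section exists, a negative error code otherwise.
 */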
int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
		      struct mtd_oob_region *oobecc)
{
	memset(oobecc, 0, sizeof(*oobecc));

	if (!mtd || section < 0)
		return -EINVAL;

	if (!mtd->ooblayout || !mtd->ooblayout->ecc)
		return -ENOTSUPP;

	return mtd->ooblayout->ecc(mtd, section, oobecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_ecc);

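/**
 * mtd_ooblayout_free - Get the OOB region definition of a specific free
 *			section
 * @mtd: MTD device structure
 * @section: free section you are interested in. Depending on the layout
 *	     you may have all the free bytes stored in a single region or
 *	     one free section per page
 * @oobfree: OOB region struct filled with the appropriate free position
 *	     information
 *
 * This function returns the position of free bytes in the OOB area. Returns
 * zero if @section exists, a negative error code otherwise.
 */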
int mtd_ooblayout_free(struct mtd_info *mtd, int section,
		       struct mtd_oob_region *oobfree)
{
	memset(oobfree, 0, sizeof(*oobfree));

	if (!mtd || section < 0)
		return -EINVAL;

	if (!mtd->ooblayout || !mtd->ooblayout->free)
		return -ENOTSUPP;

	return mtd->ooblayout->free(mtd, section, oobfree);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_free);

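/**
 * mtd_ooblayout_find_region - Find the region attached to a specific byte
 * @mtd: mtd info structure
 * @byte: the byte we are searching for
 * @sectionp: pointer where the section id will be stored
 * @oobregion: used to retrieve the region position information
 * @iter: iterator function. Should be either mtd_ooblayout_free or
 *	  mtd_ooblayout_ecc depending on the region type you're searching for
 *
 * This function returns the section id and oobregion information of a
 * specific byte. For example, say you want to know where the 4th ECC byte is
 * stored, you'll use:
 *
 *	mtd_ooblayout_find_region(mtd, 3, &section, &oobregion,
 *				  mtd_ooblayout_ecc);
 */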
static int mtd_ooblayout_find_region(struct mtd_info *mtd, int byte,
				int *sectionp, struct mtd_oob_region *oobregion,
				int (*iter)(struct mtd_info *,
					    int section,
					    struct mtd_oob_region *oobregion))
{
	int pos = 0, ret, section = 0;

	memset(oobregion, 0, sizeof(*oobregion));

	while (1) {
		ret = iter(mtd, section, oobregion);
		if (ret)
			return ret;

		if (pos + oobregion->length > byte)
			break;

		pos += oobregion->length;
		section++;
	}

	/* Adjust the region so that it starts at the requested byte. */
	oobregion->offset += byte - pos;
	oobregion->length -= byte - pos;
	*sectionp = section;

	return 0;
}

/**
 * mtd_ooblayout_find_eccregion - Find the ECC region attached to a specific
 *				  ECC byte
 * @mtd: mtd info structure
 * @eccbyte: the byte we are searching for
 * @section: pointer where the section id will be stored
 * @oobregion: OOB region information
 *
 * Works like mtd_ooblayout_find_region(), but searches the ECC layout only.
 */
int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte,
				 int *section,
				 struct mtd_oob_region *oobregion)
{
	return mtd_ooblayout_find_region(mtd, eccbyte, section, oobregion,
					 mtd_ooblayout_ecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_find_eccregion);

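/**
 * mtd_ooblayout_get_bytes - Extract OOB bytes from the oob buffer
 * @mtd: mtd info structure
 * @buf: destination buffer to store OOB bytes
 * @oobbuf: OOB buffer
 * @start: first byte to retrieve
 * @nbytes: number of bytes to retrieve
 * @iter: section iterator
 *
 * Extract bytes attached to a specific category (ECC or free) from the OOB
 * buffer and copy them into @buf.
 */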
static int mtd_ooblayout_get_bytes(struct mtd_info *mtd, u8 *buf,
				const u8 *oobbuf, int start, int nbytes,
				int (*iter)(struct mtd_info *,
					    int section,
					    struct mtd_oob_region *oobregion))
{
	struct mtd_oob_region oobregion;
	int section, ret;

	ret = mtd_ooblayout_find_region(mtd, start, &section,
					&oobregion, iter);

	while (!ret) {
		int cnt;

		cnt = min_t(int, nbytes, oobregion.length);
		memcpy(buf, oobbuf + oobregion.offset, cnt);
		buf += cnt;
		nbytes -= cnt;

		if (!nbytes)
			break;

		ret = iter(mtd, ++section, &oobregion);
	}

	return ret;
}

/**
 * mtd_ooblayout_set_bytes - put OOB bytes into the oob buffer
 * @mtd: mtd info structure
 * @buf: source buffer to get OOB bytes from
 * @oobbuf: OOB buffer
 * @start: first OOB byte to set
 * @nbytes: number of OOB bytes to set
 * @iter: section iterator
 *
 * Fill the OOB buffer with data provided in @buf. The category (ECC or free)
 * is selected by passing the appropriate iterator.
 */
static int mtd_ooblayout_set_bytes(struct mtd_info *mtd, const u8 *buf,
				u8 *oobbuf, int start, int nbytes,
				int (*iter)(struct mtd_info *,
					    int section,
					    struct mtd_oob_region *oobregion))
{
	struct mtd_oob_region oobregion;
	int section, ret;

	ret = mtd_ooblayout_find_region(mtd, start, &section,
					&oobregion, iter);

	while (!ret) {
		int cnt;

		cnt = min_t(int, nbytes, oobregion.length);
		memcpy(oobbuf + oobregion.offset, buf, cnt);
		buf += cnt;
		nbytes -= cnt;

		if (!nbytes)
			break;

		ret = iter(mtd, ++section, &oobregion);
	}

	return ret;
}

/**
 * mtd_ooblayout_count_bytes - count the number of bytes in a OOB category
 * @mtd: mtd info structure
 * @iter: category iterator
 *
 * Count the number of bytes in a given category.
 */
static int mtd_ooblayout_count_bytes(struct mtd_info *mtd,
				int (*iter)(struct mtd_info *,
					    int section,
					    struct mtd_oob_region *oobregion))
{
	struct mtd_oob_region oobregion;
	int section = 0, ret, nbytes = 0;

	while (1) {
		ret = iter(mtd, section++, &oobregion);
		if (ret) {
			if (ret == -ERANGE)
				ret = nbytes;
			break;
		}

		nbytes += oobregion.length;
	}

	return ret;
}

/**
 * mtd_ooblayout_get_eccbytes - extract ECC bytes from the oob buffer
 * @mtd: mtd info structure
 * @eccbuf: destination buffer to store ECC bytes
 * @oobbuf: OOB buffer
 * @start: first ECC byte to retrieve
 * @nbytes: number of ECC bytes to retrieve
 *
 * Works like mtd_ooblayout_get_bytes(), except it acts on ECC bytes.
 */
int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf,
			       const u8 *oobbuf, int start, int nbytes)
{
	return mtd_ooblayout_get_bytes(mtd, eccbuf, oobbuf, start, nbytes,
				       mtd_ooblayout_ecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_get_eccbytes);

/**
 * mtd_ooblayout_set_eccbytes - set ECC bytes into the oob buffer
 * @mtd: mtd info structure
 * @eccbuf: source buffer to get ECC bytes from
 * @oobbuf: OOB buffer
 * @start: first ECC byte to set
 * @nbytes: number of ECC bytes to set
 *
 * Works like mtd_ooblayout_set_bytes(), except it acts on ECC bytes.
 */
int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf,
			       u8 *oobbuf, int start, int nbytes)
{
	return mtd_ooblayout_set_bytes(mtd, eccbuf, oobbuf, start, nbytes,
				       mtd_ooblayout_ecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_set_eccbytes);

/**
 * mtd_ooblayout_get_databytes - extract data bytes from the oob buffer
 * @mtd: mtd info structure
 * @databuf: destination buffer to store data bytes
 * @oobbuf: OOB buffer
 * @start: first data byte to retrieve
 * @nbytes: number of data bytes to retrieve
 *
 * Works like mtd_ooblayout_get_bytes(), except it acts on free bytes.
 */
int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf,
				const u8 *oobbuf, int start, int nbytes)
{
	return mtd_ooblayout_get_bytes(mtd, databuf, oobbuf, start, nbytes,
				       mtd_ooblayout_free);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_get_databytes);

/**
 * mtd_ooblayout_set_databytes - set data bytes into the oob buffer
 * @mtd: mtd info structure
 * @databuf: source buffer to get data bytes from
 * @oobbuf: OOB buffer
 * @start: first data byte to set
 * @nbytes: number of data bytes to set
 *
 * Works like mtd_ooblayout_set_bytes(), except it acts on free bytes.
 */
int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf,
				u8 *oobbuf, int start, int nbytes)
{
	return mtd_ooblayout_set_bytes(mtd, databuf, oobbuf, start, nbytes,
				       mtd_ooblayout_free);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_set_databytes);

/**
 * mtd_ooblayout_count_freebytes - count the number of free bytes in OOB
 * @mtd: mtd info structure
 *
 * Works like mtd_ooblayout_count_bytes(), except it counts free bytes.
 */
int mtd_ooblayout_count_freebytes(struct mtd_info *mtd)
{
	return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_free);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_count_freebytes);

/**
 * mtd_ooblayout_count_eccbytes - count the number of ECC bytes in OOB
 * @mtd: mtd info structure
 *
 * Works like mtd_ooblayout_count_bytes(), except it counts ECC bytes.
 */
int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd)
{
	return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_ecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_count_eccbytes);
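
/*
 * Illustrative use of the ooblayout byte helpers from a NAND driver
 * (sketch only; 'oob_poi' and 'ecc_calc' are hypothetical buffers):
 *
 *	int nbytes = mtd_ooblayout_count_eccbytes(mtd);
 *
 *	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_calc, oob_poi, 0, nbytes);
 *	if (ret)
 *		return ret;
 */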

int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
			   struct otp_info *buf)
{
	if (!mtd->_get_fact_prot_info)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return mtd->_get_fact_prot_info(mtd, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info);

int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
			   size_t *retlen, u_char *buf)
{
	*retlen = 0;
	if (!mtd->_read_fact_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return mtd->_read_fact_prot_reg(mtd, from, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg);

int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
			   struct otp_info *buf)
{
	if (!mtd->_get_user_prot_info)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return mtd->_get_user_prot_info(mtd, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_get_user_prot_info);

int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
			   size_t *retlen, u_char *buf)
{
	*retlen = 0;
	if (!mtd->_read_user_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return mtd->_read_user_prot_reg(mtd, from, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg);

int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
			    size_t *retlen, u_char *buf)
{
	int ret;

	*retlen = 0;
	if (!mtd->_write_user_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	ret = mtd->_write_user_prot_reg(mtd, to, len, retlen, buf);
	if (ret)
		return ret;

	/*
	 * If no data could be written at all, we are out of memory and
	 * there is no point in trying to pass more data to write.
	 */
	return (*retlen) ? 0 : -ENOSPC;
}
EXPORT_SYMBOL_GPL(mtd_write_user_prot_reg);

int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
{
	if (!mtd->_lock_user_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return mtd->_lock_user_prot_reg(mtd, from, len);
}
EXPORT_SYMBOL_GPL(mtd_lock_user_prot_reg);

/* Chip-supported device locking */
int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	if (!mtd->_lock)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs > mtd->size || len > mtd->size - ofs)
		return -EINVAL;
	if (!len)
		return 0;
	return mtd->_lock(mtd, ofs, len);
}
EXPORT_SYMBOL_GPL(mtd_lock);

int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	if (!mtd->_unlock)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs > mtd->size || len > mtd->size - ofs)
		return -EINVAL;
	if (!len)
		return 0;
	return mtd->_unlock(mtd, ofs, len);
}
EXPORT_SYMBOL_GPL(mtd_unlock);

int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	if (!mtd->_is_locked)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs > mtd->size || len > mtd->size - ofs)
		return -EINVAL;
	if (!len)
		return 0;
	return mtd->_is_locked(mtd, ofs, len);
}
EXPORT_SYMBOL_GPL(mtd_is_locked);

int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs)
{
	if (ofs < 0 || ofs > mtd->size)
		return -EINVAL;
	if (!mtd->_block_isreserved)
		return 0;
	return mtd->_block_isreserved(mtd, ofs);
}
EXPORT_SYMBOL_GPL(mtd_block_isreserved);

int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	if (ofs < 0 || ofs > mtd->size)
		return -EINVAL;
	if (!mtd->_block_isbad)
		return 0;
	return mtd->_block_isbad(mtd, ofs);
}
EXPORT_SYMBOL_GPL(mtd_block_isbad);

int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	if (!mtd->_block_markbad)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs > mtd->size)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	return mtd->_block_markbad(mtd, ofs);
}
EXPORT_SYMBOL_GPL(mtd_block_markbad);

#ifndef __UBOOT__
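/**
 * default_mtd_writev - the default writev method
 * @mtd: mtd device description object pointer
 * @vecs: the vectors to write
 * @count: count of vectors in @vecs
 * @to: the MTD device offset to write to
 * @retlen: on exit contains the count of bytes written to the MTD device.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */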
static int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
			      unsigned long count, loff_t to, size_t *retlen)
{
	unsigned long i;
	size_t totlen = 0, thislen;
	int ret = 0;

	for (i = 0; i < count; i++) {
		if (!vecs[i].iov_len)
			continue;
		ret = mtd_write(mtd, to, vecs[i].iov_len, &thislen,
				vecs[i].iov_base);
		totlen += thislen;
		if (ret || thislen != vecs[i].iov_len)
			break;
		to += vecs[i].iov_len;
	}
	*retlen = totlen;
	return ret;
}

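/**
 * mtd_writev - the vector-based MTD write method
 * @mtd: mtd device description object pointer
 * @vecs: the vectors to write
 * @count: count of vectors in @vecs
 * @to: the MTD device offset to write to
 * @retlen: on exit contains the count of bytes written to the MTD device.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */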
int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
	       unsigned long count, loff_t to, size_t *retlen)
{
	*retlen = 0;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (!mtd->_writev)
		return default_mtd_writev(mtd, vecs, count, to, retlen);
	return mtd->_writev(mtd, vecs, count, to, retlen);
}
EXPORT_SYMBOL_GPL(mtd_writev);

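/**
 * mtd_kmalloc_up_to - allocate a contiguous buffer up to the specified size
 * @mtd: mtd device description object pointer
 * @size: a pointer to the ideal or maximum size of the allocation, points
 *        to the actual allocation size on success.
 *
 * This routine attempts to allocate a contiguous kernel buffer up to
 * the specified size, backing off the size of the request exponentially
 * until the request succeeds or until the allocation size falls below
 * the system page size. This attempts to make sure it does not adversely
 * impact system performance, so when allocating more than one page, we
 * ask the memory allocator to avoid re-trying, swapping, writing back
 * or performing I/O.
 *
 * Returns a pointer to the allocated buffer on success; otherwise, NULL.
 */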
void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
{
	gfp_t flags = __GFP_NOWARN | __GFP_WAIT |
		      __GFP_NORETRY | __GFP_NO_KSWAPD;
	size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
	void *kbuf;

	*size = min_t(size_t, *size, KMALLOC_MAX_SIZE);

	while (*size > min_alloc) {
		kbuf = kmalloc(*size, flags);
		if (kbuf)
			return kbuf;

		*size >>= 1;
		*size = ALIGN(*size, mtd->writesize);
	}

	/*
	 * For the last resort allocation allow 'kmalloc()' to do all sorts of
	 * things (write-back, dropping caches, etc) by using GFP_KERNEL.
	 */
	return kmalloc(*size, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to);
#endif

#ifdef CONFIG_PROC_FS

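/* Support for /proc/mtd */
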
static int mtd_proc_show(struct seq_file *m, void *v)
{
	struct mtd_info *mtd;

	seq_puts(m, "dev:    size   erasesize  name\n");
	mutex_lock(&mtd_table_mutex);
	mtd_for_each_device(mtd) {
		seq_printf(m, "mtd%d: %8.8llx %8.8x \"%s\"\n",
			   mtd->index, (unsigned long long)mtd->size,
			   mtd->erasesize, mtd->name);
	}
	mutex_unlock(&mtd_table_mutex);
	return 0;
}

static int mtd_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, mtd_proc_show, NULL);
}

static const struct file_operations mtd_proc_ops = {
	.open		= mtd_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
#endif /* CONFIG_PROC_FS */

#ifndef __UBOOT__
static int __init mtd_bdi_init(struct backing_dev_info *bdi, const char *name)
{
	int ret;

	ret = bdi_init(bdi);
	if (!ret)
		ret = bdi_register(bdi, NULL, "%s", name);

	if (ret)
		bdi_destroy(bdi);

	return ret;
}

static struct proc_dir_entry *proc_mtd;

static int __init init_mtd(void)
{
	int ret;

	ret = class_register(&mtd_class);
	if (ret)
		goto err_reg;

	ret = mtd_bdi_init(&mtd_bdi_unmappable, "mtd-unmap");
	if (ret)
		goto err_bdi1;

	ret = mtd_bdi_init(&mtd_bdi_ro_mappable, "mtd-romap");
	if (ret)
		goto err_bdi2;

	ret = mtd_bdi_init(&mtd_bdi_rw_mappable, "mtd-rwmap");
	if (ret)
		goto err_bdi3;

	proc_mtd = proc_create("mtd", 0, NULL, &mtd_proc_ops);

	ret = init_mtdchar();
	if (ret)
		goto out_procfs;

	return 0;

out_procfs:
	if (proc_mtd)
		remove_proc_entry("mtd", NULL);
err_bdi3:
	bdi_destroy(&mtd_bdi_ro_mappable);
err_bdi2:
	bdi_destroy(&mtd_bdi_unmappable);
err_bdi1:
	class_unregister(&mtd_class);
err_reg:
	pr_err("Error registering mtd class or bdi: %d\n", ret);
	return ret;
}

static void __exit cleanup_mtd(void)
{
	cleanup_mtdchar();
	if (proc_mtd)
		remove_proc_entry("mtd", NULL);
	class_unregister(&mtd_class);
	bdi_destroy(&mtd_bdi_unmappable);
	bdi_destroy(&mtd_bdi_ro_mappable);
	bdi_destroy(&mtd_bdi_rw_mappable);
}

module_init(init_mtd);
module_exit(cleanup_mtd);
#endif

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_DESCRIPTION("Core MTD registration and access routines");