/*
 * Core MTD registration and access routines (drivers and users).
 *
 * Shared between the Linux MTD core and U-Boot; the U-Boot-specific
 * paths are selected by the __UBOOT__ guards below.
 */
11#ifndef __UBOOT__
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/ptrace.h>
15#include <linux/seq_file.h>
16#include <linux/string.h>
17#include <linux/timer.h>
18#include <linux/major.h>
19#include <linux/fs.h>
20#include <linux/err.h>
21#include <linux/ioctl.h>
22#include <linux/init.h>
23#include <linux/proc_fs.h>
24#include <linux/idr.h>
25#include <linux/backing-dev.h>
26#include <linux/gfp.h>
27#include <linux/slab.h>
28#else
29#include <linux/err.h>
30#include <ubi_uboot.h>
31#endif
32
33#include <linux/log2.h>
34#include <linux/mtd/mtd.h>
35#include <linux/mtd/partitions.h>
36
37#include "mtdcore.h"
38
#ifndef __UBOOT__

/*
 * Backing device capabilities for non-mappable devices (such as NAND flash):
 * only private, copied mappings are permitted.
 */
44static struct backing_dev_info mtd_bdi_unmappable = {
45 .capabilities = BDI_CAP_MAP_COPY,
46};

/*
 * Backing device capabilities for R/O mappable devices (such as ROM):
 * permits private, copied mappings as well as read-only direct mappings.
 */
53static struct backing_dev_info mtd_bdi_ro_mappable = {
54 .capabilities = (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT |
55 BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP),
56};

/*
 * Backing device capabilities for writable mappable devices (such as RAM):
 * permits private, copied mappings as well as read/write direct mappings.
 */
63static struct backing_dev_info mtd_bdi_rw_mappable = {
64 .capabilities = (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT |
65 BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP |
66 BDI_CAP_WRITE_MAP),
67};
68
69static int mtd_cls_suspend(struct device *dev, pm_message_t state);
70static int mtd_cls_resume(struct device *dev);
71
72static struct class mtd_class = {
73 .name = "mtd",
74 .owner = THIS_MODULE,
75 .suspend = mtd_cls_suspend,
76 .resume = mtd_cls_resume,
77};
78#else
79struct mtd_info *mtd_table[MAX_MTD_DEVICES];
80
81#define MAX_IDR_ID 64
82
83struct idr_layer {
84 int used;
85 void *ptr;
86};
87
88struct idr {
89 struct idr_layer id[MAX_IDR_ID];
90};
91
92#define DEFINE_IDR(name) struct idr name;
93
94void idr_remove(struct idr *idp, int id)
95{
96 if (idp->id[id].used)
97 idp->id[id].used = 0;
98
99 return;
100}
void *idr_find(struct idr *idp, int id)
{
	/* Bounds-check so stale or out-of-range ids cannot read past the table */
	if (id >= 0 && id < MAX_IDR_ID && idp->id[id].used)
		return idp->id[id].ptr;

	return NULL;
}

void *idr_get_next(struct idr *idp, int *next)
{
	void *ret;
	int id = *next;

	ret = idr_find(idp, id);
	if (ret) {
		id++;
		/* Wrap back to 0 when we run off the table or hit a free slot */
		if (id >= MAX_IDR_ID || !idp->id[id].used)
			id = 0;
		*next = id;
	} else {
		*next = 0;
	}

	return ret;
}
126
127int idr_alloc(struct idr *idp, void *ptr, int start, int end, gfp_t gfp_mask)
128{
129 struct idr_layer *idl;
130 int i = 0;
131
132 while (i < MAX_IDR_ID) {
133 idl = &idp->id[i];
134 if (idl->used == 0) {
135 idl->used = 1;
136 idl->ptr = ptr;
137 return i;
138 }
139 i++;
140 }
141 return -ENOSPC;
142}
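
/*
 * Illustrative sketch (not part of the original file): how this minimal
 * U-Boot idr replacement is used by the code below.  idr_alloc() hands out
 * the lowest free slot, idr_find() maps an index back to its pointer and
 * idr_remove() releases the slot.  'my_idr' and 'obj' are hypothetical names.
 *
 *	static DEFINE_IDR(my_idr);
 *
 *	struct mtd_info *obj = ...;
 *	int id = idr_alloc(&my_idr, obj, 0, 0, GFP_KERNEL);
 *	if (id < 0)
 *		return id;	// -ENOSPC once all MAX_IDR_ID slots are used
 *	BUG_ON(idr_find(&my_idr, id) != obj);
 *	idr_remove(&my_idr, id);
 */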
143#endif
144
145static DEFINE_IDR(mtd_idr);
146
147
148
149DEFINE_MUTEX(mtd_table_mutex);
150EXPORT_SYMBOL_GPL(mtd_table_mutex);
151
152struct mtd_info *__mtd_next_device(int i)
153{
154 return idr_get_next(&mtd_idr, &i);
155}
156EXPORT_SYMBOL_GPL(__mtd_next_device);
157
158#ifndef __UBOOT__
159static LIST_HEAD(mtd_notifiers);
160
161
162#define MTD_DEVT(index) MKDEV(MTD_CHAR_MAJOR, (index)*2)
163
164
165
166
167static void mtd_release(struct device *dev)
168{
169 struct mtd_info __maybe_unused *mtd = dev_get_drvdata(dev);
170 dev_t index = MTD_DEVT(mtd->index);
171
172
173 if (index)
174 device_destroy(&mtd_class, index + 1);
175}
176
177static int mtd_cls_suspend(struct device *dev, pm_message_t state)
178{
179 struct mtd_info *mtd = dev_get_drvdata(dev);
180
181 return mtd ? mtd_suspend(mtd) : 0;
182}
183
184static int mtd_cls_resume(struct device *dev)
185{
186 struct mtd_info *mtd = dev_get_drvdata(dev);
187
188 if (mtd)
189 mtd_resume(mtd);
190 return 0;
191}
192
193static ssize_t mtd_type_show(struct device *dev,
194 struct device_attribute *attr, char *buf)
195{
196 struct mtd_info *mtd = dev_get_drvdata(dev);
197 char *type;
198
199 switch (mtd->type) {
200 case MTD_ABSENT:
201 type = "absent";
202 break;
203 case MTD_RAM:
204 type = "ram";
205 break;
206 case MTD_ROM:
207 type = "rom";
208 break;
209 case MTD_NORFLASH:
210 type = "nor";
211 break;
212 case MTD_NANDFLASH:
213 type = "nand";
214 break;
215 case MTD_DATAFLASH:
216 type = "dataflash";
217 break;
218 case MTD_UBIVOLUME:
219 type = "ubi";
220 break;
221 case MTD_MLCNANDFLASH:
222 type = "mlc-nand";
223 break;
224 default:
225 type = "unknown";
226 }
227
228 return snprintf(buf, PAGE_SIZE, "%s\n", type);
229}
230static DEVICE_ATTR(type, S_IRUGO, mtd_type_show, NULL);
231
232static ssize_t mtd_flags_show(struct device *dev,
233 struct device_attribute *attr, char *buf)
234{
235 struct mtd_info *mtd = dev_get_drvdata(dev);
236
237 return snprintf(buf, PAGE_SIZE, "0x%lx\n", (unsigned long)mtd->flags);
238
239}
240static DEVICE_ATTR(flags, S_IRUGO, mtd_flags_show, NULL);
241
242static ssize_t mtd_size_show(struct device *dev,
243 struct device_attribute *attr, char *buf)
244{
245 struct mtd_info *mtd = dev_get_drvdata(dev);
246
247 return snprintf(buf, PAGE_SIZE, "%llu\n",
248 (unsigned long long)mtd->size);
249
250}
251static DEVICE_ATTR(size, S_IRUGO, mtd_size_show, NULL);
252
253static ssize_t mtd_erasesize_show(struct device *dev,
254 struct device_attribute *attr, char *buf)
255{
256 struct mtd_info *mtd = dev_get_drvdata(dev);
257
258 return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->erasesize);
259
260}
261static DEVICE_ATTR(erasesize, S_IRUGO, mtd_erasesize_show, NULL);
262
263static ssize_t mtd_writesize_show(struct device *dev,
264 struct device_attribute *attr, char *buf)
265{
266 struct mtd_info *mtd = dev_get_drvdata(dev);
267
268 return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->writesize);
269
270}
271static DEVICE_ATTR(writesize, S_IRUGO, mtd_writesize_show, NULL);
272
273static ssize_t mtd_subpagesize_show(struct device *dev,
274 struct device_attribute *attr, char *buf)
275{
276 struct mtd_info *mtd = dev_get_drvdata(dev);
277 unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft;
278
279 return snprintf(buf, PAGE_SIZE, "%u\n", subpagesize);
280
281}
282static DEVICE_ATTR(subpagesize, S_IRUGO, mtd_subpagesize_show, NULL);
283
284static ssize_t mtd_oobsize_show(struct device *dev,
285 struct device_attribute *attr, char *buf)
286{
287 struct mtd_info *mtd = dev_get_drvdata(dev);
288
289 return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->oobsize);
290
291}
292static DEVICE_ATTR(oobsize, S_IRUGO, mtd_oobsize_show, NULL);
293
294static ssize_t mtd_numeraseregions_show(struct device *dev,
295 struct device_attribute *attr, char *buf)
296{
297 struct mtd_info *mtd = dev_get_drvdata(dev);
298
299 return snprintf(buf, PAGE_SIZE, "%u\n", mtd->numeraseregions);
300
301}
302static DEVICE_ATTR(numeraseregions, S_IRUGO, mtd_numeraseregions_show,
303 NULL);
304
305static ssize_t mtd_name_show(struct device *dev,
306 struct device_attribute *attr, char *buf)
307{
308 struct mtd_info *mtd = dev_get_drvdata(dev);
309
310 return snprintf(buf, PAGE_SIZE, "%s\n", mtd->name);
311
312}
313static DEVICE_ATTR(name, S_IRUGO, mtd_name_show, NULL);
314
315static ssize_t mtd_ecc_strength_show(struct device *dev,
316 struct device_attribute *attr, char *buf)
317{
318 struct mtd_info *mtd = dev_get_drvdata(dev);
319
320 return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_strength);
321}
322static DEVICE_ATTR(ecc_strength, S_IRUGO, mtd_ecc_strength_show, NULL);
323
324static ssize_t mtd_bitflip_threshold_show(struct device *dev,
325 struct device_attribute *attr,
326 char *buf)
327{
328 struct mtd_info *mtd = dev_get_drvdata(dev);
329
330 return snprintf(buf, PAGE_SIZE, "%u\n", mtd->bitflip_threshold);
331}
332
333static ssize_t mtd_bitflip_threshold_store(struct device *dev,
334 struct device_attribute *attr,
335 const char *buf, size_t count)
336{
337 struct mtd_info *mtd = dev_get_drvdata(dev);
338 unsigned int bitflip_threshold;
339 int retval;
340
341 retval = kstrtouint(buf, 0, &bitflip_threshold);
342 if (retval)
343 return retval;
344
345 mtd->bitflip_threshold = bitflip_threshold;
346 return count;
347}
348static DEVICE_ATTR(bitflip_threshold, S_IRUGO | S_IWUSR,
349 mtd_bitflip_threshold_show,
350 mtd_bitflip_threshold_store);
351
352static ssize_t mtd_ecc_step_size_show(struct device *dev,
353 struct device_attribute *attr, char *buf)
354{
355 struct mtd_info *mtd = dev_get_drvdata(dev);
356
357 return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_step_size);
358
359}
360static DEVICE_ATTR(ecc_step_size, S_IRUGO, mtd_ecc_step_size_show, NULL);
361
362static struct attribute *mtd_attrs[] = {
363 &dev_attr_type.attr,
364 &dev_attr_flags.attr,
365 &dev_attr_size.attr,
366 &dev_attr_erasesize.attr,
367 &dev_attr_writesize.attr,
368 &dev_attr_subpagesize.attr,
369 &dev_attr_oobsize.attr,
370 &dev_attr_numeraseregions.attr,
371 &dev_attr_name.attr,
372 &dev_attr_ecc_strength.attr,
373 &dev_attr_ecc_step_size.attr,
374 &dev_attr_bitflip_threshold.attr,
375 NULL,
376};
377ATTRIBUTE_GROUPS(mtd);
378
379static struct device_type mtd_devtype = {
380 .name = "mtd",
381 .groups = mtd_groups,
382 .release = mtd_release,
383};
384#endif
385
/**
 * add_mtd_device - register an MTD device
 * @mtd: pointer to the new MTD device info structure
 *
 * Adds the device to the list of MTD devices present in the system and
 * notifies each currently registered MTD 'user' of its arrival.  Returns
 * zero on success or 1 on failure (note: not a negative errno).
 */
396int add_mtd_device(struct mtd_info *mtd)
397{
398#ifndef __UBOOT__
399 struct mtd_notifier *not;
400#endif
401 int i, error;
402
403#ifndef __UBOOT__
404 if (!mtd->backing_dev_info) {
405 switch (mtd->type) {
406 case MTD_RAM:
407 mtd->backing_dev_info = &mtd_bdi_rw_mappable;
408 break;
409 case MTD_ROM:
410 mtd->backing_dev_info = &mtd_bdi_ro_mappable;
411 break;
412 default:
413 mtd->backing_dev_info = &mtd_bdi_unmappable;
414 break;
415 }
416 }
417#endif
418
419 BUG_ON(mtd->writesize == 0);
420 mutex_lock(&mtd_table_mutex);
421
422 i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
423 if (i < 0)
424 goto fail_locked;
425
426 mtd->index = i;
427 mtd->usecount = 0;
428
429 INIT_LIST_HEAD(&mtd->partitions);
430
	/* Default value if not set by the driver */
432 if (mtd->bitflip_threshold == 0)
433 mtd->bitflip_threshold = mtd->ecc_strength;
434
435 if (is_power_of_2(mtd->erasesize))
436 mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
437 else
438 mtd->erasesize_shift = 0;
439
440 if (is_power_of_2(mtd->writesize))
441 mtd->writesize_shift = ffs(mtd->writesize) - 1;
442 else
443 mtd->writesize_shift = 0;
444
445 mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
446 mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;
447
	/* Some chips always power up locked. Unlock them now */
449 if ((mtd->flags & MTD_WRITEABLE) && (mtd->flags & MTD_POWERUP_LOCK)) {
450 error = mtd_unlock(mtd, 0, mtd->size);
451 if (error && error != -EOPNOTSUPP)
452 printk(KERN_WARNING
453 "%s: unlock failed, writes may not work\n",
454 mtd->name);
455 }
456
457#ifndef __UBOOT__

	/* Caller should have set dev.parent to match the physical device,
	 * if appropriate.
	 */
461 mtd->dev.type = &mtd_devtype;
462 mtd->dev.class = &mtd_class;
463 mtd->dev.devt = MTD_DEVT(i);
464 dev_set_name(&mtd->dev, "mtd%d", i);
465 dev_set_drvdata(&mtd->dev, mtd);
466 if (device_register(&mtd->dev) != 0)
467 goto fail_added;
468
469 if (MTD_DEVT(i))
470 device_create(&mtd_class, mtd->dev.parent,
471 MTD_DEVT(i) + 1,
472 NULL, "mtd%dro", i);
473
474 pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name);
475
476
477 list_for_each_entry(not, &mtd_notifiers, list)
478 not->add(mtd);
479#else
480 pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name);
481#endif
482
483 mutex_unlock(&mtd_table_mutex);
484
485
486
487
488 __module_get(THIS_MODULE);
489 return 0;
490
491#ifndef __UBOOT__
492fail_added:
493 idr_remove(&mtd_idr, i);
494#endif
495fail_locked:
496 mutex_unlock(&mtd_table_mutex);
497 return 1;
498}
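
/*
 * Illustrative caller sketch (hypothetical driver code, not part of this
 * file): once a flash driver has filled in its struct mtd_info (name, type,
 * flags, size, erasesize, writesize and the _read/_write/_erase handlers),
 * registration is a single call.  Note the unusual convention: on failure
 * add_mtd_device() returns 1, not a negative errno.
 *
 *	if (add_mtd_device(mtd))
 *		return -ENODEV;
 */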
499
/**
 * del_mtd_device - unregister an MTD device
 * @mtd: pointer to the MTD device info structure to remove
 *
 * Removes the device from the list of MTD devices present in the system and
 * notifies each currently registered MTD 'user' of its departure.  Returns
 * zero on success, -EBUSY if the device is still in use, or -ENODEV if it
 * was not registered in the first place.
 */
510int del_mtd_device(struct mtd_info *mtd)
511{
512 int ret;
513#ifndef __UBOOT__
514 struct mtd_notifier *not;
515#endif
516
517 mutex_lock(&mtd_table_mutex);
518
519 if (idr_find(&mtd_idr, mtd->index) != mtd) {
520 ret = -ENODEV;
521 goto out_error;
522 }
523
524#ifndef __UBOOT__
525
526
527 list_for_each_entry(not, &mtd_notifiers, list)
528 not->remove(mtd);
529#endif
530
531 if (mtd->usecount) {
532 printk(KERN_NOTICE "Removing MTD device #%d (%s) with use count %d\n",
533 mtd->index, mtd->name, mtd->usecount);
534 ret = -EBUSY;
535 } else {
536#ifndef __UBOOT__
537 device_unregister(&mtd->dev);
538#endif
539
540 idr_remove(&mtd_idr, mtd->index);
541
542 module_put(THIS_MODULE);
543 ret = 0;
544 }
545
546out_error:
547 mutex_unlock(&mtd_table_mutex);
548 return ret;
549}
550
551#ifndef __UBOOT__
/**
 * mtd_device_parse_register - parse partitions and register an MTD device.
 *
 * @mtd: the MTD device to register
 * @types: the list of MTD partition probes to try, see
 *         'parse_mtd_partitions()' for more information
 * @parser_data: MTD partition parser-specific data
 * @parts: fallback partition description table, used if parsing fails
 * @nr_parts: the number of partitions in @parts, or zero if @parts is NULL
 *
 * This function aggregates MTD partition parsing (done by
 * 'parse_mtd_partitions()') and MTD device/partition registering.  It first
 * tries to probe partitions using the parsers in @types; if none are found
 * it falls back to the static table in @parts/@nr_parts; if there is still
 * no partition information, the whole device is registered as a single MTD
 * device.
 *
 * Returns zero in case of success and a negative error code in case of
 * failure.
 */
580int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
581 struct mtd_part_parser_data *parser_data,
582 const struct mtd_partition *parts,
583 int nr_parts)
584{
585 int err;
586 struct mtd_partition *real_parts;
587
588 err = parse_mtd_partitions(mtd, types, &real_parts, parser_data);
589 if (err <= 0 && nr_parts && parts) {
590 real_parts = kmemdup(parts, sizeof(*parts) * nr_parts,
591 GFP_KERNEL);
592 if (!real_parts)
593 err = -ENOMEM;
594 else
595 err = nr_parts;
596 }
597
598 if (err > 0) {
599 err = add_mtd_partitions(mtd, real_parts, err);
600 kfree(real_parts);
601 } else if (err == 0) {
602 err = add_mtd_device(mtd);
603 if (err == 1)
604 err = -ENODEV;
605 }
606
607 return err;
608}
609EXPORT_SYMBOL_GPL(mtd_device_parse_register);
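
/*
 * Illustrative caller sketch (hypothetical, not part of this file): a flash
 * driver registering with a parser list and a static fallback partition
 * table.  The parser names are the standard "cmdlinepart"/"ofpart" probes;
 * the partition layout shown is an example only.
 *
 *	static const char * const probes[] = { "cmdlinepart", "ofpart", NULL };
 *	static const struct mtd_partition fallback[] = {
 *		{ .name = "u-boot", .offset = 0,       .size = 0x80000 },
 *		{ .name = "env",    .offset = 0x80000, .size = 0x20000 },
 *		{ .name = "rootfs", .offset = 0xa0000, .size = MTDPART_SIZ_FULL },
 *	};
 *
 *	ret = mtd_device_parse_register(mtd, probes, NULL,
 *					fallback, ARRAY_SIZE(fallback));
 */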
610
/**
 * mtd_device_unregister - unregister an existing MTD device.
 *
 * @master: the MTD device to unregister.  This will unregister both the
 *          master and any partitions if registered.
 */
617int mtd_device_unregister(struct mtd_info *master)
618{
619 int err;
620
621 err = del_mtd_partitions(master);
622 if (err)
623 return err;
624
625 if (!device_is_registered(&master->dev))
626 return 0;
627
628 return del_mtd_device(master);
629}
630EXPORT_SYMBOL_GPL(mtd_device_unregister);
631
/**
 * register_mtd_user - register a 'user' of MTD devices.
 * @new: pointer to notifier info structure
 *
 * Registers a pair of callbacks to be called upon addition or removal of
 * MTD devices, and immediately invokes the 'add' callback for every MTD
 * device currently present in the system.
 */
640void register_mtd_user (struct mtd_notifier *new)
641{
642 struct mtd_info *mtd;
643
644 mutex_lock(&mtd_table_mutex);
645
646 list_add(&new->list, &mtd_notifiers);
647
648 __module_get(THIS_MODULE);
649
650 mtd_for_each_device(mtd)
651 new->add(mtd);
652
653 mutex_unlock(&mtd_table_mutex);
654}
655EXPORT_SYMBOL_GPL(register_mtd_user);
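
/*
 * Illustrative notifier sketch (hypothetical, not part of this file): a user
 * of the MTD core that wants to be told about every device that comes and
 * goes fills in a struct mtd_notifier.  The callback names are examples.
 *
 *	static void my_add(struct mtd_info *mtd)    { ... bind to mtd ... }
 *	static void my_remove(struct mtd_info *mtd) { ... release mtd ... }
 *
 *	static struct mtd_notifier my_notifier = {
 *		.add	= my_add,
 *		.remove	= my_remove,
 *	};
 *
 *	register_mtd_user(&my_notifier);
 *	...
 *	unregister_mtd_user(&my_notifier);
 */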
656
/**
 * unregister_mtd_user - unregister a 'user' of MTD devices.
 * @old: pointer to notifier info structure
 *
 * Removes a callback pair from the list of 'users' to be notified upon
 * addition or removal of MTD devices, invoking the 'remove' callback for
 * every MTD device currently present in the system first.
 */
666int unregister_mtd_user (struct mtd_notifier *old)
667{
668 struct mtd_info *mtd;
669
670 mutex_lock(&mtd_table_mutex);
671
672 module_put(THIS_MODULE);
673
674 mtd_for_each_device(mtd)
675 old->remove(mtd);
676
677 list_del(&old->list);
678 mutex_unlock(&mtd_table_mutex);
679 return 0;
680}
681EXPORT_SYMBOL_GPL(unregister_mtd_user);
682#endif
683
/**
 * get_mtd_device - obtain a validated handle for an MTD device
 * @mtd: last known address of the required MTD device
 * @num: internal device number of the required MTD device
 *
 * Given a number and NULL address, return the num'th entry in the device
 * table, if any.  Given an address and num == -1, search the device table
 * for a device with that address and return if it's still present.  Given
 * both, return the num'th driver only if its address matches.  Return an
 * error pointer if not.
 */
695struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
696{
697 struct mtd_info *ret = NULL, *other;
698 int err = -ENODEV;
699
700 mutex_lock(&mtd_table_mutex);
701
702 if (num == -1) {
703 mtd_for_each_device(other) {
704 if (other == mtd) {
705 ret = mtd;
706 break;
707 }
708 }
709 } else if (num >= 0) {
710 ret = idr_find(&mtd_idr, num);
711 if (mtd && mtd != ret)
712 ret = NULL;
713 }
714
715 if (!ret) {
716 ret = ERR_PTR(err);
717 goto out;
718 }
719
720 err = __get_mtd_device(ret);
721 if (err)
722 ret = ERR_PTR(err);
723out:
724 mutex_unlock(&mtd_table_mutex);
725 return ret;
726}
727EXPORT_SYMBOL_GPL(get_mtd_device);
728
729
730int __get_mtd_device(struct mtd_info *mtd)
731{
732 int err;
733
734 if (!try_module_get(mtd->owner))
735 return -ENODEV;
736
737 if (mtd->_get_device) {
738 err = mtd->_get_device(mtd);
739
740 if (err) {
741 module_put(mtd->owner);
742 return err;
743 }
744 }
745 mtd->usecount++;
746 return 0;
747}
748EXPORT_SYMBOL_GPL(__get_mtd_device);
749
/**
 * get_mtd_device_nm - obtain a validated handle for an MTD device by name
 * @name: MTD device name to open
 *
 * Returns the MTD device description structure on success, or an error
 * pointer (ERR_PTR) on failure.
 */
758struct mtd_info *get_mtd_device_nm(const char *name)
759{
760 int err = -ENODEV;
761 struct mtd_info *mtd = NULL, *other;
762
763 mutex_lock(&mtd_table_mutex);
764
765 mtd_for_each_device(other) {
766 if (!strcmp(name, other->name)) {
767 mtd = other;
768 break;
769 }
770 }
771
772 if (!mtd)
773 goto out_unlock;
774
775 err = __get_mtd_device(mtd);
776 if (err)
777 goto out_unlock;
778
779 mutex_unlock(&mtd_table_mutex);
780 return mtd;
781
782out_unlock:
783 mutex_unlock(&mtd_table_mutex);
784 return ERR_PTR(err);
785}
786EXPORT_SYMBOL_GPL(get_mtd_device_nm);
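
/*
 * Illustrative caller sketch (hypothetical, not part of this file): looking
 * a device up by name and dropping the reference afterwards.  "nand0" is an
 * example device name.
 *
 *	struct mtd_info *mtd = get_mtd_device_nm("nand0");
 *	if (IS_ERR(mtd))
 *		return PTR_ERR(mtd);
 *	... use mtd_read()/mtd_write() on it ...
 *	put_mtd_device(mtd);
 */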
787
788#if defined(CONFIG_CMD_MTDPARTS_SPREAD)
/**
 * mtd_get_len_incl_bad - compute the flash span needed for @length good bytes
 * @mtd: the MTD device
 * @offset: flash offset where the region starts
 * @length: amount of good (non-bad-block) data required
 * @len_incl_bad: returns the length including any bad blocks skipped
 * @truncated: returns 1 if the device ends before @length good bytes were
 *	       found, 0 otherwise
 */
800void mtd_get_len_incl_bad(struct mtd_info *mtd, uint64_t offset,
801 const uint64_t length, uint64_t *len_incl_bad,
802 int *truncated)
803{
804 *truncated = 0;
805 *len_incl_bad = 0;
806
807 if (!mtd->_block_isbad) {
808 *len_incl_bad = length;
809 return;
810 }
811
812 uint64_t len_excl_bad = 0;
813 uint64_t block_len;
814
815 while (len_excl_bad < length) {
816 if (offset >= mtd->size) {
817 *truncated = 1;
818 return;
819 }
820
821 block_len = mtd->erasesize - (offset & (mtd->erasesize - 1));
822
823 if (!mtd->_block_isbad(mtd, offset & ~(mtd->erasesize - 1)))
824 len_excl_bad += block_len;
825
826 *len_incl_bad += block_len;
827 offset += block_len;
828 }
829}
830#endif
831
832void put_mtd_device(struct mtd_info *mtd)
833{
834 mutex_lock(&mtd_table_mutex);
835 __put_mtd_device(mtd);
836 mutex_unlock(&mtd_table_mutex);
837
838}
839EXPORT_SYMBOL_GPL(put_mtd_device);
840
841void __put_mtd_device(struct mtd_info *mtd)
842{
843 --mtd->usecount;
844 BUG_ON(mtd->usecount < 0);
845
846 if (mtd->_put_device)
847 mtd->_put_device(mtd);
848
849 module_put(mtd->owner);
850}
851EXPORT_SYMBOL_GPL(__put_mtd_device);
852
/*
 * Erase is an asynchronous operation.  Device drivers are supposed to call
 * instr->callback() whenever the operation completes, even if it completes
 * with a failure.  Callers are supposed to pass a callback function and
 * wait for it to be called before writing to the block.
 */
860int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
861{
862 if (instr->addr > mtd->size || instr->len > mtd->size - instr->addr)
863 return -EINVAL;
864 if (!(mtd->flags & MTD_WRITEABLE))
865 return -EROFS;
866 instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
867 if (!instr->len) {
868 instr->state = MTD_ERASE_DONE;
869 mtd_erase_callback(instr);
870 return 0;
871 }
872 return mtd->_erase(mtd, instr);
873}
874EXPORT_SYMBOL_GPL(mtd_erase);
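
/*
 * Illustrative caller sketch (hypothetical, not part of this file): erasing
 * a single block.  Many drivers complete the erase before mtd_erase()
 * returns, in which case instr.state can be checked directly; fully
 * asynchronous drivers require an instr.callback instead.
 *
 *	struct erase_info instr = {
 *		.mtd  = mtd,
 *		.addr = block_offset,
 *		.len  = mtd->erasesize,
 *	};
 *
 *	err = mtd_erase(mtd, &instr);
 *	if (err || instr.state == MTD_ERASE_FAILED)
 *		... handle the failed block ...
 */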
875
#ifndef __UBOOT__
/*
 * This stuff is for eXecute-In-Place (XIP): the driver returns a direct
 * pointer into the flash so data can be used without copying.  phys is
 * optional and may be NULL.
 */
880int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
881 void **virt, resource_size_t *phys)
882{
883 *retlen = 0;
884 *virt = NULL;
885 if (phys)
886 *phys = 0;
887 if (!mtd->_point)
888 return -EOPNOTSUPP;
889 if (from < 0 || from > mtd->size || len > mtd->size - from)
890 return -EINVAL;
891 if (!len)
892 return 0;
893 return mtd->_point(mtd, from, len, retlen, virt, phys);
894}
895EXPORT_SYMBOL_GPL(mtd_point);
896
897
898int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
899{
900 if (!mtd->_point)
901 return -EOPNOTSUPP;
902 if (from < 0 || from > mtd->size || len > mtd->size - from)
903 return -EINVAL;
904 if (!len)
905 return 0;
906 return mtd->_unpoint(mtd, from, len);
907}
908EXPORT_SYMBOL_GPL(mtd_unpoint);
909#endif
910
/*
 * Allow NOMMU mmap() to directly map the device, when the driver provides a
 * _get_unmapped_area handler.
 */
916unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
917 unsigned long offset, unsigned long flags)
918{
919 if (!mtd->_get_unmapped_area)
920 return -EOPNOTSUPP;
921 if (offset > mtd->size || len > mtd->size - offset)
922 return -EINVAL;
923 return mtd->_get_unmapped_area(mtd, len, offset, flags);
924}
925EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);
926
927int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
928 u_char *buf)
929{
930 int ret_code;
931 *retlen = 0;
932 if (from < 0 || from > mtd->size || len > mtd->size - from)
933 return -EINVAL;
934 if (!len)
935 return 0;
	/*
	 * In the absence of an error, drivers return a non-negative integer
	 * representing the maximum number of bitflips that were corrected on
	 * any one ecc region (if applicable; zero otherwise).  This is turned
	 * into -EUCLEAN below once it reaches the device's bitflip_threshold.
	 */
942 if (mtd->_read) {
943 ret_code = mtd->_read(mtd, from, len, retlen, buf);
944 } else if (mtd->_read_oob) {
945 struct mtd_oob_ops ops = {
946 .len = len,
947 .datbuf = buf,
948 };
949
950 ret_code = mtd->_read_oob(mtd, from, &ops);
951 *retlen = ops.retlen;
952 } else {
953 return -ENOTSUPP;
954 }
955
956 if (unlikely(ret_code < 0))
957 return ret_code;
958 if (mtd->ecc_strength == 0)
959 return 0;
960 return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
961}
962EXPORT_SYMBOL_GPL(mtd_read);
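
/*
 * Illustrative caller sketch (hypothetical, not part of this file): note
 * that -EUCLEAN is a soft error, the data in 'buf' is valid but the number
 * of corrected bitflips reached the device's bitflip_threshold.
 * 'mark_block_for_scrubbing' is a hypothetical helper.
 *
 *	size_t retlen;
 *	int err = mtd_read(mtd, offset, len, &retlen, buf);
 *
 *	if (err == -EUCLEAN)
 *		mark_block_for_scrubbing(mtd, offset);
 *	else if (err < 0)
 *		return err;
 */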
963
964int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
965 const u_char *buf)
966{
967 *retlen = 0;
968 if (to < 0 || to > mtd->size || len > mtd->size - to)
969 return -EINVAL;
970 if ((!mtd->_write && !mtd->_write_oob) ||
971 !(mtd->flags & MTD_WRITEABLE))
972 return -EROFS;
973 if (!len)
974 return 0;
975
976 if (!mtd->_write) {
977 struct mtd_oob_ops ops = {
978 .len = len,
979 .datbuf = (u8 *)buf,
980 };
981 int ret;
982
983 ret = mtd->_write_oob(mtd, to, &ops);
984 *retlen = ops.retlen;
985 return ret;
986 }
987
988 return mtd->_write(mtd, to, len, retlen, buf);
989}
990EXPORT_SYMBOL_GPL(mtd_write);
991
/*
 * mtd_panic_write - write to the flash device in a panic context
 *
 * A variant of mtd_write() for callers that cannot use the normal write
 * path (e.g. when dumping an oops to flash).  Only drivers providing a
 * _panic_write handler support it; everyone else gets -EOPNOTSUPP.
 */
999int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
1000 const u_char *buf)
1001{
1002 *retlen = 0;
1003 if (!mtd->_panic_write)
1004 return -EOPNOTSUPP;
1005 if (to < 0 || to > mtd->size || len > mtd->size - to)
1006 return -EINVAL;
1007 if (!(mtd->flags & MTD_WRITEABLE))
1008 return -EROFS;
1009 if (!len)
1010 return 0;
1011 return mtd->_panic_write(mtd, to, len, retlen, buf);
1012}
1013EXPORT_SYMBOL_GPL(mtd_panic_write);
1014
1015static int mtd_check_oob_ops(struct mtd_info *mtd, loff_t offs,
1016 struct mtd_oob_ops *ops)
1017{
	/*
	 * A NULL data or OOB buffer means that part of the operation is to be
	 * skipped; normalize the corresponding length to zero before
	 * validating the request against the device geometry.
	 */
1023 if (!ops->datbuf)
1024 ops->len = 0;
1025
1026 if (!ops->oobbuf)
1027 ops->ooblen = 0;
1028
1029 if (offs < 0 || offs + ops->len > mtd->size)
1030 return -EINVAL;
1031
1032 if (ops->ooblen) {
1033 u64 maxooblen;
1034
1035 if (ops->ooboffs >= mtd_oobavail(mtd, ops))
1036 return -EINVAL;
1037
1038 maxooblen = ((mtd_div_by_ws(mtd->size, mtd) -
1039 mtd_div_by_ws(offs, mtd)) *
1040 mtd_oobavail(mtd, ops)) - ops->ooboffs;
1041 if (ops->ooblen > maxooblen)
1042 return -EINVAL;
1043 }
1044
1045 return 0;
1046}
1047
1048int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
1049{
1050 int ret_code;
1051 ops->retlen = ops->oobretlen = 0;
1052
1053 ret_code = mtd_check_oob_ops(mtd, from, ops);
1054 if (ret_code)
1055 return ret_code;
1056
1057
1058 if (!mtd->_read_oob && (!mtd->_read || ops->oobbuf))
1059 return -EOPNOTSUPP;
1060
1061 if (mtd->_read_oob)
1062 ret_code = mtd->_read_oob(mtd, from, ops);
1063 else
1064 ret_code = mtd->_read(mtd, from, ops->len, &ops->retlen,
1065 ops->datbuf);
1066
	/*
	 * In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
	 * similar to mtd->_read(), returning a non-negative integer
	 * representing max bitflips.  In other cases, mtd->_read_oob() may
	 * return -EUCLEAN.  In all cases, perform similar logic to mtd_read().
	 */
1073 if (unlikely(ret_code < 0))
1074 return ret_code;
1075 if (mtd->ecc_strength == 0)
1076 return 0;
1077 return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
1078}
1079EXPORT_SYMBOL_GPL(mtd_read_oob);
1080
1081int mtd_write_oob(struct mtd_info *mtd, loff_t to,
1082 struct mtd_oob_ops *ops)
1083{
1084 int ret;
1085
1086 ops->retlen = ops->oobretlen = 0;
1087
1088 if (!(mtd->flags & MTD_WRITEABLE))
1089 return -EROFS;
1090
1091 ret = mtd_check_oob_ops(mtd, to, ops);
1092 if (ret)
1093 return ret;
1094
1095
1096 if (!mtd->_write_oob && (!mtd->_write || ops->oobbuf))
1097 return -EOPNOTSUPP;
1098
1099 if (mtd->_write_oob)
1100 return mtd->_write_oob(mtd, to, ops);
1101 else
1102 return mtd->_write(mtd, to, ops->len, &ops->retlen,
1103 ops->datbuf);
1104}
1105EXPORT_SYMBOL_GPL(mtd_write_oob);
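
/*
 * Illustrative sketch (hypothetical, not part of this file): reading one
 * page of data together with its available OOB bytes through struct
 * mtd_oob_ops.  MTD_OPS_AUTO_OOB asks the driver to place the OOB bytes
 * according to the device's free OOB layout.
 *
 *	struct mtd_oob_ops ops = {
 *		.mode	= MTD_OPS_AUTO_OOB,
 *		.len	= mtd->writesize,
 *		.ooblen	= mtd->oobavail,
 *		.datbuf	= databuf,
 *		.oobbuf	= oobbuf,
 *	};
 *
 *	err = mtd_read_oob(mtd, page_offset, &ops);
 */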
1106
/**
 * mtd_ooblayout_ecc - Get the OOB region definition of a specific ECC section
 * @mtd: MTD device structure
 * @section: ECC section.  Depending on the layout you may have all the ECC
 *	     bytes stored in a single region or one region per ECC chunk.
 * @oobecc: OOB region struct filled with the appropriate ECC position
 *	    information
 *
 * This function returns ECC section information in the OOB area.  To walk
 * all ECC bytes, call mtd_ooblayout_ecc(mtd, section++, oobecc) until it
 * returns -ERANGE.
 *
 * Returns zero on success, a negative error code otherwise.
 */
1123int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
1124 struct mtd_oob_region *oobecc)
1125{
1126 memset(oobecc, 0, sizeof(*oobecc));
1127
1128 if (!mtd || section < 0)
1129 return -EINVAL;
1130
1131 if (!mtd->ooblayout || !mtd->ooblayout->ecc)
1132 return -ENOTSUPP;
1133
1134 return mtd->ooblayout->ecc(mtd, section, oobecc);
1135}
1136EXPORT_SYMBOL_GPL(mtd_ooblayout_ecc);
1137
/**
 * mtd_ooblayout_free - Get the OOB region definition of a specific free
 *			section
 * @mtd: MTD device structure
 * @section: Free section you are interested in.  Depending on the layout
 *	     you may have all the free bytes stored in a single contiguous
 *	     region or one region per ECC chunk, plus an extra region for
 *	     the remaining bytes.
 * @oobfree: OOB region struct filled with the appropriate free position
 *	     information
 *
 * This function returns free bytes position in the OOB area.  To walk all
 * free bytes, call mtd_ooblayout_free(mtd, section++, oobfree) until it
 * returns -ERANGE.
 *
 * Returns zero on success, a negative error code otherwise.
 */
1155int mtd_ooblayout_free(struct mtd_info *mtd, int section,
1156 struct mtd_oob_region *oobfree)
1157{
1158 memset(oobfree, 0, sizeof(*oobfree));
1159
1160 if (!mtd || section < 0)
1161 return -EINVAL;
1162
1163 if (!mtd->ooblayout || !mtd->ooblayout->free)
1164 return -ENOTSUPP;
1165
1166 return mtd->ooblayout->free(mtd, section, oobfree);
1167}
1168EXPORT_SYMBOL_GPL(mtd_ooblayout_free);
1169
/**
 * mtd_ooblayout_find_region - Find the region attached to a specific byte
 * @mtd: mtd info structure
 * @byte: the byte we are searching for
 * @sectionp: pointer where the section id will be stored
 * @oobregion: used to retrieve the region position and length
 * @iter: iterator function.  Should be either mtd_ooblayout_free or
 *	  mtd_ooblayout_ecc depending on the region type you're searching for
 *
 * This function returns the section id and oobregion information of a
 * specific byte.  For example, to find where the 4th ECC byte is stored:
 *
 *	mtd_ooblayout_find_region(mtd, 3, &section, &oobregion,
 *				  mtd_ooblayout_ecc);
 *
 * Returns zero on success, a negative error code otherwise.
 */
1187static int mtd_ooblayout_find_region(struct mtd_info *mtd, int byte,
1188 int *sectionp, struct mtd_oob_region *oobregion,
1189 int (*iter)(struct mtd_info *,
1190 int section,
1191 struct mtd_oob_region *oobregion))
1192{
1193 int pos = 0, ret, section = 0;
1194
1195 memset(oobregion, 0, sizeof(*oobregion));
1196
1197 while (1) {
1198 ret = iter(mtd, section, oobregion);
1199 if (ret)
1200 return ret;
1201
1202 if (pos + oobregion->length > byte)
1203 break;
1204
1205 pos += oobregion->length;
1206 section++;
1207 }
1208
	/*
	 * Adjust region info so that it starts at the requested byte within
	 * the matching section.
	 */
1213 oobregion->offset += byte - pos;
1214 oobregion->length -= byte - pos;
1215 *sectionp = section;
1216
1217 return 0;
1218}
1219
/**
 * mtd_ooblayout_find_eccregion - Find the ECC region attached to a specific
 *				  ECC byte
 * @mtd: mtd info structure
 * @eccbyte: the ECC byte we are searching for
 * @section: pointer where the section id will be stored
 * @oobregion: OOB region information
 *
 * Works like mtd_ooblayout_find_region(), restricted to ECC bytes.
 *
 * Returns zero on success, a negative error code otherwise.
 */
1233int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte,
1234 int *section,
1235 struct mtd_oob_region *oobregion)
1236{
1237 return mtd_ooblayout_find_region(mtd, eccbyte, section, oobregion,
1238 mtd_ooblayout_ecc);
1239}
1240EXPORT_SYMBOL_GPL(mtd_ooblayout_find_eccregion);
1241
/**
 * mtd_ooblayout_get_bytes - Extract OOB bytes from the oob buffer
 * @mtd: mtd info structure
 * @buf: destination buffer to store OOB bytes
 * @oobbuf: OOB buffer
 * @start: first byte to retrieve
 * @nbytes: number of bytes to retrieve
 * @iter: section iterator
 *
 * Extracts the bytes belonging to one category (ECC or free) from the OOB
 * buffer and copies them into @buf.
 *
 * Returns zero on success, a negative error code otherwise.
 */
1256static int mtd_ooblayout_get_bytes(struct mtd_info *mtd, u8 *buf,
1257 const u8 *oobbuf, int start, int nbytes,
1258 int (*iter)(struct mtd_info *,
1259 int section,
1260 struct mtd_oob_region *oobregion))
1261{
1262 struct mtd_oob_region oobregion;
1263 int section, ret;
1264
	ret = mtd_ooblayout_find_region(mtd, start, &section,
					&oobregion, iter);
1267
1268 while (!ret) {
1269 int cnt;
1270
1271 cnt = min_t(int, nbytes, oobregion.length);
1272 memcpy(buf, oobbuf + oobregion.offset, cnt);
1273 buf += cnt;
1274 nbytes -= cnt;
1275
1276 if (!nbytes)
1277 break;
1278
1279 ret = iter(mtd, ++section, &oobregion);
1280 }
1281
1282 return ret;
1283}
1284
/**
 * mtd_ooblayout_set_bytes - put OOB bytes into the oob buffer
 * @mtd: mtd info structure
 * @buf: source buffer to get OOB bytes from
 * @oobbuf: OOB buffer
 * @start: first OOB byte to set
 * @nbytes: number of OOB bytes to set
 * @iter: section iterator
 *
 * Fills the OOB buffer with the bytes of one category (ECC or free) taken
 * from @buf.
 *
 * Returns zero on success, a negative error code otherwise.
 */
1299static int mtd_ooblayout_set_bytes(struct mtd_info *mtd, const u8 *buf,
1300 u8 *oobbuf, int start, int nbytes,
1301 int (*iter)(struct mtd_info *,
1302 int section,
1303 struct mtd_oob_region *oobregion))
1304{
1305 struct mtd_oob_region oobregion;
1306 int section, ret;
1307
	ret = mtd_ooblayout_find_region(mtd, start, &section,
					&oobregion, iter);
1310
1311 while (!ret) {
1312 int cnt;
1313
1314 cnt = min_t(int, nbytes, oobregion.length);
1315 memcpy(oobbuf + oobregion.offset, buf, cnt);
1316 buf += cnt;
1317 nbytes -= cnt;
1318
1319 if (!nbytes)
1320 break;
1321
1322 ret = iter(mtd, ++section, &oobregion);
1323 }
1324
1325 return ret;
1326}
1327
/**
 * mtd_ooblayout_count_bytes - count the number of bytes in a OOB category
 * @mtd: mtd info structure
 * @iter: category iterator (ECC or free)
 *
 * Returns the number of bytes in the given category on success, a negative
 * error code otherwise.
 */
1337static int mtd_ooblayout_count_bytes(struct mtd_info *mtd,
1338 int (*iter)(struct mtd_info *,
1339 int section,
1340 struct mtd_oob_region *oobregion))
1341{
1342 struct mtd_oob_region oobregion;
1343 int section = 0, ret, nbytes = 0;
1344
1345 while (1) {
1346 ret = iter(mtd, section++, &oobregion);
1347 if (ret) {
1348 if (ret == -ERANGE)
1349 ret = nbytes;
1350 break;
1351 }
1352
1353 nbytes += oobregion.length;
1354 }
1355
1356 return ret;
1357}
1358
/**
 * mtd_ooblayout_get_eccbytes - extract ECC bytes from the oob buffer
 * @mtd: mtd info structure
 * @eccbuf: destination buffer to store ECC bytes
 * @oobbuf: OOB buffer
 * @start: first ECC byte to retrieve
 * @nbytes: number of ECC bytes to retrieve
 *
 * Works like mtd_ooblayout_get_bytes(), except it acts on ECC bytes.
 *
 * Returns zero on success, a negative error code otherwise.
 */
1371int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf,
1372 const u8 *oobbuf, int start, int nbytes)
1373{
1374 return mtd_ooblayout_get_bytes(mtd, eccbuf, oobbuf, start, nbytes,
1375 mtd_ooblayout_ecc);
1376}
1377EXPORT_SYMBOL_GPL(mtd_ooblayout_get_eccbytes);
1378
/**
 * mtd_ooblayout_set_eccbytes - set ECC bytes into the oob buffer
 * @mtd: mtd info structure
 * @eccbuf: source buffer to get ECC bytes from
 * @oobbuf: OOB buffer
 * @start: first ECC byte to set
 * @nbytes: number of ECC bytes to set
 *
 * Works like mtd_ooblayout_set_bytes(), except it acts on ECC bytes.
 *
 * Returns zero on success, a negative error code otherwise.
 */
1391int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf,
1392 u8 *oobbuf, int start, int nbytes)
1393{
1394 return mtd_ooblayout_set_bytes(mtd, eccbuf, oobbuf, start, nbytes,
1395 mtd_ooblayout_ecc);
1396}
1397EXPORT_SYMBOL_GPL(mtd_ooblayout_set_eccbytes);
1398
/**
 * mtd_ooblayout_get_databytes - extract data bytes from the oob buffer
 * @mtd: mtd info structure
 * @databuf: destination buffer to store data bytes
 * @oobbuf: OOB buffer
 * @start: first data byte to retrieve
 * @nbytes: number of data bytes to retrieve
 *
 * Works like mtd_ooblayout_get_bytes(), except it acts on free bytes.
 *
 * Returns zero on success, a negative error code otherwise.
 */
1411int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf,
1412 const u8 *oobbuf, int start, int nbytes)
1413{
1414 return mtd_ooblayout_get_bytes(mtd, databuf, oobbuf, start, nbytes,
1415 mtd_ooblayout_free);
1416}
1417EXPORT_SYMBOL_GPL(mtd_ooblayout_get_databytes);
1418
/**
 * mtd_ooblayout_set_databytes - set data bytes into the oob buffer
 * @mtd: mtd info structure
 * @databuf: source buffer to get data bytes from
 * @oobbuf: OOB buffer
 * @start: first data byte to set
 * @nbytes: number of data bytes to set
 *
 * Works like mtd_ooblayout_set_bytes(), except it acts on free bytes.
 *
 * Returns zero on success, a negative error code otherwise.
 */
1431int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf,
1432 u8 *oobbuf, int start, int nbytes)
1433{
1434 return mtd_ooblayout_set_bytes(mtd, databuf, oobbuf, start, nbytes,
1435 mtd_ooblayout_free);
1436}
1437EXPORT_SYMBOL_GPL(mtd_ooblayout_set_databytes);
1438
/**
 * mtd_ooblayout_count_freebytes - count the number of free bytes in OOB
 * @mtd: mtd info structure
 *
 * Works like mtd_ooblayout_count_bytes(), except it counts free bytes.
 *
 * Returns the number of free bytes on success, a negative error code
 * otherwise.
 */
1447int mtd_ooblayout_count_freebytes(struct mtd_info *mtd)
1448{
1449 return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_free);
1450}
1451EXPORT_SYMBOL_GPL(mtd_ooblayout_count_freebytes);
1452
/**
 * mtd_ooblayout_count_eccbytes - count the number of ECC bytes in OOB
 * @mtd: mtd info structure
 *
 * Works like mtd_ooblayout_count_bytes(), except it counts ECC bytes.
 *
 * Returns the number of ECC bytes on success, a negative error code
 * otherwise.
 */
1461int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd)
1462{
1463 return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_ecc);
1464}
1465EXPORT_SYMBOL_GPL(mtd_ooblayout_count_eccbytes);
1466
/*
 * Wrappers around the driver OTP (one-time programmable) operations:
 * factory and user protection registers.
 */
1472int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
1473 struct otp_info *buf)
1474{
1475 if (!mtd->_get_fact_prot_info)
1476 return -EOPNOTSUPP;
1477 if (!len)
1478 return 0;
1479 return mtd->_get_fact_prot_info(mtd, len, retlen, buf);
1480}
1481EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info);
1482
1483int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
1484 size_t *retlen, u_char *buf)
1485{
1486 *retlen = 0;
1487 if (!mtd->_read_fact_prot_reg)
1488 return -EOPNOTSUPP;
1489 if (!len)
1490 return 0;
1491 return mtd->_read_fact_prot_reg(mtd, from, len, retlen, buf);
1492}
1493EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg);
1494
1495int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
1496 struct otp_info *buf)
1497{
1498 if (!mtd->_get_user_prot_info)
1499 return -EOPNOTSUPP;
1500 if (!len)
1501 return 0;
1502 return mtd->_get_user_prot_info(mtd, len, retlen, buf);
1503}
1504EXPORT_SYMBOL_GPL(mtd_get_user_prot_info);
1505
1506int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
1507 size_t *retlen, u_char *buf)
1508{
1509 *retlen = 0;
1510 if (!mtd->_read_user_prot_reg)
1511 return -EOPNOTSUPP;
1512 if (!len)
1513 return 0;
1514 return mtd->_read_user_prot_reg(mtd, from, len, retlen, buf);
1515}
1516EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg);
1517
1518int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
1519 size_t *retlen, u_char *buf)
1520{
1521 int ret;
1522
1523 *retlen = 0;
1524 if (!mtd->_write_user_prot_reg)
1525 return -EOPNOTSUPP;
1526 if (!len)
1527 return 0;
1528 ret = mtd->_write_user_prot_reg(mtd, to, len, retlen, buf);
1529 if (ret)
1530 return ret;
1531
	/*
	 * If no data could be written at all, we are out of space and must
	 * return -ENOSPC.
	 */
1536 return (*retlen) ? 0 : -ENOSPC;
1537}
1538EXPORT_SYMBOL_GPL(mtd_write_user_prot_reg);
1539
1540int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
1541{
1542 if (!mtd->_lock_user_prot_reg)
1543 return -EOPNOTSUPP;
1544 if (!len)
1545 return 0;
1546 return mtd->_lock_user_prot_reg(mtd, from, len);
1547}
1548EXPORT_SYMBOL_GPL(mtd_lock_user_prot_reg);
1549
/* Chip-supported device locking */
1551int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1552{
1553 if (!mtd->_lock)
1554 return -EOPNOTSUPP;
1555 if (ofs < 0 || ofs > mtd->size || len > mtd->size - ofs)
1556 return -EINVAL;
1557 if (!len)
1558 return 0;
1559 return mtd->_lock(mtd, ofs, len);
1560}
1561EXPORT_SYMBOL_GPL(mtd_lock);
1562
1563int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1564{
1565 if (!mtd->_unlock)
1566 return -EOPNOTSUPP;
1567 if (ofs < 0 || ofs > mtd->size || len > mtd->size - ofs)
1568 return -EINVAL;
1569 if (!len)
1570 return 0;
1571 return mtd->_unlock(mtd, ofs, len);
1572}
1573EXPORT_SYMBOL_GPL(mtd_unlock);
1574
1575int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1576{
1577 if (!mtd->_is_locked)
1578 return -EOPNOTSUPP;
1579 if (ofs < 0 || ofs > mtd->size || len > mtd->size - ofs)
1580 return -EINVAL;
1581 if (!len)
1582 return 0;
1583 return mtd->_is_locked(mtd, ofs, len);
1584}
1585EXPORT_SYMBOL_GPL(mtd_is_locked);
1586
1587int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs)
1588{
1589 if (ofs < 0 || ofs > mtd->size)
1590 return -EINVAL;
1591 if (!mtd->_block_isreserved)
1592 return 0;
1593 return mtd->_block_isreserved(mtd, ofs);
1594}
1595EXPORT_SYMBOL_GPL(mtd_block_isreserved);
1596
1597int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
1598{
1599 if (ofs < 0 || ofs > mtd->size)
1600 return -EINVAL;
1601 if (!mtd->_block_isbad)
1602 return 0;
1603 return mtd->_block_isbad(mtd, ofs);
1604}
1605EXPORT_SYMBOL_GPL(mtd_block_isbad);
1606
1607int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
1608{
1609 if (!mtd->_block_markbad)
1610 return -EOPNOTSUPP;
1611 if (ofs < 0 || ofs > mtd->size)
1612 return -EINVAL;
1613 if (!(mtd->flags & MTD_WRITEABLE))
1614 return -EROFS;
1615 return mtd->_block_markbad(mtd, ofs);
1616}
1617EXPORT_SYMBOL_GPL(mtd_block_markbad);
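
/*
 * Illustrative sketch (hypothetical, not part of this file): skipping bad
 * blocks while walking a NAND device with the helpers above.
 *
 *	loff_t off;
 *
 *	for (off = 0; off < mtd->size; off += mtd->erasesize) {
 *		if (mtd_block_isbad(mtd, off))
 *			continue;
 *		... use the good block at 'off' ...
 *	}
 */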
1618
1619#ifndef __UBOOT__
1620
/*
 * default_mtd_writev - the default writev method
 * @mtd: mtd device description object pointer
 * @vecs: the vectors to write
 * @count: count of vectors in @vecs
 * @to: the MTD device offset to write to
 * @retlen: on exit contains the count of bytes written to the MTD device.
 *
 * Writes the vectors sequentially with mtd_write().  Returns zero in case
 * of success and a negative error code in case of failure.
 */
1631static int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
1632 unsigned long count, loff_t to, size_t *retlen)
1633{
1634 unsigned long i;
1635 size_t totlen = 0, thislen;
1636 int ret = 0;
1637
1638 for (i = 0; i < count; i++) {
1639 if (!vecs[i].iov_len)
1640 continue;
1641 ret = mtd_write(mtd, to, vecs[i].iov_len, &thislen,
1642 vecs[i].iov_base);
1643 totlen += thislen;
1644 if (ret || thislen != vecs[i].iov_len)
1645 break;
1646 to += vecs[i].iov_len;
1647 }
1648 *retlen = totlen;
1649 return ret;
1650}
1651
/*
 * mtd_writev - the vector-based MTD write method
 * @mtd: mtd device description object pointer
 * @vecs: the vectors to write
 * @count: count of vectors in @vecs
 * @to: the MTD device offset to write to
 * @retlen: on exit contains the count of bytes written to the MTD device.
 *
 * Uses the driver's _writev handler when available, otherwise falls back to
 * default_mtd_writev().  Returns zero in case of success and a negative
 * error code in case of failure.
 */
1663int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
1664 unsigned long count, loff_t to, size_t *retlen)
1665{
1666 *retlen = 0;
1667 if (!(mtd->flags & MTD_WRITEABLE))
1668 return -EROFS;
1669 if (!mtd->_writev)
1670 return default_mtd_writev(mtd, vecs, count, to, retlen);
1671 return mtd->_writev(mtd, vecs, count, to, retlen);
1672}
1673EXPORT_SYMBOL_GPL(mtd_writev);
1674
/**
 * mtd_kmalloc_up_to - allocate a contiguous buffer up to the specified size
 * @mtd: mtd device description object pointer
 * @size: a pointer to the ideal or maximum size of the allocation, points
 *        to the actual allocation size on success.
 *
 * This routine attempts to allocate a contiguous kernel buffer up to
 * the specified size, backing off the size of the request exponentially
 * until the request succeeds or until the allocation size falls below
 * the system page size.  This attempts to make sure it does not adversely
 * impact system performance, so when allocating more than one page, we
 * ask the memory allocator to avoid re-trying, swapping, writing back
 * or performing I/O.
 *
 * Note, this function also makes sure that the allocated buffer is aligned
 * to the MTD device's min. I/O unit, i.e. the "mtd->writesize" value.
 *
 * This is called, for example, by mtd_{read,write} and jffs2 scan routines.
 */
1699void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
1700{
1701 gfp_t flags = __GFP_NOWARN | __GFP_WAIT |
1702 __GFP_NORETRY | __GFP_NO_KSWAPD;
1703 size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
1704 void *kbuf;
1705
1706 *size = min_t(size_t, *size, KMALLOC_MAX_SIZE);
1707
1708 while (*size > min_alloc) {
1709 kbuf = kmalloc(*size, flags);
1710 if (kbuf)
1711 return kbuf;
1712
1713 *size >>= 1;
1714 *size = ALIGN(*size, mtd->writesize);
1715 }
1716
	/*
	 * For the last resort allocation allow 'kmalloc()' to do all sorts of
	 * things (write-back, dropping caches, etc) by using GFP_KERNEL.
	 */
1721 return kmalloc(*size, GFP_KERNEL);
1722}
1723EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to);
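
/*
 * Illustrative caller sketch (hypothetical, not part of this file):
 *
 *	size_t size = mtd->erasesize;		// ideal buffer size
 *	void *buf = mtd_kmalloc_up_to(mtd, &size);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	// 'size' now holds the size actually allocated, possibly smaller
 *	// than requested.
 *	kfree(buf);
 */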
1724#endif
1725
#ifdef CONFIG_PROC_FS

/* Support for /proc/mtd */
1731static int mtd_proc_show(struct seq_file *m, void *v)
1732{
1733 struct mtd_info *mtd;
1734
1735 seq_puts(m, "dev: size erasesize name\n");
1736 mutex_lock(&mtd_table_mutex);
1737 mtd_for_each_device(mtd) {
1738 seq_printf(m, "mtd%d: %8.8llx %8.8x \"%s\"\n",
1739 mtd->index, (unsigned long long)mtd->size,
1740 mtd->erasesize, mtd->name);
1741 }
1742 mutex_unlock(&mtd_table_mutex);
1743 return 0;
1744}
1745
1746static int mtd_proc_open(struct inode *inode, struct file *file)
1747{
1748 return single_open(file, mtd_proc_show, NULL);
1749}
1750
1751static const struct file_operations mtd_proc_ops = {
1752 .open = mtd_proc_open,
1753 .read = seq_read,
1754 .llseek = seq_lseek,
1755 .release = single_release,
1756};
1757#endif
1758

/* Init code */
1762#ifndef __UBOOT__
1763static int __init mtd_bdi_init(struct backing_dev_info *bdi, const char *name)
1764{
1765 int ret;
1766
1767 ret = bdi_init(bdi);
1768 if (!ret)
1769 ret = bdi_register(bdi, NULL, "%s", name);
1770
1771 if (ret)
1772 bdi_destroy(bdi);
1773
1774 return ret;
1775}
1776
1777static struct proc_dir_entry *proc_mtd;
1778
1779static int __init init_mtd(void)
1780{
1781 int ret;
1782
1783 ret = class_register(&mtd_class);
1784 if (ret)
1785 goto err_reg;
1786
1787 ret = mtd_bdi_init(&mtd_bdi_unmappable, "mtd-unmap");
1788 if (ret)
1789 goto err_bdi1;
1790
1791 ret = mtd_bdi_init(&mtd_bdi_ro_mappable, "mtd-romap");
1792 if (ret)
1793 goto err_bdi2;
1794
1795 ret = mtd_bdi_init(&mtd_bdi_rw_mappable, "mtd-rwmap");
1796 if (ret)
1797 goto err_bdi3;
1798
1799 proc_mtd = proc_create("mtd", 0, NULL, &mtd_proc_ops);
1800
1801 ret = init_mtdchar();
1802 if (ret)
1803 goto out_procfs;
1804
1805 return 0;
1806
1807out_procfs:
1808 if (proc_mtd)
1809 remove_proc_entry("mtd", NULL);
1810err_bdi3:
1811 bdi_destroy(&mtd_bdi_ro_mappable);
1812err_bdi2:
1813 bdi_destroy(&mtd_bdi_unmappable);
1814err_bdi1:
1815 class_unregister(&mtd_class);
1816err_reg:
1817 pr_err("Error registering mtd class or bdi: %d\n", ret);
1818 return ret;
1819}
1820
1821static void __exit cleanup_mtd(void)
1822{
1823 cleanup_mtdchar();
1824 if (proc_mtd)
1825 remove_proc_entry("mtd", NULL);
1826 class_unregister(&mtd_class);
1827 bdi_destroy(&mtd_bdi_unmappable);
1828 bdi_destroy(&mtd_bdi_ro_mappable);
1829 bdi_destroy(&mtd_bdi_rw_mappable);
1830}
1831
1832module_init(init_mtd);
1833module_exit(cleanup_mtd);
1834#endif
1835
1836MODULE_LICENSE("GPL");
1837MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
1838MODULE_DESCRIPTION("Core MTD registration and access routines");
1839