// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Core registration and callback routines for MTD
 * drivers and users.
 *
 * Copyright © David Woodhouse <dwmw2@infradead.org>
 */
10#include <linux/module.h>
11#include <linux/kernel.h>
12#include <linux/ptrace.h>
13#include <linux/seq_file.h>
14#include <linux/string.h>
15#include <linux/timer.h>
16#include <linux/major.h>
17#include <linux/fs.h>
18#include <linux/err.h>
19#include <linux/ioctl.h>
20#include <linux/init.h>
21#include <linux/of.h>
22#include <linux/proc_fs.h>
23#include <linux/idr.h>
24#include <linux/backing-dev.h>
25#include <linux/gfp.h>
26#include <linux/slab.h>
27#include <linux/reboot.h>
28#include <linux/leds.h>
29#include <linux/debugfs.h>
30#include <linux/nvmem-provider.h>
31
32#include <linux/mtd/mtd.h>
33#include <linux/mtd/partitions.h>
34
35#include "mtdcore.h"
36
37struct backing_dev_info *mtd_bdi;
38
39#ifdef CONFIG_PM_SLEEP
40
41static int mtd_cls_suspend(struct device *dev)
42{
43 struct mtd_info *mtd = dev_get_drvdata(dev);
44
45 return mtd ? mtd_suspend(mtd) : 0;
46}
47
48static int mtd_cls_resume(struct device *dev)
49{
50 struct mtd_info *mtd = dev_get_drvdata(dev);
51
52 if (mtd)
53 mtd_resume(mtd);
54 return 0;
55}
56
57static SIMPLE_DEV_PM_OPS(mtd_cls_pm_ops, mtd_cls_suspend, mtd_cls_resume);
58#define MTD_CLS_PM_OPS (&mtd_cls_pm_ops)
59#else
60#define MTD_CLS_PM_OPS NULL
61#endif
62
63static struct class mtd_class = {
64 .name = "mtd",
65 .owner = THIS_MODULE,
66 .pm = MTD_CLS_PM_OPS,
67};
68
69static DEFINE_IDR(mtd_idr);
70
71
72
73DEFINE_MUTEX(mtd_table_mutex);
74EXPORT_SYMBOL_GPL(mtd_table_mutex);
75
76struct mtd_info *__mtd_next_device(int i)
77{
78 return idr_get_next(&mtd_idr, &i);
79}
80EXPORT_SYMBOL_GPL(__mtd_next_device);
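/*
 * Illustrative sketch (not part of mtdcore): walking the registered MTD
 * devices with mtd_for_each_device(), which is built on __mtd_next_device().
 * The walk must be done under mtd_table_mutex, as mtd_proc_show() does later
 * in this file. The function name is hypothetical.
 *
 *	static void example_list_mtd_devices(void)
 *	{
 *		struct mtd_info *mtd;
 *
 *		mutex_lock(&mtd_table_mutex);
 *		mtd_for_each_device(mtd)
 *			pr_info("mtd%d: %s\n", mtd->index, mtd->name);
 *		mutex_unlock(&mtd_table_mutex);
 *	}
 */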
81
82static LIST_HEAD(mtd_notifiers);
83
84
85#define MTD_DEVT(index) MKDEV(MTD_CHAR_MAJOR, (index)*2)
86
87
88
89
90static void mtd_release(struct device *dev)
91{
92 struct mtd_info *mtd = dev_get_drvdata(dev);
93 dev_t index = MTD_DEVT(mtd->index);
94
95
96 device_destroy(&mtd_class, index + 1);
97}
98
99static ssize_t mtd_type_show(struct device *dev,
100 struct device_attribute *attr, char *buf)
101{
102 struct mtd_info *mtd = dev_get_drvdata(dev);
103 char *type;
104
105 switch (mtd->type) {
106 case MTD_ABSENT:
107 type = "absent";
108 break;
109 case MTD_RAM:
110 type = "ram";
111 break;
112 case MTD_ROM:
113 type = "rom";
114 break;
115 case MTD_NORFLASH:
116 type = "nor";
117 break;
118 case MTD_NANDFLASH:
119 type = "nand";
120 break;
121 case MTD_DATAFLASH:
122 type = "dataflash";
123 break;
124 case MTD_UBIVOLUME:
125 type = "ubi";
126 break;
127 case MTD_MLCNANDFLASH:
128 type = "mlc-nand";
129 break;
130 default:
131 type = "unknown";
132 }
133
134 return snprintf(buf, PAGE_SIZE, "%s\n", type);
135}
136static DEVICE_ATTR(type, S_IRUGO, mtd_type_show, NULL);
137
138static ssize_t mtd_flags_show(struct device *dev,
139 struct device_attribute *attr, char *buf)
140{
141 struct mtd_info *mtd = dev_get_drvdata(dev);
142
143 return snprintf(buf, PAGE_SIZE, "0x%lx\n", (unsigned long)mtd->flags);
144}
145static DEVICE_ATTR(flags, S_IRUGO, mtd_flags_show, NULL);
146
147static ssize_t mtd_size_show(struct device *dev,
148 struct device_attribute *attr, char *buf)
149{
150 struct mtd_info *mtd = dev_get_drvdata(dev);
151
152 return snprintf(buf, PAGE_SIZE, "%llu\n",
153 (unsigned long long)mtd->size);
154}
155static DEVICE_ATTR(size, S_IRUGO, mtd_size_show, NULL);
156
157static ssize_t mtd_erasesize_show(struct device *dev,
158 struct device_attribute *attr, char *buf)
159{
160 struct mtd_info *mtd = dev_get_drvdata(dev);
161
162 return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->erasesize);
163}
164static DEVICE_ATTR(erasesize, S_IRUGO, mtd_erasesize_show, NULL);
165
166static ssize_t mtd_writesize_show(struct device *dev,
167 struct device_attribute *attr, char *buf)
168{
169 struct mtd_info *mtd = dev_get_drvdata(dev);
170
171 return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->writesize);
172}
173static DEVICE_ATTR(writesize, S_IRUGO, mtd_writesize_show, NULL);
174
175static ssize_t mtd_subpagesize_show(struct device *dev,
176 struct device_attribute *attr, char *buf)
177{
178 struct mtd_info *mtd = dev_get_drvdata(dev);
179 unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft;
180
181 return snprintf(buf, PAGE_SIZE, "%u\n", subpagesize);
182}
183static DEVICE_ATTR(subpagesize, S_IRUGO, mtd_subpagesize_show, NULL);
184
185static ssize_t mtd_oobsize_show(struct device *dev,
186 struct device_attribute *attr, char *buf)
187{
188 struct mtd_info *mtd = dev_get_drvdata(dev);
189
190 return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->oobsize);
191}
192static DEVICE_ATTR(oobsize, S_IRUGO, mtd_oobsize_show, NULL);
193
194static ssize_t mtd_oobavail_show(struct device *dev,
195 struct device_attribute *attr, char *buf)
196{
197 struct mtd_info *mtd = dev_get_drvdata(dev);
198
199 return snprintf(buf, PAGE_SIZE, "%u\n", mtd->oobavail);
200}
201static DEVICE_ATTR(oobavail, S_IRUGO, mtd_oobavail_show, NULL);
202
203static ssize_t mtd_numeraseregions_show(struct device *dev,
204 struct device_attribute *attr, char *buf)
205{
206 struct mtd_info *mtd = dev_get_drvdata(dev);
207
208 return snprintf(buf, PAGE_SIZE, "%u\n", mtd->numeraseregions);
209}
210static DEVICE_ATTR(numeraseregions, S_IRUGO, mtd_numeraseregions_show,
211 NULL);
212
213static ssize_t mtd_name_show(struct device *dev,
214 struct device_attribute *attr, char *buf)
215{
216 struct mtd_info *mtd = dev_get_drvdata(dev);
217
218 return snprintf(buf, PAGE_SIZE, "%s\n", mtd->name);
219}
220static DEVICE_ATTR(name, S_IRUGO, mtd_name_show, NULL);
221
222static ssize_t mtd_ecc_strength_show(struct device *dev,
223 struct device_attribute *attr, char *buf)
224{
225 struct mtd_info *mtd = dev_get_drvdata(dev);
226
227 return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_strength);
228}
229static DEVICE_ATTR(ecc_strength, S_IRUGO, mtd_ecc_strength_show, NULL);
230
231static ssize_t mtd_bitflip_threshold_show(struct device *dev,
232 struct device_attribute *attr,
233 char *buf)
234{
235 struct mtd_info *mtd = dev_get_drvdata(dev);
236
237 return snprintf(buf, PAGE_SIZE, "%u\n", mtd->bitflip_threshold);
238}
239
240static ssize_t mtd_bitflip_threshold_store(struct device *dev,
241 struct device_attribute *attr,
242 const char *buf, size_t count)
243{
244 struct mtd_info *mtd = dev_get_drvdata(dev);
245 unsigned int bitflip_threshold;
246 int retval;
247
248 retval = kstrtouint(buf, 0, &bitflip_threshold);
249 if (retval)
250 return retval;
251
252 mtd->bitflip_threshold = bitflip_threshold;
253 return count;
254}
255static DEVICE_ATTR(bitflip_threshold, S_IRUGO | S_IWUSR,
256 mtd_bitflip_threshold_show,
257 mtd_bitflip_threshold_store);
258
259static ssize_t mtd_ecc_step_size_show(struct device *dev,
260 struct device_attribute *attr, char *buf)
261{
262 struct mtd_info *mtd = dev_get_drvdata(dev);
263
264 return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_step_size);
265
266}
267static DEVICE_ATTR(ecc_step_size, S_IRUGO, mtd_ecc_step_size_show, NULL);
268
269static ssize_t mtd_ecc_stats_corrected_show(struct device *dev,
270 struct device_attribute *attr, char *buf)
271{
272 struct mtd_info *mtd = dev_get_drvdata(dev);
273 struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
274
275 return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->corrected);
276}
277static DEVICE_ATTR(corrected_bits, S_IRUGO,
278 mtd_ecc_stats_corrected_show, NULL);
279
280static ssize_t mtd_ecc_stats_errors_show(struct device *dev,
281 struct device_attribute *attr, char *buf)
282{
283 struct mtd_info *mtd = dev_get_drvdata(dev);
284 struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
285
286 return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->failed);
287}
288static DEVICE_ATTR(ecc_failures, S_IRUGO, mtd_ecc_stats_errors_show, NULL);
289
290static ssize_t mtd_badblocks_show(struct device *dev,
291 struct device_attribute *attr, char *buf)
292{
293 struct mtd_info *mtd = dev_get_drvdata(dev);
294 struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
295
296 return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->badblocks);
297}
298static DEVICE_ATTR(bad_blocks, S_IRUGO, mtd_badblocks_show, NULL);
299
300static ssize_t mtd_bbtblocks_show(struct device *dev,
301 struct device_attribute *attr, char *buf)
302{
303 struct mtd_info *mtd = dev_get_drvdata(dev);
304 struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
305
306 return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->bbtblocks);
307}
308static DEVICE_ATTR(bbt_blocks, S_IRUGO, mtd_bbtblocks_show, NULL);
309
310static struct attribute *mtd_attrs[] = {
311 &dev_attr_type.attr,
312 &dev_attr_flags.attr,
313 &dev_attr_size.attr,
314 &dev_attr_erasesize.attr,
315 &dev_attr_writesize.attr,
316 &dev_attr_subpagesize.attr,
317 &dev_attr_oobsize.attr,
318 &dev_attr_oobavail.attr,
319 &dev_attr_numeraseregions.attr,
320 &dev_attr_name.attr,
321 &dev_attr_ecc_strength.attr,
322 &dev_attr_ecc_step_size.attr,
323 &dev_attr_corrected_bits.attr,
324 &dev_attr_ecc_failures.attr,
325 &dev_attr_bad_blocks.attr,
326 &dev_attr_bbt_blocks.attr,
327 &dev_attr_bitflip_threshold.attr,
328 NULL,
329};
330ATTRIBUTE_GROUPS(mtd);
331
332static const struct device_type mtd_devtype = {
333 .name = "mtd",
334 .groups = mtd_groups,
335 .release = mtd_release,
336};
337
338static int mtd_partid_debug_show(struct seq_file *s, void *p)
339{
340 struct mtd_info *mtd = s->private;
341
342 seq_printf(s, "%s\n", mtd->dbg.partid);
343
344 return 0;
345}
346
347DEFINE_SHOW_ATTRIBUTE(mtd_partid_debug);
348
349static int mtd_partname_debug_show(struct seq_file *s, void *p)
350{
351 struct mtd_info *mtd = s->private;
352
353 seq_printf(s, "%s\n", mtd->dbg.partname);
354
355 return 0;
356}
357
358DEFINE_SHOW_ATTRIBUTE(mtd_partname_debug);
359
360static struct dentry *dfs_dir_mtd;
361
362static void mtd_debugfs_populate(struct mtd_info *mtd)
363{
364 struct device *dev = &mtd->dev;
365 struct dentry *root;
366
367 if (IS_ERR_OR_NULL(dfs_dir_mtd))
368 return;
369
370 root = debugfs_create_dir(dev_name(dev), dfs_dir_mtd);
371 mtd->dbg.dfs_dir = root;
372
373 if (mtd->dbg.partid)
374 debugfs_create_file("partid", 0400, root, mtd,
375 &mtd_partid_debug_fops);
376
377 if (mtd->dbg.partname)
378 debugfs_create_file("partname", 0400, root, mtd,
379 &mtd_partname_debug_fops);
380}
381
382#ifndef CONFIG_MMU
383unsigned mtd_mmap_capabilities(struct mtd_info *mtd)
384{
385 switch (mtd->type) {
386 case MTD_RAM:
387 return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
388 NOMMU_MAP_READ | NOMMU_MAP_WRITE;
389 case MTD_ROM:
390 return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
391 NOMMU_MAP_READ;
392 default:
393 return NOMMU_MAP_COPY;
394 }
395}
396EXPORT_SYMBOL_GPL(mtd_mmap_capabilities);
397#endif
398
399static int mtd_reboot_notifier(struct notifier_block *n, unsigned long state,
400 void *cmd)
401{
402 struct mtd_info *mtd;
403
404 mtd = container_of(n, struct mtd_info, reboot_notifier);
405 mtd->_reboot(mtd);
406
407 return NOTIFY_DONE;
408}
409
/**
 * mtd_wunit_to_pairing_info - get pairing information of a wunit
 * @mtd: pointer to new MTD device info structure
 * @wunit: write unit we are interested in, starting at 0
 * @info: returned pairing information
 *
 * Retrieve pairing information associated to the wunit. This is mainly
 * useful when dealing with MLC/TLC NANDs where pages can be paired together,
 * and where programming a page may influence the page it is paired with.
 *
 * Returns -EINVAL if the wunit is out of range, zero otherwise. When no
 * pairing scheme is provided by the driver, everything is assumed to belong
 * to group 0 and the pair number equals the wunit.
 */
436int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit,
437 struct mtd_pairing_info *info)
438{
439 struct mtd_info *master = mtd_get_master(mtd);
440 int npairs = mtd_wunit_per_eb(master) / mtd_pairing_groups(master);
441
442 if (wunit < 0 || wunit >= npairs)
443 return -EINVAL;
444
445 if (master->pairing && master->pairing->get_info)
446 return master->pairing->get_info(master, wunit, info);
447
448 info->group = 0;
449 info->pair = wunit;
450
451 return 0;
452}
453EXPORT_SYMBOL_GPL(mtd_wunit_to_pairing_info);
454
/**
 * mtd_pairing_info_to_wunit - get wunit from pairing information
 * @mtd: pointer to new MTD device info structure
 * @info: pairing information struct
 *
 * Reverse of mtd_wunit_to_pairing_info(): returns the wunit described by
 * @info, or -EINVAL if the pairing information is out of range.
 */
479int mtd_pairing_info_to_wunit(struct mtd_info *mtd,
480 const struct mtd_pairing_info *info)
481{
482 struct mtd_info *master = mtd_get_master(mtd);
483 int ngroups = mtd_pairing_groups(master);
484 int npairs = mtd_wunit_per_eb(master) / ngroups;
485
486 if (!info || info->pair < 0 || info->pair >= npairs ||
487 info->group < 0 || info->group >= ngroups)
488 return -EINVAL;
489
490 if (master->pairing && master->pairing->get_wunit)
		return master->pairing->get_wunit(master, info);
492
493 return info->pair;
494}
495EXPORT_SYMBOL_GPL(mtd_pairing_info_to_wunit);
496
/**
 * mtd_pairing_groups - get the number of pairing groups
 * @mtd: pointer to new MTD device info structure
 *
 * Returns the number of pairing groups exposed by the pairing scheme, or 1
 * when the driver does not provide any pairing information.
 */
507int mtd_pairing_groups(struct mtd_info *mtd)
508{
509 struct mtd_info *master = mtd_get_master(mtd);
510
511 if (!master->pairing || !master->pairing->ngroups)
512 return 1;
513
514 return master->pairing->ngroups;
515}
516EXPORT_SYMBOL_GPL(mtd_pairing_groups);
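/*
 * Illustrative sketch (not part of mtdcore): converting between write units
 * and pairing information. The helpers degrade gracefully to a single group
 * when no pairing scheme is provided. The function name is hypothetical.
 *
 *	static void example_dump_pairing(struct mtd_info *mtd, int wunit)
 *	{
 *		struct mtd_pairing_info info;
 *
 *		if (!mtd_wunit_to_pairing_info(mtd, wunit, &info))
 *			pr_info("wunit %d -> pair %d, group %d (of %d)\n",
 *				wunit, info.pair, info.group,
 *				mtd_pairing_groups(mtd));
 *
 *		// The reverse direction recovers the write unit:
 *		// mtd_pairing_info_to_wunit(mtd, &info) == wunit
 *	}
 */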
517
518static int mtd_nvmem_reg_read(void *priv, unsigned int offset,
519 void *val, size_t bytes)
520{
521 struct mtd_info *mtd = priv;
522 size_t retlen;
523 int err;
524
525 err = mtd_read(mtd, offset, bytes, &retlen, val);
526 if (err && err != -EUCLEAN)
527 return err;
528
529 return retlen == bytes ? 0 : -EIO;
530}
531
532static int mtd_nvmem_add(struct mtd_info *mtd)
533{
534 struct nvmem_config config = {};
535
536 config.id = -1;
537 config.dev = &mtd->dev;
538 config.name = dev_name(&mtd->dev);
539 config.owner = THIS_MODULE;
540 config.reg_read = mtd_nvmem_reg_read;
541 config.size = mtd->size;
542 config.word_size = 1;
543 config.stride = 1;
544 config.read_only = true;
545 config.root_only = true;
546 config.no_of_node = true;
547 config.priv = mtd;
548
549 mtd->nvmem = nvmem_register(&config);
550 if (IS_ERR(mtd->nvmem)) {
551
552 if (PTR_ERR(mtd->nvmem) == -EOPNOTSUPP) {
553 mtd->nvmem = NULL;
554 } else {
555 dev_err(&mtd->dev, "Failed to register NVMEM device\n");
556 return PTR_ERR(mtd->nvmem);
557 }
558 }
559
560 return 0;
561}
562
/**
 *	add_mtd_device - register an MTD device
 *	@mtd: pointer to new MTD device info structure
 *
 *	Add a device to the list of MTD devices present in the system, and
 *	notify each currently active MTD 'user' of its arrival. Returns
 *	zero on success or a negative error code on failure.
 */
572int add_mtd_device(struct mtd_info *mtd)
573{
574 struct mtd_info *master = mtd_get_master(mtd);
575 struct mtd_notifier *not;
576 int i, error;
577
578
579
580
581
582
583 if (WARN_ONCE(mtd->dev.type, "MTD already registered\n"))
584 return -EEXIST;
585
586 BUG_ON(mtd->writesize == 0);
587
588
589
590
591
592 if (WARN_ON((mtd->_write && mtd->_write_oob) ||
593 (mtd->_read && mtd->_read_oob)))
594 return -EINVAL;
595
596 if (WARN_ON((!mtd->erasesize || !master->_erase) &&
597 !(mtd->flags & MTD_NO_ERASE)))
598 return -EINVAL;
599
600
601
602
603
604
605
606
607
608 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION &&
609 (!mtd_is_partition(mtd) || master->type != MTD_MLCNANDFLASH ||
610 !master->pairing || master->_writev))
611 return -EINVAL;
612
613 mutex_lock(&mtd_table_mutex);
614
615 i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
616 if (i < 0) {
617 error = i;
618 goto fail_locked;
619 }
620
621 mtd->index = i;
622 mtd->usecount = 0;
623
624
625 if (mtd->bitflip_threshold == 0)
626 mtd->bitflip_threshold = mtd->ecc_strength;
627
628 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
629 int ngroups = mtd_pairing_groups(master);
630
631 mtd->erasesize /= ngroups;
632 mtd->size = (u64)mtd_div_by_eb(mtd->size, master) *
633 mtd->erasesize;
634 }
635
636 if (is_power_of_2(mtd->erasesize))
637 mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
638 else
639 mtd->erasesize_shift = 0;
640
641 if (is_power_of_2(mtd->writesize))
642 mtd->writesize_shift = ffs(mtd->writesize) - 1;
643 else
644 mtd->writesize_shift = 0;
645
646 mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
647 mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;
648
649
650 if ((mtd->flags & MTD_WRITEABLE) && (mtd->flags & MTD_POWERUP_LOCK)) {
651 error = mtd_unlock(mtd, 0, mtd->size);
652 if (error && error != -EOPNOTSUPP)
653 printk(KERN_WARNING
654 "%s: unlock failed, writes may not work\n",
655 mtd->name);
656
657 error = 0;
658 }
659
660
661
662
663 mtd->dev.type = &mtd_devtype;
664 mtd->dev.class = &mtd_class;
665 mtd->dev.devt = MTD_DEVT(i);
666 dev_set_name(&mtd->dev, "mtd%d", i);
667 dev_set_drvdata(&mtd->dev, mtd);
668 of_node_get(mtd_get_of_node(mtd));
669 error = device_register(&mtd->dev);
670 if (error)
671 goto fail_added;
672
673
674 error = mtd_nvmem_add(mtd);
675 if (error)
676 goto fail_nvmem_add;
677
678 mtd_debugfs_populate(mtd);
679
680 device_create(&mtd_class, mtd->dev.parent, MTD_DEVT(i) + 1, NULL,
681 "mtd%dro", i);
682
683 pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name);
684
685
686 list_for_each_entry(not, &mtd_notifiers, list)
687 not->add(mtd);
688
689 mutex_unlock(&mtd_table_mutex);
690
691
692
693
694 __module_get(THIS_MODULE);
695 return 0;
696
697fail_nvmem_add:
698 device_unregister(&mtd->dev);
699fail_added:
700 of_node_put(mtd_get_of_node(mtd));
701 idr_remove(&mtd_idr, i);
702fail_locked:
703 mutex_unlock(&mtd_table_mutex);
704 return error;
705}
706
/**
 *	del_mtd_device - unregister an MTD device
 *	@mtd: pointer to MTD device info structure
 *
 *	Remove a device from the list of MTD devices present in the system,
 *	and notify each currently active MTD 'user' of its departure.
 *	Returns zero on success, -ENODEV if the device could not be found,
 *	or -EBUSY if it is still in use.
 */
717int del_mtd_device(struct mtd_info *mtd)
718{
719 int ret;
720 struct mtd_notifier *not;
721
722 mutex_lock(&mtd_table_mutex);
723
724 debugfs_remove_recursive(mtd->dbg.dfs_dir);
725
726 if (idr_find(&mtd_idr, mtd->index) != mtd) {
727 ret = -ENODEV;
728 goto out_error;
729 }
730
731
732
733 list_for_each_entry(not, &mtd_notifiers, list)
734 not->remove(mtd);
735
736 if (mtd->usecount) {
737 printk(KERN_NOTICE "Removing MTD device #%d (%s) with use count %d\n",
738 mtd->index, mtd->name, mtd->usecount);
739 ret = -EBUSY;
740 } else {
741
742 if (mtd->nvmem)
743 nvmem_unregister(mtd->nvmem);
744
745 device_unregister(&mtd->dev);
746
747 idr_remove(&mtd_idr, mtd->index);
748 of_node_put(mtd_get_of_node(mtd));
749
750 module_put(THIS_MODULE);
751 ret = 0;
752 }
753
754out_error:
755 mutex_unlock(&mtd_table_mutex);
756 return ret;
757}
758
759
760
761
762
763static void mtd_set_dev_defaults(struct mtd_info *mtd)
764{
765 if (mtd->dev.parent) {
766 if (!mtd->owner && mtd->dev.parent->driver)
767 mtd->owner = mtd->dev.parent->driver->owner;
768 if (!mtd->name)
769 mtd->name = dev_name(mtd->dev.parent);
770 } else {
771 pr_debug("mtd device won't show a device symlink in sysfs\n");
772 }
773
774 INIT_LIST_HEAD(&mtd->partitions);
775 mutex_init(&mtd->master.partitions_lock);
776}
777
/**
 * mtd_device_parse_register - parse partitions and register an MTD device.
 *
 * @mtd: the MTD device to register
 * @types: the list of MTD partition probes to try, see
 *         parse_mtd_partitions() for more information
 * @parser_data: MTD partition parser-specific data
 * @parts: fallback partition information to register, if parsing fails;
 *         only valid if %nr_parts > zero
 * @nr_parts: the number of partitions in @parts, if zero then the full
 *            MTD device is registered if no partition info is found
 *
 * This function aggregates MTD partition parsing (done by
 * parse_mtd_partitions()) with MTD device and partition registration: the
 * whole device is registered first when CONFIG_MTD_PARTITIONED_MASTER is
 * enabled, then the parsers in @types are tried, falling back to
 * @parts/@nr_parts, and finally the bare device is registered if no
 * partitions were found.
 *
 * Returns zero in case of success and a negative error code in case of
 * failure.
 */
806int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
807 struct mtd_part_parser_data *parser_data,
808 const struct mtd_partition *parts,
809 int nr_parts)
810{
811 int ret;
812
813 mtd_set_dev_defaults(mtd);
814
815 if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) {
816 ret = add_mtd_device(mtd);
817 if (ret)
818 return ret;
819 }
820
821
822 ret = parse_mtd_partitions(mtd, types, parser_data);
823 if (ret > 0)
824 ret = 0;
825 else if (nr_parts)
826 ret = add_mtd_partitions(mtd, parts, nr_parts);
827 else if (!device_is_registered(&mtd->dev))
828 ret = add_mtd_device(mtd);
829 else
830 ret = 0;
831
832 if (ret)
833 goto out;
834
	/*
	 * Some drivers unfortunately call this function more than once for
	 * the same device. Warn when that happens, and make sure the reboot
	 * notifier below is only registered once.
	 */
843 WARN_ONCE(mtd->_reboot && mtd->reboot_notifier.notifier_call,
844 "MTD already registered\n");
845 if (mtd->_reboot && !mtd->reboot_notifier.notifier_call) {
846 mtd->reboot_notifier.notifier_call = mtd_reboot_notifier;
847 register_reboot_notifier(&mtd->reboot_notifier);
848 }
849
850out:
851 if (ret && device_is_registered(&mtd->dev))
852 del_mtd_device(mtd);
853
854 return ret;
855}
856EXPORT_SYMBOL_GPL(mtd_device_parse_register);
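/*
 * Illustrative sketch (not part of mtdcore): the registration pattern a flash
 * driver typically follows once it has filled in its mtd_info. Partition
 * parsing falls back to the static table if no parser finds anything. The
 * "example_" names and the partition layout are hypothetical.
 *
 *	static const struct mtd_partition example_parts[] = {
 *		{ .name = "boot", .offset = 0,     .size = SZ_1M },
 *		{ .name = "data", .offset = SZ_1M, .size = MTDPART_SIZ_FULL },
 *	};
 *
 *	static int example_probe(struct mtd_info *mtd)
 *	{
 *		// NULL types: use the default list of partition parsers
 *		return mtd_device_parse_register(mtd, NULL, NULL, example_parts,
 *						 ARRAY_SIZE(example_parts));
 *	}
 *
 * Tear-down mirrors this with mtd_device_unregister(), which removes the
 * partitions and the master device.
 */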
857
/**
 * mtd_device_unregister - unregister an existing MTD device.
 *
 * @master: the MTD device to unregister.  This will unregister both the
 *          master and any partitions if registered.
 */
864int mtd_device_unregister(struct mtd_info *master)
865{
866 int err;
867
868 if (master->_reboot)
869 unregister_reboot_notifier(&master->reboot_notifier);
870
871 err = del_mtd_partitions(master);
872 if (err)
873 return err;
874
875 if (!device_is_registered(&master->dev))
876 return 0;
877
878 return del_mtd_device(master);
879}
880EXPORT_SYMBOL_GPL(mtd_device_unregister);
881
/**
 *	register_mtd_user - register a 'user' of MTD devices.
 *	@new: pointer to notifier info structure
 *
 *	Registers a pair of callbacks to be called upon addition or removal
 *	of MTD devices. Causes the 'add' callback to be immediately invoked
 *	for each MTD device currently present in the system.
 */
890void register_mtd_user (struct mtd_notifier *new)
891{
892 struct mtd_info *mtd;
893
894 mutex_lock(&mtd_table_mutex);
895
896 list_add(&new->list, &mtd_notifiers);
897
898 __module_get(THIS_MODULE);
899
900 mtd_for_each_device(mtd)
901 new->add(mtd);
902
903 mutex_unlock(&mtd_table_mutex);
904}
905EXPORT_SYMBOL_GPL(register_mtd_user);
906
/**
 *	unregister_mtd_user - unregister a 'user' of MTD devices.
 *	@old: pointer to notifier info structure
 *
 *	Removes a callback pair from the list of 'users' to be notified upon
 *	addition or removal of MTD devices. Causes the 'remove' callback to
 *	be immediately invoked for each MTD device currently present in the
 *	system.
 */
916int unregister_mtd_user (struct mtd_notifier *old)
917{
918 struct mtd_info *mtd;
919
920 mutex_lock(&mtd_table_mutex);
921
922 module_put(THIS_MODULE);
923
924 mtd_for_each_device(mtd)
925 old->remove(mtd);
926
927 list_del(&old->list);
928 mutex_unlock(&mtd_table_mutex);
929 return 0;
930}
931EXPORT_SYMBOL_GPL(unregister_mtd_user);
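/*
 * Illustrative sketch (not part of mtdcore): a driver that wants to be told
 * about every MTD device registers an mtd_notifier. The "add" hook also runs
 * immediately for devices that are already present. The example_* names are
 * hypothetical.
 *
 *	static void example_add(struct mtd_info *mtd)
 *	{
 *		pr_info("new MTD device mtd%d (%s)\n", mtd->index, mtd->name);
 *	}
 *
 *	static void example_remove(struct mtd_info *mtd)
 *	{
 *		pr_info("MTD device mtd%d going away\n", mtd->index);
 *	}
 *
 *	static struct mtd_notifier example_notifier = {
 *		.add	= example_add,
 *		.remove	= example_remove,
 *	};
 *
 *	// register_mtd_user(&example_notifier);    on module init
 *	// unregister_mtd_user(&example_notifier);  on module exit
 */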
932
/**
 *	get_mtd_device - obtain a validated handle for an MTD device
 *	@mtd: last known address of the required MTD device
 *	@num: internal device number of the required MTD device
 *
 *	Given a number and NULL address, return the num'th entry in the device
 *	table, if any. Given an address and num == -1, search the device table
 *	for a device with that address and return it if it is still present.
 *	Given both, return the num'th device only if its address matches.
 *	Returns an ERR_PTR() otherwise.
 */
944struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
945{
946 struct mtd_info *ret = NULL, *other;
947 int err = -ENODEV;
948
949 mutex_lock(&mtd_table_mutex);
950
951 if (num == -1) {
952 mtd_for_each_device(other) {
953 if (other == mtd) {
954 ret = mtd;
955 break;
956 }
957 }
958 } else if (num >= 0) {
959 ret = idr_find(&mtd_idr, num);
960 if (mtd && mtd != ret)
961 ret = NULL;
962 }
963
964 if (!ret) {
965 ret = ERR_PTR(err);
966 goto out;
967 }
968
969 err = __get_mtd_device(ret);
970 if (err)
971 ret = ERR_PTR(err);
972out:
973 mutex_unlock(&mtd_table_mutex);
974 return ret;
975}
976EXPORT_SYMBOL_GPL(get_mtd_device);
977
978
979int __get_mtd_device(struct mtd_info *mtd)
980{
981 struct mtd_info *master = mtd_get_master(mtd);
982 int err;
983
984 if (!try_module_get(master->owner))
985 return -ENODEV;
986
987 if (master->_get_device) {
988 err = master->_get_device(mtd);
989
990 if (err) {
991 module_put(master->owner);
992 return err;
993 }
994 }
995
996 while (mtd->parent) {
997 mtd->usecount++;
998 mtd = mtd->parent;
999 }
1000
1001 return 0;
1002}
1003EXPORT_SYMBOL_GPL(__get_mtd_device);
1004
/**
 *	get_mtd_device_nm - obtain a validated handle for an MTD device by
 *	device name
 *	@name: MTD device name to open
 *
 *	This function returns the MTD device description structure in case of
 *	success and an ERR_PTR() in case of failure.
 */
1013struct mtd_info *get_mtd_device_nm(const char *name)
1014{
1015 int err = -ENODEV;
1016 struct mtd_info *mtd = NULL, *other;
1017
1018 mutex_lock(&mtd_table_mutex);
1019
1020 mtd_for_each_device(other) {
1021 if (!strcmp(name, other->name)) {
1022 mtd = other;
1023 break;
1024 }
1025 }
1026
1027 if (!mtd)
1028 goto out_unlock;
1029
1030 err = __get_mtd_device(mtd);
1031 if (err)
1032 goto out_unlock;
1033
1034 mutex_unlock(&mtd_table_mutex);
1035 return mtd;
1036
1037out_unlock:
1038 mutex_unlock(&mtd_table_mutex);
1039 return ERR_PTR(err);
1040}
1041EXPORT_SYMBOL_GPL(get_mtd_device_nm);
1042
1043void put_mtd_device(struct mtd_info *mtd)
1044{
1045 mutex_lock(&mtd_table_mutex);
1046 __put_mtd_device(mtd);
1047 mutex_unlock(&mtd_table_mutex);
1048
1049}
1050EXPORT_SYMBOL_GPL(put_mtd_device);
1051
1052void __put_mtd_device(struct mtd_info *mtd)
1053{
1054 struct mtd_info *master = mtd_get_master(mtd);
1055
1056 while (mtd->parent) {
1057 --mtd->usecount;
1058 BUG_ON(mtd->usecount < 0);
1059 mtd = mtd->parent;
1060 }
1061
1062 if (master->_put_device)
1063 master->_put_device(master);
1064
1065 module_put(master->owner);
1066}
1067EXPORT_SYMBOL_GPL(__put_mtd_device);
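/*
 * Illustrative sketch (not part of mtdcore): looking an MTD device up by
 * name, using it, and dropping the reference. get_mtd_device_nm() returns an
 * ERR_PTR() on failure, so it must be checked with IS_ERR(). The partition
 * name is hypothetical.
 *
 *	struct mtd_info *mtd = get_mtd_device_nm("environment");
 *
 *	if (IS_ERR(mtd))
 *		return PTR_ERR(mtd);
 *
 *	// ... read from / write to the device ...
 *
 *	put_mtd_device(mtd);
 */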
1068
/*
 * Erase is a synchronous operation. Device drivers are expected to return a
 * negative error code if the operation failed and update instr->fail_addr
 * to point at the portion that was not properly erased.
 */
1074int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
1075{
1076 struct mtd_info *master = mtd_get_master(mtd);
1077 u64 mst_ofs = mtd_get_master_ofs(mtd, 0);
1078 struct erase_info adjinstr;
1079 int ret;
1080
1081 instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
1082 adjinstr = *instr;
1083
1084 if (!mtd->erasesize || !master->_erase)
1085 return -ENOTSUPP;
1086
1087 if (instr->addr >= mtd->size || instr->len > mtd->size - instr->addr)
1088 return -EINVAL;
1089 if (!(mtd->flags & MTD_WRITEABLE))
1090 return -EROFS;
1091
1092 if (!instr->len)
1093 return 0;
1094
1095 ledtrig_mtd_activity();
1096
1097 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
1098 adjinstr.addr = (loff_t)mtd_div_by_eb(instr->addr, mtd) *
1099 master->erasesize;
1100 adjinstr.len = ((u64)mtd_div_by_eb(instr->addr + instr->len, mtd) *
1101 master->erasesize) -
1102 adjinstr.addr;
1103 }
1104
1105 adjinstr.addr += mst_ofs;
1106
1107 ret = master->_erase(master, &adjinstr);
1108
1109 if (adjinstr.fail_addr != MTD_FAIL_ADDR_UNKNOWN) {
1110 instr->fail_addr = adjinstr.fail_addr - mst_ofs;
1111 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
1112 instr->fail_addr = mtd_div_by_eb(instr->fail_addr,
1113 master);
1114 instr->fail_addr *= mtd->erasesize;
1115 }
1116 }
1117
1118 return ret;
1119}
1120EXPORT_SYMBOL_GPL(mtd_erase);
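/*
 * Illustrative sketch (not part of mtdcore): erasing one eraseblock. The
 * erase_info describes the range; on failure, fail_addr (when known) points
 * at the block that could not be erased. "offs" is a hypothetical,
 * block-aligned offset within the device.
 *
 *	struct erase_info ei = {
 *		.addr = offs,
 *		.len  = mtd->erasesize,
 *	};
 *	int err = mtd_erase(mtd, &ei);
 *
 *	if (err)
 *		pr_warn("erase failed at 0x%llx (err %d)\n",
 *			(unsigned long long)ei.fail_addr, err);
 */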
1121
1122
1123
1124
1125int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
1126 void **virt, resource_size_t *phys)
1127{
1128 struct mtd_info *master = mtd_get_master(mtd);
1129
1130 *retlen = 0;
1131 *virt = NULL;
1132 if (phys)
1133 *phys = 0;
1134 if (!master->_point)
1135 return -EOPNOTSUPP;
1136 if (from < 0 || from >= mtd->size || len > mtd->size - from)
1137 return -EINVAL;
1138 if (!len)
1139 return 0;
1140
1141 from = mtd_get_master_ofs(mtd, from);
1142 return master->_point(master, from, len, retlen, virt, phys);
1143}
1144EXPORT_SYMBOL_GPL(mtd_point);
1145
1146
1147int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
1148{
1149 struct mtd_info *master = mtd_get_master(mtd);
1150
1151 if (!master->_unpoint)
1152 return -EOPNOTSUPP;
1153 if (from < 0 || from >= mtd->size || len > mtd->size - from)
1154 return -EINVAL;
1155 if (!len)
1156 return 0;
1157 return master->_unpoint(master, mtd_get_master_ofs(mtd, from), len);
1158}
1159EXPORT_SYMBOL_GPL(mtd_unpoint);
1160
/*
 * Allow NOMMU mmap() to directly map the device (if not NULL)
 * - return the address to which the offset maps
 * - return -ENOSYS to indicate refusal to do the mapping
 */
1166unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
1167 unsigned long offset, unsigned long flags)
1168{
1169 size_t retlen;
1170 void *virt;
1171 int ret;
1172
1173 ret = mtd_point(mtd, offset, len, &retlen, &virt, NULL);
1174 if (ret)
1175 return ret;
1176 if (retlen != len) {
1177 mtd_unpoint(mtd, offset, retlen);
1178 return -ENOSYS;
1179 }
1180 return (unsigned long)virt;
1181}
1182EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);
1183
1184static void mtd_update_ecc_stats(struct mtd_info *mtd, struct mtd_info *master,
1185 const struct mtd_ecc_stats *old_stats)
1186{
1187 struct mtd_ecc_stats diff;
1188
1189 if (master == mtd)
1190 return;
1191
1192 diff = master->ecc_stats;
1193 diff.failed -= old_stats->failed;
1194 diff.corrected -= old_stats->corrected;
1195
1196 while (mtd->parent) {
1197 mtd->ecc_stats.failed += diff.failed;
1198 mtd->ecc_stats.corrected += diff.corrected;
1199 mtd = mtd->parent;
1200 }
1201}
1202
1203int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
1204 u_char *buf)
1205{
1206 struct mtd_oob_ops ops = {
1207 .len = len,
1208 .datbuf = buf,
1209 };
1210 int ret;
1211
1212 ret = mtd_read_oob(mtd, from, &ops);
1213 *retlen = ops.retlen;
1214
1215 return ret;
1216}
1217EXPORT_SYMBOL_GPL(mtd_read);
1218
1219int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
1220 const u_char *buf)
1221{
1222 struct mtd_oob_ops ops = {
1223 .len = len,
1224 .datbuf = (u8 *)buf,
1225 };
1226 int ret;
1227
1228 ret = mtd_write_oob(mtd, to, &ops);
1229 *retlen = ops.retlen;
1230
1231 return ret;
1232}
1233EXPORT_SYMBOL_GPL(mtd_write);
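/*
 * Illustrative sketch (not part of mtdcore): a plain read followed by a
 * write, with the usual handling of -EUCLEAN. A return of -EUCLEAN means the
 * data was corrected (bitflips reached mtd->bitflip_threshold) and is still
 * usable, but the caller may want to scrub the block. The buffer, offset and
 * length are hypothetical.
 *
 *	size_t retlen;
 *	int err;
 *
 *	err = mtd_read(mtd, offs, len, &retlen, buf);
 *	if (err && !mtd_is_bitflip(err))
 *		return err;		// hard error
 *	if (mtd_is_bitflip(err))
 *		pr_debug("correctable bitflips at 0x%llx\n",
 *			 (unsigned long long)offs);
 *
 *	err = mtd_write(mtd, offs, len, &retlen, buf);
 */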
1234
/*
 * In blackbox flight-recorder-like scenarios we want to make successful
 * writes in interrupt context. panic_write() is only intended to be called
 * when it is known the kernel is about to panic and we need the write to
 * succeed. Since the kernel is not going to be running for much longer, this
 * function can break locks and delay to ensure the write succeeds (but not
 * sleep).
 */
1242int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
1243 const u_char *buf)
1244{
1245 struct mtd_info *master = mtd_get_master(mtd);
1246
1247 *retlen = 0;
1248 if (!master->_panic_write)
1249 return -EOPNOTSUPP;
1250 if (to < 0 || to >= mtd->size || len > mtd->size - to)
1251 return -EINVAL;
1252 if (!(mtd->flags & MTD_WRITEABLE))
1253 return -EROFS;
1254 if (!len)
1255 return 0;
1256 if (!master->oops_panic_write)
1257 master->oops_panic_write = true;
1258
1259 return master->_panic_write(master, mtd_get_master_ofs(mtd, to), len,
1260 retlen, buf);
1261}
1262EXPORT_SYMBOL_GPL(mtd_panic_write);
1263
1264static int mtd_check_oob_ops(struct mtd_info *mtd, loff_t offs,
1265 struct mtd_oob_ops *ops)
1266{
	/*
	 * Some users set ->datbuf or ->oobbuf to NULL, but leave ->len or
	 * ->ooblen uninitialized. Force ->len and ->ooblen to 0 in this case.
	 */
1272 if (!ops->datbuf)
1273 ops->len = 0;
1274
1275 if (!ops->oobbuf)
1276 ops->ooblen = 0;
1277
1278 if (offs < 0 || offs + ops->len > mtd->size)
1279 return -EINVAL;
1280
1281 if (ops->ooblen) {
1282 size_t maxooblen;
1283
1284 if (ops->ooboffs >= mtd_oobavail(mtd, ops))
1285 return -EINVAL;
1286
1287 maxooblen = ((size_t)(mtd_div_by_ws(mtd->size, mtd) -
1288 mtd_div_by_ws(offs, mtd)) *
1289 mtd_oobavail(mtd, ops)) - ops->ooboffs;
1290 if (ops->ooblen > maxooblen)
1291 return -EINVAL;
1292 }
1293
1294 return 0;
1295}
1296
1297static int mtd_read_oob_std(struct mtd_info *mtd, loff_t from,
1298 struct mtd_oob_ops *ops)
1299{
1300 struct mtd_info *master = mtd_get_master(mtd);
1301 int ret;
1302
1303 from = mtd_get_master_ofs(mtd, from);
1304 if (master->_read_oob)
1305 ret = master->_read_oob(master, from, ops);
1306 else
1307 ret = master->_read(master, from, ops->len, &ops->retlen,
1308 ops->datbuf);
1309
1310 return ret;
1311}
1312
1313static int mtd_write_oob_std(struct mtd_info *mtd, loff_t to,
1314 struct mtd_oob_ops *ops)
1315{
1316 struct mtd_info *master = mtd_get_master(mtd);
1317 int ret;
1318
1319 to = mtd_get_master_ofs(mtd, to);
1320 if (master->_write_oob)
1321 ret = master->_write_oob(master, to, ops);
1322 else
1323 ret = master->_write(master, to, ops->len, &ops->retlen,
1324 ops->datbuf);
1325
1326 return ret;
1327}
1328
1329static int mtd_io_emulated_slc(struct mtd_info *mtd, loff_t start, bool read,
1330 struct mtd_oob_ops *ops)
1331{
1332 struct mtd_info *master = mtd_get_master(mtd);
1333 int ngroups = mtd_pairing_groups(master);
1334 int npairs = mtd_wunit_per_eb(master) / ngroups;
1335 struct mtd_oob_ops adjops = *ops;
1336 unsigned int wunit, oobavail;
1337 struct mtd_pairing_info info;
1338 int max_bitflips = 0;
1339 u32 ebofs, pageofs;
1340 loff_t base, pos;
1341
1342 ebofs = mtd_mod_by_eb(start, mtd);
1343 base = (loff_t)mtd_div_by_eb(start, mtd) * master->erasesize;
1344 info.group = 0;
1345 info.pair = mtd_div_by_ws(ebofs, mtd);
1346 pageofs = mtd_mod_by_ws(ebofs, mtd);
1347 oobavail = mtd_oobavail(mtd, ops);
1348
1349 while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
1350 int ret;
1351
1352 if (info.pair >= npairs) {
1353 info.pair = 0;
1354 base += master->erasesize;
1355 }
1356
1357 wunit = mtd_pairing_info_to_wunit(master, &info);
1358 pos = mtd_wunit_to_offset(mtd, base, wunit);
1359
1360 adjops.len = ops->len - ops->retlen;
1361 if (adjops.len > mtd->writesize - pageofs)
1362 adjops.len = mtd->writesize - pageofs;
1363
1364 adjops.ooblen = ops->ooblen - ops->oobretlen;
1365 if (adjops.ooblen > oobavail - adjops.ooboffs)
1366 adjops.ooblen = oobavail - adjops.ooboffs;
1367
1368 if (read) {
1369 ret = mtd_read_oob_std(mtd, pos + pageofs, &adjops);
1370 if (ret > 0)
1371 max_bitflips = max(max_bitflips, ret);
1372 } else {
1373 ret = mtd_write_oob_std(mtd, pos + pageofs, &adjops);
1374 }
1375
1376 if (ret < 0)
1377 return ret;
1378
1379 max_bitflips = max(max_bitflips, ret);
1380 ops->retlen += adjops.retlen;
1381 ops->oobretlen += adjops.oobretlen;
1382 adjops.datbuf += adjops.retlen;
1383 adjops.oobbuf += adjops.oobretlen;
1384 adjops.ooboffs = 0;
1385 pageofs = 0;
1386 info.pair++;
1387 }
1388
1389 return max_bitflips;
1390}
1391
1392int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
1393{
1394 struct mtd_info *master = mtd_get_master(mtd);
1395 struct mtd_ecc_stats old_stats = master->ecc_stats;
1396 int ret_code;
1397
1398 ops->retlen = ops->oobretlen = 0;
1399
1400 ret_code = mtd_check_oob_ops(mtd, from, ops);
1401 if (ret_code)
1402 return ret_code;
1403
1404 ledtrig_mtd_activity();
1405
1406
1407 if (!master->_read_oob && (!master->_read || ops->oobbuf))
1408 return -EOPNOTSUPP;
1409
1410 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
1411 ret_code = mtd_io_emulated_slc(mtd, from, true, ops);
1412 else
1413 ret_code = mtd_read_oob_std(mtd, from, ops);
1414
1415 mtd_update_ecc_stats(mtd, master, &old_stats);
1416
	/*
	 * In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
	 * similar to mtd->_read(), returning a non-negative integer
	 * representing max bitflips. In other cases, mtd->_read_oob() may
	 * return -EUCLEAN. In all cases, perform similar logic to mtd_read().
	 */
1423 if (unlikely(ret_code < 0))
1424 return ret_code;
1425 if (mtd->ecc_strength == 0)
1426 return 0;
1427 return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
1428}
1429EXPORT_SYMBOL_GPL(mtd_read_oob);
1430
1431int mtd_write_oob(struct mtd_info *mtd, loff_t to,
1432 struct mtd_oob_ops *ops)
1433{
1434 struct mtd_info *master = mtd_get_master(mtd);
1435 int ret;
1436
1437 ops->retlen = ops->oobretlen = 0;
1438
1439 if (!(mtd->flags & MTD_WRITEABLE))
1440 return -EROFS;
1441
1442 ret = mtd_check_oob_ops(mtd, to, ops);
1443 if (ret)
1444 return ret;
1445
1446 ledtrig_mtd_activity();
1447
1448
1449 if (!master->_write_oob && (!master->_write || ops->oobbuf))
1450 return -EOPNOTSUPP;
1451
1452 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
1453 return mtd_io_emulated_slc(mtd, to, false, ops);
1454
1455 return mtd_write_oob_std(mtd, to, ops);
1456}
1457EXPORT_SYMBOL_GPL(mtd_write_oob);
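/*
 * Illustrative sketch (not part of mtdcore): reading one page together with
 * its available OOB bytes through mtd_read_oob(). With MTD_OPS_AUTO_OOB the
 * layout's free bytes are packed into oobbuf; retlen/oobretlen report how
 * much was actually transferred. The buffers and page_offs are hypothetical.
 *
 *	struct mtd_oob_ops ops = {
 *		.mode	= MTD_OPS_AUTO_OOB,
 *		.datbuf	= databuf,
 *		.len	= mtd->writesize,
 *		.oobbuf	= oobbuf,
 *		.ooblen	= mtd->oobavail,
 *	};
 *	int err = mtd_read_oob(mtd, page_offs, &ops);
 *
 *	if (err && !mtd_is_bitflip(err))
 *		return err;
 */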
1458
/**
 * mtd_ooblayout_ecc - Get the OOB region definition of a specific ECC section
 * @mtd: MTD device structure
 * @section: ECC section. Depending on the layout you may have all the ECC
 *	     bytes stored in a single region or one region per ECC chunk.
 * @oobecc: OOB region struct filled with the appropriate ECC position
 *	    information
 *
 * This function returns ECC section information in the OOB area. If you want
 * all the ECC bytes information, call mtd_ooblayout_ecc(mtd, section++,
 * oobecc) until it returns -ERANGE.
 *
 * Returns zero on success, a negative error code otherwise.
 */
1475int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
1476 struct mtd_oob_region *oobecc)
1477{
1478 struct mtd_info *master = mtd_get_master(mtd);
1479
1480 memset(oobecc, 0, sizeof(*oobecc));
1481
1482 if (!master || section < 0)
1483 return -EINVAL;
1484
1485 if (!master->ooblayout || !master->ooblayout->ecc)
1486 return -ENOTSUPP;
1487
1488 return master->ooblayout->ecc(master, section, oobecc);
1489}
1490EXPORT_SYMBOL_GPL(mtd_ooblayout_ecc);
1491
/**
 * mtd_ooblayout_free - Get the OOB region definition of a specific free
 *			section
 * @mtd: MTD device structure
 * @section: Free section you are interested in. Depending on the layout
 *	     you may have all the free bytes stored in a single region or
 *	     one region per ECC chunk plus an extra region for the remaining
 *	     bytes.
 * @oobfree: OOB region struct filled with the appropriate free position
 *	     information
 *
 * This function returns free bytes position in the OOB area. If you want
 * all the free bytes information, call mtd_ooblayout_free(mtd, section++,
 * oobfree) until it returns -ERANGE.
 *
 * Returns zero on success, a negative error code otherwise.
 */
1509int mtd_ooblayout_free(struct mtd_info *mtd, int section,
1510 struct mtd_oob_region *oobfree)
1511{
1512 struct mtd_info *master = mtd_get_master(mtd);
1513
1514 memset(oobfree, 0, sizeof(*oobfree));
1515
1516 if (!master || section < 0)
1517 return -EINVAL;
1518
1519 if (!master->ooblayout || !master->ooblayout->free)
1520 return -ENOTSUPP;
1521
1522 return master->ooblayout->free(master, section, oobfree);
1523}
1524EXPORT_SYMBOL_GPL(mtd_ooblayout_free);
1525
/**
 * mtd_ooblayout_find_region - Find the region attached to a specific byte
 * @mtd: mtd info structure
 * @byte: the byte we are searching for
 * @sectionp: pointer where the section id will be stored
 * @oobregion: used to retrieve the region position and length
 * @iter: iterator function. Should be either mtd_ooblayout_free or
 *	  mtd_ooblayout_ecc depending on the region type you're searching for
 *
 * This function returns the section id and oobregion information of a
 * specific byte. For example, to know where the 4th ECC byte is stored:
 *
 * mtd_ooblayout_find_region(mtd, 3, &section, &oobregion, mtd_ooblayout_ecc);
 *
 * Returns zero on success, a negative error code otherwise.
 */
1543static int mtd_ooblayout_find_region(struct mtd_info *mtd, int byte,
1544 int *sectionp, struct mtd_oob_region *oobregion,
1545 int (*iter)(struct mtd_info *,
1546 int section,
1547 struct mtd_oob_region *oobregion))
1548{
1549 int pos = 0, ret, section = 0;
1550
1551 memset(oobregion, 0, sizeof(*oobregion));
1552
1553 while (1) {
1554 ret = iter(mtd, section, oobregion);
1555 if (ret)
1556 return ret;
1557
1558 if (pos + oobregion->length > byte)
1559 break;
1560
1561 pos += oobregion->length;
1562 section++;
1563 }
1564
1565
1566
1567
1568
1569 oobregion->offset += byte - pos;
1570 oobregion->length -= byte - pos;
1571 *sectionp = section;
1572
1573 return 0;
1574}
1575
/**
 * mtd_ooblayout_find_eccregion - Find the ECC region attached to a specific
 *				  ECC byte
 * @mtd: mtd info structure
 * @eccbyte: the byte we are searching for
 * @section: pointer where the section id will be stored
 * @oobregion: OOB region information
 *
 * Works like mtd_ooblayout_find_region(), restricted to ECC bytes.
 *
 * Returns zero on success, a negative error code otherwise.
 */
1589int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte,
1590 int *section,
1591 struct mtd_oob_region *oobregion)
1592{
1593 return mtd_ooblayout_find_region(mtd, eccbyte, section, oobregion,
1594 mtd_ooblayout_ecc);
1595}
1596EXPORT_SYMBOL_GPL(mtd_ooblayout_find_eccregion);
1597
/**
 * mtd_ooblayout_get_bytes - Extract OOB bytes from the oob buffer
 * @mtd: mtd info structure
 * @buf: destination buffer to store OOB bytes
 * @oobbuf: OOB buffer
 * @start: first byte to retrieve
 * @nbytes: number of bytes to retrieve
 * @iter: section iterator
 *
 * Extract bytes attached to a specific category (ECC or free) from the OOB
 * buffer and copy them into @buf.
 *
 * Returns zero on success, a negative error code otherwise.
 */
1612static int mtd_ooblayout_get_bytes(struct mtd_info *mtd, u8 *buf,
1613 const u8 *oobbuf, int start, int nbytes,
1614 int (*iter)(struct mtd_info *,
1615 int section,
1616 struct mtd_oob_region *oobregion))
1617{
1618 struct mtd_oob_region oobregion;
1619 int section, ret;
1620
	ret = mtd_ooblayout_find_region(mtd, start, &section,
					&oobregion, iter);
1623
1624 while (!ret) {
1625 int cnt;
1626
1627 cnt = min_t(int, nbytes, oobregion.length);
1628 memcpy(buf, oobbuf + oobregion.offset, cnt);
1629 buf += cnt;
1630 nbytes -= cnt;
1631
1632 if (!nbytes)
1633 break;
1634
1635 ret = iter(mtd, ++section, &oobregion);
1636 }
1637
1638 return ret;
1639}
1640
/**
 * mtd_ooblayout_set_bytes - put OOB bytes into the oob buffer
 * @mtd: mtd info structure
 * @buf: source buffer to get OOB bytes from
 * @oobbuf: OOB buffer
 * @start: first OOB byte to set
 * @nbytes: number of OOB bytes to set
 * @iter: section iterator
 *
 * Fill the OOB buffer with data provided in @buf. The category (ECC or free)
 * is selected by passing the appropriate iterator.
 *
 * Returns zero on success, a negative error code otherwise.
 */
1655static int mtd_ooblayout_set_bytes(struct mtd_info *mtd, const u8 *buf,
1656 u8 *oobbuf, int start, int nbytes,
1657 int (*iter)(struct mtd_info *,
1658 int section,
1659 struct mtd_oob_region *oobregion))
1660{
1661 struct mtd_oob_region oobregion;
1662 int section, ret;
1663
	ret = mtd_ooblayout_find_region(mtd, start, &section,
					&oobregion, iter);
1666
1667 while (!ret) {
1668 int cnt;
1669
1670 cnt = min_t(int, nbytes, oobregion.length);
1671 memcpy(oobbuf + oobregion.offset, buf, cnt);
1672 buf += cnt;
1673 nbytes -= cnt;
1674
1675 if (!nbytes)
1676 break;
1677
1678 ret = iter(mtd, ++section, &oobregion);
1679 }
1680
1681 return ret;
1682}
1683
/**
 * mtd_ooblayout_count_bytes - count the number of bytes in a OOB category
 * @mtd: mtd info structure
 * @iter: category iterator
 *
 * Count the number of bytes in a given category.
 *
 * Returns a positive value on success, a negative error code otherwise.
 */
1693static int mtd_ooblayout_count_bytes(struct mtd_info *mtd,
1694 int (*iter)(struct mtd_info *,
1695 int section,
1696 struct mtd_oob_region *oobregion))
1697{
1698 struct mtd_oob_region oobregion;
1699 int section = 0, ret, nbytes = 0;
1700
1701 while (1) {
1702 ret = iter(mtd, section++, &oobregion);
1703 if (ret) {
1704 if (ret == -ERANGE)
1705 ret = nbytes;
1706 break;
1707 }
1708
1709 nbytes += oobregion.length;
1710 }
1711
1712 return ret;
1713}
1714
/**
 * mtd_ooblayout_get_eccbytes - extract ECC bytes from the oob buffer
 * @mtd: mtd info structure
 * @eccbuf: destination buffer to store ECC bytes
 * @oobbuf: OOB buffer
 * @start: first ECC byte to retrieve
 * @nbytes: number of ECC bytes to retrieve
 *
 * Works like mtd_ooblayout_get_bytes(), except it acts on ECC bytes.
 *
 * Returns zero on success, a negative error code otherwise.
 */
1727int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf,
1728 const u8 *oobbuf, int start, int nbytes)
1729{
1730 return mtd_ooblayout_get_bytes(mtd, eccbuf, oobbuf, start, nbytes,
1731 mtd_ooblayout_ecc);
1732}
1733EXPORT_SYMBOL_GPL(mtd_ooblayout_get_eccbytes);
1734
/**
 * mtd_ooblayout_set_eccbytes - put ECC bytes into the oob buffer
 * @mtd: mtd info structure
 * @eccbuf: source buffer to get ECC bytes from
 * @oobbuf: OOB buffer
 * @start: first ECC byte to set
 * @nbytes: number of ECC bytes to set
 *
 * Works like mtd_ooblayout_set_bytes(), except it acts on ECC bytes.
 *
 * Returns zero on success, a negative error code otherwise.
 */
1747int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf,
1748 u8 *oobbuf, int start, int nbytes)
1749{
1750 return mtd_ooblayout_set_bytes(mtd, eccbuf, oobbuf, start, nbytes,
1751 mtd_ooblayout_ecc);
1752}
1753EXPORT_SYMBOL_GPL(mtd_ooblayout_set_eccbytes);
1754
/**
 * mtd_ooblayout_get_databytes - extract data bytes from the oob buffer
 * @mtd: mtd info structure
 * @databuf: destination buffer to store data bytes
 * @oobbuf: OOB buffer
 * @start: first data byte to retrieve
 * @nbytes: number of data bytes to retrieve
 *
 * Works like mtd_ooblayout_get_bytes(), except it acts on free bytes.
 *
 * Returns zero on success, a negative error code otherwise.
 */
1767int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf,
1768 const u8 *oobbuf, int start, int nbytes)
1769{
1770 return mtd_ooblayout_get_bytes(mtd, databuf, oobbuf, start, nbytes,
1771 mtd_ooblayout_free);
1772}
1773EXPORT_SYMBOL_GPL(mtd_ooblayout_get_databytes);
1774
/**
 * mtd_ooblayout_set_databytes - put data bytes into the oob buffer
 * @mtd: mtd info structure
 * @databuf: source buffer to get data bytes from
 * @oobbuf: OOB buffer
 * @start: first data byte to set
 * @nbytes: number of data bytes to set
 *
 * Works like mtd_ooblayout_set_bytes(), except it acts on free bytes.
 *
 * Returns zero on success, a negative error code otherwise.
 */
1787int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf,
1788 u8 *oobbuf, int start, int nbytes)
1789{
1790 return mtd_ooblayout_set_bytes(mtd, databuf, oobbuf, start, nbytes,
1791 mtd_ooblayout_free);
1792}
1793EXPORT_SYMBOL_GPL(mtd_ooblayout_set_databytes);
1794
/**
 * mtd_ooblayout_count_freebytes - count the number of free bytes in OOB
 * @mtd: mtd info structure
 *
 * Works like mtd_ooblayout_count_bytes(), except it acts on free bytes.
 *
 * Returns a positive value on success, a negative error code otherwise.
 */
1803int mtd_ooblayout_count_freebytes(struct mtd_info *mtd)
1804{
1805 return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_free);
1806}
1807EXPORT_SYMBOL_GPL(mtd_ooblayout_count_freebytes);
1808
/**
 * mtd_ooblayout_count_eccbytes - count the number of ECC bytes in OOB
 * @mtd: mtd info structure
 *
 * Works like mtd_ooblayout_count_bytes(), except it acts on ECC bytes.
 *
 * Returns a positive value on success, a negative error code otherwise.
 */
1817int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd)
1818{
1819 return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_ecc);
1820}
1821EXPORT_SYMBOL_GPL(mtd_ooblayout_count_eccbytes);
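/*
 * Illustrative sketch (not part of mtdcore): walking the ECC regions of an
 * OOB layout, the same pattern the helpers above are built around. Iteration
 * stops when the layout callback returns -ERANGE.
 *
 *	struct mtd_oob_region region;
 *	int section = 0, err;
 *
 *	pr_info("%d ECC / %d free OOB bytes\n",
 *		mtd_ooblayout_count_eccbytes(mtd),
 *		mtd_ooblayout_count_freebytes(mtd));
 *
 *	while (!(err = mtd_ooblayout_ecc(mtd, section++, &region)))
 *		pr_info("ECC region: offset %u, length %u\n",
 *			region.offset, region.length);
 *	// err == -ERANGE once all sections have been enumerated
 */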
1822
/*
 * Methods to access the protection register area, present in some flash
 * devices. The user data is one-time programmable but the factory data is
 * read only.
 */
1828int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
1829 struct otp_info *buf)
1830{
1831 struct mtd_info *master = mtd_get_master(mtd);
1832
1833 if (!master->_get_fact_prot_info)
1834 return -EOPNOTSUPP;
1835 if (!len)
1836 return 0;
1837 return master->_get_fact_prot_info(master, len, retlen, buf);
1838}
1839EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info);
1840
1841int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
1842 size_t *retlen, u_char *buf)
1843{
1844 struct mtd_info *master = mtd_get_master(mtd);
1845
1846 *retlen = 0;
1847 if (!master->_read_fact_prot_reg)
1848 return -EOPNOTSUPP;
1849 if (!len)
1850 return 0;
1851 return master->_read_fact_prot_reg(master, from, len, retlen, buf);
1852}
1853EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg);
1854
1855int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
1856 struct otp_info *buf)
1857{
1858 struct mtd_info *master = mtd_get_master(mtd);
1859
1860 if (!master->_get_user_prot_info)
1861 return -EOPNOTSUPP;
1862 if (!len)
1863 return 0;
1864 return master->_get_user_prot_info(master, len, retlen, buf);
1865}
1866EXPORT_SYMBOL_GPL(mtd_get_user_prot_info);
1867
1868int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
1869 size_t *retlen, u_char *buf)
1870{
1871 struct mtd_info *master = mtd_get_master(mtd);
1872
1873 *retlen = 0;
1874 if (!master->_read_user_prot_reg)
1875 return -EOPNOTSUPP;
1876 if (!len)
1877 return 0;
1878 return master->_read_user_prot_reg(master, from, len, retlen, buf);
1879}
1880EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg);
1881
1882int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
1883 size_t *retlen, u_char *buf)
1884{
1885 struct mtd_info *master = mtd_get_master(mtd);
1886 int ret;
1887
1888 *retlen = 0;
1889 if (!master->_write_user_prot_reg)
1890 return -EOPNOTSUPP;
1891 if (!len)
1892 return 0;
1893 ret = master->_write_user_prot_reg(master, to, len, retlen, buf);
1894 if (ret)
1895 return ret;
1896
	/*
	 * If no data could be written at all, we are out of OTP memory and
	 * must return -ENOSPC.
	 */
1901 return (*retlen) ? 0 : -ENOSPC;
1902}
1903EXPORT_SYMBOL_GPL(mtd_write_user_prot_reg);
1904
1905int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
1906{
1907 struct mtd_info *master = mtd_get_master(mtd);
1908
1909 if (!master->_lock_user_prot_reg)
1910 return -EOPNOTSUPP;
1911 if (!len)
1912 return 0;
1913 return master->_lock_user_prot_reg(master, from, len);
1914}
1915EXPORT_SYMBOL_GPL(mtd_lock_user_prot_reg);
1916
1917
1918int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1919{
1920 struct mtd_info *master = mtd_get_master(mtd);
1921
1922 if (!master->_lock)
1923 return -EOPNOTSUPP;
1924 if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
1925 return -EINVAL;
1926 if (!len)
1927 return 0;
1928
1929 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
1930 ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
1931 len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
1932 }
1933
1934 return master->_lock(master, mtd_get_master_ofs(mtd, ofs), len);
1935}
1936EXPORT_SYMBOL_GPL(mtd_lock);
1937
1938int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1939{
1940 struct mtd_info *master = mtd_get_master(mtd);
1941
1942 if (!master->_unlock)
1943 return -EOPNOTSUPP;
1944 if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
1945 return -EINVAL;
1946 if (!len)
1947 return 0;
1948
1949 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
1950 ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
1951 len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
1952 }
1953
1954 return master->_unlock(master, mtd_get_master_ofs(mtd, ofs), len);
1955}
1956EXPORT_SYMBOL_GPL(mtd_unlock);
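/*
 * Illustrative sketch (not part of mtdcore): temporarily unlocking a region
 * before writing on parts that power up write-protected, then locking it
 * again. Drivers without lock support return -EOPNOTSUPP, which callers
 * usually treat as "nothing to do". The offset is hypothetical.
 *
 *	int err = mtd_unlock(mtd, offs, mtd->erasesize);
 *
 *	if (err && err != -EOPNOTSUPP)
 *		return err;
 *
 *	// ... erase/program the block ...
 *
 *	mtd_lock(mtd, offs, mtd->erasesize);
 */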
1957
1958int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1959{
1960 struct mtd_info *master = mtd_get_master(mtd);
1961
1962 if (!master->_is_locked)
1963 return -EOPNOTSUPP;
1964 if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
1965 return -EINVAL;
1966 if (!len)
1967 return 0;
1968
1969 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
1970 ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
1971 len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
1972 }
1973
1974 return master->_is_locked(master, mtd_get_master_ofs(mtd, ofs), len);
1975}
1976EXPORT_SYMBOL_GPL(mtd_is_locked);
1977
1978int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs)
1979{
1980 struct mtd_info *master = mtd_get_master(mtd);
1981
1982 if (ofs < 0 || ofs >= mtd->size)
1983 return -EINVAL;
1984 if (!master->_block_isreserved)
1985 return 0;
1986
1987 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
1988 ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
1989
1990 return master->_block_isreserved(master, mtd_get_master_ofs(mtd, ofs));
1991}
1992EXPORT_SYMBOL_GPL(mtd_block_isreserved);
1993
1994int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
1995{
1996 struct mtd_info *master = mtd_get_master(mtd);
1997
1998 if (ofs < 0 || ofs >= mtd->size)
1999 return -EINVAL;
2000 if (!master->_block_isbad)
2001 return 0;
2002
2003 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
2004 ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2005
2006 return master->_block_isbad(master, mtd_get_master_ofs(mtd, ofs));
2007}
2008EXPORT_SYMBOL_GPL(mtd_block_isbad);
2009
2010int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
2011{
2012 struct mtd_info *master = mtd_get_master(mtd);
2013 int ret;
2014
2015 if (!master->_block_markbad)
2016 return -EOPNOTSUPP;
2017 if (ofs < 0 || ofs >= mtd->size)
2018 return -EINVAL;
2019 if (!(mtd->flags & MTD_WRITEABLE))
2020 return -EROFS;
2021
2022 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
2023 ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2024
2025 ret = master->_block_markbad(master, mtd_get_master_ofs(mtd, ofs));
2026 if (ret)
2027 return ret;
2028
2029 while (mtd->parent) {
2030 mtd->ecc_stats.badblocks++;
2031 mtd = mtd->parent;
2032 }
2033
2034 return 0;
2035}
2036EXPORT_SYMBOL_GPL(mtd_block_markbad);
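/*
 * Illustrative sketch (not part of mtdcore): skipping bad blocks while
 * scanning a NAND partition, and retiring a block that failed to erase.
 * mtd_block_isbad() returns > 0 for a bad block, 0 for a good one and a
 * negative error code on invalid offsets. example_erase_failed() is a
 * hypothetical helper.
 *
 *	loff_t offs;
 *
 *	for (offs = 0; offs < mtd->size; offs += mtd->erasesize) {
 *		if (mtd_block_isbad(mtd, offs) > 0)
 *			continue;		// skip factory/worn-out blocks
 *
 *		if (example_erase_failed(mtd, offs))
 *			mtd_block_markbad(mtd, offs);
 *	}
 */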
2037
/*
 * default_mtd_writev - the default writev method
 * @mtd: mtd device description object pointer
 * @vecs: the vectors to write
 * @count: count of vectors in @vecs
 * @to: the MTD device offset to write to
 * @retlen: on exit contains the count of bytes written to the MTD device.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
2049static int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
2050 unsigned long count, loff_t to, size_t *retlen)
2051{
2052 unsigned long i;
2053 size_t totlen = 0, thislen;
2054 int ret = 0;
2055
2056 for (i = 0; i < count; i++) {
2057 if (!vecs[i].iov_len)
2058 continue;
2059 ret = mtd_write(mtd, to, vecs[i].iov_len, &thislen,
2060 vecs[i].iov_base);
2061 totlen += thislen;
2062 if (ret || thislen != vecs[i].iov_len)
2063 break;
2064 to += vecs[i].iov_len;
2065 }
2066 *retlen = totlen;
2067 return ret;
2068}
2069
/*
 * mtd_writev - the vector-based MTD write method
 * @mtd: mtd device description object pointer
 * @vecs: the vectors to write
 * @count: count of vectors in @vecs
 * @to: the MTD device offset to write to
 * @retlen: on exit contains the count of bytes written to the MTD device.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
2081int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
2082 unsigned long count, loff_t to, size_t *retlen)
2083{
2084 struct mtd_info *master = mtd_get_master(mtd);
2085
2086 *retlen = 0;
2087 if (!(mtd->flags & MTD_WRITEABLE))
2088 return -EROFS;
2089
2090 if (!master->_writev)
2091 return default_mtd_writev(mtd, vecs, count, to, retlen);
2092
2093 return master->_writev(master, vecs, count,
2094 mtd_get_master_ofs(mtd, to), retlen);
2095}
2096EXPORT_SYMBOL_GPL(mtd_writev);
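/*
 * Illustrative sketch (not part of mtdcore): writing a header and a payload
 * in one call with mtd_writev(). When the driver has no ->_writev() method
 * the vectors are simply written back to back by default_mtd_writev(). The
 * buffers, lengths and offset are hypothetical.
 *
 *	struct kvec vecs[2] = {
 *		{ .iov_base = hdr,  .iov_len = hdr_len  },
 *		{ .iov_base = data, .iov_len = data_len },
 *	};
 *	size_t retlen;
 *
 *	int err = mtd_writev(mtd, vecs, ARRAY_SIZE(vecs), offs, &retlen);
 */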
2097
/**
 * mtd_kmalloc_up_to - allocate a contiguous buffer up to the specified size
 * @mtd: mtd device description object pointer
 * @size: a pointer to the ideal or maximum size of the allocation, points
 *        to the actual allocation size on success.
 *
 * This routine attempts to allocate a contiguous kernel buffer up to
 * the specified size, backing off the size of the request exponentially
 * until the request succeeds or until the allocation size falls below
 * the system page size. This attempts to make sure it does not adversely
 * impact system performance, so when allocating more than one page, we
 * ask the memory allocator to avoid re-trying, swapping, writing back
 * or performing I/O.
 *
 * Note, this function also makes sure that the allocated buffer is aligned
 * to the MTD device's min. I/O unit, i.e. the "mtd->writesize" value.
 *
 * Returns a pointer to the allocated buffer on success; otherwise, NULL.
 */
2122void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
2123{
2124 gfp_t flags = __GFP_NOWARN | __GFP_DIRECT_RECLAIM | __GFP_NORETRY;
2125 size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
2126 void *kbuf;
2127
2128 *size = min_t(size_t, *size, KMALLOC_MAX_SIZE);
2129
2130 while (*size > min_alloc) {
2131 kbuf = kmalloc(*size, flags);
2132 if (kbuf)
2133 return kbuf;
2134
2135 *size >>= 1;
2136 *size = ALIGN(*size, mtd->writesize);
2137 }
2138
2139
2140
2141
2142
2143 return kmalloc(*size, GFP_KERNEL);
2144}
2145EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to);
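/*
 * Illustrative sketch (not part of mtdcore): using mtd_kmalloc_up_to() to get
 * the largest buffer the allocator will comfortably give us, then reading a
 * region in buffer-sized chunks. "start" and "end" are hypothetical offsets;
 * consuming the data is elided.
 *
 *	size_t bufsize = mtd->erasesize;	// ideal size, may be reduced
 *	void *buf = mtd_kmalloc_up_to(mtd, &bufsize);
 *	loff_t pos;
 *
 *	if (!buf)
 *		return -ENOMEM;
 *
 *	for (pos = start; pos < end; pos += bufsize) {
 *		size_t retlen, chunk = min_t(size_t, bufsize, end - pos);
 *
 *		if (mtd_read(mtd, pos, chunk, &retlen, buf))
 *			break;
 *		// ... consume retlen bytes from buf ...
 *	}
 *	kfree(buf);
 */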
2146
2147#ifdef CONFIG_PROC_FS
2148
2149
2150
2151
2152static int mtd_proc_show(struct seq_file *m, void *v)
2153{
2154 struct mtd_info *mtd;
2155
2156 seq_puts(m, "dev: size erasesize name\n");
2157 mutex_lock(&mtd_table_mutex);
2158 mtd_for_each_device(mtd) {
2159 seq_printf(m, "mtd%d: %8.8llx %8.8x \"%s\"\n",
2160 mtd->index, (unsigned long long)mtd->size,
2161 mtd->erasesize, mtd->name);
2162 }
2163 mutex_unlock(&mtd_table_mutex);
2164 return 0;
2165}
2166#endif
2167
2168
2169
2170
2171static struct backing_dev_info * __init mtd_bdi_init(char *name)
2172{
2173 struct backing_dev_info *bdi;
2174 int ret;
2175
2176 bdi = bdi_alloc(NUMA_NO_NODE);
2177 if (!bdi)
2178 return ERR_PTR(-ENOMEM);
2179 bdi->ra_pages = 0;
2180 bdi->io_pages = 0;
2181
2182
2183
2184
2185
2186 ret = bdi_register(bdi, "%.28s-0", name);
2187 if (ret)
2188 bdi_put(bdi);
2189
2190 return ret ? ERR_PTR(ret) : bdi;
2191}
2192
2193static struct proc_dir_entry *proc_mtd;
2194
2195static int __init init_mtd(void)
2196{
2197 int ret;
2198
2199 ret = class_register(&mtd_class);
2200 if (ret)
2201 goto err_reg;
2202
2203 mtd_bdi = mtd_bdi_init("mtd");
2204 if (IS_ERR(mtd_bdi)) {
2205 ret = PTR_ERR(mtd_bdi);
2206 goto err_bdi;
2207 }
2208
2209 proc_mtd = proc_create_single("mtd", 0, NULL, mtd_proc_show);
2210
2211 ret = init_mtdchar();
2212 if (ret)
2213 goto out_procfs;
2214
2215 dfs_dir_mtd = debugfs_create_dir("mtd", NULL);
2216
2217 return 0;
2218
2219out_procfs:
2220 if (proc_mtd)
2221 remove_proc_entry("mtd", NULL);
2222 bdi_put(mtd_bdi);
2223err_bdi:
2224 class_unregister(&mtd_class);
2225err_reg:
2226 pr_err("Error registering mtd class or bdi: %d\n", ret);
2227 return ret;
2228}
2229
2230static void __exit cleanup_mtd(void)
2231{
2232 debugfs_remove_recursive(dfs_dir_mtd);
2233 cleanup_mtdchar();
2234 if (proc_mtd)
2235 remove_proc_entry("mtd", NULL);
2236 class_unregister(&mtd_class);
2237 bdi_put(mtd_bdi);
2238 idr_destroy(&mtd_idr);
2239}
2240
2241module_init(init_mtd);
2242module_exit(cleanup_mtd);
2243
2244MODULE_LICENSE("GPL");
2245MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
2246MODULE_DESCRIPTION("Core MTD registration and access routines");
2247